pax_global_header00006660000000000000000000000064134247203500014512gustar00rootroot0000000000000052 comment=145dba9dc3b019eef99d3acea3019346c4481c73 libkqueue-2.3.1/000077500000000000000000000000001342472035000135035ustar00rootroot00000000000000libkqueue-2.3.1/.gitignore000066400000000000000000000004601342472035000154730ustar00rootroot00000000000000.vagrant/ config.h *.pc *.log CMakeCache.txt CMakeFiles/ CPackConfig.cmake CPackSourceConfig.cmake CTestTestfile.cmake Makefile Testing/ _CPack_Packages/ cmake_install.cmake install_manifest*.txt kqueue-*.deb libkqueue.so test/CMakeCache.txt test/CMakeFiles/ test/cmake_install.cmake test/libkqueue-test libkqueue-2.3.1/.travis.yml000066400000000000000000000011321342472035000156110ustar00rootroot00000000000000language: c compiler: - clang - gcc dist: trusty env: global: - M_PERTURB=0x42 addons: apt: packages: - doxygen before_install: - $CC --version before_script: # Cmake that ships with Trusty (v3.2.2) is too old, we need at least v3.4.3 - mkdir /tmp/cmake && wget -O cmake.sh https://cmake.org/files/v3.10/cmake-3.10.0-Linux-x86_64.sh && chmod +x cmake.sh && ./cmake.sh --prefix=/tmp/cmake --skip-license - /tmp/cmake/bin/cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_INSTALL_LIBDIR=lib -DENABLE_TESTING=YES ./ - make -j2 - /tmp/cmake/bin/cpack -G DEB script: - make test libkqueue-2.3.1/BUGS000066400000000000000000000046571342472035000142020ustar00rootroot00000000000000 * On Windows, you need to supply -DMAKE_STATIC in CFLAGS when building the static library. This does not apply when using cmake. * When passing a knote pointer to the kernel, the reference count of the knote structure should be incremented. Conversely, when the pointer has been returned from the kernel and the event unregistered from the kernel, the reference count should be decremented. * Some functions should crash instead of silently printing a debug message.. for example, knote_release(). * knote_get_by_ident uses 'short' for the ident, but the actual datatype is 'uintptr_t'. * need to uninitialize library after fork() using pthread_atfork() * Solaris unit test failure. LD_LIBRARY_PATH="..:/usr/sfw/lib/64" ./kqtest 1: test_peer_close_detection() 2: test_kqueue() 3: test_kevent_socket_add() 4: test_kevent_socket_del() 5: test_kevent_socket_add_without_ev_add() 6: test_kevent_socket_get() [read.c:84]: Unexpected event:_test_no_kevents(): [ident=7, filter=-1, flags = 1 (EV_ADD), fflags = 0, data=0, udata=fffffd7fff08c6b4]: Error 0 * There are a number of stub functions that silently fail or succeed. These need to be cleaned up; at a minimum, they should emit very loud debugging output saying "FIXME -- UNIMPLEMENTED". $ grep STUB src/*/*.c src/linux/proc.c: return (-1); /*STUB*/ src/linux/proc.c: return (0); /* STUB */ src/linux/proc.c: return (0); /* STUB */ src/linux/proc.c: return (0); /* STUB */ src/linux/proc.c: return (0); /* STUB */ src/linux/proc.c: return (0); /* STUB */ src/linux/read.c: return (-1); /* STUB */ src/linux/timer.c: return (0); /* STUB */ src/linux/vnode.c: return (-1); /* FIXME - STUB */ src/linux/write.c: return (-1); /* STUB */ src/posix/timer.c: return (-1); /* STUB */ src/solaris/socket.c: return (-1); /* STUB */ src/solaris/timer.c: return (-1); /* STUB */ src/windows/read.c: return (-1); /* STUB */ src/windows/timer.c: return (0); /* STUB */ * kqueue() should defer thread cancellation until the end. * kevent() should defer thread cancellation and call pthread_testcancel() before and after the call to kevent_wait(). 
This may require changing the way that EINTR is handled, to make sure that the EINTR is propagated up the call stack to kevent(). libkqueue-2.3.1/CMakeLists.txt000066400000000000000000000203571342472035000162520ustar00rootroot00000000000000# # Copyright (c) 2011 Marius Zwicker # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # # Note: In order for RPM packaging to work correctly version >= 3.8 is required cmake_minimum_required(VERSION 3.8.0) cmake_policy(SET CMP0063 OLD) project(libkqueue VERSION 2.3.0 LANGUAGES C) enable_testing() set(CMAKE_C_VISIBILITY_PRESET hidden) set(CMAKE_POSITION_INDEPENDENT_CODE ON) set(CMAKE_C_STANDARD_REQUIRED ON) set(CMAKE_C_STANDARD 99) set(CMAKE_BUILD_TYPE RelWithDebInfo) option(STATIC_KQUEUE "build libkqueue as static library" OFF) set(CMAKE_THREAD_PREFER_PTHREAD TRUE) set(THREADS_PREFER_PTHREAD_FLAG TRUE) find_package(Threads REQUIRED) include(CheckIncludeFiles) include(CheckSymbolExists) include(GNUInstallDirs) check_include_files(sys/signalfd.h HAVE_SYS_SIGNALFD_H) check_include_files(sys/timerfd.h HAVE_SYS_TIMERFD_H) check_include_files(sys/eventfd.h HAVE_SYS_EVENTFD_H) if(ENABLE_TESTING) check_include_files(err.h HAVE_ERR_H) endif() check_symbol_exists(EPOLLRDHUP sys/epoll.h HAVE_EPOLLRDHUP) check_symbol_exists(NOTE_TRUNCATE sys/event.h HAVE_NOTE_TRUNCATE) if(ENABLE_TESTING) check_symbol_exists(NOTE_REVOKE sys/event.h HAVE_NOTE_REVOKE) endif() set(CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE) check_symbol_exists(ppoll poll.h HAVE_DECL_PPOLL) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/config.h.in ${CMAKE_CURRENT_BINARY_DIR}/config.h) include_directories(${CMAKE_CURRENT_BINARY_DIR}) set(LIBKQUEUE_HEADERS include/sys/event.h) set(LIBKQUEUE_SOURCES src/common/alloc.h src/common/debug.h src/common/filter.c src/common/kevent.c src/common/knote.c src/common/kqueue.c src/common/map.c src/common/private.h src/common/queue.h src/common/tree.h) if(CMAKE_SYSTEM_NAME MATCHES Windows) list(APPEND LIBKQUEUE_SOURCES src/windows/platform.c src/windows/platform.h src/windows/read.c src/windows/stdint.h src/windows/timer.c src/windows/user.c) elseif(CMAKE_SYSTEM_NAME MATCHES "(Solaris|SunOS)") list(APPEND LIBKQUEUE_SOURCES src/posix/platform.c src/posix/platform.h src/solaris/platform.c src/solaris/platform.h src/solaris/signal.c src/solaris/socket.c src/solaris/timer.c src/solaris/user.c) elseif(CMAKE_SYSTEM_NAME STREQUAL Linux) list(APPEND LIBKQUEUE_SOURCES src/posix/platform.c src/posix/platform.h src/linux/platform.c src/linux/platform.h src/linux/read.c src/linux/signal.c src/linux/timer.c src/linux/user.c src/linux/vnode.c src/linux/write.c) else() message(FATAL_ERROR "unsupported host os") endif() source_group(includes FILES ${LIBKQUEUE_HEADERS}) source_group(src FILES ${LIBKQUEUE_SOURCES}) if(STATIC_KQUEUE) set(LIBRARY_TYPE STATIC) else() set(LIBRARY_TYPE SHARED) endif() add_library(kqueue ${LIBRARY_TYPE} 
${LIBKQUEUE_SOURCES} ${LIBKQUEUE_HEADERS}) set_target_properties(kqueue PROPERTIES DEBUG_POSTFIX "d") if(WIN32) target_compile_definitions(kqueue PRIVATE _USRDLL;_WINDLL) target_compile_definitions(kqueue PRIVATE _CRT_SECURE_NO_WARNINGS) else() target_compile_definitions(kqueue PRIVATE _XOPEN_SOURCE=600) endif() target_include_directories(kqueue PRIVATE include) if(NOT WIN32) target_include_directories(kqueue PRIVATE src/common) endif() if(CMAKE_C_COMPILER_ID MATCHES GNU) target_compile_options(kqueue PRIVATE -Wall -Werror) endif() if(MINGW AND CMAKE_C_COMPILER_ID MATCHES GNU) target_compile_options(kqueue PRIVATE -march=i486) endif() if(WIN32) target_link_libraries(kqueue PRIVATE Ws2_32) endif() target_link_libraries(kqueue PRIVATE Threads::Threads) if(ENABLE_TESTING) add_subdirectory(test) endif() configure_file("${CMAKE_SOURCE_DIR}/libkqueue.pc.in" "${CMAKE_BINARY_DIR}/libkqueue.pc" @ONLY) # # Avoid conflicts by not trying to create existing directories # set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION ${CMAKE_INSTALL_FULL_MANDIR} ${CMAKE_INSTALL_FULL_MANDIR}/man2 ${CMAKE_INSTALL_FULL_INCLUDEDIR}/sys) install(FILES "include/sys/event.h" DESTINATION "${CMAKE_INSTALL_FULL_INCLUDEDIR}/sys" COMPONENT headers) install(TARGETS kqueue DESTINATION "${CMAKE_INSTALL_FULL_LIBDIR}" COMPONENT libraries) install(FILES kqueue.2 DESTINATION "${CMAKE_INSTALL_FULL_MANDIR}/man2" COMPONENT man) install(FILES "${CMAKE_BINARY_DIR}/libkqueue.pc" DESTINATION "${CMAKE_INSTALL_FULL_LIBDIR}/pkgconfig" COMPONENT pkgconfig) set(CPACK_PACKAGE_NAME ${PROJECT_NAME}) set(CPACK_PACKAGE_VERSION ${PROJECT_VERSION}) set(CPACK_PACKAGE_VERSION_MAJOR ${PROJECT_VERSION_MAJOR}) set(CPACK_PACKAGE_VERSION_MINOR ${PROJECT_VERSION_MINOR}) set(CPACK_PACKAGE_VERSION_PATCH ${PROJECT_VERSION_PATCH}) set(CPACK_DEBIAN_PACKAGE_MAINTAINER "Mark Heily ") # Specify the location of source files to be installed by the debuginfo package set(CPACK_BUILD_SOURCE_DIRS ${CMAKE_SOURCE_DIR}/src) # Group the components into packages # - devel contains header files and man pages and becomes libkqueue-devel # - main contains everything else and becomes libkqueue set(CPACK_COMPONENT_HEADERS_GROUP "devel") set(CPACK_COMPONENT_LIBRARIES_GROUP "main") set(CPACK_COMPONENT_MAN_GROUP "devel") set(CPACK_COMPONENT_PKGCONFIG_GROUP "main") set(CPACK_COMPONENT_HEADERS_DEPENDS "libraries") # # Metadata common to all packaging systems # set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Userspace implementation of the kqueue event notification mechanism") set(CPACK_COMPONENT_MAIN_DESCRIPTION "A user space implementation of the kqueue(2) kernel event notification mechanism. 
libkqueue acts as a translator between the kevent structure and the native kernel facilities.") set(CPACK_COMPONENT_DEVEL_DESCRIPTION "Development files for ${PROJECT_NAME}-${PROJECT_VERSION}") # # RPM Specific configuration # set(CPACK_RPM_PACKAGE_LICENSE "MIT and BSD") set(CPACK_RPM_PACKAGE_URL "http://sourceforge.net/p/libkqueue/wiki/Home/") set(CPACK_RPM_MAIN_PACKAGE_GROUP "System Environment/Libraries") set(CPACK_RPM_MAIN_PACKAGE_DESCRIPTION ${CPACK_COMPONENT_MAIN_DESCRIPTION}) set(CPACK_RPM_DEVEL_PACKAGE_GROUP "Development/Libraries") set(CPACK_RPM_DEVEL_PACKAGE_SUMMARY ${CPACK_COMPONENT_DEVEL_DESCRIPTION}) set(CPACK_RPM_DEVEL_PACKAGE_REQUIRES "${CPACK_PACKAGE_NAME} = %{version}-%{release}") set(CPACK_RPM_MAIN_COMPONENT "main") # Nominate the component to be packed into libkqueue set(CPACK_RPM_COMPONENT_INSTALL ON) # Enable component based packaging (generate multiple packages) set(CPACK_RPM_FILE_NAME RPM-DEFAULT) # Use rpmbuild's package naming scheme # # Build a debuginfo package containing the source an debugging symbols # set(CPACK_RPM_MAIN_DEBUGINFO_PACKAGE ON) set(CPACK_RPM_DEBUGINFO_SINGLE_PACKAGE ON) set(CPACK_RPM_MAIN_BUILD_SOURCE_DIRS_PREFIX /usr/src/debug/${PROJECT_NAME}-${PROJECT_VERSION}) # # DEB Specific configuration # set(CPACK_DEBIAN_MAIN_PACKAGE_NAME "${CPACK_PACKAGE_NAME}") set(CPACK_DEBIAN_MAIN_PACKAGE_SECTION "libs") set(CPACK_DEBIAN_DEVEL_PACKAGE_NAME "${CPACK_PACKAGE_NAME}-dev") set(CPACK_DEBIAN_DEVEL_PACKAGE_SECTION "libdevel") set(CPACK_DEBIAN_DEVEL_PACKAGE_DEPENDS "${CPACK_PACKAGE_NAME} (= ${PROJECT_VERSION})") set(CPACK_DEB_COMPONENT_INSTALL ON) # Enable component based packaging (generate multiple packages) set(CPACK_DEBIAN_FILE_NAME DEB-DEFAULT) # Use default Debian package naming scheme include(CPack) libkqueue-2.3.1/ChangeLog000066400000000000000000000224231342472035000152600ustar00rootroot000000000000002019-01-31 v2.3.1 ------------------------------------------------------------------------ * Socket errors support * Packaging updates * Put the man page somewhere it can be found * Move man page to development package * For Debian, put headers in a separate dev package * Fix package description for Debian packaging * knote: rethink KNFL_SOCKET flag knote: support more kn_flags and socket errors * fix naming for write evfilt callbacks 2018-11-25 v2.3.0 ------------------------------------------------------------------------ * kevent: refactor to remove goto statement (Credit: Cameron Nemo) * Multiple fixes for compilation issues with GCC 8.1 (Credit: Arran Cudbard-Bell) * Add a monitoring thread that takes care of cleaning up closed kqueues (Credit: Philippe Wooding) * Multiple bug fixes for leaking descriptors in kn_delete() (Credit: Philippe Wooding) * Fix build errors on Linux systems that do not use glibc (Credit: maxice8) 2017-11-26 v2.2.0 ------------------------------------------------------------------------ * Fixed: the EOF flag should only be set on stream sockets with a zero length r ead Author: Arran Cudbard-Bell * Fix to deal with inotify events containing filenames correctly. Author: Arran Cudbard-Bell * Fix to allow libpcap FDs to be used. Author: Philippe Wooding * Switch to CMake as the build and packaging system. 2016-07-24 v2.1.0 ------------------------------------------------------------------------ * Fix an endianness issue when calling ioctl on Linux. (Credit: Vivian Kong) * Allow tests to be built from separate build directory. (Credit: Chris Bailey) * Add support for kevent64(2) timers from Darwin. 
(Credit: Hubertus Franke) * Set EPOLLONESHOT for EPOLL_CTL_ADD after knote creation. (Credit: Ian Partridge) * Add configure arg to disable install and build noinst ltlibrary. (Credit: Ian Partridge) * Allow libkqueue to be used as a submodule within another build process. (Credit: Ian Partridge) * Fix a build failure on Linux/AArch64 using Ubuntu 16.04. (Credit: Galen Rhodes) 2016-02-06 v2.0.4 ------------------------------------------------------------------------ * Fix incorrect boolean logic in src/linux/read.c (Credit: marcos69, closes #5) * Fix a build failure on mips64el. (Credit: YunQiang Su, from Debian Bug #754376) 2014-03-09 v2.0.3 ------------------------------------------------------------------------ * Fix the generation of libkqueue.pc 2014-02-23 v2.0.2 ------------------------------------------------------------------------ * Switch from SVN to Git * Switch hosting providers to Github * Switch to autoconf/automake 2013-05-08 v2.0.1 r646 ------------------------------------------------------------------------ * Improvements to the RPM package spec (Credit: Eric Wong) 2013-04-29 v2.0 r639 ------------------------------------------------------------------------ * Use ppoll(2) where possible to improve scalability. (Credit: Eric Wong) * Optimize kevent() when it is called with a zero timeout. (Credit: Eric Wong) * Avoid calling getsockopt() on file descriptors that are not sockets. (Credit: Eric Wong) * Call epoll_wait after linux_kevent_wait_hires() (Credit: Eric Wong) * Detect regular files correctly (Credit: Eric Wong) * Ensure that calling close() on a file descriptor will remove any kevents that reference the descriptor. [merged from ^/branches/stable@551] * Remove the SERIALIZE_KEVENT macro, and always protect kevent_copyin() and kevent_copyout() with a mutex. * Remove fine-grained locking at the knote level. * Add a counter that increments on each each kevent() call. When printing debug information within kevent(), display the value of the counter. This will be helpful when debugging a multithreaded program that may have multiple kevent() calls executing in parallel. (Credit: Julien Blache) 2010-09-18 v1.0 r344 ------------------------------------------------------------------------ * Support older Linux kernels that do not have the EPOLLRDHUP flag. * Add a portable implementation of the EVFILT_TIMER filter. * Add Solaris to the list of supported platforms. * Fixed the 'make rpm' target to work on CentOS 5. * Modified the manpage to remove unimplemented features. 2010-08-05 v0.9.3 r309 ------------------------------------------------------------------------ * Fix a typo in kevent_copyin() that caused EV_RECEIPT to set the data field incorrectly in some cases. (Credit to Julien Blache for discovering and fixing this bug) 2010-08-05 v0.9.2 r289 ------------------------------------------------------------------------ * Fix some build failures on 32-bit platforms related to the debugging codepaths being enabled by default. 2010-08-04 v0.9.1 r286 ------------------------------------------------------------------------ * Prevent dumping of EVFILT_VNODE debugging information to STDOUT. * Fix the 'make clean' target in the testsuite. 2010-08-01 v0.9 r285 ------------------------------------------------------------------------ * Set kevent.data = 1 for passive sockets that have at least one pending connection. (Credit to Julien Blache for finding and researching this bug) * Fix various compilation errors under Solaris. 
(Credit to Joakim Johansson for testing and providing patches) * Use the KQUEUE_DEBUG environment variable to turn on debugging output. 2010-07-21 v0.8 r264 ------------------------------------------------------------------------ * Fix a bug that prevented a knote with the EV_DISPATCH flag from being re-enabled after an event had been triggered. (Credit to Julien Blache for finding and researching this bug) 2010-06-08 v0.7 r248 ------------------------------------------------------------------------ * Add Debian packaging to the ports/ directory and improve the 'make deb' target. * Set the library soname version. * Switch from -fPIC to -fpic as the default in CFLAGS. 2010-03-28 v0.6 r238 ------------------------------------------------------------------------ * Experimental Linux kernel module. * Implement knote modification for signals. * Implement POSIX signal.c 2010-02-09 v0.5 r200 ------------------------------------------------------------------------ * Prevent namespace pollution by hiding all ELF symbols except for kqueue() and kevent(). * Add reference counting to the garbage collection mechanism so that a kqueue object is never destroyed while multiple threads are using it. * Improve scalability by using red-black trees instead of linked lists. * Refactor the internal API to promote modularity and code reuse. Five methods are added to each filter: create, modify, delete, enable, disable. These replace the copyin() method, which was overly complicated. * Remove the fine-grained locking at the filter level, and replace it with coarse locking inside kevent(). This simplifys the locking implementation and provides a stronger guarantee of reentrancy. * Initial attempt at writing a Linux kernel module. It fails to link because sys_epoll_create() and other event-related syscalls are not available to kernelspace (?). Need to ask LKML for guidance. * Make unit tests threadsafe and created a stresstest which runs the unit tests in parallel. * Use helper functions to reduce the amount of duplicate code in the unit tests. 2009-12-26 v0.4 r133 ------------------------------------------------------------------------ * Incomplete and experimental support for Solaris added. * Lots of work on the test suite. * Replace the buggy GC thread with an event-based alternative. * Do not implicitly set EV_CLEAR in the EVFILT_USER filter. * Adjust the eventlist when EV_RECEIPT causes it to be modified. 2009-11-10 v0.3 r84 ------------------------------------------------------------------------ * The EVFILT_USER filter has been implemented, but needs additional testing. * The EVFILT_PROC filter is partially implemented on Linux, but is currently broken. * The unit tests have been moved to a separate subdirectory and now function under OpenBSD 4.4 using the native kqueue(2) and kevent(2) system calls. * The kqueue_free() function has been removed. * A helper thread performs garbage collection when close(2) is called on the file descriptor returned by kqueue(). * All symbols in that are not implemented are now undefined. * Major internal reorganization of the source tree. * A copy-and-paste error in vnode.c has been fixed. * The pthreads library is now required. 2009-11-07 v0.2 r59 ------------------------------------------------------------------------ * Implement EVFILT_TIMER on Linux. * Fix another 'make install' problem reported by Mario Schwalbe. * Do not link the test program with the pthreads library. * pkg-config no longer requires linking with -lpthread and -lrt. 
2009-11-05 v0.1 r49 ------------------------------------------------------------------------ * Initial stable release. libkqueue-2.3.1/LICENSE000066400000000000000000000040511342472035000145100ustar00rootroot00000000000000== all source == Copyright (c) 2009 Mark Heily Permission to use, copy, modify, and distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. == event.h == Copyright (c) 1999,2000,2001 Jonathan Lemon All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. libkqueue-2.3.1/README.md000066400000000000000000000044051342472035000147650ustar00rootroot00000000000000libkqueue ========= [![Travis CI build status][BuildStatus]][BuildStatusLink] A user space implementation of the kqueue(2) kernel event notification mechanism libkqueue acts as a translator between the kevent structure and the native kernel facilities on Linux, Android, Solaris, and Windows. Supported Event Types --------------------- * vnode * socket * proc * user * timer Installation - Linux, Solaris ----------------------------- cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_INSTALL_LIBDIR=lib make make install Installation - Red Hat ---------------------- cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_INSTALL_LIBDIR=lib make cpack -G RPM Installation - Debian --------------------- cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_INSTALL_LIBDIR=lib make cpack -G DEB Installation - Android ---------------------- cmake -G "Unix Makefiles" -DCMAKE_C_COMPILER= -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_INSTALL_LIBDIR=lib make Windows (Visual Studio Project) ------------------------------- cmake -G "Visual Studio 14 2015" cmake --build . 
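Regardless of which of the build recipes above or below is used, applications consume the same `kqueue()`/`kevent()` interface declared in `<sys/event.h>`. Below is a minimal sketch (the file name `timer.c` is illustrative; compile and link flags are listed under "Building Applications" below) that registers a one-shot 1000 ms `EVFILT_TIMER` and waits for it to fire:

    #include <sys/event.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        struct kevent change, result;
        int kq;

        kq = kqueue();                 /* create the event queue */
        if (kq < 0) {
            perror("kqueue");
            return EXIT_FAILURE;
        }

        /* Identifier 1, one-shot timer, period of 1000 ms (the default unit). */
        EV_SET(&change, 1, EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, 1000, NULL);

        /* Apply the change and block until one event is returned. */
        if (kevent(kq, &change, 1, &result, 1, NULL) != 1) {
            perror("kevent");
            return EXIT_FAILURE;
        }

        printf("timer %u fired\n", (unsigned) result.ident);
        return EXIT_SUCCESS;
    }
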
Windows (clang/C2) (Visual Studio Project) ------------------------------------------ cmake -G "Visual Studio 14 2015" -T "LLVM-vs2014" cmake --build . Xcode (project) --------------- cmake -G "Xcode" Running Unit Tests ------------------ cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_INSTALL_LIBDIR=lib -DENABLE_TESTING=YES make make test Building Applications --------------------- CFLAGS += -I/usr/include/kqueue LDFLAGS += -lkqueue Tutorials & Examples -------------------- [Kqueues for Fun and Profit](http://doc.geoffgarside.co.uk/kqueue) [Handling TCP Connections with Kqueue Event Notification](http://eradman.com/posts//kqueue-tcp.html) Releases History ---------------- 2.0 add support for Android _2013-04-29_ 1.0 stable relesae for Linux, Solaris, and Windows _2010-09-18_ [BuildStatus]: https://travis-ci.org/mheily/libkqueue.svg?branch=master "Travis CI status" [BuildStatusLink]: https://travis-ci.org/mheily/libkqueue libkqueue-2.3.1/TODO000066400000000000000000000011411342472035000141700ustar00rootroot00000000000000 * Create a FILTER_DECL() macro that initializes the 'struct filter' object, with all members properly initialized. Then -Wno-missing-field-initializers can be removed from CFLAGS. * Implement the knote_modify() hook for all filters. * Add a dbg_printf() statement within kevent_wait() to report the value of the timeout. * Fix the crasher w/ corruption in test/vnode.c * Add the kevent64() syscall as implemented on MacOS X. This guarantees that 64-bit values can be used in the 'udata' field on 32-bit platforms. * Check other filters for the EV_DISPATCH bug that was fixed in r252. libkqueue-2.3.1/Vagrantfile000066400000000000000000000057141342472035000156770ustar00rootroot00000000000000# -*- mode: ruby -*- # vi: set ft=ruby : # All Vagrant configuration is done below. The "2" in Vagrant.configure # configures the configuration version (we support older styles for # backwards compatibility). Please don't change it unless you know what # you're doing. Vagrant.configure("2") do |config| # The most common configuration options are documented and commented below. # For a complete reference, please see the online documentation at # https://docs.vagrantup.com. # Every Vagrant development environment requires a box. You can search for # boxes at https://atlas.hashicorp.com/search. config.vm.box = "ubuntu/xenial64" # Disable automatic box update checking. If you disable this, then # boxes will only be checked for updates when the user runs # `vagrant box outdated`. This is not recommended. # config.vm.box_check_update = false # Create a forwarded port mapping which allows access to a specific port # within the machine from a port on the host machine. In the example below, # accessing "localhost:8080" will access port 80 on the guest machine. # config.vm.network "forwarded_port", guest: 80, host: 8080 # Create a private network, which allows host-only access to the machine # using a specific IP. # config.vm.network "private_network", ip: "192.168.33.10" # Create a public network, which generally matched to bridged network. # Bridged networks make the machine appear as another physical device on # your network. # config.vm.network "public_network" # Share an additional folder to the guest VM. The first argument is # the path on the host to the actual folder. The second argument is # the path on the guest to mount the folder. And the optional third # argument is a set of non-required options. 
# config.vm.synced_folder "../data", "/vagrant_data" # Provider-specific configuration so you can fine-tune various # backing providers for Vagrant. These expose provider-specific options. # Example for VirtualBox: # # config.vm.provider "virtualbox" do |vb| # # Display the VirtualBox GUI when booting the machine # vb.gui = true # # # Customize the amount of memory on the VM: # vb.memory = "1024" # end # # View the documentation for the provider you are using for more # information on available options. # Define a Vagrant Push strategy for pushing to Atlas. Other push strategies # such as FTP and Heroku are also available. See the documentation at # https://docs.vagrantup.com/v2/push/atlas.html for more information. # config.push.define "atlas" do |push| # push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME" # end # Enable provisioning with a shell script. Additional provisioners such as # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the # documentation for more information about their specific syntax and use. config.vm.provision "shell", inline: <<-SHELL apt-get update apt-get install -y gcc make cmake SHELL end libkqueue-2.3.1/config.h.in000066400000000000000000000004041342472035000155240ustar00rootroot00000000000000 #cmakedefine01 HAVE_SYS_SIGNALFD_H #cmakedefine01 HAVE_SYS_TIMERFD_H #cmakedefine01 HAVE_SYS_EVENTFD_H #cmakedefine01 HAVE_ERR_H #cmakedefine01 HAVE_EPOLLRDHUP #cmakedefine01 HAVE_NOTE_TRUNCATE #cmakedefine01 HAVE_DECL_PPOLL #cmakedefine01 HAVE_NOTE_REVOKE libkqueue-2.3.1/include/000077500000000000000000000000001342472035000151265ustar00rootroot00000000000000libkqueue-2.3.1/include/sys/000077500000000000000000000000001342472035000157445ustar00rootroot00000000000000libkqueue-2.3.1/include/sys/event.h000066400000000000000000000164561342472035000172520ustar00rootroot00000000000000/*- * Copyright (c) 2009 Mark Heily * Copyright (c) 1999,2000,2001 Jonathan Lemon * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD SVN Revision 197533$ */ #ifndef _SYS_EVENT_H_ #define _SYS_EVENT_H_ #include #ifdef __KERNEL__ #define intptr_t long #else #include #if defined(_WIN32) && _MSC_VER < 1600 && !defined(__MINGW32__) # include "../../src/windows/stdint.h" #else # include #endif #define LIBKQUEUE 1 #endif struct timespec; #define EVFILT_READ (-1) #define EVFILT_WRITE (-2) #define EVFILT_AIO (-3) /* attached to aio requests */ #define EVFILT_VNODE (-4) /* attached to vnodes */ #define EVFILT_PROC (-5) /* attached to struct proc */ #define EVFILT_SIGNAL (-6) /* attached to struct proc */ #define EVFILT_TIMER (-7) /* timers */ #define EVFILT_NETDEV (-8) /* network devices */ #define EVFILT_FS (-9) /* filesystem events */ #define EVFILT_LIO (-10) /* attached to lio requests */ #define EVFILT_USER (-11) /* User events */ #define EVFILT_SYSCOUNT 11 #define EV_SET(kevp_, a, b, c, d, e, f) do { \ struct kevent *kevp = (kevp_); \ (kevp)->ident = (a); \ (kevp)->filter = (b); \ (kevp)->flags = (c); \ (kevp)->fflags = (d); \ (kevp)->data = (e); \ (kevp)->udata = (f); \ } while(0) struct kevent { uintptr_t ident; /* identifier for this event */ short filter; /* filter for event */ unsigned short flags; unsigned int fflags; intptr_t data; void *udata; /* opaque user data identifier */ }; /* actions */ #define EV_ADD 0x0001 /* add event to kq (implies enable) */ #define EV_DELETE 0x0002 /* delete event from kq */ #define EV_ENABLE 0x0004 /* enable event */ #define EV_DISABLE 0x0008 /* disable event (not reported) */ /* flags */ #define EV_ONESHOT 0x0010 /* only report one occurrence */ #define EV_CLEAR 0x0020 /* clear event state after reporting */ #define EV_RECEIPT 0x0040 /* force EV_ERROR on success, data=0 */ #define EV_DISPATCH 0x0080 /* disable event after reporting */ #define EV_SYSFLAGS 0xF000 /* reserved by system */ #define EV_FLAG1 0x2000 /* filter-specific flag */ /* returned values */ #define EV_EOF 0x8000 /* EOF detected */ #define EV_ERROR 0x4000 /* error, data contains errno */ /* * data/hint flags/masks for EVFILT_USER * * On input, the top two bits of fflags specifies how the lower twenty four * bits should be applied to the stored value of fflags. * * On output, the top two bits will always be set to NOTE_FFNOP and the * remaining twenty four bits will contain the stored fflags value. */ #define NOTE_FFNOP 0x00000000 /* ignore input fflags */ #define NOTE_FFAND 0x40000000 /* AND fflags */ #define NOTE_FFOR 0x80000000 /* OR fflags */ #define NOTE_FFCOPY 0xc0000000 /* copy fflags */ #define NOTE_FFCTRLMASK 0xc0000000 /* masks for operations */ #define NOTE_FFLAGSMASK 0x00ffffff #define NOTE_TRIGGER 0x01000000 /* Cause the event to be triggered for output. 
*/ /* * data/hint flags for EVFILT_{READ|WRITE} */ #define NOTE_LOWAT 0x0001 /* low water mark */ #undef NOTE_LOWAT /* Not supported on Linux */ /* * data/hint flags for EVFILT_VNODE */ #define NOTE_DELETE 0x0001 /* vnode was removed */ #define NOTE_WRITE 0x0002 /* data contents changed */ #define NOTE_EXTEND 0x0004 /* size increased */ #define NOTE_ATTRIB 0x0008 /* attributes changed */ #define NOTE_LINK 0x0010 /* link count changed */ #define NOTE_RENAME 0x0020 /* vnode was renamed */ #define NOTE_REVOKE 0x0040 /* vnode access was revoked */ #undef NOTE_REVOKE /* Not supported on Linux */ /* * data/hint flags for EVFILT_PROC */ #define NOTE_EXIT 0x80000000 /* process exited */ #define NOTE_FORK 0x40000000 /* process forked */ #define NOTE_EXEC 0x20000000 /* process exec'd */ #define NOTE_PCTRLMASK 0xf0000000 /* mask for hint bits */ #define NOTE_PDATAMASK 0x000fffff /* mask for pid */ /* additional flags for EVFILT_PROC */ #define NOTE_TRACK 0x00000001 /* follow across forks */ #define NOTE_TRACKERR 0x00000002 /* could not track child */ #define NOTE_CHILD 0x00000004 /* am a child process */ /* * data/hint flags for EVFILT_NETDEV */ #define NOTE_LINKUP 0x0001 /* link is up */ #define NOTE_LINKDOWN 0x0002 /* link is down */ #define NOTE_LINKINV 0x0004 /* link state is invalid */ /* KLUDGE: This is from on FreeBSD and is used by the EVFILT_FS filter. */ /* vfsquery flags */ #define VQ_NOTRESP 0x0001 /* server down */ #define VQ_NEEDAUTH 0x0002 /* server bad auth */ #define VQ_LOWDISK 0x0004 /* we're low on space */ #define VQ_MOUNT 0x0008 /* new filesystem arrived */ #define VQ_UNMOUNT 0x0010 /* filesystem has left */ #define VQ_DEAD 0x0020 /* filesystem is dead, needs force unmount */ #define VQ_ASSIST 0x0040 /* filesystem needs assistance from external program */ #define VQ_NOTRESPLOCK 0x0080 /* server lockd down */ /* * data/hint flags for EVFILT_TIMER as suported and defined in kevent64 */ #define NOTE_SECONDS 0x0001 /* time specified in seconds */ #define NOTE_USECONDS 0x0002 /* time specified in micro seconds */ #define NOTE_NSECONDS 0x0004 /* time specified in nano seconds */ #define NOTE_ABSOLUTE 0x0008 /* data is an absolute timeout */ #ifndef __KERNEL__ #ifdef __cplusplus extern "C" { #endif #ifdef _WIN32 #if (_MSC_VER < 1900) struct timespec { time_t tv_sec; long tv_nsec; }; #else #include #endif __declspec(dllexport) int kqueue(void); __declspec(dllexport) int kevent(int kq, const struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); #ifdef MAKE_STATIC __declspec(dllexport) int libkqueue_init(); #endif #else int kqueue(void); int kevent(int kq, const struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); #ifdef MAKE_STATIC int libkqueue_init(); #endif #endif #ifdef __cplusplus } #endif #endif /* !__KERNEL__* */ #endif /* !_SYS_EVENT_H_ */ libkqueue-2.3.1/kern/000077500000000000000000000000001342472035000144425ustar00rootroot00000000000000libkqueue-2.3.1/kern/Makefile000066400000000000000000000022461342472035000161060ustar00rootroot00000000000000# # Copyright (c) 2010 Mark Heily # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # obj-m = kqueue.o KVERSION = $(shell uname -r) all: kqueue.ko modtest kqueue.ko: kqueue.c make -C /lib/modules/`uname -r`/build M=$(PWD) modules clean: make -C /lib/modules/`uname -r`/build M=$(PWD) clean rm -f modtest update: all rmmod kqueue insmod ./kqueue.ko sleep 2 chmod 777 /dev/kqueue modtest: test.c gcc -o modtest -Wall -Werror test.c edit: $(EDITOR) Makefile *.[ch] check: modtest ./modtest libkqueue-2.3.1/kern/kqueue.c000066400000000000000000000172171342472035000161150ustar00rootroot00000000000000/*- * Copyright (c) 2010 Mark Heily * * Includes portions of /usr/src/sys/kern/kern_event.c which is * * Copyright (c) 1999,2000,2001 Jonathan Lemon * Copyright 2004 John-Mark Gurney * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* Portions based on $FreeBSD: src/sys/kern/kern_event.c,v 1.126.2.1.2.1 2009/10/25 01:10:29 kensmith Exp $ $FreeBSD: src/sys/sys/eventvar.h,v 1.6.30.1.2.1 2009/10/25 01:10:29 kensmith Exp $ */ #include #include #include #include #include #include #include #include #include #include #include "../include/sys/event.h" #include "queue.h" struct kqueue; struct kfilter; struct knote; static int kqueue_open (struct inode *inode, struct file *file); static int kqueue_release (struct inode *inode, struct file *file); static int kqueue_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); static ssize_t kqueue_read(struct file *file, char __user *buf, size_t lbuf, loff_t *ppos); static ssize_t kqueue_write(struct file *file, const char __user *buf, size_t lbuf, loff_t *ppos); struct file_operations fops = { .owner = THIS_MODULE, .ioctl = kqueue_ioctl, .open = kqueue_open, .release = kqueue_release, .read = kqueue_read, .write = kqueue_write, }; struct kfilter { struct rb_root kf_note; }; struct kqueue { spinlock_t kq_lock; int kq_count; /* number of pending events */ struct kfilter kq_filt[EVFILT_SYSCOUNT]; }; #ifdef TODO struct filterops { int f_isfd; /* true if ident == filedescriptor */ int (*f_attach)(struct knote *kn); void (*f_detach)(struct knote *kn); int (*f_event)(struct knote *kn, long hint); }; static struct kfilter { struct filterops kf_fop; int for_refcnt; } sysfilt_ops[EVFILT_SYSCOUNT]; = { { &file_filtops }, /* EVFILT_READ */ { &file_filtops }, /* EVFILT_WRITE */ { &null_filtops }, /* EVFILT_AIO */ { &file_filtops }, /* EVFILT_VNODE */ { &proc_filtops }, /* EVFILT_PROC */ { &sig_filtops }, /* EVFILT_SIGNAL */ { &timer_filtops }, /* EVFILT_TIMER */ { &file_filtops }, /* EVFILT_NETDEV */ { &fs_filtops }, /* EVFILT_FS */ { &null_filtops }, /* EVFILT_LIO */ }; #endif static int major; static struct class *kqueue_class; static struct task_struct *kq_thread; static struct kfilter * kfilter_lookup(struct kqueue *kq, int filt) { if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) return NULL; return &kq->kq_filt[~filt]; } //only for sleeping during testing #include static int kqueue_main(void *arg) { printk(KERN_INFO "kqueue thread started...\n"); while (!kthread_should_stop()) { msleep(5000); printk(KERN_INFO "kqueue thread awake...\n"); } printk(KERN_INFO "kqueue stopping...\n"); return 0; } static int kqueue_open (struct inode *inode, struct file *file) { struct kqueue *kq; int i; printk("kqueue_open\n"); kq = kmalloc(sizeof(*kq), GFP_KERNEL); if (kq == NULL) { printk("kqueue: kmalloc failed\n"); return -1; } spin_lock_init(&kq->kq_lock); for (i = 0; i < EVFILT_SYSCOUNT; i++) kq->kq_filt[i].kf_note = RB_ROOT; file->private_data = kq; return 0; } static int kqueue_release (struct inode *inode, struct file *file) { printk("kqueue_release\n"); kfree(file->private_data); return 0; } static int kqueue_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { int fd; if (copy_from_user(&fd, (int *)arg, sizeof(int))) return -EFAULT; printk(KERN_INFO "added fd %d\n", fd); return 0; } static ssize_t kqueue_read(struct file *file, char __user *buf, size_t lbuf, loff_t *ppos) { struct kqueue *kq = file->private_data; spin_lock(&kq->kq_lock); //STUB spin_unlock(&kq->kq_lock); return sizeof(struct kevent); } static ssize_t kqueue_write(struct file *file, const char __user *buf, size_t lbuf, loff_t *ppos) { struct kqueue *kq = file->private_data; struct kevent kev; struct kfilter *filt; size_t i, nchanges; if ((lbuf % sizeof(struct kevent)) != 0) 
return -EINVAL; nchanges = lbuf / sizeof(struct kevent); for (i = 0; i < nchanges; i++) { if (copy_from_user(&kev, (struct kevent *) buf, sizeof(kev))) return -EFAULT; filt = kfilter_lookup(kq, kev.filter); if (filt == NULL) return -EINVAL; #ifdef DEADWOOD spin_lock(&kq->kq_lock); printk("%zu bytes, nchanges=%zu", lbuf, nchanges); spin_unlock(&kq->kq_lock); #endif buf += sizeof(kev); } return sizeof(struct kevent); } static int __init kqueue_start(void) { int rv = 0; printk(KERN_INFO "Loading kqueue module...\n"); /* Register as a character device */ major = register_chrdev(0, "kqueue", &fops); if (major < 0) { printk(KERN_WARNING "register_chrdev() failed"); return major; } /* Create /dev/kqueue */ kqueue_class = class_create(THIS_MODULE, "kqueue"); device_create(kqueue_class, NULL, MKDEV(major,0), NULL, "kqueue"); printk(KERN_INFO "Creating helper thread...\n"); kq_thread = kthread_create(kqueue_main, NULL, "kqueue"); if (IS_ERR(kq_thread)) { rv = PTR_ERR(kq_thread); goto err_out; } wake_up_process(kq_thread); printk(KERN_INFO "Finished loading kqueue module...\n"); return rv; err_out: //TODO: cleanup return rv; } static void __exit kqueue_end(void) { printk(KERN_INFO "Unloading kqueue module\n"); /* Remove /dev/kqueue */ device_destroy(kqueue_class, MKDEV(major,0)); class_destroy(kqueue_class); unregister_chrdev(major, "kqueue"); kthread_stop(kq_thread); } module_init(kqueue_start); module_exit(kqueue_end); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Mark Heily "); MODULE_DESCRIPTION("kqueue(2) compatibility"); libkqueue-2.3.1/kern/queue.h000066400000000000000000000511221342472035000157400ustar00rootroot00000000000000/*- * Copyright (c) 1991, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)queue.h 8.5 (Berkeley) 8/20/94 * $FreeBSD: src/sys/sys/queue.h,v 1.72.2.1.2.1 2009/10/25 01:10:29 kensmith Exp $ */ #ifndef _SYS_QUEUE_H_ #define _SYS_QUEUE_H_ /* * This file defines four types of data structures: singly-linked lists, * singly-linked tail queues, lists and tail queues. 
* * A singly-linked list is headed by a single forward pointer. The elements * are singly linked for minimum space and pointer manipulation overhead at * the expense of O(n) removal for arbitrary elements. New elements can be * added to the list after an existing element or at the head of the list. * Elements being removed from the head of the list should use the explicit * macro for this purpose for optimum efficiency. A singly-linked list may * only be traversed in the forward direction. Singly-linked lists are ideal * for applications with large datasets and few or no removals or for * implementing a LIFO queue. * * A singly-linked tail queue is headed by a pair of pointers, one to the * head of the list and the other to the tail of the list. The elements are * singly linked for minimum space and pointer manipulation overhead at the * expense of O(n) removal for arbitrary elements. New elements can be added * to the list after an existing element, at the head of the list, or at the * end of the list. Elements being removed from the head of the tail queue * should use the explicit macro for this purpose for optimum efficiency. * A singly-linked tail queue may only be traversed in the forward direction. * Singly-linked tail queues are ideal for applications with large datasets * and few or no removals or for implementing a FIFO queue. * * A list is headed by a single forward pointer (or an array of forward * pointers for a hash table header). The elements are doubly linked * so that an arbitrary element can be removed without a need to * traverse the list. New elements can be added to the list before * or after an existing element or at the head of the list. A list * may only be traversed in the forward direction. * * A tail queue is headed by a pair of pointers, one to the head of the * list and the other to the tail of the list. The elements are doubly * linked so that an arbitrary element can be removed without a need to * traverse the list. New elements can be added to the list before or * after an existing element, at the head of the list, or at the end of * the list. A tail queue may be traversed in either direction. * * For details on the use of these macros, see the queue(3) manual page. 
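 *
 * A short usage sketch of the singly-linked list macros follows. The element
 * type "struct entry", the head type name "entry_head", the field name
 * "entries", and the helper routines are illustrative only; they are not
 * defined by this header:
 *
 *      struct entry {
 *              int value;
 *              SLIST_ENTRY(entry) entries;       // linkage used by the macros
 *      };
 *      SLIST_HEAD(entry_head, entry) head = SLIST_HEAD_INITIALIZER(head);
 *
 *      struct entry *e = alloc_entry(42);        // hypothetical allocator
 *      SLIST_INSERT_HEAD(&head, e, entries);     // push onto the front
 *      SLIST_FOREACH(e, &head, entries)          // walk front to back
 *              consume(e->value);                // hypothetical consumer
 *      while (!SLIST_EMPTY(&head)) {             // tear the list down
 *              e = SLIST_FIRST(&head);
 *              SLIST_REMOVE_HEAD(&head, entries);
 *              free_entry(e);                    // hypothetical deallocator
 *      }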
* * * SLIST LIST STAILQ TAILQ * _HEAD + + + + * _HEAD_INITIALIZER + + + + * _ENTRY + + + + * _INIT + + + + * _EMPTY + + + + * _FIRST + + + + * _NEXT + + + + * _PREV - - - + * _LAST - - + + * _FOREACH + + + + * _FOREACH_SAFE + + + + * _FOREACH_REVERSE - - - + * _FOREACH_REVERSE_SAFE - - - + * _INSERT_HEAD + + + + * _INSERT_BEFORE - + - + * _INSERT_AFTER + + + + * _INSERT_TAIL - - + + * _CONCAT - - + + * _REMOVE_AFTER + - + - * _REMOVE_HEAD + - + - * _REMOVE + + + + * */ #ifdef QUEUE_MACRO_DEBUG /* Store the last 2 places the queue element or head was altered */ struct qm_trace { char * lastfile; int lastline; char * prevfile; int prevline; }; #define TRACEBUF struct qm_trace trace; #define TRASHIT(x) do {(x) = (void *)-1;} while (0) #define QMD_TRACE_HEAD(head) do { \ (head)->trace.prevline = (head)->trace.lastline; \ (head)->trace.prevfile = (head)->trace.lastfile; \ (head)->trace.lastline = __LINE__; \ (head)->trace.lastfile = __FILE__; \ } while (0) #define QMD_TRACE_ELEM(elem) do { \ (elem)->trace.prevline = (elem)->trace.lastline; \ (elem)->trace.prevfile = (elem)->trace.lastfile; \ (elem)->trace.lastline = __LINE__; \ (elem)->trace.lastfile = __FILE__; \ } while (0) #else #define QMD_TRACE_ELEM(elem) #define QMD_TRACE_HEAD(head) #define TRACEBUF #define TRASHIT(x) #endif /* QUEUE_MACRO_DEBUG */ /* * Singly-linked List declarations. */ #define SLIST_HEAD(name, type) \ struct name { \ struct type *slh_first; /* first element */ \ } #define SLIST_HEAD_INITIALIZER(head) \ { NULL } #define SLIST_ENTRY(type) \ struct { \ struct type *sle_next; /* next element */ \ } /* * Singly-linked List functions. */ #define SLIST_EMPTY(head) ((head)->slh_first == NULL) #define SLIST_FIRST(head) ((head)->slh_first) #define SLIST_FOREACH(var, head, field) \ for ((var) = SLIST_FIRST((head)); \ (var); \ (var) = SLIST_NEXT((var), field)) #define SLIST_FOREACH_SAFE(var, head, field, tvar) \ for ((var) = SLIST_FIRST((head)); \ (var) && ((tvar) = SLIST_NEXT((var), field), 1); \ (var) = (tvar)) #define SLIST_FOREACH_PREVPTR(var, varp, head, field) \ for ((varp) = &SLIST_FIRST((head)); \ ((var) = *(varp)) != NULL; \ (varp) = &SLIST_NEXT((var), field)) #define SLIST_INIT(head) do { \ SLIST_FIRST((head)) = NULL; \ } while (0) #define SLIST_INSERT_AFTER(slistelm, elm, field) do { \ SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \ SLIST_NEXT((slistelm), field) = (elm); \ } while (0) #define SLIST_INSERT_HEAD(head, elm, field) do { \ SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \ SLIST_FIRST((head)) = (elm); \ } while (0) #define SLIST_NEXT(elm, field) ((elm)->field.sle_next) #define SLIST_REMOVE(head, elm, type, field) do { \ if (SLIST_FIRST((head)) == (elm)) { \ SLIST_REMOVE_HEAD((head), field); \ } \ else { \ struct type *curelm = SLIST_FIRST((head)); \ while (SLIST_NEXT(curelm, field) != (elm)) \ curelm = SLIST_NEXT(curelm, field); \ SLIST_REMOVE_AFTER(curelm, field); \ } \ TRASHIT((elm)->field.sle_next); \ } while (0) #define SLIST_REMOVE_AFTER(elm, field) do { \ SLIST_NEXT(elm, field) = \ SLIST_NEXT(SLIST_NEXT(elm, field), field); \ } while (0) #define SLIST_REMOVE_HEAD(head, field) do { \ SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \ } while (0) /* * Singly-linked Tail queue declarations. 
*/ #define STAILQ_HEAD(name, type) \ struct name { \ struct type *stqh_first;/* first element */ \ struct type **stqh_last;/* addr of last next element */ \ } #define STAILQ_HEAD_INITIALIZER(head) \ { NULL, &(head).stqh_first } #define STAILQ_ENTRY(type) \ struct { \ struct type *stqe_next; /* next element */ \ } /* * Singly-linked Tail queue functions. */ #define STAILQ_CONCAT(head1, head2) do { \ if (!STAILQ_EMPTY((head2))) { \ *(head1)->stqh_last = (head2)->stqh_first; \ (head1)->stqh_last = (head2)->stqh_last; \ STAILQ_INIT((head2)); \ } \ } while (0) #define STAILQ_EMPTY(head) ((head)->stqh_first == NULL) #define STAILQ_FIRST(head) ((head)->stqh_first) #define STAILQ_FOREACH(var, head, field) \ for((var) = STAILQ_FIRST((head)); \ (var); \ (var) = STAILQ_NEXT((var), field)) #define STAILQ_FOREACH_SAFE(var, head, field, tvar) \ for ((var) = STAILQ_FIRST((head)); \ (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \ (var) = (tvar)) #define STAILQ_INIT(head) do { \ STAILQ_FIRST((head)) = NULL; \ (head)->stqh_last = &STAILQ_FIRST((head)); \ } while (0) #define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \ if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\ (head)->stqh_last = &STAILQ_NEXT((elm), field); \ STAILQ_NEXT((tqelm), field) = (elm); \ } while (0) #define STAILQ_INSERT_HEAD(head, elm, field) do { \ if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \ (head)->stqh_last = &STAILQ_NEXT((elm), field); \ STAILQ_FIRST((head)) = (elm); \ } while (0) #define STAILQ_INSERT_TAIL(head, elm, field) do { \ STAILQ_NEXT((elm), field) = NULL; \ *(head)->stqh_last = (elm); \ (head)->stqh_last = &STAILQ_NEXT((elm), field); \ } while (0) #define STAILQ_LAST(head, type, field) \ (STAILQ_EMPTY((head)) ? \ NULL : \ ((struct type *)(void *) \ ((char *)((head)->stqh_last) - __offsetof(struct type, field)))) #define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next) #define STAILQ_REMOVE(head, elm, type, field) do { \ if (STAILQ_FIRST((head)) == (elm)) { \ STAILQ_REMOVE_HEAD((head), field); \ } \ else { \ struct type *curelm = STAILQ_FIRST((head)); \ while (STAILQ_NEXT(curelm, field) != (elm)) \ curelm = STAILQ_NEXT(curelm, field); \ STAILQ_REMOVE_AFTER(head, curelm, field); \ } \ TRASHIT((elm)->field.stqe_next); \ } while (0) #define STAILQ_REMOVE_HEAD(head, field) do { \ if ((STAILQ_FIRST((head)) = \ STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \ (head)->stqh_last = &STAILQ_FIRST((head)); \ } while (0) #define STAILQ_REMOVE_AFTER(head, elm, field) do { \ if ((STAILQ_NEXT(elm, field) = \ STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL) \ (head)->stqh_last = &STAILQ_NEXT((elm), field); \ } while (0) #define STAILQ_SWAP(head1, head2, type) do { \ struct type *swap_first = STAILQ_FIRST(head1); \ struct type **swap_last = (head1)->stqh_last; \ STAILQ_FIRST(head1) = STAILQ_FIRST(head2); \ (head1)->stqh_last = (head2)->stqh_last; \ STAILQ_FIRST(head2) = swap_first; \ (head2)->stqh_last = swap_last; \ if (STAILQ_EMPTY(head1)) \ (head1)->stqh_last = &STAILQ_FIRST(head1); \ if (STAILQ_EMPTY(head2)) \ (head2)->stqh_last = &STAILQ_FIRST(head2); \ } while (0) /* * List declarations. * NOTE: LIST_HEAD conflicts with a Linux macro. */ #define FIXME_LIST_HEAD(name, type) \ struct name { \ struct type *lh_first; /* first element */ \ } #define LIST_HEAD_INITIALIZER(head) \ { NULL } #define LIST_ENTRY(type) \ struct { \ struct type *le_next; /* next element */ \ struct type **le_prev; /* address of previous next element */ \ } /* * List functions. 
*/ #if (defined(_KERNEL) && defined(INVARIANTS)) #define QMD_LIST_CHECK_HEAD(head, field) do { \ if (LIST_FIRST((head)) != NULL && \ LIST_FIRST((head))->field.le_prev != \ &LIST_FIRST((head))) \ panic("Bad list head %p first->prev != head", (head)); \ } while (0) #define QMD_LIST_CHECK_NEXT(elm, field) do { \ if (LIST_NEXT((elm), field) != NULL && \ LIST_NEXT((elm), field)->field.le_prev != \ &((elm)->field.le_next)) \ panic("Bad link elm %p next->prev != elm", (elm)); \ } while (0) #define QMD_LIST_CHECK_PREV(elm, field) do { \ if (*(elm)->field.le_prev != (elm)) \ panic("Bad link elm %p prev->next != elm", (elm)); \ } while (0) #else #define QMD_LIST_CHECK_HEAD(head, field) #define QMD_LIST_CHECK_NEXT(elm, field) #define QMD_LIST_CHECK_PREV(elm, field) #endif /* (_KERNEL && INVARIANTS) */ #define LIST_EMPTY(head) ((head)->lh_first == NULL) #define LIST_FIRST(head) ((head)->lh_first) #define LIST_FOREACH(var, head, field) \ for ((var) = LIST_FIRST((head)); \ (var); \ (var) = LIST_NEXT((var), field)) #define LIST_FOREACH_SAFE(var, head, field, tvar) \ for ((var) = LIST_FIRST((head)); \ (var) && ((tvar) = LIST_NEXT((var), field), 1); \ (var) = (tvar)) #define LIST_INIT(head) do { \ LIST_FIRST((head)) = NULL; \ } while (0) #define LIST_INSERT_AFTER(listelm, elm, field) do { \ QMD_LIST_CHECK_NEXT(listelm, field); \ if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\ LIST_NEXT((listelm), field)->field.le_prev = \ &LIST_NEXT((elm), field); \ LIST_NEXT((listelm), field) = (elm); \ (elm)->field.le_prev = &LIST_NEXT((listelm), field); \ } while (0) #define LIST_INSERT_BEFORE(listelm, elm, field) do { \ QMD_LIST_CHECK_PREV(listelm, field); \ (elm)->field.le_prev = (listelm)->field.le_prev; \ LIST_NEXT((elm), field) = (listelm); \ *(listelm)->field.le_prev = (elm); \ (listelm)->field.le_prev = &LIST_NEXT((elm), field); \ } while (0) #define LIST_INSERT_HEAD(head, elm, field) do { \ QMD_LIST_CHECK_HEAD((head), field); \ if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \ LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\ LIST_FIRST((head)) = (elm); \ (elm)->field.le_prev = &LIST_FIRST((head)); \ } while (0) #define LIST_NEXT(elm, field) ((elm)->field.le_next) #define LIST_REMOVE(elm, field) do { \ QMD_LIST_CHECK_NEXT(elm, field); \ QMD_LIST_CHECK_PREV(elm, field); \ if (LIST_NEXT((elm), field) != NULL) \ LIST_NEXT((elm), field)->field.le_prev = \ (elm)->field.le_prev; \ *(elm)->field.le_prev = LIST_NEXT((elm), field); \ TRASHIT((elm)->field.le_next); \ TRASHIT((elm)->field.le_prev); \ } while (0) #define LIST_SWAP(head1, head2, type, field) do { \ struct type *swap_tmp = LIST_FIRST((head1)); \ LIST_FIRST((head1)) = LIST_FIRST((head2)); \ LIST_FIRST((head2)) = swap_tmp; \ if ((swap_tmp = LIST_FIRST((head1))) != NULL) \ swap_tmp->field.le_prev = &LIST_FIRST((head1)); \ if ((swap_tmp = LIST_FIRST((head2))) != NULL) \ swap_tmp->field.le_prev = &LIST_FIRST((head2)); \ } while (0) /* * Tail queue declarations. */ #define TAILQ_HEAD(name, type) \ struct name { \ struct type *tqh_first; /* first element */ \ struct type **tqh_last; /* addr of last next element */ \ TRACEBUF \ } #define TAILQ_HEAD_INITIALIZER(head) \ { NULL, &(head).tqh_first } #define TAILQ_ENTRY(type) \ struct { \ struct type *tqe_next; /* next element */ \ struct type **tqe_prev; /* address of previous next element */ \ TRACEBUF \ } /* * Tail queue functions. 
*/ #if (defined(_KERNEL) && defined(INVARIANTS)) #define QMD_TAILQ_CHECK_HEAD(head, field) do { \ if (!TAILQ_EMPTY(head) && \ TAILQ_FIRST((head))->field.tqe_prev != \ &TAILQ_FIRST((head))) \ panic("Bad tailq head %p first->prev != head", (head)); \ } while (0) #define QMD_TAILQ_CHECK_TAIL(head, field) do { \ if (*(head)->tqh_last != NULL) \ panic("Bad tailq NEXT(%p->tqh_last) != NULL", (head)); \ } while (0) #define QMD_TAILQ_CHECK_NEXT(elm, field) do { \ if (TAILQ_NEXT((elm), field) != NULL && \ TAILQ_NEXT((elm), field)->field.tqe_prev != \ &((elm)->field.tqe_next)) \ panic("Bad link elm %p next->prev != elm", (elm)); \ } while (0) #define QMD_TAILQ_CHECK_PREV(elm, field) do { \ if (*(elm)->field.tqe_prev != (elm)) \ panic("Bad link elm %p prev->next != elm", (elm)); \ } while (0) #else #define QMD_TAILQ_CHECK_HEAD(head, field) #define QMD_TAILQ_CHECK_TAIL(head, headname) #define QMD_TAILQ_CHECK_NEXT(elm, field) #define QMD_TAILQ_CHECK_PREV(elm, field) #endif /* (_KERNEL && INVARIANTS) */ #define TAILQ_CONCAT(head1, head2, field) do { \ if (!TAILQ_EMPTY(head2)) { \ *(head1)->tqh_last = (head2)->tqh_first; \ (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \ (head1)->tqh_last = (head2)->tqh_last; \ TAILQ_INIT((head2)); \ QMD_TRACE_HEAD(head1); \ QMD_TRACE_HEAD(head2); \ } \ } while (0) #define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) #define TAILQ_FIRST(head) ((head)->tqh_first) #define TAILQ_FOREACH(var, head, field) \ for ((var) = TAILQ_FIRST((head)); \ (var); \ (var) = TAILQ_NEXT((var), field)) #define TAILQ_FOREACH_SAFE(var, head, field, tvar) \ for ((var) = TAILQ_FIRST((head)); \ (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \ (var) = (tvar)) #define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ for ((var) = TAILQ_LAST((head), headname); \ (var); \ (var) = TAILQ_PREV((var), headname, field)) #define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \ for ((var) = TAILQ_LAST((head), headname); \ (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \ (var) = (tvar)) #define TAILQ_INIT(head) do { \ TAILQ_FIRST((head)) = NULL; \ (head)->tqh_last = &TAILQ_FIRST((head)); \ QMD_TRACE_HEAD(head); \ } while (0) #define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ QMD_TAILQ_CHECK_NEXT(listelm, field); \ if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\ TAILQ_NEXT((elm), field)->field.tqe_prev = \ &TAILQ_NEXT((elm), field); \ else { \ (head)->tqh_last = &TAILQ_NEXT((elm), field); \ QMD_TRACE_HEAD(head); \ } \ TAILQ_NEXT((listelm), field) = (elm); \ (elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \ QMD_TRACE_ELEM(&(elm)->field); \ QMD_TRACE_ELEM(&listelm->field); \ } while (0) #define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ QMD_TAILQ_CHECK_PREV(listelm, field); \ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ TAILQ_NEXT((elm), field) = (listelm); \ *(listelm)->field.tqe_prev = (elm); \ (listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \ QMD_TRACE_ELEM(&(elm)->field); \ QMD_TRACE_ELEM(&listelm->field); \ } while (0) #define TAILQ_INSERT_HEAD(head, elm, field) do { \ QMD_TAILQ_CHECK_HEAD(head, field); \ if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \ TAILQ_FIRST((head))->field.tqe_prev = \ &TAILQ_NEXT((elm), field); \ else \ (head)->tqh_last = &TAILQ_NEXT((elm), field); \ TAILQ_FIRST((head)) = (elm); \ (elm)->field.tqe_prev = &TAILQ_FIRST((head)); \ QMD_TRACE_HEAD(head); \ QMD_TRACE_ELEM(&(elm)->field); \ } while (0) #define TAILQ_INSERT_TAIL(head, elm, field) do { \ 
QMD_TAILQ_CHECK_TAIL(head, field); \ TAILQ_NEXT((elm), field) = NULL; \ (elm)->field.tqe_prev = (head)->tqh_last; \ *(head)->tqh_last = (elm); \ (head)->tqh_last = &TAILQ_NEXT((elm), field); \ QMD_TRACE_HEAD(head); \ QMD_TRACE_ELEM(&(elm)->field); \ } while (0) #define TAILQ_LAST(head, headname) \ (*(((struct headname *)((head)->tqh_last))->tqh_last)) #define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) #define TAILQ_PREV(elm, headname, field) \ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) #define TAILQ_REMOVE(head, elm, field) do { \ QMD_TAILQ_CHECK_NEXT(elm, field); \ QMD_TAILQ_CHECK_PREV(elm, field); \ if ((TAILQ_NEXT((elm), field)) != NULL) \ TAILQ_NEXT((elm), field)->field.tqe_prev = \ (elm)->field.tqe_prev; \ else { \ (head)->tqh_last = (elm)->field.tqe_prev; \ QMD_TRACE_HEAD(head); \ } \ *(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \ TRASHIT((elm)->field.tqe_next); \ TRASHIT((elm)->field.tqe_prev); \ QMD_TRACE_ELEM(&(elm)->field); \ } while (0) #define TAILQ_SWAP(head1, head2, type, field) do { \ struct type *swap_first = (head1)->tqh_first; \ struct type **swap_last = (head1)->tqh_last; \ (head1)->tqh_first = (head2)->tqh_first; \ (head1)->tqh_last = (head2)->tqh_last; \ (head2)->tqh_first = swap_first; \ (head2)->tqh_last = swap_last; \ if ((swap_first = (head1)->tqh_first) != NULL) \ swap_first->field.tqe_prev = &(head1)->tqh_first; \ else \ (head1)->tqh_last = &(head1)->tqh_first; \ if ((swap_first = (head2)->tqh_first) != NULL) \ swap_first->field.tqe_prev = &(head2)->tqh_first; \ else \ (head2)->tqh_last = &(head2)->tqh_first; \ } while (0) #endif /* !_SYS_QUEUE_H_ */ libkqueue-2.3.1/kern/test.c000066400000000000000000000027221342472035000155700ustar00rootroot00000000000000/* * Copyright (c) 2010 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include #include #include #include #include #include #include "../include/sys/event.h" int main(int argc, char **argv) { struct kevent kev; int fd; fd = open("/dev/kqueue", O_RDWR); if (fd < 0) err(1, "open()"); printf("kqfd = %d\n", fd); EV_SET(&kev, 1, EVFILT_READ, EV_ADD, 0, 0, NULL); #if OLD int x; x = 1; if (ioctl(fd, 1234, (char *) &x) < 0) err(1, "ioctl"); x = 2; if (ioctl(fd, 1234, (char *) &x) < 0) err(1, "ioctl"); #endif if (write(fd, &kev, sizeof(kev)) < 0) err(1, "write"); close(fd); puts("ok"); exit(0); } libkqueue-2.3.1/kqlite/000077500000000000000000000000001342472035000147745ustar00rootroot00000000000000libkqueue-2.3.1/kqlite/Makefile000066400000000000000000000005301342472035000164320ustar00rootroot00000000000000test-lite: test-lite.c kqlite.c lite.h gcc -D_GNU_SOURCE=1 -g -O0 -std=c99 -Wall -Werror -o test-lite test-lite.c kqlite.c #TODO: test-dispatch: test-dispatch.c lite.h gcc -D_GNU_SOURCE=1 -g -O0 -std=c99 -Wall -Werror -fopenmp -o test-dispatch kqlite.c test-dispatch.c dispatch.c check: test-lite ./test-lite clean: rm -f test-lite *.o libkqueue-2.3.1/kqlite/README000066400000000000000000000014411342472035000156540ustar00rootroot00000000000000kqlite has the following goals: * be lightweight and efficient * provide a strict subset of the functionality of kqueue(2) and kevent(2) * closely resemble the kqueue API, but not guarantee 100% compatibility * support modern POSIX operating systems It should be possible to switch between kqlite and the full libkqueue using a few preprocessor macros: #if LIBKQUEUE #define kqueue_t int #define kq_init kqueue #define kq_event kevent #define kq_free close #endif Here are the differences between kqlite and kqueue: * Function names are different: kqueue() == kq_init() kevent() == kq_event() close() == kq_free() * kqueue() returns an int, while kq_init returns an opaque kqueue_t type. libkqueue-2.3.1/kqlite/dispatch.c000066400000000000000000000026371342472035000167470ustar00rootroot00000000000000/* * Copyright (c) 2013 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
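 *
 * Illustrative usage sketch: register interest with kq_event() and hand
 * control to kq_dispatch(), which runs forever and passes each event to
 * the callback.  sock_fd and handle_read() are hypothetical placeholders,
 * not part of this library.
 *
 *   static void on_event(kqueue_t kq, struct kevent kev)
 *   {
 *       if (kev.filter == EVFILT_READ)
 *           handle_read((int)kev.ident);     // hypothetical handler
 *   }
 *
 *   kqueue_t kq = kq_init();
 *   struct kevent kev;
 *
 *   EV_SET(&kev, sock_fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
 *   kq_event(kq, &kev, 1, NULL, 0, NULL);    // register, do not wait
 *   kq_dispatch(kq, on_event);               // loops forever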
*/ #include #ifdef _OPENMP #include #endif /* _OPENMP */ /* * EXPERIMENTAL dispatching API */ void kq_dispatch(kqueue_t kq, void (*cb)(kqueue_t, struct kevent)) { const int maxevents = 64; /* Should be more like 2xNCPU */ struct kevent events[maxevents]; ssize_t nevents; int i; for (;;) { nevents = kq_event(kq, NULL, 0, (struct kevent *) &events, maxevents, NULL); if (nevents < 0) abort(); #pragma omp parallel { for (i = 0; i < nevents; i++) { #pragma omp single nowait (*cb)(kq, events[i]); } } } } libkqueue-2.3.1/kqlite/kqlite.c000066400000000000000000000313501342472035000164330ustar00rootroot00000000000000/* * Copyright (c) 2013 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include "./lite.h" #include "./utarray.h" /* The maximum number of events that can be returned in a single kq_event() call */ #define EPEV_BUF_MAX 512 #include /* Debugging macros */ #define dbg_puts(s) dbg_printf("%s", (s)) #define dbg_printf(fmt,...) fprintf(stderr, "kq [%d]: %s(): "fmt"\n", \ 0 /*TODO: thread id */, __func__, __VA_ARGS__) /* Determine what type of kernel event system to use. */ #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__OpenBSD__) || defined(__NetBSD__) #define USE_KQUEUE #include #elif defined(__linux__) #define USE_EPOLL #include #include #include #include #include #include #include //XXX - TEMPORARY DURING DEVELOPMENT #define KQ_THREADSAFE 1 #ifdef KQ_THREADSAFE #include #endif static char * epoll_event_to_str(struct epoll_event *); #else #error Unsupported operating system type #endif struct kqueue { #if defined(USE_KQUEUE) int kqfd; /* kqueue(2) descriptor */ #elif defined(USE_EPOLL) int epfd; /* epoll */ int inofd; /* inotify */ int sigfd; /* signalfd */ int timefd; /* timerfd */ int readfd, writefd; /* epoll descriptors for EVFILT_READ & EVFILT_WRITE */ sigset_t sigmask; /* All of the active knotes for each filter. The index in the array matches the 'ident' parameter of the 'struct kevent' in the knote. */ UT_array *knote[EVFILT_SYSCOUNT]; /* This allows all kevents to share a single inotify descriptor. * Key: inotify watch descriptor returned by inotify_add_watch() * Value: pointer to knote */ UT_array *ino_knote; #ifdef KQ_THREADSAFE pthread_mutex_t kq_mtx; #endif #else #error Undefined event system #endif }; /* A knote is used to store information about a kevent while it is being monitored. Once it fires, information from the knote is returned to the caller. */ struct knote { struct kevent kev; union { int timerfd; /* Each EVFILT_TIMER kevent has a timerfd */ int ino_wd; /* EVFILT_VNODE: index within kq->ino_knote */ } aux; int deleted; /* When EV_DELETE is used, it marks the knote deleted instead of freeing the object. This helps with threadsafety by ensuring that threads don't try to access a freed object. 
It doesn't help with memory usage, as the memory is never reclaimed. */ }; static inline void kq_lock(kqueue_t kq) { #ifdef KQ_THREADSAFE if (pthread_mutex_lock(&kq->kq_mtx) != 0) abort(); #endif } static inline void kq_unlock(kqueue_t kq) { #ifdef KQ_THREADSAFE if (pthread_mutex_unlock(&kq->kq_mtx) != 0) abort(); #endif } UT_icd knote_icd = { sizeof(struct knote), NULL, NULL, NULL }; /* Initialize the event descriptor */ kqueue_t kq_init(void) { struct kqueue *kq; #if defined(USE_KQUEUE) if ((kq = calloc(1, sizeof(*kq))) == NULL) return (NULL); kq->kqfd = kqueue(); if (kq->kqfd < 0) { free(kq); return (NULL); } #elif defined(USE_EPOLL) struct epoll_event epev; if ((kq = malloc(sizeof(*kq))) == NULL) return (NULL); #ifdef KQ_THREADSAFE if (pthread_mutex_init(&kq->kq_mtx, NULL) != 0) goto errout; #endif /* Create an index of kevents to allow lookups from epev.data.u32 */ for (int i = 0; i < EVFILT_SYSCOUNT; i++) utarray_new(kq->knote[i], &knote_icd); /* Initialize all the event descriptors */ sigemptyset(&kq->sigmask); kq->sigfd = signalfd(-1, &kq->sigmask, 0); kq->inofd = inotify_init(); kq->epfd = epoll_create(10); kq->readfd = epoll_create(10); kq->writefd = epoll_create(10); kq->timefd = timerfd_create(CLOCK_MONOTONIC, 0); if (kq->sigfd < 0 || kq->inofd < 0 || kq->epfd < 0 || kq->readfd < 0 || kq->writefd < 0 || kq->timefd < 0) goto errout; /* Add the signalfd descriptor to the epollset */ epev.events = EPOLLIN; epev.data.u32 = EVFILT_SIGNAL; if (epoll_ctl(kq->epfd, EPOLL_CTL_ADD, kq->sigfd, &epev) < 0) goto errout; /* Add the readfd descriptor to the epollset */ epev.events = EPOLLIN; epev.data.u32 = EVFILT_READ; if (epoll_ctl(kq->epfd, EPOLL_CTL_ADD, kq->readfd, &epev) < 0) goto errout; /* Add the writefd descriptor to the epollset */ epev.events = EPOLLIN; epev.data.u32 = EVFILT_WRITE; if (epoll_ctl(kq->epfd, EPOLL_CTL_ADD, kq->writefd, &epev) < 0) goto errout; /* Add the inotify descriptor to the epollset */ /* if ((kev = malloc(sizeof(*kev))) == NULL) goto errout; EV_SET(kev, EVFILT_VNODE, EVFILT_VNODE, 0, 0, 0, NULL); epev.events = EPOLLIN; epev.data.u32 = 1; utarray_push_back(kq->kev, kev); if (epoll_ctl(kq->epfd, EPOLL_CTL_ADD, kq->inofd, &epev) < 0) goto errout; */ //TODO: consider applying FD_CLOEXEC to all descriptors // FIXME: check that all members of kq->wfd are valid return (kq); errout: kq_free(kq); return (NULL); #endif } void kq_free(kqueue_t kq) { #if defined(USE_KQUEUE) close(kq.kqfd); #elif defined(USE_EPOLL) close(kq->sigfd); close(kq->inofd); close(kq->epfd); close(kq->readfd); close(kq->writefd); close(kq->timefd); //FIXME: need to free each individual knote for (int i = 0; i < EVFILT_SYSCOUNT; i++) utarray_free(kq->knote[i]); # ifdef KQ_THREADSAFE pthread_mutex_destroy(&kq->kq_mtx); # endif #endif free(kq); } #if defined(USE_EPOLL) /* Create a knote object */ static int knote_add(kqueue_t kq, const struct kevent *kev) { struct knote *kn; assert(kev->filter < EVFILT_SYSCOUNT); kn = malloc(sizeof(*kn)); if (kn == NULL) return (-1); memcpy (&kn->kev, kev, sizeof(kn->kev)); kq_lock(kq); utarray_insert(kq->knote[kev->filter], kn, kev->ident); kq_unlock(kq); return (0); } /* Lookup a 'struct kevent' that was previously stored in a knote object */ static struct knote * knote_lookup(kqueue_t kq, short filter, uint32_t ident) { struct knote *p; kq_lock(kq); p = (struct knote *) utarray_eltptr(kq->knote[filter], ident); //TODO: refcounting kq_unlock(kq); return (p); } /* Add a new item to the list of events to be monitored */ static inline int kq_add(kqueue_t kq, const 
struct kevent *ev) { int rv = 0; struct epoll_event epev; int sigfd; epev.data.u32 = ev->filter; if (knote_add(kq, ev) < 0) abort(); //TODO: errorhandle switch (ev->filter) { case EVFILT_READ: epev.events = EPOLLIN; rv = epoll_ctl(kq->readfd, EPOLL_CTL_ADD, ev->ident, &epev); break; case EVFILT_WRITE: epev.events = EPOLLOUT; rv = epoll_ctl(kq->writefd, EPOLL_CTL_ADD, ev->ident, &epev); break; case EVFILT_VNODE: epev.events = EPOLLIN; rv = epoll_ctl(kq->epfd, EPOLL_CTL_ADD, ev->ident, &epev); rv = -1; break; case EVFILT_SIGNAL: kq_lock(kq); sigaddset(&kq->sigmask, ev->ident); sigfd = signalfd(kq->sigfd, &kq->sigmask, 0); kq_unlock(kq); if (sigfd < 0) { rv = -1; } else { rv = 0; } dbg_printf("added signal %d, rv = %d", (int)ev->ident, rv); break; case EVFILT_TIMER: //TODO rv = -1; break; default: rv = -1; return (-1); } if (rv < 0) { dbg_printf("failed; errno = %s", strerror(errno)); } dbg_printf("done. rv = %d", rv); // if (rv < 0) // free(evcopy); return (rv); } /* Delete an item from the list of events to be monitored */ static int kq_delete(kqueue_t kq, const struct kevent *ev) { int rv = 0; int sigfd; struct epoll_event epev; switch (ev->ident) { case EVFILT_READ: case EVFILT_WRITE: rv = epoll_ctl(kq->epfd, EPOLL_CTL_DEL, ev->ident, &epev); break; case EVFILT_VNODE: //TODO break; case EVFILT_SIGNAL: kq_lock(kq); sigdelset(&kq->sigmask, ev->ident); sigfd = signalfd(kq->sigfd, &kq->sigmask, 0); kq_unlock(kq); if (sigfd < 0) { rv = -1; } else { rv = 0; } break; case EVFILT_TIMER: //TODO break; default: rv = 0; break; } return (rv); } #endif /* defined(USE_EPOLL) */ /* Read a signal from the signalfd */ static inline int _get_signal(struct kevent *dst, kqueue_t kq) { struct knote *kn; struct signalfd_siginfo sig; ssize_t n; n = read(kq->sigfd, &sig, sizeof(sig)); if (n < 0 || n != sizeof(sig)) { abort(); } kn = knote_lookup(kq, EVFILT_SIGNAL, sig.ssi_signo); memcpy(dst, &kn->kev, sizeof(*dst)); return (0); } /* Equivalent to kevent() */ int kq_event(kqueue_t kq, const struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout) { int rv = 0; struct kevent *dst; //struct knote *kn; #if defined(USE_KQUEUE) return kevent(kq->kqfd, changelist, nchanges, eventlist, nevents, timeout); #elif defined(USE_EPOLL) struct epoll_event epev_buf[EPEV_BUF_MAX]; struct epoll_event *epev; size_t epev_wait_max; int i, epev_cnt, eptimeout; /* Process each item on the changelist */ for (i = 0; i < nchanges; i++) { if (changelist[i].flags & EV_ADD) { rv = kq_add(kq, &changelist[i]); } else if (changelist[i].flags & EV_DELETE) { rv = kq_delete(kq, &changelist[i]); } else { rv = -1; } if (rv < 0) return (-1); } /* Convert timeout to the format used by epoll_wait() */ if (timeout == NULL) eptimeout = -1; else eptimeout = (1000 * timeout->tv_sec) + (timeout->tv_nsec / 1000000); /* Wait for events and put them into a buffer */ if (nevents > EPEV_BUF_MAX) { epev_wait_max = EPEV_BUF_MAX; } else { epev_wait_max = nevents; } epev_cnt = epoll_wait(kq->epfd, &epev_buf[0], epev_wait_max, eptimeout); if (epev_cnt < 0) { return (-1); //FIXME: handle timeout } else if (epev_cnt == 0) { dbg_puts("timed out"); } dbg_printf("whee -- got %d event(s)", epev_cnt); /* Determine what events have occurred and copy the result to the caller */ for (i = 0; i < epev_cnt; i++) { dst = &eventlist[i]; epev = &epev_buf[i]; dbg_printf("got event: %s", epoll_event_to_str(epev)); switch (epev->data.u32) { case EVFILT_SIGNAL: (void)_get_signal(dst, kq);//FIXME: errorhandle break; case 
EVFILT_VNODE: //TODO break; case EVFILT_TIMER: //TODO break; case EVFILT_READ: case EVFILT_WRITE: //memcpy(dst, kevp, sizeof(*dst)); break; default: abort(); } } return (rv == 1 ? 0 : -1); #endif } #if defined(USE_EPOLL) static char * epoll_event_to_str(struct epoll_event *evt) { static __thread char buf[128]; if (evt == NULL) return "(null)"; #define EPEVT_DUMP(attrib) \ if (evt->events & attrib) \ strcat(&buf[0], #attrib" "); snprintf(&buf[0], 128, " { data = %p, events = ", evt->data.ptr); EPEVT_DUMP(EPOLLIN); EPEVT_DUMP(EPOLLOUT); #if defined(HAVE_EPOLLRDHUP) EPEVT_DUMP(EPOLLRDHUP); #endif EPEVT_DUMP(EPOLLONESHOT); EPEVT_DUMP(EPOLLET); strcat(&buf[0], "}\n"); return (&buf[0]); #undef EPEVT_DUMP } #endif libkqueue-2.3.1/kqlite/lite.h000066400000000000000000000132531342472035000161060ustar00rootroot00000000000000/*- * Copyright (c) 2013 Mark Heily * Copyright (c) 1999,2000,2001 Jonathan Lemon * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
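 *
 * Illustrative usage sketch, following the pattern in test-lite.c: on
 * Linux a signal must first be blocked with sigprocmask(2) before it can
 * be delivered through EVFILT_SIGNAL (assumes signal.h):
 *
 *   sigset_t mask;
 *   sigemptyset(&mask);
 *   sigaddset(&mask, SIGUSR1);
 *   sigprocmask(SIG_BLOCK, &mask, NULL);
 *
 *   kqueue_t kq = kq_init();
 *   struct kevent kev;
 *   EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD | EV_ENABLE, 0, 0, NULL);
 *   kq_event(kq, &kev, 1, NULL, 0, NULL);    // register
 *   kq_event(kq, NULL, 0, &kev, 1, NULL);    // block until SIGUSR1 fires
 *   kq_free(kq);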
* * $FreeBSD SVN Revision 197533$ */ #ifndef _KQUEUE_LITE_H #define _KQUEUE_LITE_H #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__OpenBSD__) || defined(__NetBSD__) #include #else #include #include #include #define EV_SET(kevp_, a, b, c, d, e, f) do { \ struct kevent *kevp = (kevp_); \ (kevp)->ident = (a); \ (kevp)->filter = (b); \ (kevp)->flags = (c); \ (kevp)->fflags = (d); \ (kevp)->data = (e); \ (kevp)->udata = (f); \ } while(0) struct kevent { uintptr_t ident; /* identifier for this event */ short filter; /* filter for event */ unsigned short flags; unsigned int fflags; intptr_t data; void *udata; /* opaque user data identifier */ }; /* actions */ #define EV_ADD 0x0001 /* add event to kq (implies enable) */ #define EV_DELETE 0x0002 /* delete event from kq */ #define EV_ENABLE 0x0004 /* enable event */ #define EV_DISABLE 0x0008 /* disable event (not reported) */ /* flags */ #define EV_ONESHOT 0x0010 /* only report one occurrence */ #define EV_CLEAR 0x0020 /* clear event state after reporting */ #define EV_RECEIPT 0x0040 /* force EV_ERROR on success, data=0 */ #define EV_DISPATCH 0x0080 /* disable event after reporting */ #define EV_SYSFLAGS 0xF000 /* reserved by system */ #define EV_FLAG1 0x2000 /* filter-specific flag */ /* returned values */ #define EV_EOF 0x8000 /* EOF detected */ #define EV_ERROR 0x4000 /* error, data contains errno */ /* * data/hint flags/masks for EVFILT_USER * * On input, the top two bits of fflags specifies how the lower twenty four * bits should be applied to the stored value of fflags. * * On output, the top two bits will always be set to NOTE_FFNOP and the * remaining twenty four bits will contain the stored fflags value. */ #define NOTE_FFNOP 0x00000000 /* ignore input fflags */ #define NOTE_FFAND 0x40000000 /* AND fflags */ #define NOTE_FFOR 0x80000000 /* OR fflags */ #define NOTE_FFCOPY 0xc0000000 /* copy fflags */ #define NOTE_FFCTRLMASK 0xc0000000 /* masks for operations */ #define NOTE_FFLAGSMASK 0x00ffffff #define NOTE_TRIGGER 0x01000000 /* Cause the event to be triggered for output. */ /* * data/hint flags for EVFILT_{READ|WRITE} */ #define NOTE_LOWAT 0x0001 /* low water mark */ #undef NOTE_LOWAT /* Not supported on Linux */ /* * data/hint flags for EVFILT_VNODE */ #define NOTE_DELETE 0x0001 /* vnode was removed */ #define NOTE_WRITE 0x0002 /* data contents changed */ #define NOTE_EXTEND 0x0004 /* size increased */ #define NOTE_ATTRIB 0x0008 /* attributes changed */ #define NOTE_LINK 0x0010 /* link count changed */ #define NOTE_RENAME 0x0020 /* vnode was renamed */ #define NOTE_REVOKE 0x0040 /* vnode access was revoked */ #undef NOTE_REVOKE /* Not supported on Linux */ /* * data/hint flags for EVFILT_PROC */ #define NOTE_EXIT 0x80000000 /* process exited */ #define NOTE_FORK 0x40000000 /* process forked */ #define NOTE_EXEC 0x20000000 /* process exec'd */ #define NOTE_PCTRLMASK 0xf0000000 /* mask for hint bits */ #define NOTE_PDATAMASK 0x000fffff /* mask for pid */ /* additional flags for EVFILT_PROC */ #define NOTE_TRACK 0x00000001 /* follow across forks */ #define NOTE_TRACKERR 0x00000002 /* could not track child */ #define NOTE_CHILD 0x00000004 /* am a child process */ /* * data/hint flags for EVFILT_NETDEV */ #define NOTE_LINKUP 0x0001 /* link is up */ #define NOTE_LINKDOWN 0x0002 /* link is down */ #define NOTE_LINKINV 0x0004 /* link state is invalid */ /* Linux supports a subset of these filters. 
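 *
 * Unlike the BSD sys/event.h constants, which are small negative integers,
 * the values below are non-negative and are used directly as array indices
 * by kqlite.c (see the kq->knote[] lookup there).  Illustrative bounds
 * check only, assuming assert.h:
 *
 *   assert(kev.filter >= 0 && kev.filter < EVFILT_SYSCOUNT);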
*/ #define EVFILT_READ (0) #define EVFILT_WRITE (1) #define EVFILT_VNODE (2) #define EVFILT_SIGNAL (3) #define EVFILT_TIMER (4) #define EVFILT_SYSCOUNT (5) #endif /* defined(__FreeBSD__) etc.. */ /* kqueue_t - the event descriptor */ typedef struct kqueue *kqueue_t; /* Initialize the event descriptor */ kqueue_t kq_init(); /* Free the event descriptor */ void kq_free(kqueue_t kq); /* Equivalent to kevent() */ int kq_event(kqueue_t kq, const struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); /* Dispatch kevents using multiple threads */ void kq_dispatch(kqueue_t, void (*)(kqueue_t, struct kevent)); #endif /* ! _KQUEUE_LITE_H */ libkqueue-2.3.1/kqlite/test-lite.c000066400000000000000000000043521342472035000170560ustar00rootroot00000000000000/* * Copyright (c) 2013 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "./lite.h" #include #include #include #include #include #include #include void test_evfilt_write(kqueue_t kq) { struct kevent kev; int sockfd[2]; puts("testing EVFILT_WRITE.. "); if (socketpair(AF_UNIX, SOCK_STREAM, 0, sockfd) < 0) abort(); EV_SET(&kev, sockfd[1], EVFILT_WRITE, EV_ADD | EV_ENABLE, 0, 0, NULL); kq_event(kq, &kev, 1, 0, 0, NULL); puts("installed EVFILT_WRITE handler"); if (write(sockfd[0], "hi", 2) < 2) abort(); /* wait for the event */ puts("waiting for event"); kq_event(kq, NULL, 0, &kev, 1, NULL); puts ("got it"); close(sockfd[0]); close(sockfd[1]); } void test_evfilt_signal(kqueue_t kq) { struct kevent kev; sigset_t mask; /* Block the normal signal handler mechanism */ sigemptyset(&mask); sigaddset(&mask, SIGUSR1); if (sigprocmask(SIG_BLOCK, &mask, NULL) == -1) abort(); EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD | EV_ENABLE, 0, 0, NULL); kq_event(kq, &kev, 1, 0, 0, NULL); puts("installed SIGUSR1 handler"); if (kill(getpid(), SIGUSR1) < 0) abort(); /* wait for the event */ puts("waiting for SIGUSR1"); kq_event(kq, NULL, 0, &kev, 1, NULL); puts ("got it"); } int main() { kqueue_t kq; kq = kq_init(); test_evfilt_signal(kq); test_evfilt_write(kq); kq_free(kq); puts("ok"); exit(0); } libkqueue-2.3.1/kqlite/utarray.h000066400000000000000000000303751342472035000166440ustar00rootroot00000000000000/* Copyright (c) 2008-2013, Troy D. Hanson http://troydhanson.github.com/uthash/ All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* a dynamic array implementation using macros */ #ifndef UTARRAY_H #define UTARRAY_H #define UTARRAY_VERSION 1.9.8 #ifdef __GNUC__ #define _UNUSED_ __attribute__ ((__unused__)) #else #define _UNUSED_ #endif #include /* size_t */ #include /* memset, etc */ #include /* exit */ #define oom() exit(-1) typedef void (ctor_f)(void *dst, const void *src); typedef void (dtor_f)(void *elt); typedef void (init_f)(void *elt); typedef struct { size_t sz; init_f *init; ctor_f *copy; dtor_f *dtor; } UT_icd; typedef struct { unsigned i,n;/* i: index of next available slot, n: num slots */ UT_icd icd; /* initializer, copy and destructor functions */ char *d; /* n slots of size icd->sz*/ } UT_array; #define utarray_init(a,_icd) do { \ memset(a,0,sizeof(UT_array)); \ (a)->icd=*_icd; \ } while(0) #define utarray_done(a) do { \ if ((a)->n) { \ if ((a)->icd.dtor) { \ size_t _ut_i; \ for(_ut_i=0; _ut_i < (a)->i; _ut_i++) { \ (a)->icd.dtor(utarray_eltptr(a,_ut_i)); \ } \ } \ free((a)->d); \ } \ (a)->n=0; \ } while(0) #define utarray_new(a,_icd) do { \ a=(UT_array*)malloc(sizeof(UT_array)); \ utarray_init(a,_icd); \ } while(0) #define utarray_free(a) do { \ utarray_done(a); \ free(a); \ } while(0) #define utarray_reserve(a,by) do { \ if (((a)->i+by) > ((a)->n)) { \ while(((a)->i+by) > ((a)->n)) { (a)->n = ((a)->n ? (2*(a)->n) : 8); } \ if ( ((a)->d=(char*)realloc((a)->d, (a)->n*(a)->icd.sz)) == NULL) oom(); \ } \ } while(0) #define utarray_push_back(a,p) do { \ utarray_reserve(a,1); \ if ((a)->icd.copy) { (a)->icd.copy( _utarray_eltptr(a,(a)->i++), p); } \ else { memcpy(_utarray_eltptr(a,(a)->i++), p, (a)->icd.sz); }; \ } while(0) #define utarray_pop_back(a) do { \ if ((a)->icd.dtor) { (a)->icd.dtor( _utarray_eltptr(a,--((a)->i))); } \ else { (a)->i--; } \ } while(0) #define utarray_extend_back(a) do { \ utarray_reserve(a,1); \ if ((a)->icd.init) { (a)->icd.init(_utarray_eltptr(a,(a)->i)); } \ else { memset(_utarray_eltptr(a,(a)->i),0,(a)->icd.sz); } \ (a)->i++; \ } while(0) #define utarray_len(a) ((a)->i) #define utarray_eltptr(a,j) (((j) < (a)->i) ? 
_utarray_eltptr(a,j) : NULL) #define _utarray_eltptr(a,j) ((char*)((a)->d + ((a)->icd.sz*(j) ))) #define utarray_insert(a,p,j) do { \ if (j > (a)->i) utarray_resize(a,j); \ utarray_reserve(a,1); \ if ((j) < (a)->i) { \ memmove( _utarray_eltptr(a,(j)+1), _utarray_eltptr(a,j), \ ((a)->i - (j))*((a)->icd.sz)); \ } \ if ((a)->icd.copy) { (a)->icd.copy( _utarray_eltptr(a,j), p); } \ else { memcpy(_utarray_eltptr(a,j), p, (a)->icd.sz); }; \ (a)->i++; \ } while(0) #define utarray_inserta(a,w,j) do { \ if (utarray_len(w) == 0) break; \ if (j > (a)->i) utarray_resize(a,j); \ utarray_reserve(a,utarray_len(w)); \ if ((j) < (a)->i) { \ memmove(_utarray_eltptr(a,(j)+utarray_len(w)), \ _utarray_eltptr(a,j), \ ((a)->i - (j))*((a)->icd.sz)); \ } \ if ((a)->icd.copy) { \ size_t _ut_i; \ for(_ut_i=0;_ut_i<(w)->i;_ut_i++) { \ (a)->icd.copy(_utarray_eltptr(a,j+_ut_i), _utarray_eltptr(w,_ut_i)); \ } \ } else { \ memcpy(_utarray_eltptr(a,j), _utarray_eltptr(w,0), \ utarray_len(w)*((a)->icd.sz)); \ } \ (a)->i += utarray_len(w); \ } while(0) #define utarray_resize(dst,num) do { \ size_t _ut_i; \ if (dst->i > (size_t)(num)) { \ if ((dst)->icd.dtor) { \ for(_ut_i=num; _ut_i < dst->i; _ut_i++) { \ (dst)->icd.dtor(utarray_eltptr(dst,_ut_i)); \ } \ } \ } else if (dst->i < (size_t)(num)) { \ utarray_reserve(dst,num-dst->i); \ if ((dst)->icd.init) { \ for(_ut_i=dst->i; _ut_i < num; _ut_i++) { \ (dst)->icd.init(utarray_eltptr(dst,_ut_i)); \ } \ } else { \ memset(_utarray_eltptr(dst,dst->i),0,(dst)->icd.sz*(num-dst->i)); \ } \ } \ dst->i = num; \ } while(0) #define utarray_concat(dst,src) do { \ utarray_inserta((dst),(src),utarray_len(dst)); \ } while(0) #define utarray_erase(a,pos,len) do { \ if ((a)->icd.dtor) { \ size_t _ut_i; \ for(_ut_i=0; _ut_i < len; _ut_i++) { \ (a)->icd.dtor(utarray_eltptr((a),pos+_ut_i)); \ } \ } \ if ((a)->i > (pos+len)) { \ memmove( _utarray_eltptr((a),pos), _utarray_eltptr((a),pos+len), \ (((a)->i)-(pos+len))*((a)->icd.sz)); \ } \ (a)->i -= (len); \ } while(0) #define utarray_renew(a,u) do { \ if (a) utarray_clear(a); \ else utarray_new((a),(u)); \ } while(0) #define utarray_clear(a) do { \ if ((a)->i > 0) { \ if ((a)->icd.dtor) { \ size_t _ut_i; \ for(_ut_i=0; _ut_i < (a)->i; _ut_i++) { \ (a)->icd.dtor(utarray_eltptr(a,_ut_i)); \ } \ } \ (a)->i = 0; \ } \ } while(0) #define utarray_sort(a,cmp) do { \ qsort((a)->d, (a)->i, (a)->icd.sz, cmp); \ } while(0) #define utarray_find(a,v,cmp) bsearch((v),(a)->d,(a)->i,(a)->icd.sz,cmp) #define utarray_front(a) (((a)->i) ? (_utarray_eltptr(a,0)) : NULL) #define utarray_next(a,e) (((e)==NULL) ? utarray_front(a) : ((((a)->i) > (utarray_eltidx(a,e)+1)) ? _utarray_eltptr(a,utarray_eltidx(a,e)+1) : NULL)) #define utarray_prev(a,e) (((e)==NULL) ? utarray_back(a) : ((utarray_eltidx(a,e) > 0) ? _utarray_eltptr(a,utarray_eltidx(a,e)-1) : NULL)) #define utarray_back(a) (((a)->i) ? (_utarray_eltptr(a,(a)->i-1)) : NULL) #define utarray_eltidx(a,e) (((char*)(e) >= (char*)((a)->d)) ? (((char*)(e) - (char*)((a)->d))/(ssize_t)(a)->icd.sz) : -1) /* last we pre-define a few icd for common utarrays of ints and strings */ static void utarray_str_cpy(void *dst, const void *src) { char **_src = (char**)src, **_dst = (char**)dst; *_dst = (*_src == NULL) ? 
NULL : strdup(*_src); } static void utarray_str_dtor(void *elt) { char **eltc = (char**)elt; if (*eltc) free(*eltc); } static const UT_icd ut_str_icd _UNUSED_ = {sizeof(char*),NULL,utarray_str_cpy,utarray_str_dtor}; static const UT_icd ut_int_icd _UNUSED_ = {sizeof(int),NULL,NULL,NULL}; static const UT_icd ut_ptr_icd _UNUSED_ = {sizeof(void*),NULL,NULL,NULL}; #endif /* UTARRAY_H */ libkqueue-2.3.1/kqueue.2000066400000000000000000000337411342472035000150750ustar00rootroot00000000000000.\" $FreeBSD: Revision: 197243$ .\" Copyright (c) 2010 Mark Heily .\" Copyright (c) 2000 Jonathan Lemon .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" $FreeBSD$ .\" .Dd September 17, 2010 .Dt KQUEUE 2 .Os .Sh NAME .Nm kqueue , .Nm kevent .Nd kernel event notification mechanism .Sh SYNOPSIS .In sys/types.h .In sys/event.h .In sys/time.h .Ft int .Fn kqueue "void" .Ft int .Fn kevent "int kq" "const struct kevent *changelist" "int nchanges" "struct kevent *eventlist" "int nevents" "const struct timespec *timeout" .Fn EV_SET "&kev" ident filter flags fflags data udata .Sh DESCRIPTION The .Fn kqueue system call provides a generic method of notifying the user when an event happens or a condition holds, based on the results of small pieces of kernel code termed filters. A kevent is identified by the (ident, filter) pair; there may only be one unique kevent per kqueue. .Pp The filter is executed upon the initial registration of a kevent in order to detect whether a preexisting condition is present, and is also executed whenever an event is passed to the filter for evaluation. If the filter determines that the condition should be reported, then the kevent is placed on the kqueue for the user to retrieve. .Pp The filter is also run when the user attempts to retrieve the kevent from the kqueue. If the filter indicates that the condition that triggered the event no longer holds, the kevent is removed from the kqueue and is not returned. .Pp Multiple events which trigger the filter do not result in multiple kevents being placed on the kqueue; instead, the filter will aggregate the events into a single struct kevent. Calling .Fn close on a file descriptor will remove any kevents that reference the descriptor. 
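.Pp
The following sketch is illustrative only;
.Va sock_fd
is assumed to be an existing connected socket and errors are reported with
.Xr err 3 .
It registers a read filter and then waits for it to fire:
.Bd -literal
struct kevent change, event;
int kq, n;

if ((kq = kqueue()) == -1)
        err(1, "kqueue");
EV_SET(&change, sock_fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
if (kevent(kq, &change, 1, NULL, 0, NULL) == -1)
        err(1, "kevent: register");
for (;;) {
        n = kevent(kq, NULL, 0, &event, 1, NULL);
        if (n == -1)
                err(1, "kevent: wait");
        if (n > 0)
                break;          /* data is ready to read on sock_fd */
}
.Ed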
.Pp The .Fn kqueue system call creates a new kernel event queue and returns a descriptor. The queue is not inherited by a child created with .Xr fork 2 . However, if .Xr rfork 2 is called without the .Dv RFFDG flag, then the descriptor table is shared, which will allow sharing of the kqueue between two processes. .Pp The .Fn kevent system call is used to register events with the queue, and return any pending events to the user. The .Fa changelist argument is a pointer to an array of .Va kevent structures, as defined in .In sys/event.h . All changes contained in the .Fa changelist are applied before any pending events are read from the queue. The .Fa nchanges argument gives the size of .Fa changelist . The .Fa eventlist argument is a pointer to an array of kevent structures. The .Fa nevents argument determines the size of .Fa eventlist . When .Fa nevents is zero, .Fn kevent will return immediately even if there is a .Fa timeout specified unlike .Xr select 2 . If .Fa timeout is a non-NULL pointer, it specifies a maximum interval to wait for an event, which will be interpreted as a struct timespec. If .Fa timeout is a NULL pointer, .Fn kevent waits indefinitely. To effect a poll, the .Fa timeout argument should be non-NULL, pointing to a zero-valued .Va timespec structure. The same array may be used for the .Fa changelist and .Fa eventlist . .Pp The .Fn EV_SET macro is provided for ease of initializing a kevent structure. .Pp The .Va kevent structure is defined as: .Bd -literal struct kevent { uintptr_t ident; /* identifier for this event */ short filter; /* filter for event */ u_short flags; /* action flags for kqueue */ u_int fflags; /* filter flag value */ intptr_t data; /* filter data value */ void *udata; /* opaque user data identifier */ }; .Ed .Pp The fields of .Fa struct kevent are: .Bl -tag -width XXXfilter .It ident Value used to identify this event. The exact interpretation is determined by the attached filter, but often is a file descriptor. .It filter Identifies the kernel filter used to process this event. The pre-defined system filters are described below. .It flags Actions to perform on the event. .It fflags Filter-specific flags. .It data Filter-specific data value. .It udata Opaque user-defined value passed through the kernel unchanged. .El .Pp The .Va flags field can contain the following values: .Bl -tag -width XXXEV_ONESHOT .It EV_ADD Adds the event to the kqueue. Re-adding an existing event will modify the parameters of the original event, and not result in a duplicate entry. Adding an event automatically enables it, unless overridden by the EV_DISABLE flag. .It EV_ENABLE Permit .Fn kevent to return the event if it is triggered. .It EV_DISABLE Disable the event so .Fn kevent will not return it. The filter itself is not disabled. .It EV_DISPATCH Disable the event source immediately after delivery of an event. See .Dv EV_DISABLE above. .It EV_DELETE Removes the event from the kqueue. Events which are attached to file descriptors are automatically deleted on the last close of the descriptor. .It EV_RECEIPT This flag is useful for making bulk changes to a kqueue without draining any pending events. When passed as input, it forces .Dv EV_ERROR to always be returned. When a filter is successfully added the .Va data field will be zero. .It EV_ONESHOT Causes the event to return only the first occurrence of the filter being triggered. After the user retrieves the event from the kqueue, it is deleted. .It EV_CLEAR After the event is retrieved by the user, its state is reset. 
This is useful for filters which report state transitions instead of the current state. Note that some filters may automatically set this flag internally. .It EV_EOF Filters may set this flag to indicate filter-specific EOF condition. .It EV_ERROR See .Sx RETURN VALUES below. .El .Pp The predefined system filters are listed below. Arguments may be passed to and from the filter via the .Va fflags and .Va data fields in the kevent structure. .Bl -tag -width EVFILT_SIGNAL .It EVFILT_READ Takes a descriptor as the identifier, and returns whenever there is data available to read. The behavior of the filter is slightly different depending on the descriptor type. .Pp .Bl -tag -width 2n .It Sockets Sockets which have previously been passed to .Fn listen return when there is an incoming connection pending. .Va data contains the size of the listen backlog. .Pp Other socket descriptors return when there is data to be read, subject to the .Dv SO_RCVLOWAT value of the socket buffer. This may be overridden with a per-filter low water mark at the time the filter is added by setting the NOTE_LOWAT flag in .Va fflags , and specifying the new low water mark in .Va data . On return, .Va data contains the number of bytes of protocol data available to read. .Pp If the read direction of the socket has shutdown, then the filter also sets EV_EOF in .Va flags , and returns the socket error (if any) in .Va fflags . It is possible for EOF to be returned (indicating the connection is gone) while there is still data pending in the socket buffer. .It Vnodes Returns when the file pointer is not at the end of file. .Va data contains the offset from current position to end of file, and may be negative. .It "Fifos, Pipes" Returns when the there is data to read; .Va data contains the number of bytes available. .Pp When the last writer disconnects, the filter will set EV_EOF in .Va flags . This may be cleared by passing in EV_CLEAR, at which point the filter will resume waiting for data to become available before returning. .It "BPF devices" Returns when the BPF buffer is full, the BPF timeout has expired, or when the BPF has .Dq immediate mode enabled and there is any data to read; .Va data contains the number of bytes available. .El .It EVFILT_WRITE Takes a descriptor as the identifier, and returns whenever it is possible to write to the descriptor. For sockets, pipes and fifos, .Va data will contain the amount of space remaining in the write buffer. The filter will set EV_EOF when the reader disconnects, and for the fifo case, this may be cleared by use of EV_CLEAR. Note that this filter is not supported for vnodes or BPF devices. .Pp For sockets, the low water mark and socket error handling is identical to the EVFILT_READ case. .It EVFILT_VNODE Takes a file descriptor as the identifier and the events to watch for in .Va fflags , and returns when one or more of the requested events occurs on the descriptor. The events to monitor are: .Bl -tag -width XXNOTE_RENAME .It NOTE_DELETE The .Fn unlink system call was called on the file referenced by the descriptor. .It NOTE_WRITE A write occurred on the file referenced by the descriptor. .It NOTE_EXTEND The file referenced by the descriptor was extended. .It NOTE_ATTRIB The file referenced by the descriptor had its attributes changed. .It NOTE_LINK The link count on the file changed. .It NOTE_RENAME The file referenced by the descriptor was renamed. .El .Pp On return, .Va fflags contains the events which triggered the filter. 
.It EVFILT_SIGNAL Takes the signal number to monitor as the identifier and returns when the given signal is delivered to the process. This overrides the .Fn signal and .Fn sigaction facilities, and has a higher precedence. The filter will record all attempts to deliver a signal to a process, even if the signal has been marked as SIG_IGN. .Va data returns the number of times the signal has occurred since the last call to .Fn kevent . This filter automatically sets the EV_CLEAR flag internally. .It EVFILT_TIMER Establishes an arbitrary timer identified by .Va ident . When adding a timer, .Va data specifies the timeout period and .Va fflags can be set to one of the following: .Bl -tag -width XXNOTE_RENAME .It NOTE_SECONDS data is in seconds .It NOTE_USECONDS data is in microseconds .It NOTE_NSECONDS data is in nanoseconds .It NOTE_ABSOLUTE data is an absolute timeout .El .Pp If fflags is not set, the default is milliseconds. The timer will be periodic unless EV_ONESHOT is specified. On return, .Va data contains the number of times the timeout has expired since the last call to .Fn kevent . This filter automatically sets the EV_CLEAR flag internally. There is a system wide limit on the number of timers which is controlled by the .Va kern.kq_calloutmax sysctl. .It Dv EVFILT_USER Establishes a user event identified by .Va ident which is not assosicated with any kernel mechanism but is triggered by user level code. The lower 24 bits of the .Va fflags may be used for user defined flags and manipulated using the following: .Bl -tag -width XXNOTE_FFLAGSMASK .It Dv NOTE_FFNOP Ignore the input .Va fflags . .It Dv NOTE_FFAND Bitwise AND .Va fflags . .It Dv NOTE_FFOR Bitwise OR .Va fflags . .It Dv NOTE_COPY Copy .Va fflags . .It Dv NOTE_FFCTRLMASK Control mask for .Va fflags . .It Dv NOTE_FFLAGSMASK User defined flag mask for .Va fflags . .El .Pp A user event is triggered for output with the following: .Bl -tag -width XXNOTE_FFLAGSMASK .It Dv NOTE_TRIGGER Cause the event to be triggered. .El .Pp On return, .Va fflags contains the users defined flags in the lower 24 bits. .El .Sh RETURN VALUES The .Fn kqueue system call creates a new kernel event queue and returns a file descriptor. If there was an error creating the kernel event queue, a value of -1 is returned and errno set. .Pp The .Fn kevent system call returns the number of events placed in the .Fa eventlist , up to the value given by .Fa nevents . If an error occurs while processing an element of the .Fa changelist and there is enough room in the .Fa eventlist , then the event will be placed in the .Fa eventlist with .Dv EV_ERROR set in .Va flags and the system error in .Va data . Otherwise, .Dv -1 will be returned, and .Dv errno will be set to indicate the error condition. If the time limit expires, then .Fn kevent returns 0. .Sh ERRORS The .Fn kqueue system call fails if: .Bl -tag -width Er .It Bq Er ENOMEM The kernel failed to allocate enough memory for the kernel queue. .It Bq Er EMFILE The per-process descriptor table is full. .It Bq Er ENFILE The system file table is full. .El .Pp The .Fn kevent system call fails if: .Bl -tag -width Er .It Bq Er EACCES The process does not have permission to register a filter. .It Bq Er EFAULT There was an error reading or writing the .Va kevent structure. .It Bq Er EBADF The specified descriptor is invalid. .It Bq Er EINTR A signal was delivered before the timeout expired and before any events were placed on the kqueue for return. .It Bq Er EINVAL The specified time limit or filter is invalid. 
.It Bq Er ENOENT The event could not be found to be modified or deleted. .It Bq Er ENOMEM No memory was available to register the event or, in the special case of a timer, the maximum number of timers has been exceeded. This maximum is configurable via the .Va kern.kq_calloutmax sysctl. .It Bq Er ESRCH The specified process to attach to does not exist. .El .Sh SEE ALSO .Xr aio_error 2 , .Xr aio_read 2 , .Xr aio_return 2 , .Xr poll 2 , .Xr read 2 , .Xr select 2 , .Xr sigaction 2 , .Xr write 2 , .Xr signal 3 .Sh HISTORY The .Fn kqueue and .Fn kevent system calls first appeared in .Fx 4.1 . .Sh AUTHORS The .Fn kqueue system and this manual page were written by .An Jonathan Lemon Aq jlemon@FreeBSD.org . libkqueue-2.3.1/libkqueue.pc.in000066400000000000000000000005141342472035000164220ustar00rootroot00000000000000prefix=@CMAKE_INSTALL_PREFIX@ exec_prefix=@CMAKE_INSTALL_PREFIX@ libdir=@CMAKE_INSTALL_FULL_LIBDIR@ includedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@ Name: libkqueue Description: Emulates FreeBSD kqueue(2) on other platforms Version: @PROJECT_VERSION@ Requires: Libs: -lkqueue Libs.private: -lpthread -lrt Cflags: -I${includedir}/kqueue libkqueue-2.3.1/src/000077500000000000000000000000001342472035000142725ustar00rootroot00000000000000libkqueue-2.3.1/src/common/000077500000000000000000000000001342472035000155625ustar00rootroot00000000000000libkqueue-2.3.1/src/common/alloc.h000066400000000000000000000047141342472035000170330ustar00rootroot00000000000000/* * Copyright (c) 2011 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* * A simple fixed-size memory allocator. * * Each translation unit in a program can include this header and * have access to it's own private memory allocator. This can be useful * for improving the performance of programs which frequently allocate * and deallocate objects with a fixed size. * * The allocator must be initialized by calling mem_init(). This * function takes two arguments: the object size, and the maximum * number of objects in the cache. * * The functions mem_alloc() and mem_free() have similar semantics * to the traditional malloc() and free() calls. The main difference * is that mem_alloc() does not allow you to specify a specific size. * */ #include #ifndef _WIN32 # include #endif static __thread struct { void **ac_cache; /* An array of reusable memory objects */ size_t ac_count; /* The number of objects in the cache */ size_t ac_max; /* The maximum number of cached objects */ size_t ac_size; /* The size, in bytes, of each object */ } _ma; static inline int mem_init(size_t objsize, size_t cachesize) { _ma.ac_size = objsize; _ma.ac_cache = malloc(cachesize * sizeof(void *)); return (_ma.ac_cache == NULL ? 
-1 : 0); } static inline void * mem_alloc(void) { if (_ma.ac_count > 0) return (_ma.ac_cache[_ma.ac_count--]); else return (malloc(_ma.ac_size)); } static inline void * mem_calloc(void) { void *p; p = mem_alloc(); if (p != NULL) memset(p, 0, _ma.ac_size); return (p); } static inline void mem_free(void *ptr) { if (_ma.ac_count < _ma.ac_max) _ma.ac_cache[_ma.ac_count++] = ptr; else free(ptr); } libkqueue-2.3.1/src/common/debug.h000066400000000000000000000122311342472035000170200ustar00rootroot00000000000000/* * Copyright (c) 2011 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _DEBUG_H #define _DEBUG_H #include #include #ifdef _WIN32 # include #else # include #endif extern int DEBUG_KQUEUE; extern char *KQUEUE_DEBUG_IDENT; #if defined(__linux__) # include # define THREAD_ID ((pid_t) syscall(__NR_gettid)) #elif defined(__sun) # define THREAD_ID ((int) pthread_self()) #elif defined(_WIN32) # define THREAD_ID (int)(GetCurrentThreadId()) #else # error Unsupported platform #endif #ifndef NDEBUG #define dbg_puts(str) do { \ if (DEBUG_KQUEUE) \ fprintf(stderr, "%s [%d]: %s(): %s\n", \ KQUEUE_DEBUG_IDENT, THREAD_ID, __func__, str); \ } while (0) #define dbg_printf(fmt,...) do { \ if (DEBUG_KQUEUE) \ fprintf(stderr, "%s [%d]: %s(): "fmt"\n", \ KQUEUE_DEBUG_IDENT, THREAD_ID, __func__, __VA_ARGS__); \ } while (0) #define dbg_perror(str) do { \ if (DEBUG_KQUEUE) \ fprintf(stderr, "%s [%d]: %s(): %s: %s (errno=%d)\n", \ KQUEUE_DEBUG_IDENT, THREAD_ID, __func__, str, \ strerror(errno), errno); \ } while (0) # define reset_errno() do { errno = 0; } while (0) # if defined(_WIN32) # define dbg_lasterror(str) do { \ if (DEBUG_KQUEUE) \ fprintf(stderr, "%s: [%d] %s(): %s: (LastError=%d)\n", \ KQUEUE_DEBUG_IDENT, THREAD_ID, __func__, str, (int)GetLastError()); \ } while (0) # define dbg_wsalasterror(str) do { \ if (DEBUG_KQUEUE) \ fprintf(stderr, "%s: [%d] %s(): %s: (WSALastError=%d)\n", \ KQUEUE_DEBUG_IDENT, THREAD_ID, __func__, str, (int)WSAGetLastError()); \ } while (0) # else # define dbg_lasterror(str) ; # define dbg_wsalasterror(str) ; # endif /* * Tracing mutexes are a thin wrapper around the pthread_mutex_t * datatype that tracks and reports when a mutex is locked or unlocked. * It also allows you to assert that a mutex has (or has not) been locked * by calling tracing_mutex_assert(). 
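 *
 * Illustrative usage sketch:
 *
 *   tracing_mutex_t mtx;
 *
 *   tracing_mutex_init(&mtx, NULL);
 *   tracing_mutex_lock(&mtx);
 *   tracing_mutex_assert(&mtx, MTX_LOCKED);   // fails an assert() in debug
 *                                             // builds if not held by us
 *   tracing_mutex_unlock(&mtx);
 *   tracing_mutex_assert(&mtx, MTX_UNLOCKED);
 *   tracing_mutex_destroy(&mtx);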
*/ # define MTX_UNLOCKED 0 # define MTX_LOCKED 1 typedef struct { pthread_mutex_t mtx_lock; int mtx_status; int mtx_owner; } tracing_mutex_t; # define tracing_mutex_init(mtx, attr) do { \ pthread_mutex_init(&(mtx)->mtx_lock, (attr)); \ (mtx)->mtx_status = MTX_UNLOCKED; \ (mtx)->mtx_owner = -1; \ } while (0) # define tracing_mutex_destroy(mtx) pthread_mutex_destroy(&(mtx)->mtx_lock) # define tracing_mutex_assert(x,y) do { \ if ((y) == MTX_UNLOCKED) \ assert((x)->mtx_status == MTX_UNLOCKED || (x)->mtx_owner != THREAD_ID); \ else if ((y) == MTX_LOCKED) \ assert((x)->mtx_status == MTX_LOCKED && (x)->mtx_owner == THREAD_ID); \ else \ abort(); \ } while (0) # define tracing_mutex_lock(x) do { \ dbg_printf("waiting for %s", #x); \ pthread_mutex_lock(&((x)->mtx_lock)); \ dbg_printf("locked %s", #x); \ (x)->mtx_owner = THREAD_ID; \ (x)->mtx_status = MTX_LOCKED; \ } while (0) # define tracing_mutex_unlock(x) do { \ (x)->mtx_status = MTX_UNLOCKED; \ (x)->mtx_owner = -1; \ pthread_mutex_unlock(&((x)->mtx_lock)); \ dbg_printf("unlocked %s", # x); \ } while (0) #else /* NDEBUG */ # define dbg_puts(str) do {} while (0) # define dbg_printf(fmt,...) do {} while (0) # define dbg_perror(str) do {} while (0) # define dbg_lasterror(str) do {} while (0) # define dbg_wsalasterror(str) do {} while (0) # define reset_errno() do {} while (0) # define MTX_UNLOCKED # define MTX_LOCKED # define tracing_mutex_t pthread_mutex_t # define tracing_mutex_init pthread_mutex_init # define tracing_mutex_destroy pthread_mutex_destroy # define tracing_mutex_assert(x,y) do {} while (0) # define tracing_mutex_lock pthread_mutex_lock # define tracing_mutex_unlock pthread_mutex_unlock #endif #endif /* ! _DEBUG_H */ libkqueue-2.3.1/src/common/filter.c000066400000000000000000000113101342472035000172070ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
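 *
 * Note on the index math used below: the EVFILT_* constants from
 * sys/event.h are small negative integers (EVFILT_READ == -1), so both
 * (-1 * filter) - 1 and ~filter map a filter to a zero-based slot in
 * kq->kq_filt[].  Illustrative sketch only:
 *
 *   short filter = EVFILT_READ;     // -1
 *   unsigned int slot = ~filter;    // 0, the first entry of kq_filt[]
 *   assert(slot < EVFILT_SYSCOUNT);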
*/ #include #include #include #include #include #include #include "private.h" extern const struct filter evfilt_read; extern const struct filter evfilt_write; extern const struct filter evfilt_signal; extern const struct filter evfilt_vnode; extern const struct filter evfilt_proc; extern const struct filter evfilt_timer; extern const struct filter evfilt_user; static int filter_register(struct kqueue *kq, short filter, const struct filter *src) { struct filter *dst; unsigned int filt; int rv = 0; filt = (-1 * filter) - 1; if (filt >= EVFILT_SYSCOUNT) return (-1); dst = &kq->kq_filt[filt]; memcpy(dst, src, sizeof(*src)); dst->kf_kqueue = kq; RB_INIT(&dst->kf_knote); pthread_rwlock_init(&dst->kf_knote_mtx, NULL); if (src->kf_id == 0) { dbg_puts("filter is not implemented"); return (0); } assert(src->kf_copyout); assert(src->kn_create); assert(src->kn_modify); assert(src->kn_delete); assert(src->kn_enable); assert(src->kn_disable); /* Perform (optional) per-filter initialization */ if (src->kf_init != NULL) { rv = src->kf_init(dst); if (rv < 0) { dbg_puts("filter failed to initialize"); dst->kf_id = 0; return (-1); } } #if DEADWOOD /* Add the filter's event descriptor to the main fdset */ if (dst->kf_pfd > 0) { FD_SET(dst->kf_pfd, &kq->kq_fds); if (dst->kf_pfd > kq->kq_nfds) kq->kq_nfds = dst->kf_pfd; dbg_printf("fds: added %d (nfds=%d)", dst->kf_pfd, kq->kq_nfds); } dbg_printf("filter %d (%s) registered", filter, filter_name(filter)); #endif /* FIXME: should totally remove const from src */ if (kqops.filter_init != NULL && kqops.filter_init(kq, dst) < 0) return (-1); return (0); } int filter_register_all(struct kqueue *kq) { int rv; FD_ZERO(&kq->kq_fds); rv = 0; rv += filter_register(kq, EVFILT_READ, &evfilt_read); rv += filter_register(kq, EVFILT_WRITE, &evfilt_write); rv += filter_register(kq, EVFILT_SIGNAL, &evfilt_signal); rv += filter_register(kq, EVFILT_VNODE, &evfilt_vnode); rv += filter_register(kq, EVFILT_PROC, &evfilt_proc); rv += filter_register(kq, EVFILT_TIMER, &evfilt_timer); rv += filter_register(kq, EVFILT_USER, &evfilt_user); kq->kq_nfds++; if (rv != 0) { filter_unregister_all(kq); return (-1); } else { dbg_puts("complete"); return (0); } } void filter_unregister_all(struct kqueue *kq) { int i; for (i = 0; i < EVFILT_SYSCOUNT; i++) { if (kq->kq_filt[i].kf_id == 0) continue; if (kq->kq_filt[i].kf_destroy != NULL) kq->kq_filt[i].kf_destroy(&kq->kq_filt[i]); knote_free_all(&kq->kq_filt[i]); if (kqops.filter_free != NULL) kqops.filter_free(kq, &kq->kq_filt[i]); } memset(&kq->kq_filt[0], 0, sizeof(kq->kq_filt)); } int filter_lookup(struct filter **filt, struct kqueue *kq, short id) { if (~id < 0 || ~id >= EVFILT_SYSCOUNT) { dbg_printf("invalid id: id %d ~id %d", id, (~id)); errno = EINVAL; *filt = NULL; return (-1); } *filt = &kq->kq_filt[~id]; if ((*filt)->kf_copyout == NULL) { dbg_printf("filter %s is not implemented", filter_name(id)); errno = ENOSYS; *filt = NULL; return (-1); } return (0); } const char * filter_name(short filt) { int id; const char *fname[EVFILT_SYSCOUNT] = { "EVFILT_READ", "EVFILT_WRITE", "EVFILT_AIO", "EVFILT_VNODE", "EVFILT_PROC", "EVFILT_SIGNAL", "EVFILT_TIMER", "EVFILT_NETDEV", "EVFILT_FS", "EVFILT_LIO", "EVFILT_USER" }; id = ~filt; if (id < 0 || id >= EVFILT_SYSCOUNT) return "EVFILT_INVALID"; else return fname[id]; } libkqueue-2.3.1/src/common/kevent.c000066400000000000000000000203731342472035000172270ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with 
or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* To get asprintf(3) */ #define _GNU_SOURCE #include #include #include #include #include #include #include #include #include "private.h" static const char * kevent_filter_dump(const struct kevent *kev) { static __thread char buf[64]; snprintf(&buf[0], sizeof(buf), "%d (%s)", kev->filter, filter_name(kev->filter)); return ((const char *) &buf[0]); } static const char * kevent_fflags_dump(const struct kevent *kev) { static __thread char buf[1024]; #define KEVFFL_DUMP(attrib) \ if (kev->fflags & attrib) \ strncat((char *) &buf[0], #attrib" ", 64); snprintf(buf, sizeof(buf), "fflags=0x%04x (", kev->fflags); if (kev->filter == EVFILT_VNODE) { KEVFFL_DUMP(NOTE_DELETE); KEVFFL_DUMP(NOTE_WRITE); KEVFFL_DUMP(NOTE_EXTEND); KEVFFL_DUMP(NOTE_ATTRIB); KEVFFL_DUMP(NOTE_LINK); KEVFFL_DUMP(NOTE_RENAME); } else if (kev->filter == EVFILT_USER) { KEVFFL_DUMP(NOTE_FFNOP); KEVFFL_DUMP(NOTE_FFAND); KEVFFL_DUMP(NOTE_FFOR); KEVFFL_DUMP(NOTE_FFCOPY); KEVFFL_DUMP(NOTE_TRIGGER); } else { buf[0] = ' '; } buf[strlen(buf) - 1] = ')'; #undef KEVFFL_DUMP return ((const char *) &buf[0]); } static const char * kevent_flags_dump(const struct kevent *kev) { static __thread char buf[1024]; #define KEVFL_DUMP(attrib) \ if (kev->flags & attrib) \ strncat((char *) &buf[0], #attrib" ", 64); snprintf(buf, sizeof(buf), "flags=0x%04x (", kev->flags); KEVFL_DUMP(EV_ADD); KEVFL_DUMP(EV_ENABLE); KEVFL_DUMP(EV_DISABLE); KEVFL_DUMP(EV_DELETE); KEVFL_DUMP(EV_ONESHOT); KEVFL_DUMP(EV_CLEAR); KEVFL_DUMP(EV_EOF); KEVFL_DUMP(EV_ERROR); KEVFL_DUMP(EV_DISPATCH); KEVFL_DUMP(EV_RECEIPT); buf[strlen(buf) - 1] = ')'; #undef KEVFL_DUMP return ((const char *) &buf[0]); } const char * kevent_dump(const struct kevent *kev) { static __thread char buf[2147]; snprintf((char *) &buf[0], sizeof(buf), "{ ident=%d, filter=%s, %s, %s, data=%d, udata=%p }", (u_int) kev->ident, kevent_filter_dump(kev), kevent_flags_dump(kev), kevent_fflags_dump(kev), (int) kev->data, kev->udata); return ((const char *) &buf[0]); } static int kevent_copyin_one(struct kqueue *kq, const struct kevent *src) { struct knote *kn = NULL; struct filter *filt; int rv = 0; if (src->flags & EV_DISPATCH && src->flags & EV_ONESHOT) { dbg_puts("Error: EV_DISPATCH and EV_ONESHOT are mutually exclusive"); errno = EINVAL; return (-1); } if (filter_lookup(&filt, kq, src->filter) < 0) return (-1); dbg_printf("src=%s", kevent_dump(src)); kn = knote_lookup(filt, src->ident); dbg_printf("knote_lookup: ident %d == %p", (int)src->ident, kn); if (kn == NULL) { if (src->flags & EV_ADD) { if ((kn = knote_new()) == NULL) { errno = ENOENT; return (-1); } memcpy(&kn->kev, src, sizeof(kn->kev)); kn->kev.flags &= ~EV_ENABLE; kn->kev.flags |= EV_ADD;//FIXME why? 
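        /*
         * Descriptive note (not in the original source): the knote's stored
         * copy of the kevent is normalized above -- EV_ENABLE is cleared and
         * EV_ADD is forced -- apparently so that the stored knote always
         * reads as added, while the enabled/disabled state is tracked
         * separately through the EV_DISABLE handling below.
         */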
kn->kn_kq = kq; assert(filt->kn_create); if (filt->kn_create(filt, kn) < 0) { knote_release(kn); errno = EFAULT; return (-1); } knote_insert(filt, kn); dbg_printf("created kevent %s", kevent_dump(src)); /* XXX- FIXME Needs to be handled in kn_create() to prevent races */ if (src->flags & EV_DISABLE) { kn->kev.flags |= EV_DISABLE; return (filt->kn_disable(filt, kn)); } //........................................ return (0); } else { dbg_printf("no entry found for ident=%u", (unsigned int)src->ident); errno = ENOENT; return (-1); } } if (src->flags & EV_DELETE) { rv = knote_delete(filt, kn); dbg_printf("knote_delete returned %d", rv); } else if (src->flags & EV_DISABLE) { kn->kev.flags |= EV_DISABLE; rv = filt->kn_disable(filt, kn); dbg_printf("kn_disable returned %d", rv); } else if (src->flags & EV_ENABLE) { kn->kev.flags &= ~EV_DISABLE; rv = filt->kn_enable(filt, kn); dbg_printf("kn_enable returned %d", rv); } else if (src->flags & EV_ADD || src->flags == 0 || src->flags & EV_RECEIPT) { kn->kev.udata = src->udata; rv = filt->kn_modify(filt, kn, src); dbg_printf("kn_modify returned %d", rv); } return (rv); } /** @return number of events added to the eventlist */ static int kevent_copyin(struct kqueue *kq, const struct kevent *src, int nchanges, struct kevent *eventlist, int nevents) { int status, nret; dbg_printf("nchanges=%d nevents=%d", nchanges, nevents); for (nret = 0; nchanges > 0; src++, nchanges--) { if (kevent_copyin_one(kq, src) < 0) { dbg_printf("errno=%s",strerror(errno)); status = errno; } else if (src->flags & EV_RECEIPT) { status = 0; } else { continue; } if (nevents > 0) { memcpy(eventlist, src, sizeof(*src)); eventlist->data = status; nevents--; eventlist++; nret++; } else { return (-1); } } return (nret); } int VISIBLE kevent(int kqfd, const struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout) { struct kqueue *kq; int rv = 0; #ifndef NDEBUG static unsigned int _kevent_counter = 0; unsigned int myid = 0; (void) myid; #endif /* Convert the descriptor into an object pointer */ kq = kqueue_lookup(kqfd); if (kq == NULL) { errno = ENOENT; return (-1); } #ifndef NDEBUG if (DEBUG_KQUEUE) { myid = atomic_inc(&_kevent_counter); dbg_printf("--- kevent %u --- (nchanges = %d, nevents = %d)", myid, nchanges, nevents); } #endif /* * Process each kevent on the changelist. 
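     * Each change is applied with the kqueue lock held.  A change that fails,
     * or that carries EV_RECEIPT, is copied back into the eventlist with its
     * data field set to the error status (0 for EV_RECEIPT successes), which
     * consumes entries from nevents.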
*/ if (nchanges > 0) { kqueue_lock(kq); rv = kevent_copyin(kq, changelist, nchanges, eventlist, nevents); kqueue_unlock(kq); dbg_printf("(%u) changelist: rv=%d", myid, rv); if (rv < 0) goto out; if (rv > 0) { eventlist += rv; nevents -= rv; } } rv = 0; /* * Wait for events and copy them to the eventlist */ if (nevents > MAX_KEVENT) nevents = MAX_KEVENT; if (nevents > 0) { rv = kqops.kevent_wait(kq, nevents, timeout); dbg_printf("kqops.kevent_wait returned %d", rv); if (fastpath(rv > 0)) { kqueue_lock(kq); rv = kqops.kevent_copyout(kq, rv, eventlist, nevents); kqueue_unlock(kq); } else if (rv == 0) { /* Timeout reached */ } else { dbg_printf("(%u) kevent_wait failed", myid); goto out; } } #ifndef NDEBUG if (DEBUG_KQUEUE) { int n; dbg_printf("(%u) returning %d events", myid, rv); for (n = 0; n < rv; n++) { dbg_printf("(%u) eventlist[%d] = %s", myid, n, kevent_dump(&eventlist[n])); } } #endif out: dbg_printf("--- END kevent %u ret %d ---", myid, rv); return (rv); } libkqueue-2.3.1/src/common/knote.c000066400000000000000000000077721342472035000170630ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include "private.h" #include "alloc.h" int knote_init(void) { return 0; // return (mem_init(sizeof(struct knote), 1024)); } static int knote_cmp(struct knote *a, struct knote *b) { return memcmp(&a->kev.ident, &b->kev.ident, sizeof(a->kev.ident)); } RB_GENERATE(knt, knote, kn_entries, knote_cmp) struct knote * knote_new(void) { struct knote *res; res = calloc(1, sizeof(struct knote)); if (res == NULL) return (NULL); res->kn_ref = 1; return (res); } void knote_release(struct knote *kn) { assert (kn->kn_ref > 0); if (atomic_dec(&kn->kn_ref) == 0) { if (kn->kn_flags & KNFL_KNOTE_DELETED) { dbg_printf("freeing knote at %p", kn); free(kn); } else { dbg_puts("this should never happen"); } } else { dbg_printf("decrementing refcount of knote %p rc=%d", kn, kn->kn_ref); } } void knote_insert(struct filter *filt, struct knote *kn) { pthread_rwlock_wrlock(&filt->kf_knote_mtx); RB_INSERT(knt, &filt->kf_knote, kn); pthread_rwlock_unlock(&filt->kf_knote_mtx); } int knote_delete(struct filter *filt, struct knote *kn) { struct knote query; struct knote *tmp; if (kn->kn_flags & KNFL_KNOTE_DELETED) { dbg_puts("ERROR: double deletion detected"); return (-1); } /* * Verify that the knote wasn't removed by another * thread before we acquired the knotelist lock. 
*/ query.kev.ident = kn->kev.ident; pthread_rwlock_wrlock(&filt->kf_knote_mtx); tmp = RB_FIND(knt, &filt->kf_knote, &query); if (tmp == kn) { RB_REMOVE(knt, &filt->kf_knote, kn); } pthread_rwlock_unlock(&filt->kf_knote_mtx); if (filt->kn_delete(filt, kn) < 0) return (-1); kn->kn_flags |= KNFL_KNOTE_DELETED; knote_release(kn); return (0); } struct knote * knote_lookup(struct filter *filt, uintptr_t ident) { struct knote query; struct knote *ent = NULL; query.kev.ident = ident; pthread_rwlock_rdlock(&filt->kf_knote_mtx); ent = RB_FIND(knt, &filt->kf_knote, &query); pthread_rwlock_unlock(&filt->kf_knote_mtx); dbg_printf("id=%" PRIuPTR " ent=%p", ident, ent); return (ent); } #if DEADWOOD struct knote * knote_get_by_data(struct filter *filt, intptr_t data) { struct knote *kn; pthread_rwlock_rdlock(&filt->kf_knote_mtx); RB_FOREACH(kn, knt, &filt->kf_knote) { if (data == kn->kev.data) break; } if (kn != NULL) { knote_retain(kn); } pthread_rwlock_unlock(&filt->kf_knote_mtx); return (kn); } #endif int knote_free_all(struct filter *filt) { struct knote *kn; pthread_rwlock_rdlock(&filt->kf_knote_mtx); RB_FOREACH(kn, knt, &filt->kf_knote) { /* Check return code */ filt->kn_delete(filt, kn); kn->kn_flags |= KNFL_KNOTE_DELETED; knote_release(kn); } pthread_rwlock_unlock(&filt->kf_knote_mtx); return (0); } int knote_disable(struct filter *filt, struct knote *kn) { assert(!(kn->kev.flags & EV_DISABLE)); filt->kn_disable(filt, kn); //TODO: Error checking KNOTE_DISABLE(kn); return (0); } //TODO: knote_enable() libkqueue-2.3.1/src/common/kqueue.c000066400000000000000000000076461342472035000172420ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include #include #include "private.h" int DEBUG_KQUEUE = 0; char *KQUEUE_DEBUG_IDENT = "KQ"; #ifdef _WIN32 static LONG kq_init_begin = 0; static int kq_init_complete = 0; #else pthread_mutex_t kq_mtx = PTHREAD_MUTEX_INITIALIZER; pthread_once_t kq_is_initialized = PTHREAD_ONCE_INIT; #endif unsigned int get_fd_limit(void) { #ifdef _WIN32 /* actually windows should be able to hold way more, as they use HANDLEs for everything. Still this number should still be sufficient for the provided number of kqueue fds. 
*/ return 65536; #else struct rlimit rlim; if (getrlimit(RLIMIT_NOFILE, &rlim) < 0) { dbg_perror("getrlimit(2)"); return (65536); } else { return (rlim.rlim_max); } #endif } static struct map *kqmap; void libkqueue_init(void) { #ifdef NDEBUG DEBUG_KQUEUE = 0; #else char *s = getenv("KQUEUE_DEBUG"); if (s != NULL && strlen(s) > 0) { DEBUG_KQUEUE = 1; #ifdef _WIN32 /* Initialize the Winsock library */ WSADATA wsaData; if (WSAStartup(MAKEWORD(2,2), &wsaData) != 0) abort(); #endif # if defined(_WIN32) && !defined(__GNUC__) /* Enable heap surveillance */ { int tmpFlag = _CrtSetDbgFlag( _CRTDBG_REPORT_FLAG ); tmpFlag |= _CRTDBG_CHECK_ALWAYS_DF; _CrtSetDbgFlag(tmpFlag); } # endif /* _WIN32 */ } #endif kqmap = map_new(get_fd_limit()); // INT_MAX if (kqmap == NULL) abort(); if (knote_init() < 0) abort(); dbg_puts("library initialization complete"); #ifdef _WIN32 kq_init_complete = 1; #endif } #if DEADWOOD static int kqueue_cmp(struct kqueue *a, struct kqueue *b) { return memcmp(&a->kq_id, &b->kq_id, sizeof(int)); } /* Must hold the kqtree_mtx when calling this */ void kqueue_free(struct kqueue *kq) { RB_REMOVE(kqt, &kqtree, kq); filter_unregister_all(kq); kqops.kqueue_free(kq); free(kq); } #endif struct kqueue * kqueue_lookup(int kq) { return ((struct kqueue *) map_lookup(kqmap, kq)); } int VISIBLE kqueue(void) { struct kqueue *kq; struct kqueue *tmp; #ifdef _WIN32 if (InterlockedCompareExchange(&kq_init_begin, 0, 1) == 0) { libkqueue_init(); } else { while (kq_init_complete == 0) { sleep(1); } } #else (void) pthread_mutex_lock(&kq_mtx); (void) pthread_once(&kq_is_initialized, libkqueue_init); (void) pthread_mutex_unlock(&kq_mtx); #endif kq = calloc(1, sizeof(*kq)); if (kq == NULL) return (-1); tracing_mutex_init(&kq->kq_mtx, NULL); if (kqops.kqueue_init(kq) < 0) { free(kq); return (-1); } dbg_printf("created kqueue, fd=%d", kq->kq_id); /* Delete and insert should be atomic */ (void) pthread_mutex_lock(&kq_mtx); tmp = map_delete(kqmap, kq->kq_id); if (tmp != NULL) { kqops.kqueue_free(tmp); } if (map_insert(kqmap, kq->kq_id, kq) < 0) { dbg_puts("map insertion failed"); kqops.kqueue_free(kq); return (-1); } pthread_mutex_unlock(&kq_mtx); return (kq->kq_id); } libkqueue-2.3.1/src/common/map.c000066400000000000000000000064551342472035000165150ustar00rootroot00000000000000/* * Copyright (c) 2011 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include "private.h" struct map { size_t len; void **data; }; struct map * map_new(size_t len) { struct map *dst; dst = calloc(1, sizeof(struct map)); if (dst == NULL) return (NULL); #ifdef _WIN32 dst->data = calloc(len, sizeof(void*)); if(dst->data == NULL) { dbg_perror("calloc()"); free(dst); return NULL; } dst->len = len; #else dst->data = mmap(NULL, len * sizeof(void *), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_NORESERVE | MAP_ANON, -1, 0); if (dst->data == MAP_FAILED) { dbg_perror("mmap(2)"); free(dst); return (NULL); } dst->len = len; #endif return (dst); } int map_insert(struct map *m, int idx, void *ptr) { if (slowpath(idx < 0 || idx > (int)m->len)) return (-1); if (atomic_ptr_cas(&(m->data[idx]), 0, ptr) == NULL) { dbg_printf("inserted %p in location %d", ptr, idx); return (0); } else { dbg_printf("tried to insert a value into a non-empty location %d (value=%p)", idx, m->data[idx]); return (-1); } } int map_remove(struct map *m, int idx, void *ptr) { if (slowpath(idx < 0 || idx > (int)m->len)) return (-1); if (atomic_ptr_cas(&(m->data[idx]), ptr, 0) == NULL) { dbg_printf("removed %p from location %d", ptr, idx); return (0); } else { dbg_printf("removal failed: location %d does not contain value %p", idx, m->data[idx]); return (-1); } } int map_replace(struct map *m, int idx, void *oldp, void *newp) { void *tmp; if (slowpath(idx < 0 || idx > (int)m->len)) return (-1); tmp = atomic_ptr_cas(&(m->data[idx]), oldp, newp); if (tmp == oldp) { dbg_printf("replaced value %p in location %d with value %p", oldp, idx, newp); return (0); } else { dbg_printf("item in location %d does not match expected value %p", idx, oldp); return (-1); } } void * map_lookup(struct map *m, int idx) { if (slowpath(idx < 0 || idx > (int)m->len)) return (NULL); return m->data[idx]; } void * map_delete(struct map *m, int idx) { void *oval; void *nval; if (slowpath(idx < 0 || idx > (int)m->len)) return ((void *)-1); /* Hopefully we aren't racing with another thread, but you never know.. */ do { oval = m->data[idx]; nval = atomic_ptr_cas(&(m->data[idx]), oval, NULL); } while (nval != oval); m->data[idx] = NULL; return ((void *) oval); } libkqueue-2.3.1/src/common/private.h000066400000000000000000000200531342472035000174050ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #ifndef _KQUEUE_PRIVATE_H #define _KQUEUE_PRIVATE_H #include #include #include #include "config.h" #include "tree.h" /* Maximum events returnable in a single kevent() call */ #define MAX_KEVENT 512 struct kqueue; struct kevent; struct knote; struct map; struct eventfd; struct evfilt_data; #if defined(_WIN32) # include "../windows/platform.h" # include "../common/queue.h" # if !defined(NDEBUG) && !defined(__GNUC__) # include # endif #elif defined(__linux__) # include "../posix/platform.h" # include "../linux/platform.h" #elif defined(__sun) # include "../posix/platform.h" # include "../solaris/platform.h" #else # error Unknown platform #endif #include "debug.h" /* Workaround for Android */ #ifndef EPOLLONESHOT # define EPOLLONESHOT (1 << 30) #endif struct eventfd { int ef_id; #if defined(EVENTFD_PLATFORM_SPECIFIC) EVENTFD_PLATFORM_SPECIFIC; #endif }; /* * Flags used by knote->kn_flags */ #define KNFL_FILE (1U << 0U) #define KNFL_PIPE (1U << 1U) #define KNFL_BLOCKDEV (1U << 2U) #define KNFL_CHARDEV (1U << 3U) #define KNFL_SOCKET_PASSIVE (1U << 4U) #define KNFL_SOCKET_STREAM (1U << 5U) #define KNFL_SOCKET_DGRAM (1U << 6U) #define KNFL_SOCKET_RDM (1U << 7U) #define KNFL_SOCKET_SEQPACKET (1U << 8U) #define KNFL_KNOTE_DELETED (1U << 31U) #define KNFL_SOCKET (KNFL_SOCKET_STREAM |\ KNFL_SOCKET_DGRAM |\ KNFL_SOCKET_RDM |\ KNFL_SOCKET_SEQPACKET) struct knote { struct kevent kev; unsigned int kn_flags; union { /* OLD */ int pfd; /* Used by timerfd */ int events; /* Used by socket */ struct { nlink_t nlink; /* Used by vnode */ off_t size; /* Used by vnode */ } vnode; timer_t timerid; struct sleepreq *sleepreq; /* Used by posix/timer.c */ void *handle; /* Used by win32 filters */ } data; struct kqueue* kn_kq; volatile uint32_t kn_ref; #if defined(KNOTE_PLATFORM_SPECIFIC) KNOTE_PLATFORM_SPECIFIC; #endif RB_ENTRY(knote) kn_entries; }; #define KNOTE_ENABLE(ent) do { \ (ent)->kev.flags &= ~EV_DISABLE; \ } while (0/*CONSTCOND*/) #define KNOTE_DISABLE(ent) do { \ (ent)->kev.flags |= EV_DISABLE; \ } while (0/*CONSTCOND*/) struct filter { short kf_id; /* filter operations */ int (*kf_init)(struct filter *); void (*kf_destroy)(struct filter *); int (*kf_copyout)(struct kevent *, struct knote *, void *); /* knote operations */ int (*kn_create)(struct filter *, struct knote *); int (*kn_modify)(struct filter *, struct knote *, const struct kevent *); int (*kn_delete)(struct filter *, struct knote *); int (*kn_enable)(struct filter *, struct knote *); int (*kn_disable)(struct filter *, struct knote *); struct eventfd kf_efd; /* Used by user.c */ //MOVE TO POSIX? int kf_pfd; /* fd to poll(2) for readiness */ int kf_wfd; /* fd to write when an event occurs */ //----? 
struct evfilt_data *kf_data; /* filter-specific data */ RB_HEAD(knt, knote) kf_knote; pthread_rwlock_t kf_knote_mtx; struct kqueue *kf_kqueue; #if defined(FILTER_PLATFORM_SPECIFIC) FILTER_PLATFORM_SPECIFIC; #endif }; /* Use this to declare a filter that is not implemented */ #define EVFILT_NOTIMPL { 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL } struct kqueue { int kq_id; struct filter kq_filt[EVFILT_SYSCOUNT]; fd_set kq_fds, kq_rfds; int kq_nfds; tracing_mutex_t kq_mtx; volatile uint32_t kq_ref; #if defined(KQUEUE_PLATFORM_SPECIFIC) KQUEUE_PLATFORM_SPECIFIC; #endif RB_ENTRY(kqueue) entries; }; struct kqueue_vtable { int (*kqueue_init)(struct kqueue *); void (*kqueue_free)(struct kqueue *); // @param timespec can be given as timeout // @param int the number of events to wait for // @param kqueue the queue to wait on int (*kevent_wait)(struct kqueue *, int, const struct timespec *); // @param kqueue the queue to look at // @param int The number of events that should be ready // @param kevent the structure to copy the events into // @param int The number of events to copy // @return the actual number of events copied int (*kevent_copyout)(struct kqueue *, int, struct kevent *, int); int (*filter_init)(struct kqueue *, struct filter *); void (*filter_free)(struct kqueue *, struct filter *); int (*eventfd_init)(struct eventfd *); void (*eventfd_close)(struct eventfd *); int (*eventfd_raise)(struct eventfd *); int (*eventfd_lower)(struct eventfd *); int (*eventfd_descriptor)(struct eventfd *); }; extern const struct kqueue_vtable kqops; /* * kqueue internal API */ #define kqueue_lock(kq) tracing_mutex_lock(&(kq)->kq_mtx) #define kqueue_unlock(kq) tracing_mutex_unlock(&(kq)->kq_mtx) /* * knote internal API */ int knote_free_all(struct filter *filt); struct knote * knote_lookup(struct filter *, uintptr_t); //DEADWOOD: struct knote * knote_get_by_data(struct filter *filt, intptr_t); struct knote * knote_new(void); #define knote_retain(kn) atomic_inc(&kn->kn_ref) void knote_release(struct knote *); void knote_insert(struct filter *, struct knote *); int knote_delete(struct filter *, struct knote *); int knote_init(void); int knote_disable(struct filter *, struct knote *); #define knote_get_filter(knt) &((knt)->kn_kq->kq_filt[(knt)->kev.filter]) int filter_lookup(struct filter **, struct kqueue *, short); int filter_register_all(struct kqueue *); void filter_unregister_all(struct kqueue *); const char *filter_name(short); int kevent_wait(struct kqueue *, const struct timespec *); int kevent_copyout(struct kqueue *, int, struct kevent *, int); void kevent_free(struct kqueue *); const char *kevent_dump(const struct kevent *); struct kqueue * kqueue_lookup(int); int kqueue_validate(struct kqueue *); struct map *map_new(size_t); int map_insert(struct map *, int, void *); int map_remove(struct map *, int, void *); int map_replace(struct map *, int, void *, void *); void *map_lookup(struct map *, int); void *map_delete(struct map *, int); void map_free(struct map *); /* DEADWOOD: No longer needed due to the un-smerging of POSIX and Linux int posix_evfilt_user_init(struct filter *); void posix_evfilt_user_destroy(struct filter *); int posix_evfilt_user_copyout(struct kevent *, struct knote *, void *ptr UNUSED); int posix_evfilt_user_knote_create(struct filter *, struct knote *); int posix_evfilt_user_knote_modify(struct filter *, struct knote *, const struct kevent *); int posix_evfilt_user_knote_delete(struct filter *, struct knote *); int posix_evfilt_user_knote_enable(struct filter *, struct knote 
*); int posix_evfilt_user_knote_disable(struct filter *, struct knote *); */ #endif /* ! _KQUEUE_PRIVATE_H */ libkqueue-2.3.1/src/common/queue.h000066400000000000000000000511771342472035000170720ustar00rootroot00000000000000/*- * Copyright (c) 1991, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)queue.h 8.5 (Berkeley) 8/20/94 * $FreeBSD: src/sys/sys/queue.h,v 1.72.2.1.2.1 2009/10/25 01:10:29 kensmith Exp $ */ #ifndef _SYS_QUEUE_H_ #define _SYS_QUEUE_H_ /* * This file defines four types of data structures: singly-linked lists, * singly-linked tail queues, lists and tail queues. * * A singly-linked list is headed by a single forward pointer. The elements * are singly linked for minimum space and pointer manipulation overhead at * the expense of O(n) removal for arbitrary elements. New elements can be * added to the list after an existing element or at the head of the list. * Elements being removed from the head of the list should use the explicit * macro for this purpose for optimum efficiency. A singly-linked list may * only be traversed in the forward direction. Singly-linked lists are ideal * for applications with large datasets and few or no removals or for * implementing a LIFO queue. * * A singly-linked tail queue is headed by a pair of pointers, one to the * head of the list and the other to the tail of the list. The elements are * singly linked for minimum space and pointer manipulation overhead at the * expense of O(n) removal for arbitrary elements. New elements can be added * to the list after an existing element, at the head of the list, or at the * end of the list. Elements being removed from the head of the tail queue * should use the explicit macro for this purpose for optimum efficiency. * A singly-linked tail queue may only be traversed in the forward direction. * Singly-linked tail queues are ideal for applications with large datasets * and few or no removals or for implementing a FIFO queue. * * A list is headed by a single forward pointer (or an array of forward * pointers for a hash table header). 
The elements are doubly linked * so that an arbitrary element can be removed without a need to * traverse the list. New elements can be added to the list before * or after an existing element or at the head of the list. A list * may only be traversed in the forward direction. * * A tail queue is headed by a pair of pointers, one to the head of the * list and the other to the tail of the list. The elements are doubly * linked so that an arbitrary element can be removed without a need to * traverse the list. New elements can be added to the list before or * after an existing element, at the head of the list, or at the end of * the list. A tail queue may be traversed in either direction. * * For details on the use of these macros, see the queue(3) manual page. * * * SLIST LIST STAILQ TAILQ * _HEAD + + + + * _HEAD_INITIALIZER + + + + * _ENTRY + + + + * _INIT + + + + * _EMPTY + + + + * _FIRST + + + + * _NEXT + + + + * _PREV - - - + * _LAST - - + + * _FOREACH + + + + * _FOREACH_SAFE + + + + * _FOREACH_REVERSE - - - + * _FOREACH_REVERSE_SAFE - - - + * _INSERT_HEAD + + + + * _INSERT_BEFORE - + - + * _INSERT_AFTER + + + + * _INSERT_TAIL - - + + * _CONCAT - - + + * _REMOVE_AFTER + - + - * _REMOVE_HEAD + - + - * _REMOVE + + + + * */ #ifdef QUEUE_MACRO_DEBUG /* Store the last 2 places the queue element or head was altered */ struct qm_trace { char * lastfile; int lastline; char * prevfile; int prevline; }; #define TRACEBUF struct qm_trace trace; #define TRASHIT(x) do {(x) = (void *)-1;} while (0) #define QMD_TRACE_HEAD(head) do { \ (head)->trace.prevline = (head)->trace.lastline; \ (head)->trace.prevfile = (head)->trace.lastfile; \ (head)->trace.lastline = __LINE__; \ (head)->trace.lastfile = __FILE__; \ } while (0) #define QMD_TRACE_ELEM(elem) do { \ (elem)->trace.prevline = (elem)->trace.lastline; \ (elem)->trace.prevfile = (elem)->trace.lastfile; \ (elem)->trace.lastline = __LINE__; \ (elem)->trace.lastfile = __FILE__; \ } while (0) #else #define QMD_TRACE_ELEM(elem) #define QMD_TRACE_HEAD(head) #define TRACEBUF #define TRASHIT(x) #endif /* QUEUE_MACRO_DEBUG */ #ifndef _WIN32 /* * Singly-linked List declarations. */ #define SLIST_HEAD(name, type) \ struct name { \ struct type *slh_first; /* first element */ \ } #define SLIST_HEAD_INITIALIZER(head) \ { NULL } #define SLIST_ENTRY(type) \ struct { \ struct type *sle_next; /* next element */ \ } /* * Singly-linked List functions. 
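 *
 * Minimal SLIST usage sketch (illustrative only; "struct entry", "value"
 * and the field name "entries" are hypothetical, the macros are the ones
 * declared above and defined below):
 *
 *   SLIST_HEAD(listhead, entry) head = SLIST_HEAD_INITIALIZER(head);
 *   struct entry {
 *       int value;
 *       SLIST_ENTRY(entry) entries;
 *   } *n1, *np;
 *
 *   Then, inside a function:
 *
 *   SLIST_INIT(&head);
 *   n1 = malloc(sizeof(*n1));
 *   n1->value = 1;
 *   SLIST_INSERT_HEAD(&head, n1, entries);
 *   SLIST_FOREACH(np, &head, entries)
 *       np->value++;
 *   SLIST_REMOVE_HEAD(&head, entries);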
*/ #define SLIST_EMPTY(head) ((head)->slh_first == NULL) #define SLIST_FIRST(head) ((head)->slh_first) #define SLIST_FOREACH(var, head, field) \ for ((var) = SLIST_FIRST((head)); \ (var); \ (var) = SLIST_NEXT((var), field)) #define SLIST_FOREACH_SAFE(var, head, field, tvar) \ for ((var) = SLIST_FIRST((head)); \ (var) && ((tvar) = SLIST_NEXT((var), field), 1); \ (var) = (tvar)) #define SLIST_FOREACH_PREVPTR(var, varp, head, field) \ for ((varp) = &SLIST_FIRST((head)); \ ((var) = *(varp)) != NULL; \ (varp) = &SLIST_NEXT((var), field)) #define SLIST_INIT(head) do { \ SLIST_FIRST((head)) = NULL; \ } while (0) #define SLIST_INSERT_AFTER(slistelm, elm, field) do { \ SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \ SLIST_NEXT((slistelm), field) = (elm); \ } while (0) #define SLIST_INSERT_HEAD(head, elm, field) do { \ SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \ SLIST_FIRST((head)) = (elm); \ } while (0) #define SLIST_NEXT(elm, field) ((elm)->field.sle_next) #define SLIST_REMOVE(head, elm, type, field) do { \ if (SLIST_FIRST((head)) == (elm)) { \ SLIST_REMOVE_HEAD((head), field); \ } \ else { \ struct type *curelm = SLIST_FIRST((head)); \ while (SLIST_NEXT(curelm, field) != (elm)) \ curelm = SLIST_NEXT(curelm, field); \ SLIST_REMOVE_AFTER(curelm, field); \ } \ TRASHIT((elm)->field.sle_next); \ } while (0) #define SLIST_REMOVE_AFTER(elm, field) do { \ SLIST_NEXT(elm, field) = \ SLIST_NEXT(SLIST_NEXT(elm, field), field); \ } while (0) #define SLIST_REMOVE_HEAD(head, field) do { \ SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \ } while (0) #endif /* defined(_WIN32) */ /* * Singly-linked Tail queue declarations. */ #define STAILQ_HEAD(name, type) \ struct name { \ struct type *stqh_first;/* first element */ \ struct type **stqh_last;/* addr of last next element */ \ } #define STAILQ_HEAD_INITIALIZER(head) \ { NULL, &(head).stqh_first } #define STAILQ_ENTRY(type) \ struct { \ struct type *stqe_next; /* next element */ \ } /* * Singly-linked Tail queue functions. */ #define STAILQ_CONCAT(head1, head2) do { \ if (!STAILQ_EMPTY((head2))) { \ *(head1)->stqh_last = (head2)->stqh_first; \ (head1)->stqh_last = (head2)->stqh_last; \ STAILQ_INIT((head2)); \ } \ } while (0) #define STAILQ_EMPTY(head) ((head)->stqh_first == NULL) #define STAILQ_FIRST(head) ((head)->stqh_first) #define STAILQ_FOREACH(var, head, field) \ for((var) = STAILQ_FIRST((head)); \ (var); \ (var) = STAILQ_NEXT((var), field)) #define STAILQ_FOREACH_SAFE(var, head, field, tvar) \ for ((var) = STAILQ_FIRST((head)); \ (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \ (var) = (tvar)) #define STAILQ_INIT(head) do { \ STAILQ_FIRST((head)) = NULL; \ (head)->stqh_last = &STAILQ_FIRST((head)); \ } while (0) #define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \ if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\ (head)->stqh_last = &STAILQ_NEXT((elm), field); \ STAILQ_NEXT((tqelm), field) = (elm); \ } while (0) #define STAILQ_INSERT_HEAD(head, elm, field) do { \ if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \ (head)->stqh_last = &STAILQ_NEXT((elm), field); \ STAILQ_FIRST((head)) = (elm); \ } while (0) #define STAILQ_INSERT_TAIL(head, elm, field) do { \ STAILQ_NEXT((elm), field) = NULL; \ *(head)->stqh_last = (elm); \ (head)->stqh_last = &STAILQ_NEXT((elm), field); \ } while (0) #define STAILQ_LAST(head, type, field) \ (STAILQ_EMPTY((head)) ? 
\ NULL : \ ((struct type *)(void *) \ ((char *)((head)->stqh_last) - __offsetof(struct type, field)))) #define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next) #define STAILQ_REMOVE(head, elm, type, field) do { \ if (STAILQ_FIRST((head)) == (elm)) { \ STAILQ_REMOVE_HEAD((head), field); \ } \ else { \ struct type *curelm = STAILQ_FIRST((head)); \ while (STAILQ_NEXT(curelm, field) != (elm)) \ curelm = STAILQ_NEXT(curelm, field); \ STAILQ_REMOVE_AFTER(head, curelm, field); \ } \ TRASHIT((elm)->field.stqe_next); \ } while (0) #define STAILQ_REMOVE_HEAD(head, field) do { \ if ((STAILQ_FIRST((head)) = \ STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \ (head)->stqh_last = &STAILQ_FIRST((head)); \ } while (0) #define STAILQ_REMOVE_AFTER(head, elm, field) do { \ if ((STAILQ_NEXT(elm, field) = \ STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL) \ (head)->stqh_last = &STAILQ_NEXT((elm), field); \ } while (0) #define STAILQ_SWAP(head1, head2, type) do { \ struct type *swap_first = STAILQ_FIRST(head1); \ struct type **swap_last = (head1)->stqh_last; \ STAILQ_FIRST(head1) = STAILQ_FIRST(head2); \ (head1)->stqh_last = (head2)->stqh_last; \ STAILQ_FIRST(head2) = swap_first; \ (head2)->stqh_last = swap_last; \ if (STAILQ_EMPTY(head1)) \ (head1)->stqh_last = &STAILQ_FIRST(head1); \ if (STAILQ_EMPTY(head2)) \ (head2)->stqh_last = &STAILQ_FIRST(head2); \ } while (0) /* * List declarations. * NOTE: LIST_HEAD conflicts with a Linux macro. */ #define FIXME_LIST_HEAD(name, type) \ struct name { \ struct type *lh_first; /* first element */ \ } #define LIST_HEAD_INITIALIZER(head) \ { NULL } #define LIST_ENTRY(type) \ struct { \ struct type *le_next; /* next element */ \ struct type **le_prev; /* address of previous next element */ \ } /* * List functions. */ #if (defined(_KERNEL) && defined(INVARIANTS)) #define QMD_LIST_CHECK_HEAD(head, field) do { \ if (LIST_FIRST((head)) != NULL && \ LIST_FIRST((head))->field.le_prev != \ &LIST_FIRST((head))) \ panic("Bad list head %p first->prev != head", (head)); \ } while (0) #define QMD_LIST_CHECK_NEXT(elm, field) do { \ if (LIST_NEXT((elm), field) != NULL && \ LIST_NEXT((elm), field)->field.le_prev != \ &((elm)->field.le_next)) \ panic("Bad link elm %p next->prev != elm", (elm)); \ } while (0) #define QMD_LIST_CHECK_PREV(elm, field) do { \ if (*(elm)->field.le_prev != (elm)) \ panic("Bad link elm %p prev->next != elm", (elm)); \ } while (0) #else #define QMD_LIST_CHECK_HEAD(head, field) #define QMD_LIST_CHECK_NEXT(elm, field) #define QMD_LIST_CHECK_PREV(elm, field) #endif /* (_KERNEL && INVARIANTS) */ #define LIST_EMPTY(head) ((head)->lh_first == NULL) #define LIST_FIRST(head) ((head)->lh_first) #define LIST_FOREACH(var, head, field) \ for ((var) = LIST_FIRST((head)); \ (var); \ (var) = LIST_NEXT((var), field)) #define LIST_FOREACH_SAFE(var, head, field, tvar) \ for ((var) = LIST_FIRST((head)); \ (var) && ((tvar) = LIST_NEXT((var), field), 1); \ (var) = (tvar)) #define LIST_INIT(head) do { \ LIST_FIRST((head)) = NULL; \ } while (0) #define LIST_INSERT_AFTER(listelm, elm, field) do { \ QMD_LIST_CHECK_NEXT(listelm, field); \ if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\ LIST_NEXT((listelm), field)->field.le_prev = \ &LIST_NEXT((elm), field); \ LIST_NEXT((listelm), field) = (elm); \ (elm)->field.le_prev = &LIST_NEXT((listelm), field); \ } while (0) #define LIST_INSERT_BEFORE(listelm, elm, field) do { \ QMD_LIST_CHECK_PREV(listelm, field); \ (elm)->field.le_prev = (listelm)->field.le_prev; \ LIST_NEXT((elm), field) = (listelm); \ 
*(listelm)->field.le_prev = (elm); \ (listelm)->field.le_prev = &LIST_NEXT((elm), field); \ } while (0) #define LIST_INSERT_HEAD(head, elm, field) do { \ QMD_LIST_CHECK_HEAD((head), field); \ if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \ LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\ LIST_FIRST((head)) = (elm); \ (elm)->field.le_prev = &LIST_FIRST((head)); \ } while (0) #define LIST_NEXT(elm, field) ((elm)->field.le_next) #define LIST_REMOVE(elm, field) do { \ QMD_LIST_CHECK_NEXT(elm, field); \ QMD_LIST_CHECK_PREV(elm, field); \ if (LIST_NEXT((elm), field) != NULL) \ LIST_NEXT((elm), field)->field.le_prev = \ (elm)->field.le_prev; \ *(elm)->field.le_prev = LIST_NEXT((elm), field); \ TRASHIT((elm)->field.le_next); \ TRASHIT((elm)->field.le_prev); \ } while (0) #define LIST_SWAP(head1, head2, type, field) do { \ struct type *swap_tmp = LIST_FIRST((head1)); \ LIST_FIRST((head1)) = LIST_FIRST((head2)); \ LIST_FIRST((head2)) = swap_tmp; \ if ((swap_tmp = LIST_FIRST((head1))) != NULL) \ swap_tmp->field.le_prev = &LIST_FIRST((head1)); \ if ((swap_tmp = LIST_FIRST((head2))) != NULL) \ swap_tmp->field.le_prev = &LIST_FIRST((head2)); \ } while (0) /* * Tail queue declarations. */ #define TAILQ_HEAD(name, type) \ struct name { \ struct type *tqh_first; /* first element */ \ struct type **tqh_last; /* addr of last next element */ \ TRACEBUF \ } #define TAILQ_HEAD_INITIALIZER(head) \ { NULL, &(head).tqh_first } #define TAILQ_ENTRY(type) \ struct { \ struct type *tqe_next; /* next element */ \ struct type **tqe_prev; /* address of previous next element */ \ TRACEBUF \ } /* * Tail queue functions. */ #if (defined(_KERNEL) && defined(INVARIANTS)) #define QMD_TAILQ_CHECK_HEAD(head, field) do { \ if (!TAILQ_EMPTY(head) && \ TAILQ_FIRST((head))->field.tqe_prev != \ &TAILQ_FIRST((head))) \ panic("Bad tailq head %p first->prev != head", (head)); \ } while (0) #define QMD_TAILQ_CHECK_TAIL(head, field) do { \ if (*(head)->tqh_last != NULL) \ panic("Bad tailq NEXT(%p->tqh_last) != NULL", (head)); \ } while (0) #define QMD_TAILQ_CHECK_NEXT(elm, field) do { \ if (TAILQ_NEXT((elm), field) != NULL && \ TAILQ_NEXT((elm), field)->field.tqe_prev != \ &((elm)->field.tqe_next)) \ panic("Bad link elm %p next->prev != elm", (elm)); \ } while (0) #define QMD_TAILQ_CHECK_PREV(elm, field) do { \ if (*(elm)->field.tqe_prev != (elm)) \ panic("Bad link elm %p prev->next != elm", (elm)); \ } while (0) #else #define QMD_TAILQ_CHECK_HEAD(head, field) #define QMD_TAILQ_CHECK_TAIL(head, headname) #define QMD_TAILQ_CHECK_NEXT(elm, field) #define QMD_TAILQ_CHECK_PREV(elm, field) #endif /* (_KERNEL && INVARIANTS) */ #define TAILQ_CONCAT(head1, head2, field) do { \ if (!TAILQ_EMPTY(head2)) { \ *(head1)->tqh_last = (head2)->tqh_first; \ (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \ (head1)->tqh_last = (head2)->tqh_last; \ TAILQ_INIT((head2)); \ QMD_TRACE_HEAD(head1); \ QMD_TRACE_HEAD(head2); \ } \ } while (0) #define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) #define TAILQ_FIRST(head) ((head)->tqh_first) #define TAILQ_FOREACH(var, head, field) \ for ((var) = TAILQ_FIRST((head)); \ (var); \ (var) = TAILQ_NEXT((var), field)) #define TAILQ_FOREACH_SAFE(var, head, field, tvar) \ for ((var) = TAILQ_FIRST((head)); \ (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \ (var) = (tvar)) #define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ for ((var) = TAILQ_LAST((head), headname); \ (var); \ (var) = TAILQ_PREV((var), headname, field)) #define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, 
tvar) \ for ((var) = TAILQ_LAST((head), headname); \ (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \ (var) = (tvar)) #define TAILQ_INIT(head) do { \ TAILQ_FIRST((head)) = NULL; \ (head)->tqh_last = &TAILQ_FIRST((head)); \ QMD_TRACE_HEAD(head); \ } while (0) #define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ QMD_TAILQ_CHECK_NEXT(listelm, field); \ if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\ TAILQ_NEXT((elm), field)->field.tqe_prev = \ &TAILQ_NEXT((elm), field); \ else { \ (head)->tqh_last = &TAILQ_NEXT((elm), field); \ QMD_TRACE_HEAD(head); \ } \ TAILQ_NEXT((listelm), field) = (elm); \ (elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \ QMD_TRACE_ELEM(&(elm)->field); \ QMD_TRACE_ELEM(&listelm->field); \ } while (0) #define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ QMD_TAILQ_CHECK_PREV(listelm, field); \ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ TAILQ_NEXT((elm), field) = (listelm); \ *(listelm)->field.tqe_prev = (elm); \ (listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \ QMD_TRACE_ELEM(&(elm)->field); \ QMD_TRACE_ELEM(&listelm->field); \ } while (0) #define TAILQ_INSERT_HEAD(head, elm, field) do { \ QMD_TAILQ_CHECK_HEAD(head, field); \ if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \ TAILQ_FIRST((head))->field.tqe_prev = \ &TAILQ_NEXT((elm), field); \ else \ (head)->tqh_last = &TAILQ_NEXT((elm), field); \ TAILQ_FIRST((head)) = (elm); \ (elm)->field.tqe_prev = &TAILQ_FIRST((head)); \ QMD_TRACE_HEAD(head); \ QMD_TRACE_ELEM(&(elm)->field); \ } while (0) #define TAILQ_INSERT_TAIL(head, elm, field) do { \ QMD_TAILQ_CHECK_TAIL(head, field); \ TAILQ_NEXT((elm), field) = NULL; \ (elm)->field.tqe_prev = (head)->tqh_last; \ *(head)->tqh_last = (elm); \ (head)->tqh_last = &TAILQ_NEXT((elm), field); \ QMD_TRACE_HEAD(head); \ QMD_TRACE_ELEM(&(elm)->field); \ } while (0) #define TAILQ_LAST(head, headname) \ (*(((struct headname *)((head)->tqh_last))->tqh_last)) #define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) #define TAILQ_PREV(elm, headname, field) \ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) #define TAILQ_REMOVE(head, elm, field) do { \ QMD_TAILQ_CHECK_NEXT(elm, field); \ QMD_TAILQ_CHECK_PREV(elm, field); \ if ((TAILQ_NEXT((elm), field)) != NULL) \ TAILQ_NEXT((elm), field)->field.tqe_prev = \ (elm)->field.tqe_prev; \ else { \ (head)->tqh_last = (elm)->field.tqe_prev; \ QMD_TRACE_HEAD(head); \ } \ *(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \ TRASHIT((elm)->field.tqe_next); \ TRASHIT((elm)->field.tqe_prev); \ QMD_TRACE_ELEM(&(elm)->field); \ } while (0) #define TAILQ_SWAP(head1, head2, type, field) do { \ struct type *swap_first = (head1)->tqh_first; \ struct type **swap_last = (head1)->tqh_last; \ (head1)->tqh_first = (head2)->tqh_first; \ (head1)->tqh_last = (head2)->tqh_last; \ (head2)->tqh_first = swap_first; \ (head2)->tqh_last = swap_last; \ if ((swap_first = (head1)->tqh_first) != NULL) \ swap_first->field.tqe_prev = &(head1)->tqh_first; \ else \ (head1)->tqh_last = &(head1)->tqh_first; \ if ((swap_first = (head2)->tqh_first) != NULL) \ swap_first->field.tqe_prev = &(head2)->tqh_first; \ else \ (head2)->tqh_last = &(head2)->tqh_first; \ } while (0) #endif /* !_SYS_QUEUE_H_ */ libkqueue-2.3.1/src/common/tree.h000066400000000000000000000623341342472035000167020ustar00rootroot00000000000000/* $NetBSD: tree.h,v 1.8 2004/03/28 19:38:30 provos Exp $ */ /* $OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $ */ /* $FreeBSD: src/sys/sys/tree.h,v 1.9.2.1.2.1 2009/10/25 01:10:29 
kensmith Exp $ */ /*- * Copyright 2002 Niels Provos * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _SYS_TREE_H_ #define _SYS_TREE_H_ /* * This file defines data structures for different types of trees: * splay trees and red-black trees. * * A splay tree is a self-organizing data structure. Every operation * on the tree causes a splay to happen. The splay moves the requested * node to the root of the tree and partly rebalances it. * * This has the benefit that request locality causes faster lookups as * the requested nodes move to the top of the tree. On the other hand, * every lookup causes memory writes. * * The Balance Theorem bounds the total access time for m operations * and n inserts on an initially empty tree as O((m + n)lg n). The * amortized cost for a sequence of m accesses to a splay tree is O(lg n); * * A red-black tree is a binary search tree with the node color as an * extra attribute. It fulfills a set of conditions: * - every search path from the root to a leaf consists of the * same number of black nodes, * - each red node (except for the root) has a black parent, * - each leaf node is black. * * Every operation on a red-black tree is bounded as O(lg n). * The maximum height of a red-black tree is 2lg (n+1). 
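 *
 * Minimal red-black tree usage sketch (illustrative only; "struct node",
 * "node_cmp", "key" and the field name "entry" are hypothetical, the RB_*
 * macros are the ones defined in this header and used by src/common/knote.c):
 *
 *   struct node {
 *       int key;
 *       RB_ENTRY(node) entry;
 *   };
 *
 *   static int
 *   node_cmp(struct node *a, struct node *b)
 *   {
 *       return (a->key < b->key) ? -1 : (a->key > b->key);
 *   }
 *
 *   RB_HEAD(node_tree, node);
 *   RB_GENERATE(node_tree, node, entry, node_cmp)
 *
 *   Then, inside a function:
 *
 *   struct node_tree head;
 *   struct node *n, query, *found;
 *
 *   RB_INIT(&head);
 *   n = malloc(sizeof(*n));
 *   n->key = 42;
 *   RB_INSERT(node_tree, &head, n);
 *   query.key = 42;
 *   found = RB_FIND(node_tree, &head, &query);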
*/ #define SPLAY_HEAD(name, type) \ struct name { \ struct type *sph_root; /* root of the tree */ \ } #define SPLAY_INITIALIZER(root) \ { NULL } #define SPLAY_INIT(root) do { \ (root)->sph_root = NULL; \ } while (/*CONSTCOND*/ 0) #define SPLAY_ENTRY(type) \ struct { \ struct type *spe_left; /* left element */ \ struct type *spe_right; /* right element */ \ } #define SPLAY_LEFT(elm, field) (elm)->field.spe_left #define SPLAY_RIGHT(elm, field) (elm)->field.spe_right #define SPLAY_ROOT(head) (head)->sph_root #define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL) /* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */ #define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \ SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \ SPLAY_RIGHT(tmp, field) = (head)->sph_root; \ (head)->sph_root = tmp; \ } while (/*CONSTCOND*/ 0) #define SPLAY_ROTATE_LEFT(head, tmp, field) do { \ SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \ SPLAY_LEFT(tmp, field) = (head)->sph_root; \ (head)->sph_root = tmp; \ } while (/*CONSTCOND*/ 0) #define SPLAY_LINKLEFT(head, tmp, field) do { \ SPLAY_LEFT(tmp, field) = (head)->sph_root; \ tmp = (head)->sph_root; \ (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \ } while (/*CONSTCOND*/ 0) #define SPLAY_LINKRIGHT(head, tmp, field) do { \ SPLAY_RIGHT(tmp, field) = (head)->sph_root; \ tmp = (head)->sph_root; \ (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \ } while (/*CONSTCOND*/ 0) #define SPLAY_ASSEMBLE(head, node, left, right, field) do { \ SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \ SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field);\ SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \ SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \ } while (/*CONSTCOND*/ 0) /* Generates prototypes and inline functions */ #define SPLAY_PROTOTYPE(name, type, field, cmp) \ void name##_SPLAY(struct name *, struct type *); \ void name##_SPLAY_MINMAX(struct name *, int); \ struct type *name##_SPLAY_INSERT(struct name *, struct type *); \ struct type *name##_SPLAY_REMOVE(struct name *, struct type *); \ \ /* Finds the node with the same key as elm */ \ static __inline struct type * \ name##_SPLAY_FIND(struct name *head, struct type *elm) \ { \ if (SPLAY_EMPTY(head)) \ return(NULL); \ name##_SPLAY(head, elm); \ if ((cmp)(elm, (head)->sph_root) == 0) \ return (head->sph_root); \ return (NULL); \ } \ \ static __inline struct type * \ name##_SPLAY_NEXT(struct name *head, struct type *elm) \ { \ name##_SPLAY(head, elm); \ if (SPLAY_RIGHT(elm, field) != NULL) { \ elm = SPLAY_RIGHT(elm, field); \ while (SPLAY_LEFT(elm, field) != NULL) { \ elm = SPLAY_LEFT(elm, field); \ } \ } else \ elm = NULL; \ return (elm); \ } \ \ static __inline struct type * \ name##_SPLAY_MIN_MAX(struct name *head, int val) \ { \ name##_SPLAY_MINMAX(head, val); \ return (SPLAY_ROOT(head)); \ } /* Main splay operation. 
* Moves node close to the key of elm to top */ #define SPLAY_GENERATE(name, type, field, cmp) \ struct type * \ name##_SPLAY_INSERT(struct name *head, struct type *elm) \ { \ if (SPLAY_EMPTY(head)) { \ SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \ } else { \ int __comp; \ name##_SPLAY(head, elm); \ __comp = (cmp)(elm, (head)->sph_root); \ if(__comp < 0) { \ SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field);\ SPLAY_RIGHT(elm, field) = (head)->sph_root; \ SPLAY_LEFT((head)->sph_root, field) = NULL; \ } else if (__comp > 0) { \ SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field);\ SPLAY_LEFT(elm, field) = (head)->sph_root; \ SPLAY_RIGHT((head)->sph_root, field) = NULL; \ } else \ return ((head)->sph_root); \ } \ (head)->sph_root = (elm); \ return (NULL); \ } \ \ struct type * \ name##_SPLAY_REMOVE(struct name *head, struct type *elm) \ { \ struct type *__tmp; \ if (SPLAY_EMPTY(head)) \ return (NULL); \ name##_SPLAY(head, elm); \ if ((cmp)(elm, (head)->sph_root) == 0) { \ if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \ (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);\ } else { \ __tmp = SPLAY_RIGHT((head)->sph_root, field); \ (head)->sph_root = SPLAY_LEFT((head)->sph_root, field);\ name##_SPLAY(head, elm); \ SPLAY_RIGHT((head)->sph_root, field) = __tmp; \ } \ return (elm); \ } \ return (NULL); \ } \ \ void \ name##_SPLAY(struct name *head, struct type *elm) \ { \ struct type __node, *__left, *__right, *__tmp; \ int __comp; \ \ SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\ __left = __right = &__node; \ \ while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) { \ if (__comp < 0) { \ __tmp = SPLAY_LEFT((head)->sph_root, field); \ if (__tmp == NULL) \ break; \ if ((cmp)(elm, __tmp) < 0){ \ SPLAY_ROTATE_RIGHT(head, __tmp, field); \ if (SPLAY_LEFT((head)->sph_root, field) == NULL)\ break; \ } \ SPLAY_LINKLEFT(head, __right, field); \ } else if (__comp > 0) { \ __tmp = SPLAY_RIGHT((head)->sph_root, field); \ if (__tmp == NULL) \ break; \ if ((cmp)(elm, __tmp) > 0){ \ SPLAY_ROTATE_LEFT(head, __tmp, field); \ if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\ break; \ } \ SPLAY_LINKRIGHT(head, __left, field); \ } \ } \ SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \ } \ \ /* Splay with either the minimum or the maximum element \ * Used to find minimum or maximum element in tree. \ */ \ void name##_SPLAY_MINMAX(struct name *head, int __comp) \ { \ struct type __node, *__left, *__right, *__tmp; \ \ SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\ __left = __right = &__node; \ \ while (1) { \ if (__comp < 0) { \ __tmp = SPLAY_LEFT((head)->sph_root, field); \ if (__tmp == NULL) \ break; \ if (__comp < 0){ \ SPLAY_ROTATE_RIGHT(head, __tmp, field); \ if (SPLAY_LEFT((head)->sph_root, field) == NULL)\ break; \ } \ SPLAY_LINKLEFT(head, __right, field); \ } else if (__comp > 0) { \ __tmp = SPLAY_RIGHT((head)->sph_root, field); \ if (__tmp == NULL) \ break; \ if (__comp > 0) { \ SPLAY_ROTATE_LEFT(head, __tmp, field); \ if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\ break; \ } \ SPLAY_LINKRIGHT(head, __left, field); \ } \ } \ SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \ } #define SPLAY_NEGINF -1 #define SPLAY_INF 1 #define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y) #define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y) #define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y) #define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y) #define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? 
NULL \ : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF)) #define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL \ : name##_SPLAY_MIN_MAX(x, SPLAY_INF)) #define SPLAY_FOREACH(x, name, head) \ for ((x) = SPLAY_MIN(name, head); \ (x) != NULL; \ (x) = SPLAY_NEXT(name, head, x)) /* Macros that define a red-black tree */ #define RB_HEAD(name, type) \ struct name { \ struct type *rbh_root; /* root of the tree */ \ } #define RB_INITIALIZER(root) \ { NULL } #define RB_INIT(root) do { \ (root)->rbh_root = NULL; \ } while (/*CONSTCOND*/ 0) #define RB_BLACK 0 #define RB_RED 1 #define RB_ENTRY(type) \ struct { \ struct type *rbe_left; /* left element */ \ struct type *rbe_right; /* right element */ \ struct type *rbe_parent; /* parent element */ \ int rbe_color; /* node color */ \ } #define RB_LEFT(elm, field) (elm)->field.rbe_left #define RB_RIGHT(elm, field) (elm)->field.rbe_right #define RB_PARENT(elm, field) (elm)->field.rbe_parent #define RB_COLOR(elm, field) (elm)->field.rbe_color #define RB_ROOT(head) (head)->rbh_root #define RB_EMPTY(head) (RB_ROOT(head) == NULL) #define RB_SET(elm, parent, field) do { \ RB_PARENT(elm, field) = parent; \ RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \ RB_COLOR(elm, field) = RB_RED; \ } while (/*CONSTCOND*/ 0) #define RB_SET_BLACKRED(black, red, field) do { \ RB_COLOR(black, field) = RB_BLACK; \ RB_COLOR(red, field) = RB_RED; \ } while (/*CONSTCOND*/ 0) #ifndef RB_AUGMENT #define RB_AUGMENT(x) do {} while (0) #endif #define RB_ROTATE_LEFT(head, elm, tmp, field) do { \ (tmp) = RB_RIGHT(elm, field); \ if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field)) != NULL) { \ RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \ } \ RB_AUGMENT(elm); \ if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \ if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \ RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \ else \ RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \ } else \ (head)->rbh_root = (tmp); \ RB_LEFT(tmp, field) = (elm); \ RB_PARENT(elm, field) = (tmp); \ RB_AUGMENT(tmp); \ if ((RB_PARENT(tmp, field))) \ RB_AUGMENT(RB_PARENT(tmp, field)); \ } while (/*CONSTCOND*/ 0) #define RB_ROTATE_RIGHT(head, elm, tmp, field) do { \ (tmp) = RB_LEFT(elm, field); \ if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field)) != NULL) { \ RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \ } \ RB_AUGMENT(elm); \ if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \ if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \ RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \ else \ RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \ } else \ (head)->rbh_root = (tmp); \ RB_RIGHT(tmp, field) = (elm); \ RB_PARENT(elm, field) = (tmp); \ RB_AUGMENT(tmp); \ if ((RB_PARENT(tmp, field))) \ RB_AUGMENT(RB_PARENT(tmp, field)); \ } while (/*CONSTCOND*/ 0) /* Generates prototypes and inline functions */ #define RB_PROTOTYPE(name, type, field, cmp) \ RB_PROTOTYPE_INTERNAL(name, type, field, cmp,) #define RB_PROTOTYPE_STATIC(name, type, field, cmp) \ RB_PROTOTYPE_INTERNAL(name, type, field, cmp, __unused static) #define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \ attr void name##_RB_INSERT_COLOR(struct name *, struct type *); \ attr void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *);\ attr struct type *name##_RB_REMOVE(struct name *, struct type *); \ attr struct type *name##_RB_INSERT(struct name *, struct type *); \ attr struct type *name##_RB_FIND(struct name *, struct type *); \ attr struct type *name##_RB_NFIND(struct name *, struct type *); \ attr struct type *name##_RB_NEXT(struct 
type *); \ attr struct type *name##_RB_PREV(struct type *); \ attr struct type *name##_RB_MINMAX(struct name *, int); \ \ /* Main rb operation. * Moves node close to the key of elm to top */ #define RB_GENERATE(name, type, field, cmp) \ RB_GENERATE_INTERNAL(name, type, field, cmp,) #define RB_GENERATE_STATIC(name, type, field, cmp) \ RB_GENERATE_INTERNAL(name, type, field, cmp, __unused static) #define RB_GENERATE_INTERNAL(name, type, field, cmp, attr) \ attr void \ name##_RB_INSERT_COLOR(struct name *head, struct type *elm) \ { \ struct type *parent, *gparent, *tmp; \ while ((parent = RB_PARENT(elm, field)) != NULL && \ RB_COLOR(parent, field) == RB_RED) { \ gparent = RB_PARENT(parent, field); \ if (parent == RB_LEFT(gparent, field)) { \ tmp = RB_RIGHT(gparent, field); \ if (tmp && RB_COLOR(tmp, field) == RB_RED) { \ RB_COLOR(tmp, field) = RB_BLACK; \ RB_SET_BLACKRED(parent, gparent, field);\ elm = gparent; \ continue; \ } \ if (RB_RIGHT(parent, field) == elm) { \ RB_ROTATE_LEFT(head, parent, tmp, field);\ tmp = parent; \ parent = elm; \ elm = tmp; \ } \ RB_SET_BLACKRED(parent, gparent, field); \ RB_ROTATE_RIGHT(head, gparent, tmp, field); \ } else { \ tmp = RB_LEFT(gparent, field); \ if (tmp && RB_COLOR(tmp, field) == RB_RED) { \ RB_COLOR(tmp, field) = RB_BLACK; \ RB_SET_BLACKRED(parent, gparent, field);\ elm = gparent; \ continue; \ } \ if (RB_LEFT(parent, field) == elm) { \ RB_ROTATE_RIGHT(head, parent, tmp, field);\ tmp = parent; \ parent = elm; \ elm = tmp; \ } \ RB_SET_BLACKRED(parent, gparent, field); \ RB_ROTATE_LEFT(head, gparent, tmp, field); \ } \ } \ RB_COLOR(head->rbh_root, field) = RB_BLACK; \ } \ \ attr void \ name##_RB_REMOVE_COLOR(struct name *head, struct type *parent, struct type *elm) \ { \ struct type *tmp; \ while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && \ elm != RB_ROOT(head)) { \ if (RB_LEFT(parent, field) == elm) { \ tmp = RB_RIGHT(parent, field); \ if (RB_COLOR(tmp, field) == RB_RED) { \ RB_SET_BLACKRED(tmp, parent, field); \ RB_ROTATE_LEFT(head, parent, tmp, field);\ tmp = RB_RIGHT(parent, field); \ } \ if ((RB_LEFT(tmp, field) == NULL || \ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\ (RB_RIGHT(tmp, field) == NULL || \ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\ RB_COLOR(tmp, field) = RB_RED; \ elm = parent; \ parent = RB_PARENT(elm, field); \ } else { \ if (RB_RIGHT(tmp, field) == NULL || \ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) {\ struct type *oleft; \ if ((oleft = RB_LEFT(tmp, field)) \ != NULL) \ RB_COLOR(oleft, field) = RB_BLACK;\ RB_COLOR(tmp, field) = RB_RED; \ RB_ROTATE_RIGHT(head, tmp, oleft, field);\ tmp = RB_RIGHT(parent, field); \ } \ RB_COLOR(tmp, field) = RB_COLOR(parent, field);\ RB_COLOR(parent, field) = RB_BLACK; \ if (RB_RIGHT(tmp, field)) \ RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK;\ RB_ROTATE_LEFT(head, parent, tmp, field);\ elm = RB_ROOT(head); \ break; \ } \ } else { \ tmp = RB_LEFT(parent, field); \ if (RB_COLOR(tmp, field) == RB_RED) { \ RB_SET_BLACKRED(tmp, parent, field); \ RB_ROTATE_RIGHT(head, parent, tmp, field);\ tmp = RB_LEFT(parent, field); \ } \ if ((RB_LEFT(tmp, field) == NULL || \ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\ (RB_RIGHT(tmp, field) == NULL || \ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\ RB_COLOR(tmp, field) = RB_RED; \ elm = parent; \ parent = RB_PARENT(elm, field); \ } else { \ if (RB_LEFT(tmp, field) == NULL || \ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) {\ struct type *oright; \ if ((oright = RB_RIGHT(tmp, field)) \ != NULL) \ 
RB_COLOR(oright, field) = RB_BLACK;\ RB_COLOR(tmp, field) = RB_RED; \ RB_ROTATE_LEFT(head, tmp, oright, field);\ tmp = RB_LEFT(parent, field); \ } \ RB_COLOR(tmp, field) = RB_COLOR(parent, field);\ RB_COLOR(parent, field) = RB_BLACK; \ if (RB_LEFT(tmp, field)) \ RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK;\ RB_ROTATE_RIGHT(head, parent, tmp, field);\ elm = RB_ROOT(head); \ break; \ } \ } \ } \ if (elm) \ RB_COLOR(elm, field) = RB_BLACK; \ } \ \ attr struct type * \ name##_RB_REMOVE(struct name *head, struct type *elm) \ { \ struct type *child, *parent, *old = elm; \ int color; \ if (RB_LEFT(elm, field) == NULL) \ child = RB_RIGHT(elm, field); \ else if (RB_RIGHT(elm, field) == NULL) \ child = RB_LEFT(elm, field); \ else { \ struct type *left; \ elm = RB_RIGHT(elm, field); \ while ((left = RB_LEFT(elm, field)) != NULL) \ elm = left; \ child = RB_RIGHT(elm, field); \ parent = RB_PARENT(elm, field); \ color = RB_COLOR(elm, field); \ if (child) \ RB_PARENT(child, field) = parent; \ if (parent) { \ if (RB_LEFT(parent, field) == elm) \ RB_LEFT(parent, field) = child; \ else \ RB_RIGHT(parent, field) = child; \ RB_AUGMENT(parent); \ } else \ RB_ROOT(head) = child; \ if (RB_PARENT(elm, field) == old) \ parent = elm; \ (elm)->field = (old)->field; \ if (RB_PARENT(old, field)) { \ if (RB_LEFT(RB_PARENT(old, field), field) == old)\ RB_LEFT(RB_PARENT(old, field), field) = elm;\ else \ RB_RIGHT(RB_PARENT(old, field), field) = elm;\ RB_AUGMENT(RB_PARENT(old, field)); \ } else \ RB_ROOT(head) = elm; \ RB_PARENT(RB_LEFT(old, field), field) = elm; \ if (RB_RIGHT(old, field)) \ RB_PARENT(RB_RIGHT(old, field), field) = elm; \ if (parent) { \ left = parent; \ do { \ RB_AUGMENT(left); \ } while ((left = RB_PARENT(left, field)) != NULL); \ } \ goto color; \ } \ parent = RB_PARENT(elm, field); \ color = RB_COLOR(elm, field); \ if (child) \ RB_PARENT(child, field) = parent; \ if (parent) { \ if (RB_LEFT(parent, field) == elm) \ RB_LEFT(parent, field) = child; \ else \ RB_RIGHT(parent, field) = child; \ RB_AUGMENT(parent); \ } else \ RB_ROOT(head) = child; \ color: \ if (color == RB_BLACK) \ name##_RB_REMOVE_COLOR(head, parent, child); \ return (old); \ } \ \ /* Inserts a node into the RB tree */ \ attr struct type * \ name##_RB_INSERT(struct name *head, struct type *elm) \ { \ struct type *tmp; \ struct type *parent = NULL; \ int comp = 0; \ tmp = RB_ROOT(head); \ while (tmp) { \ parent = tmp; \ comp = (cmp)(elm, parent); \ if (comp < 0) \ tmp = RB_LEFT(tmp, field); \ else if (comp > 0) \ tmp = RB_RIGHT(tmp, field); \ else \ return (tmp); \ } \ RB_SET(elm, parent, field); \ if (parent != NULL) { \ if (comp < 0) \ RB_LEFT(parent, field) = elm; \ else \ RB_RIGHT(parent, field) = elm; \ RB_AUGMENT(parent); \ } else \ RB_ROOT(head) = elm; \ name##_RB_INSERT_COLOR(head, elm); \ return (NULL); \ } \ \ /* Finds the node with the same key as elm */ \ attr struct type * \ name##_RB_FIND(struct name *head, struct type *elm) \ { \ struct type *tmp = RB_ROOT(head); \ int comp; \ while (tmp) { \ comp = cmp(elm, tmp); \ if (comp < 0) \ tmp = RB_LEFT(tmp, field); \ else if (comp > 0) \ tmp = RB_RIGHT(tmp, field); \ else \ return (tmp); \ } \ return (NULL); \ } \ \ /* Finds the first node greater than or equal to the search key */ \ attr struct type * \ name##_RB_NFIND(struct name *head, struct type *elm) \ { \ struct type *tmp = RB_ROOT(head); \ struct type *res = NULL; \ int comp; \ while (tmp) { \ comp = cmp(elm, tmp); \ if (comp < 0) { \ res = tmp; \ tmp = RB_LEFT(tmp, field); \ } \ else if (comp > 0) \ tmp = 
RB_RIGHT(tmp, field); \ else \ return (tmp); \ } \ return (res); \ } \ \ /* ARGSUSED */ \ attr struct type * \ name##_RB_NEXT(struct type *elm) \ { \ if (RB_RIGHT(elm, field)) { \ elm = RB_RIGHT(elm, field); \ while (RB_LEFT(elm, field)) \ elm = RB_LEFT(elm, field); \ } else { \ if (RB_PARENT(elm, field) && \ (elm == RB_LEFT(RB_PARENT(elm, field), field))) \ elm = RB_PARENT(elm, field); \ else { \ while (RB_PARENT(elm, field) && \ (elm == RB_RIGHT(RB_PARENT(elm, field), field)))\ elm = RB_PARENT(elm, field); \ elm = RB_PARENT(elm, field); \ } \ } \ return (elm); \ } \ \ /* ARGSUSED */ \ attr struct type * \ name##_RB_PREV(struct type *elm) \ { \ if (RB_LEFT(elm, field)) { \ elm = RB_LEFT(elm, field); \ while (RB_RIGHT(elm, field)) \ elm = RB_RIGHT(elm, field); \ } else { \ if (RB_PARENT(elm, field) && \ (elm == RB_RIGHT(RB_PARENT(elm, field), field))) \ elm = RB_PARENT(elm, field); \ else { \ while (RB_PARENT(elm, field) && \ (elm == RB_LEFT(RB_PARENT(elm, field), field)))\ elm = RB_PARENT(elm, field); \ elm = RB_PARENT(elm, field); \ } \ } \ return (elm); \ } \ \ attr struct type * \ name##_RB_MINMAX(struct name *head, int val) \ { \ struct type *tmp = RB_ROOT(head); \ struct type *parent = NULL; \ while (tmp) { \ parent = tmp; \ if (val < 0) \ tmp = RB_LEFT(tmp, field); \ else \ tmp = RB_RIGHT(tmp, field); \ } \ return (parent); \ } #define RB_NEGINF -1 #define RB_INF 1 #define RB_INSERT(name, x, y) name##_RB_INSERT(x, y) #define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y) #define RB_FIND(name, x, y) name##_RB_FIND(x, y) #define RB_NFIND(name, x, y) name##_RB_NFIND(x, y) #define RB_NEXT(name, x, y) name##_RB_NEXT(y) #define RB_PREV(name, x, y) name##_RB_PREV(y) #define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF) #define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF) #define RB_FOREACH(x, name, head) \ for ((x) = RB_MIN(name, head); \ (x) != NULL; \ (x) = name##_RB_NEXT(x)) #define RB_FOREACH_FROM(x, name, y) \ for ((x) = (y); \ ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \ (x) = (y)) #define RB_FOREACH_SAFE(x, name, head, y) \ for ((x) = RB_MIN(name, head); \ ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \ (x) = (y)) #define RB_FOREACH_REVERSE(x, name, head) \ for ((x) = RB_MAX(name, head); \ (x) != NULL; \ (x) = name##_RB_PREV(x)) #define RB_FOREACH_REVERSE_FROM(x, name, y) \ for ((x) = (y); \ ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \ (x) = (y)) #define RB_FOREACH_REVERSE_SAFE(x, name, head, y) \ for ((x) = RB_MAX(name, head); \ ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \ (x) = (y)) #endif /* _SYS_TREE_H_ */ libkqueue-2.3.1/src/linux/000077500000000000000000000000001342472035000154315ustar00rootroot00000000000000libkqueue-2.3.1/src/linux/platform.c000066400000000000000000000447601342472035000174340ustar00rootroot00000000000000/* * Copyright (c) 2011 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ # define _GNU_SOURCE # include #include #include #include "../common/private.h" #ifndef SO_GET_FILTER #define SO_GET_FILTER SO_ATTACH_FILTER #endif //XXX-FIXME TEMP const struct filter evfilt_proc = EVFILT_NOTIMPL; /* * Per-thread epoll event buffer used to ferry data between * kevent_wait() and kevent_copyout(). */ static __thread struct epoll_event epevt[MAX_KEVENT]; extern pthread_mutex_t kq_mtx; /* * Monitoring thread that takes care of cleaning up kqueues (on linux only) */ static pthread_t monitoring_thread; static pid_t monitoring_tid; /* Monitoring thread */ pthread_once_t monitoring_thread_initialized = PTHREAD_ONCE_INIT; pthread_cond_t monitoring_thread_cond = PTHREAD_COND_INITIALIZER; /* * Number of active kqueues. * When the last kqueue is closed, the monitoring thread can be stopped. */ static unsigned int kqueue_cnt = 0; /* * Map for kqueue pipes where index is the read side (for which signals are received) * and value is the write side that gets closed and corresponds to the kqueue id. */ static unsigned int *fd_map; /* * Map kqueue id to counter for kq cleanups. * When cleanup counter is at 0, cleanup can be performed by signal handler. * Otherwise, it means cleanup was already performed for this FD in linux_kqueue_free. */ static unsigned int *fd_cleanup_cnt; const struct kqueue_vtable kqops = { linux_kqueue_init, linux_kqueue_free, linux_kevent_wait, linux_kevent_copyout, NULL, NULL, linux_eventfd_init, linux_eventfd_close, linux_eventfd_raise, linux_eventfd_lower, linux_eventfd_descriptor }; static bool linux_kqueue_cleanup(struct kqueue *kq); unsigned int get_fd_limit(void); /* * Monitoring thread that loops on waiting for signals to be received */ static void * monitoring_thread_start(void *arg) { short end_thread = 0; int res = 0; siginfo_t info; int fd; int nb_max_fd; struct kqueue *kq; sigset_t monitoring_sig_set; nb_max_fd = get_fd_limit(); sigemptyset(&monitoring_sig_set); sigfillset(&monitoring_sig_set); pthread_sigmask(SIG_BLOCK, &monitoring_sig_set, NULL); sigemptyset(&monitoring_sig_set); sigaddset(&monitoring_sig_set, SIGRTMIN + 1); (void) pthread_mutex_lock(&kq_mtx); monitoring_tid = syscall(SYS_gettid); fd_map = calloc(nb_max_fd, sizeof(unsigned int)); if (fd_map == NULL) return NULL; fd_cleanup_cnt = calloc(nb_max_fd, sizeof(unsigned int)); if (fd_cleanup_cnt == NULL) return NULL; /* * Now that thread is initialized, let kqueue init resume */ pthread_cond_broadcast(&monitoring_thread_cond); (void) pthread_mutex_unlock(&kq_mtx); pthread_detach(pthread_self()); while (!end_thread) { /* * Wait for signal notifying us that a change has occured on the pipe * It's not possible to only listen on FD close but no other operation * should be performed on the kqueue. 
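         *
         * The read side of each kqueue's pipe is set up in linux_kqueue_init()
         * with O_ASYNC, F_SETSIG (SIGRTMIN + 1) and F_SETOWN_EX pointing at this
         * thread, so closing the write side (the fd handed back to the caller as
         * the kqueue id) delivers that signal here with si_fd identifying the
         * read side; fd_map then translates si_fd back to the kqueue id below.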
*/ res = sigwaitinfo(&monitoring_sig_set, &info); if( res != -1 ) { (void) pthread_mutex_lock(&kq_mtx); /* * Signal is received for read side of pipe * Get FD for write side as it's the kqueue identifier */ fd = fd_map[info.si_fd]; if (fd) { kq = kqueue_lookup(fd); if (kq) { /* If kqueue instance for this FD hasn't been cleaned yet */ if (fd_cleanup_cnt[kq->kq_id] == 0) { linux_kqueue_cleanup(kq); } /* Decrement cleanup counter as signal handler has been run for this FD */ fd_cleanup_cnt[kq->kq_id]--; } else { /* Should not happen */ dbg_puts("Failed to lookup FD"); } } else { /* Should not happen */ dbg_puts("Got signal from unknown FD"); } /* * Stop thread if all kqueues have been closed */ if (kqueue_cnt == 0) { end_thread = 1; /* Reset so that thread can be restarted */ monitoring_thread_initialized = PTHREAD_ONCE_INIT; /* Free thread resources */ free(fd_map); free(fd_cleanup_cnt); } (void) pthread_mutex_unlock(&kq_mtx); } else { dbg_perror("sigwait()"); } } return NULL; } static void linux_kqueue_start_thread() { if (pthread_create(&monitoring_thread, NULL, &monitoring_thread_start, NULL)) { dbg_perror("linux_kqueue_start_thread failure"); } /* Wait for thread creating to be done as we need monitoring_tid to be available */ pthread_cond_wait(&monitoring_thread_cond, &kq_mtx); } int linux_kqueue_init(struct kqueue *kq) { struct f_owner_ex sig_owner; kq->epollfd = epoll_create(1); if (kq->epollfd < 0) { dbg_perror("epoll_create(2)"); return (-1); } /* * The standard behaviour when closing a kqueue fd is for the underlying resources to be freed. * In order to catch the close on the libkqueue fd, we use a pipe and return the write end as kq_id. * Closing the end will cause the pipe to be close which will be caught by the monitoring thread. */ if (pipe(kq->pipefd)) { close(kq->epollfd); return (-1); } if (filter_register_all(kq) < 0) { error: close(kq->epollfd); close(kq->pipefd[0]); close(kq->pipefd[1]); return (-1); } kq->kq_id = kq->pipefd[1]; if (fcntl(kq->pipefd[0], F_SETFL, fcntl(kq->pipefd[0], F_GETFL, 0) | O_ASYNC) < 0) { dbg_perror("failed setting O_ASYNC"); goto error; } if (fcntl(kq->pipefd[0], F_SETSIG, SIGRTMIN + 1) < 0) { dbg_perror("failed settting F_SETSIG"); goto error; } (void) pthread_mutex_lock(&kq_mtx); /* Start monitoring thread during first initialization */ (void) pthread_once(&monitoring_thread_initialized, linux_kqueue_start_thread); /* Update pipe FD map */ fd_map[kq->pipefd[0]] = kq->pipefd[1]; /* Increment kqueue counter */ kqueue_cnt++; sig_owner.type = F_OWNER_TID; sig_owner.pid = monitoring_tid; if (fcntl(kq->pipefd[0], F_SETOWN_EX, &sig_owner) < 0) { dbg_perror("failed settting F_SETOWN"); goto error; } (void) pthread_mutex_unlock(&kq_mtx); #if DEADWOOD //might be useful in posix /* Add each filter's pollable descriptor to the epollset */ for (i = 0; i < EVFILT_SYSCOUNT; i++) { filt = &kq->kq_filt[i]; if (filt->kf_id == 0) continue; memset(&ev, 0, sizeof(ev)); ev.events = EPOLLIN; ev.data.ptr = filt; if (epoll_ctl(kq->kq_id, EPOLL_CTL_ADD, filt->kf_pfd, &ev) < 0) { dbg_perror("epoll_ctl(2)"); close(kq->kq_id); return (-1); } } #endif return (0); } /* * Cleanup kqueue resources * Should be done while holding kq_mtx * return * - true if epoll fd and pipes were closed * - false if epoll fd was already closed */ static bool linux_kqueue_cleanup(struct kqueue *kq) { char buffer; ssize_t ret; filter_unregister_all(kq); if (kq->epollfd > 0) { close(kq->epollfd); kq->epollfd = -1; } else { // Don't do cleanup if epollfd has already been closed return false; } /* * 
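     * Drain the read side of the pipe to work out how we got here: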
read will return 0 on pipe EOF (i.e. if the write end of the pipe has been closed) */ ret = read(kq->pipefd[0], &buffer, 1); if (ret == -1 && errno == EWOULDBLOCK) { // Shoudn't happen unless kqops.kqueue_free is called on an open FD dbg_puts("kqueue wasn't closed"); close(kq->pipefd[1]); kq->pipefd[1] = -1; } else if (ret > 0) { // Shouldn't happen unless data is written to kqueue FD // Ignore write and continue with close dbg_puts("Unexpected data available on kqueue FD"); } if (kq->pipefd[0] > 0) { close(kq->pipefd[0]); kq->pipefd[0] = -1; } fd_map[kq->pipefd[0]] = 0; /* Decrement kqueue counter */ kqueue_cnt--; return true; } void linux_kqueue_free(struct kqueue *kq) { /* Increment cleanup counter as cleanup is being performed outside signal handler */ if (linux_kqueue_cleanup(kq)) fd_cleanup_cnt[kq->kq_id]++; else /* Reset counter as FD had already been cleaned */ fd_cleanup_cnt[kq->kq_id] = 0; free(kq); } static int linux_kevent_wait_hires( struct kqueue *kq, const struct timespec *timeout) { int n; #if HAVE_DECL_PPOLL struct pollfd fds; dbg_printf("waiting for events (timeout=%ld sec %ld nsec)", timeout->tv_sec, timeout->tv_nsec); fds.fd = kqueue_epfd(kq); fds.events = POLLIN; n = ppoll(&fds, 1, timeout, NULL); #else int epfd; fd_set fds; dbg_printf("waiting for events (timeout=%ld sec %ld nsec)", timeout->tv_sec, timeout->tv_nsec); epfd = kqueue_epfd(kq); FD_ZERO(&fds); FD_SET(epfd, &fds); n = pselect(epfd + 1, &fds, NULL , NULL, timeout, NULL); #endif if (n < 0) { if (errno == EINTR) { dbg_puts("signal caught"); return (-1); } dbg_perror("ppoll(2) or pselect(2)"); return (-1); } return (n); } int linux_kevent_wait( struct kqueue *kq, int nevents, const struct timespec *ts) { int timeout, nret; /* Use a high-resolution syscall if the timeout value is less than one millisecond. */ if (ts != NULL && ts->tv_sec == 0 && ts->tv_nsec > 0 && ts->tv_nsec < 1000000) { nret = linux_kevent_wait_hires(kq, ts); if (nret <= 0) return (nret); /* epoll_wait() should have ready events */ timeout = 0; } else { /* Convert timeout to the format used by epoll_wait() */ if (ts == NULL) timeout = -1; else timeout = (1000 * ts->tv_sec) + (ts->tv_nsec / 1000000); } dbg_puts("waiting for events"); nret = epoll_wait(kqueue_epfd(kq), &epevt[0], nevents, timeout); if (nret < 0) { dbg_perror("epoll_wait"); return (-1); } return (nret); } int linux_kevent_copyout(struct kqueue *kq, int nready, struct kevent *eventlist, int nevents UNUSED) { struct epoll_event *ev; struct filter *filt; struct knote *kn; int i, nret, rv; nret = nready; for (i = 0; i < nready; i++) { ev = &epevt[i]; kn = (struct knote *) ev->data.ptr; filt = &kq->kq_filt[~(kn->kev.filter)]; rv = filt->kf_copyout(eventlist, kn, ev); if (slowpath(rv < 0)) { dbg_puts("knote_copyout failed"); /* XXX-FIXME: hard to handle this without losing events */ abort(); } /* * Certain flags cause the associated knote to be deleted * or disabled. */ if (eventlist->flags & EV_DISPATCH) knote_disable(filt, kn); //FIXME: Error checking if (eventlist->flags & EV_ONESHOT) { knote_delete(filt, kn); //FIXME: Error checking } /* If an empty kevent structure is returned, the event is discarded. 
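         * A filter marks an event for discard by zeroing dst->filter in its
         * copyout routine, e.g. evfilt_read for a regular file that has reached
         * EOF, or evfilt_vnode when inotify reports IN_IGNORED or a close.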
*/ /* TODO: add these semantics to windows + solaris platform.c */ if (fastpath(eventlist->filter != 0)) { eventlist++; } else { dbg_puts("spurious wakeup, discarding event"); nret--; } } return (nret); } int linux_eventfd_init(struct eventfd *e) { int evfd; evfd = eventfd(0, 0); if (evfd < 0) { dbg_perror("eventfd"); return (-1); } if (fcntl(evfd, F_SETFL, O_NONBLOCK) < 0) { dbg_perror("fcntl"); close(evfd); return (-1); } e->ef_id = evfd; return (0); } void linux_eventfd_close(struct eventfd *e) { close(e->ef_id); e->ef_id = -1; } int linux_eventfd_raise(struct eventfd *e) { uint64_t counter; int rv = 0; dbg_puts("raising event level"); counter = 1; if (write(e->ef_id, &counter, sizeof(counter)) < 0) { switch (errno) { case EAGAIN: /* Not considered an error */ break; case EINTR: rv = -EINTR; break; default: dbg_printf("write(2): %s", strerror(errno)); rv = -1; } } return (rv); } int linux_eventfd_lower(struct eventfd *e) { uint64_t cur; ssize_t n; int rv = 0; /* Reset the counter */ dbg_puts("lowering event level"); n = read(e->ef_id, &cur, sizeof(cur)); if (n < 0) { switch (errno) { case EAGAIN: /* Not considered an error */ break; case EINTR: rv = -EINTR; break; default: dbg_printf("read(2): %s", strerror(errno)); rv = -1; } } else if (n != sizeof(cur)) { dbg_puts("short read"); rv = -1; } return (rv); } int linux_eventfd_descriptor(struct eventfd *e) { return (e->ef_id); } int linux_get_descriptor_type(struct knote *kn) { socklen_t slen; struct stat sb; int ret, lsock, stype; socklen_t out_len; const int fd = (int)kn->kev.ident; /* * Determine the actual descriptor type. */ if (fstat(fd, &sb) < 0) { dbg_perror("fstat(2)"); return (-1); } switch (sb.st_mode & S_IFMT) { case S_IFREG: dbg_printf("fd %d is a regular file\n", fd); kn->kn_flags |= KNFL_FILE; break; case S_IFIFO: dbg_printf("fd %d is a pipe\n", fd); kn->kn_flags |= KNFL_PIPE; break; case S_IFBLK: dbg_printf("fd %d is a block device\n", fd); kn->kn_flags |= KNFL_BLOCKDEV; break; case S_IFCHR: dbg_printf("fd %d is a character device\n", fd); kn->kn_flags |= KNFL_CHARDEV; break; case S_IFSOCK: dbg_printf("fd %d is a socket\n", fd); break; /* deferred type determination */ default: errno = EBADF; dbg_perror("unknown fd type"); return -1; } /* * Test if the socket is active or passive. */ if (!S_ISSOCK(sb.st_mode)) return (0); /* * Determine socket type. */ slen = sizeof(stype); stype = 0; ret = getsockopt(fd, SOL_SOCKET, SO_TYPE, &stype, &slen); if (ret < 0) { dbg_perror("getsockopt(3)"); return (-1); } switch (stype) { case SOCK_STREAM: dbg_printf("fd %d is a stream socket\n", fd); kn->kn_flags |= KNFL_SOCKET_STREAM; break; case SOCK_DGRAM: dbg_printf("fd %d is a datagram socket\n", fd); kn->kn_flags |= KNFL_SOCKET_DGRAM; break; case SOCK_RDM: dbg_printf("fd %d is a reliable datagram socket\n", fd); kn->kn_flags |= KNFL_SOCKET_RDM; break; case SOCK_SEQPACKET: dbg_printf("fd %d is a sequenced and reliable datagram socket\n", fd); kn->kn_flags |= KNFL_SOCKET_SEQPACKET; break; default: errno = EBADF; dbg_perror("unknown socket type"); return (-1); } slen = sizeof(lsock); lsock = 0; ret = getsockopt(fd, SOL_SOCKET, SO_ACCEPTCONN, &lsock, &slen); if (ret < 0) { switch (errno) { case ENOTSOCK: /* same as lsock = 0 */ break; default: dbg_perror("getsockopt(3)"); return (-1); } } else { if (lsock) kn->kn_flags |= KNFL_SOCKET_PASSIVE; } /* * Test if socket has a filter * pcap file descriptors need to be considered as passive sockets as * SIOCINQ always returns 0 even if data is available. 
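     * (evfilt_read_copyout reports passive sockets with data = 1 instead of
     * querying SIOCINQ for a byte count.)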
* Looking at SO_GET_FILTER is a good way of doing this. */ out_len = 0; ret = getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &out_len); if (ret < 0) { switch (errno) { case ENOTSOCK: /* same as lsock = 0 */ break; default: dbg_perror("getsockopt(3)"); return (-1); } } else { if (out_len) kn->kn_flags |= KNFL_SOCKET_PASSIVE; } return (0); } char * epoll_event_dump(struct epoll_event *evt) { static __thread char buf[128]; if (evt == NULL) return "(null)"; #define EPEVT_DUMP(attrib) \ if (evt->events & attrib) \ strcat(&buf[0], #attrib" "); snprintf(&buf[0], 128, " { data = %p, events = ", evt->data.ptr); EPEVT_DUMP(EPOLLIN); EPEVT_DUMP(EPOLLOUT); #if defined(HAVE_EPOLLRDHUP) EPEVT_DUMP(EPOLLRDHUP); #endif EPEVT_DUMP(EPOLLONESHOT); EPEVT_DUMP(EPOLLET); strcat(&buf[0], "}\n"); return (&buf[0]); #undef EPEVT_DUMP } int epoll_update(int op, struct filter *filt, struct knote *kn, struct epoll_event *ev) { dbg_printf("op=%d fd=%d events=%s", op, (int)kn->kev.ident, epoll_event_dump(ev)); if (epoll_ctl(filter_epfd(filt), op, kn->kev.ident, ev) < 0) { dbg_printf("epoll_ctl(2): %s", strerror(errno)); return (-1); } return (0); } /* * Given a file descriptor, return the path to the file it refers to. */ int linux_fd_to_path(char *buf, size_t bufsz, int fd) { char path[1024]; //TODO: Maxpathlen, etc. if (snprintf(&path[0], sizeof(path), "/proc/%d/fd/%d", getpid(), fd) < 0) return (-1); memset(buf, 0, bufsz); return (readlink(path, buf, bufsz)); } libkqueue-2.3.1/src/linux/platform.h000066400000000000000000000063461342472035000174370ustar00rootroot00000000000000/* * Copyright (c) 2011 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #ifndef _KQUEUE_LINUX_PLATFORM_H #define _KQUEUE_LINUX_PLATFORM_H struct filter; #include #include #include #include #if HAVE_SYS_EVENTFD_H # include #else # ifdef SYS_eventfd2 # define eventfd(a,b) syscall(SYS_eventfd2, (a), (b)) # else # define eventfd(a,b) syscall(SYS_eventfd, (a), (b)) # endif static inline int eventfd_write(int fd, uint64_t val) { if (write(fd, &val, sizeof(val)) < (ssize_t) sizeof(val)) return (-1); else return (0); } #endif #if HAVE_SYS_TIMERFD_H # include #endif /* * Get the current thread ID */ # define _GNU_SOURCE # include # include #ifndef __ANDROID__ extern long int syscall (long int __sysno, ...); #endif /* Convenience macros to access the epoll descriptor for the kqueue */ #define kqueue_epfd(kq) ((kq)->epollfd) #define filter_epfd(filt) ((filt)->kf_kqueue->epollfd) /* * Additional members of struct filter */ #undef FILTER_PLATFORM_SPECIFIC /* * Additional members of struct knote */ #define KNOTE_PLATFORM_SPECIFIC \ int kn_epollfd; /* A copy of filter->epfd */ \ int kn_registered; /* Is FD registered with epoll */ \ union { \ int kn_timerfd; \ int kn_signalfd; \ int kn_inotifyfd; \ int kn_eventfd; \ } kdata /* * Additional members of struct kqueue */ #define KQUEUE_PLATFORM_SPECIFIC \ int epollfd; /* Main epoll FD */ \ int pipefd[2]; /* FD for pipe that catches close */ \ struct epoll_event kq_plist[MAX_KEVENT]; \ size_t kq_nplist int linux_kqueue_init(struct kqueue *); void linux_kqueue_free(struct kqueue *); int linux_kevent_wait(struct kqueue *, int, const struct timespec *); int linux_kevent_copyout(struct kqueue *, int, struct kevent *, int); int linux_knote_copyout(struct kevent *, struct knote *); int linux_eventfd_init(struct eventfd *); void linux_eventfd_close(struct eventfd *); int linux_eventfd_raise(struct eventfd *); int linux_eventfd_lower(struct eventfd *); int linux_eventfd_descriptor(struct eventfd *); /* utility functions */ int linux_get_descriptor_type(struct knote *); int linux_fd_to_path(char *, size_t, int); /* epoll-related functions */ int epoll_update(int, struct filter *, struct knote *, struct epoll_event *); char * epoll_event_dump(struct epoll_event *); #endif /* ! _KQUEUE_LINUX_PLATFORM_H */ libkqueue-2.3.1/src/linux/proc.c000066400000000000000000000137571342472035000165550ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "sys/event.h" #include "private.h" /* XXX-FIXME Should only have one wait_thread per process. 
Now, there is one thread per kqueue */ struct evfilt_data { pthread_t wthr_id; pthread_cond_t wait_cond; pthread_mutex_t wait_mtx; }; //FIXME: WANT: static void * void * wait_thread(void *arg) { struct filter *filt = (struct filter *) arg; uint64_t counter = 1; const int options = WEXITED | WNOWAIT; struct knote *kn; siginfo_t si; sigset_t sigmask; /* Block all signals */ sigfillset (&sigmask); pthread_sigmask(SIG_BLOCK, &sigmask, NULL); for (;;) { /* Wait for a child process to exit(2) */ if (waitid(P_ALL, 0, &si, options) != 0) { if (errno == ECHILD) { dbg_puts("got ECHILD, waiting for wakeup condition"); pthread_mutex_lock(&filt->kf_data->wait_mtx); pthread_cond_wait(&filt->kf_data->wait_cond, &filt->kf_data->wait_mtx); pthread_mutex_unlock(&filt->kf_data->wait_mtx); dbg_puts("awoken from ECHILD-induced sleep"); continue; } dbg_puts(" waitid(2) returned"); if (errno == EINTR) continue; dbg_perror("waitid(2)"); break; } /* Scan the wait queue to see if anyone is interested */ kn = knote_lookup(filt, si.si_pid); if (kn == NULL) continue; /* Create a proc_event */ if (si.si_code == CLD_EXITED) { kn->kev.data = si.si_status; } else if (si.si_code == CLD_KILLED) { /* FIXME: probably not true on BSD */ /* FIXME: arbitrary non-zero number */ kn->kev.data = 254; } else { /* Should never happen. */ /* FIXME: arbitrary non-zero number */ kn->kev.data = 1; } knote_enqueue(filt, kn); /* Indicate read(2) readiness */ if (write(filt->kf_pfd, &counter, sizeof(counter)) < 0) { if (errno != EAGAIN) { dbg_printf("write(2): %s", strerror(errno)); /* TODO: set filter error flag */ break; } } } /* TODO: error handling */ return (NULL); } int evfilt_proc_init(struct filter *filt) { #if FIXME struct evfilt_data *ed; int efd = -1; if ((ed = calloc(1, sizeof(*ed))) == NULL) return (-1); filt->kf_data = ed; pthread_mutex_init(&ed->wait_mtx, NULL); pthread_cond_init(&ed->wait_cond, NULL); if ((efd = eventfd(0, 0)) < 0) goto errout; if (fcntl(filt->kf_pfd, F_SETFL, O_NONBLOCK) < 0) goto errout; filt->kf_pfd = efd; if (pthread_create(&ed->wthr_id, NULL, wait_thread, filt) != 0) goto errout; return (0); errout: if (efd >= 0) close(efd); free(ed); close(filt->kf_pfd); return (-1); #endif return (-1); /*STUB*/ } void evfilt_proc_destroy(struct filter *filt) { //TODO: pthread_cancel(filt->kf_data->wthr_id); close(filt->kf_pfd); } int evfilt_proc_copyout(struct filter *filt, struct kevent *dst, int maxevents) { struct knote *kn; int nevents = 0; uint64_t cur; /* Reset the counter */ if (read(filt->kf_pfd, &cur, sizeof(cur)) < sizeof(cur)) { dbg_printf("read(2): %s", strerror(errno)); return (-1); } dbg_printf(" counter=%llu", (unsigned long long) cur); for (kn = knote_dequeue(filt); kn != NULL; kn = knote_dequeue(filt)) { kevent_dump(&kn->kev); memcpy(dst, &kn->kev, sizeof(*dst)); if (kn->kev.flags & EV_DISPATCH) { KNOTE_DISABLE(kn); } if (kn->kev.flags & EV_ONESHOT) { knote_free(filt, kn); } else { kn->kev.data = 0; //why?? } if (++nevents > maxevents) break; dst++; } if (knote_events_pending(filt)) { /* XXX-FIXME: If there are leftover events on the waitq, re-arm the eventfd. 
list */ abort(); } return (nevents); } int evfilt_proc_knote_create(struct filter *filt, struct knote *kn) { return (0); /* STUB */ } int evfilt_proc_knote_modify(struct filter *filt, struct knote *kn, const struct kevent *kev) { return (0); /* STUB */ } int evfilt_proc_knote_delete(struct filter *filt, struct knote *kn) { return (0); /* STUB */ } int evfilt_proc_knote_enable(struct filter *filt, struct knote *kn) { return (0); /* STUB */ } int evfilt_proc_knote_disable(struct filter *filt, struct knote *kn) { return (0); /* STUB */ } const struct filter evfilt_proc_DEADWOOD = { 0, //XXX-FIXME broken: EVFILT_PROC, evfilt_proc_init, evfilt_proc_destroy, evfilt_proc_copyout, evfilt_proc_knote_create, evfilt_proc_knote_modify, evfilt_proc_knote_delete, evfilt_proc_knote_enable, evfilt_proc_knote_disable, }; libkqueue-2.3.1/src/linux/read.c000066400000000000000000000200561342472035000165130ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "private.h" /* * Return the offset from the current position to end of file. */ static intptr_t get_eof_offset(int fd) { off_t curpos; struct stat sb; curpos = lseek(fd, 0, SEEK_CUR); if (curpos == (off_t) -1) { dbg_perror("lseek(2)"); curpos = 0; } if (fstat(fd, &sb) < 0) { dbg_perror("fstat(2)"); sb.st_size = 1; } dbg_printf("curpos=%zu size=%zu\n", (size_t)curpos, (size_t)sb.st_size); return (sb.st_size - curpos); //FIXME: can overflow } int evfilt_read_copyout(struct kevent *dst, struct knote *src, void *ptr) { int ret; int serr; socklen_t slen = sizeof(serr); struct epoll_event * const ev = (struct epoll_event *) ptr; /* Special case: for regular files, return the offset from current position to end of file */ if (src->kn_flags & KNFL_FILE) { memcpy(dst, &src->kev, sizeof(*dst)); dst->data = get_eof_offset(src->kev.ident); if (dst->data == 0) { dst->filter = 0; /* Will cause the kevent to be discarded */ if (epoll_ctl(src->kn_epollfd, EPOLL_CTL_DEL, src->kdata.kn_eventfd, NULL) < 0) { dbg_perror("epoll_ctl(2)"); return (-1); } src->kn_registered = 0; #if FIXME /* XXX-FIXME Switch to using kn_inotifyfd to monitor for IN_ATTRIB events that may signify the file size has changed. This code is not tested. 
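           Note: inotify reports size changes caused by write(2) as IN_MODIFY
           rather than IN_ATTRIB, so the watch below would likely also need
           IN_MODIFY in its mask (untested, like the rest of this block).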
*/ int inofd; char path[PATH_MAX]; inofd = inotify_init(); if (inofd < 0) { dbg_perror("inotify_init(2)"); (void) close(inofd); return (-1); } src->kdata.kn_inotifyfd = inofd; if (linux_fd_to_path(&path[0], sizeof(path), src->kev.ident) < 0) return (-1); if (inotify_add_watch(inofd, path, IN_ATTRIB) < 0) { dbg_perror("inotify_add_watch"); return (-1); } if (epoll_ctl(src->kn_epollfd, EPOLL_CTL_ADD, src->kdata.kn_inotifyfd, NULL) < 0) { dbg_perror("epoll_ctl(2)"); return (-1); } /* FIXME: race here, should we check the EOF status again ? */ #endif } return (0); } dbg_printf("epoll: %s", epoll_event_dump(ev)); memcpy(dst, &src->kev, sizeof(*dst)); #if defined(HAVE_EPOLLRDHUP) if (ev->events & EPOLLRDHUP || ev->events & EPOLLHUP) dst->flags |= EV_EOF; #else if (ev->events & EPOLLHUP) dst->flags |= EV_EOF; #endif if (ev->events & EPOLLERR) { if (src->kn_flags & KNFL_SOCKET) { ret = getsockopt(src->kev.ident, SOL_SOCKET, SO_ERROR, &serr, &slen); dst->fflags = ((ret < 0) ? errno : serr); } else { dst->fflags = EIO; } } if (src->kn_flags & KNFL_SOCKET_PASSIVE) { /* On return, data contains the length of the socket backlog. This is not available under Linux. */ dst->data = 1; } else { /* On return, data contains the number of bytes of protocol data available to read. */ int i; if (ioctl(dst->ident, SIOCINQ, &i) < 0) { /* race condition with socket close, so ignore this error */ dbg_puts("ioctl(2) of socket failed"); dst->data = 0; } else { dst->data = i; if (dst->data == 0 && src->kn_flags & KNFL_SOCKET_STREAM) dst->flags |= EV_EOF; } } return (0); } int evfilt_read_knote_create(struct filter *filt, struct knote *kn) { struct epoll_event ev; if (linux_get_descriptor_type(kn) < 0) return (-1); /* Convert the kevent into an epoll_event */ #if defined(HAVE_EPOLLRDHUP) kn->data.events = EPOLLIN | EPOLLRDHUP; #else kn->data.events = EPOLLIN; #endif if (kn->kev.flags & EV_ONESHOT || kn->kev.flags & EV_DISPATCH) kn->data.events |= EPOLLONESHOT; if (kn->kev.flags & EV_CLEAR) kn->data.events |= EPOLLET; memset(&ev, 0, sizeof(ev)); ev.events = kn->data.events; ev.data.ptr = kn; /* Special case: for regular files, add a surrogate eventfd that is always readable */ if (kn->kn_flags & KNFL_FILE) { int evfd; kn->kn_epollfd = filter_epfd(filt); evfd = eventfd(0, 0); if (evfd < 0) { dbg_perror("eventfd(2)"); return (-1); } if (eventfd_write(evfd, 1) < 0) { dbg_perror("eventfd_write(3)"); (void) close(evfd); return (-1); } kn->kdata.kn_eventfd = evfd; if (epoll_ctl(kn->kn_epollfd, EPOLL_CTL_ADD, kn->kdata.kn_eventfd, &ev) < 0) { dbg_printf("epoll_ctl(2): %s", strerror(errno)); return (-1); } kn->kn_registered = 1; return (0); } return epoll_update(EPOLL_CTL_ADD, filt, kn, &ev); } int evfilt_read_knote_modify(struct filter *filt, struct knote *kn, const struct kevent *kev) { (void) filt; (void) kn; (void) kev; return (-1); /* STUB */ } int evfilt_read_knote_delete(struct filter *filt, struct knote *kn) { if (kn->kev.flags & EV_DISABLE) return (0); if ((kn->kn_flags & KNFL_FILE) && (kn->kdata.kn_eventfd != -1)) { if (kn->kn_registered && epoll_ctl(kn->kn_epollfd, EPOLL_CTL_DEL, kn->kdata.kn_eventfd, NULL) < 0) { dbg_perror("epoll_ctl(2)"); return (-1); } kn->kn_registered = 0; (void) close(kn->kdata.kn_eventfd); kn->kdata.kn_eventfd = -1; return (0); } else { return epoll_update(EPOLL_CTL_DEL, filt, kn, NULL); } // clang will complain about not returning a value otherwise return (-1); } int evfilt_read_knote_enable(struct filter *filt, struct knote *kn) { struct epoll_event ev; memset(&ev, 0, sizeof(ev)); ev.events 
= kn->data.events; ev.data.ptr = kn; if (kn->kn_flags & KNFL_FILE) { if (epoll_ctl(kn->kn_epollfd, EPOLL_CTL_ADD, kn->kdata.kn_eventfd, &ev) < 0) { dbg_perror("epoll_ctl(2)"); return (-1); } kn->kn_registered = 1; return (0); } else { return epoll_update(EPOLL_CTL_ADD, filt, kn, &ev); } // clang will complain about not returning a value otherwise return (-1); } int evfilt_read_knote_disable(struct filter *filt, struct knote *kn) { if (kn->kn_flags & KNFL_FILE) { if (epoll_ctl(kn->kn_epollfd, EPOLL_CTL_DEL, kn->kdata.kn_eventfd, NULL) < 0) { dbg_perror("epoll_ctl(2)"); return (-1); } kn->kn_registered = 1; return (0); } else { return epoll_update(EPOLL_CTL_DEL, filt, kn, NULL); } } const struct filter evfilt_read = { EVFILT_READ, NULL, NULL, evfilt_read_copyout, evfilt_read_knote_create, evfilt_read_knote_modify, evfilt_read_knote_delete, evfilt_read_knote_enable, evfilt_read_knote_disable, }; libkqueue-2.3.1/src/linux/signal.c000066400000000000000000000122061342472035000170530ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "private.h" #if HAVE_SYS_SIGNALFD_H # include #else #define signalfd(a,b,c) syscall(SYS_signalfd, (a), (b), (c)) #define SFD_NONBLOCK 04000 struct signalfd_siginfo { uint32_t ssi_signo; int32_t ssi_errno; int32_t ssi_code; uint32_t ssi_pid; uint32_t ssi_uid; int32_t ssi_fd; uint32_t ssi_tid; uint32_t ssi_band; uint32_t ssi_overrun; uint32_t ssi_trapno; int32_t ssi_status; int32_t ssi_int; uint64_t ssi_ptr; uint64_t ssi_utime; uint64_t ssi_stime; uint64_t ssi_addr; uint8_t __pad[48]; }; #endif static void signalfd_reset(int sigfd) { struct signalfd_siginfo sig; ssize_t n; /* Discard any pending signal */ n = read(sigfd, &sig, sizeof(sig)); if (n < 0 || n != sizeof(sig)) { if (errno == EWOULDBLOCK) return; //FIXME: eintr? 
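        /* As the FIXME above suggests, read(2) on a signalfd may also fail
         * with EINTR; retrying the read instead of falling through to
         * abort() would arguably be the safer behaviour here. */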
dbg_perror("read(2) from signalfd"); abort(); } } static int signalfd_add(int epfd, int sigfd, void *ptr) { struct epoll_event ev; int rv; /* Add the signalfd to the kqueue's epoll descriptor set */ memset(&ev, 0, sizeof(ev)); ev.events = EPOLLIN; ev.data.ptr = ptr; rv = epoll_ctl(epfd, EPOLL_CTL_ADD, sigfd, &ev); if (rv < 0) { dbg_perror("epoll_ctl(2)"); return (-1); } return (0); } static int signalfd_create(int epfd, void *ptr, int signum) { static int flags = SFD_NONBLOCK; sigset_t sigmask; int sigfd; /* Create a signalfd */ sigemptyset(&sigmask); sigaddset(&sigmask, signum); sigfd = signalfd(-1, &sigmask, flags); /* WORKAROUND: Flags are broken on kernels older than Linux 2.6.27 */ if (sigfd < 0 && errno == EINVAL && flags != 0) { flags = 0; sigfd = signalfd(-1, &sigmask, flags); } if (sigfd < 0) { dbg_perror("signalfd(2)"); goto errout; } /* Block the signal handler from being invoked */ if (sigprocmask(SIG_BLOCK, &sigmask, NULL) < 0) { dbg_perror("sigprocmask(2)"); goto errout; } signalfd_reset(sigfd); if (signalfd_add(epfd, sigfd, ptr) < 0) goto errout; dbg_printf("added sigfd %d to epfd %d (signum=%d)", sigfd, epfd, signum); return (sigfd); errout: (void) close(sigfd); return (-1); } int evfilt_signal_copyout(struct kevent *dst, struct knote *src, void *x UNUSED) { int sigfd; sigfd = src->kdata.kn_signalfd; signalfd_reset(sigfd); memcpy(dst, &src->kev, sizeof(*dst)); /* NOTE: dst->data should be the number of times the signal occurred, but that information is not available. */ dst->data = 1; return (0); } int evfilt_signal_knote_create(struct filter *filt, struct knote *kn) { int fd; fd = signalfd_create(filter_epfd(filt), kn, kn->kev.ident); if (fd > 0) { kn->kev.flags |= EV_CLEAR; kn->kdata.kn_signalfd = fd; return (0); } else { kn->kdata.kn_signalfd = -1; return (-1); } } int evfilt_signal_knote_modify(struct filter *filt UNUSED, struct knote *kn UNUSED, const struct kevent *kev UNUSED) { /* Nothing to do since the signal number does not change. */ return (0); } int evfilt_signal_knote_delete(struct filter *filt, struct knote *kn) { const int sigfd = kn->kdata.kn_signalfd; /* Needed so that delete() can be called after disable() */ if (kn->kdata.kn_signalfd == -1) return (0); if (epoll_ctl(filter_epfd(filt), EPOLL_CTL_DEL, sigfd, NULL) < 0) { dbg_perror("epoll_ctl(2)"); return (-1); } if (close(sigfd) < 0) { dbg_perror("close(2)"); return (-1); } /* NOTE: This does not call sigprocmask(3) to unblock the signal. */ kn->kdata.kn_signalfd = -1; return (0); } int evfilt_signal_knote_enable(struct filter *filt, struct knote *kn) { dbg_printf("enabling ident %u", (unsigned int) kn->kev.ident); return evfilt_signal_knote_create(filt, kn); } int evfilt_signal_knote_disable(struct filter *filt, struct knote *kn) { dbg_printf("disabling ident %u", (unsigned int) kn->kev.ident); return evfilt_signal_knote_delete(filt, kn); } const struct filter evfilt_signal = { EVFILT_SIGNAL, NULL, NULL, evfilt_signal_copyout, evfilt_signal_knote_create, evfilt_signal_knote_modify, evfilt_signal_knote_delete, evfilt_signal_knote_enable, evfilt_signal_knote_disable, }; libkqueue-2.3.1/src/linux/timer.c000066400000000000000000000144101342472035000167150ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "private.h" #ifndef HAVE_SYS_TIMERFD_H /* Android 4.0 does not have this header, but the kernel supports timerfds */ #ifndef SYS_timerfd_create #ifdef __ARM_EABI__ #define __NR_timerfd_create (__NR_SYSCALL_BASE+350) #define __NR_timerfd_settime (__NR_SYSCALL_BASE+353) #define __NR_timerfd_gettime (__NR_SYSCALL_BASE+354) #else #error Unsupported architecture, need to get the syscall numbers #endif #define SYS_timerfd_create __NR_timerfd_create #define SYS_timerfd_settime __NR_timerfd_settime #define SYS_timerfd_gettime __NR_timerfd_gettime #endif /* ! SYS_timerfd_create */ /* XXX-FIXME These are horrible hacks that are only known to be true on RHEL 5 x86. */ #ifndef SYS_timerfd_settime #define SYS_timerfd_settime (SYS_timerfd_create + 1) #endif #ifndef SYS_timerfd_gettime #define SYS_timerfd_gettime (SYS_timerfd_create + 2) #endif int timerfd_create(int clockid, int flags) { return syscall(SYS_timerfd_create, clockid, flags); } int timerfd_settime(int ufc, int flags, const struct itimerspec *utmr, struct itimerspec *otmr) { return syscall(SYS_timerfd_settime, ufc, flags, utmr, otmr); } int timerfd_gettime(int ufc, struct itimerspec *otmr) { return syscall(SYS_timerfd_gettime, ufc, otmr); } #endif #ifndef NDEBUG static char * itimerspec_dump(struct itimerspec *ts) { static __thread char buf[1024]; snprintf(buf, sizeof(buf), "itimer: [ interval=%lu s %lu ns, next expire=%lu s %lu ns ]", ts->it_interval.tv_sec, ts->it_interval.tv_nsec, ts->it_value.tv_sec, ts->it_value.tv_nsec ); return (buf); } #endif /* Convert time data into seconds+nanoseconds */ #define NOTE_TIMER_MASK (NOTE_ABSOLUTE-1) static void convert_timedata_to_itimerspec(struct itimerspec *dst, long src, unsigned int flags, int oneshot) { time_t sec, nsec; switch (flags & NOTE_TIMER_MASK) { case NOTE_USECONDS: sec = src / 1000000; nsec = (src % 1000000); break; case NOTE_NSECONDS: sec = src / 1000000000; nsec = (src % 1000000000); break; case NOTE_SECONDS: sec = src; nsec = 0; break; default: /* milliseconds */ sec = src / 1000; nsec = (src % 1000) * 1000000; } /* Set the interval */ if (oneshot) { dst->it_interval.tv_sec = 0; dst->it_interval.tv_nsec = 0; } else { dst->it_interval.tv_sec = sec; dst->it_interval.tv_nsec = nsec; } /* Set the initial expiration */ dst->it_value.tv_sec = sec; dst->it_value.tv_nsec = nsec; dbg_printf("%s", itimerspec_dump(dst)); } int evfilt_timer_copyout(struct kevent *dst, struct knote *src, void *ptr) { struct epoll_event * const ev = (struct epoll_event *) ptr; uint64_t expired; ssize_t n; memcpy(dst, &src->kev, sizeof(*dst)); if (ev->events & EPOLLERR) dst->fflags = 1; /* FIXME: Return the actual timer error */ /* On return, data contains the number of times the timer has been trigered. 
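     * This mirrors timerfd semantics: read(2) on the timer fd yields a
     * uint64_t holding the number of expirations since the last read.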
*/ n = read(src->data.pfd, &expired, sizeof(expired)); if (n != sizeof(expired)) { dbg_puts("invalid read from timerfd"); expired = 1; /* Fail gracefully */ } dst->data = expired; return (0); } int evfilt_timer_knote_create(struct filter *filt, struct knote *kn) { struct epoll_event ev; struct itimerspec ts; int tfd; int flags; kn->kev.flags |= EV_CLEAR; tfd = timerfd_create(CLOCK_MONOTONIC, 0); if (tfd < 0) { dbg_printf("timerfd_create(2): %s", strerror(errno)); return (-1); } dbg_printf("created timerfd %d", tfd); convert_timedata_to_itimerspec(&ts, kn->kev.data, kn->kev.fflags, kn->kev.flags & EV_ONESHOT); flags = (kn->kev.fflags & NOTE_ABSOLUTE) ? TFD_TIMER_ABSTIME : 0; if (timerfd_settime(tfd, flags, &ts, NULL) < 0) { dbg_printf("timerfd_settime(2): %s", strerror(errno)); close(tfd); return (-1); } memset(&ev, 0, sizeof(ev)); ev.events = EPOLLIN | EPOLLET; if (kn->kev.flags & (EV_ONESHOT | EV_DISPATCH)) ev.events |= EPOLLONESHOT; ev.data.ptr = kn; if (epoll_ctl(filter_epfd(filt), EPOLL_CTL_ADD, tfd, &ev) < 0) { dbg_printf("epoll_ctl(2): %d", errno); close(tfd); return (-1); } kn->data.pfd = tfd; return (0); } int evfilt_timer_knote_modify(struct filter *filt, struct knote *kn, const struct kevent *kev) { (void)filt; (void)kn; (void)kev; return (0); /* STUB */ } int evfilt_timer_knote_delete(struct filter *filt, struct knote *kn) { int rv = 0; if (kn->data.pfd == -1) return (0); if (epoll_ctl(filter_epfd(filt), EPOLL_CTL_DEL, kn->data.pfd, NULL) < 0) { dbg_printf("epoll_ctl(2): %s", strerror(errno)); rv = -1; } if (close(kn->data.pfd) < 0) { dbg_printf("close(2): %s", strerror(errno)); rv = -1; } kn->data.pfd = -1; return (rv); } int evfilt_timer_knote_enable(struct filter *filt, struct knote *kn) { return evfilt_timer_knote_create(filt, kn); } int evfilt_timer_knote_disable(struct filter *filt, struct knote *kn) { return evfilt_timer_knote_delete(filt, kn); } const struct filter evfilt_timer = { EVFILT_TIMER, NULL, NULL, evfilt_timer_copyout, evfilt_timer_knote_create, evfilt_timer_knote_modify, evfilt_timer_knote_delete, evfilt_timer_knote_enable, evfilt_timer_knote_disable, }; libkqueue-2.3.1/src/linux/user.c000066400000000000000000000133621342472035000165600ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include "sys/event.h" #include "private.h" /* NOTE: copy+pasted from linux_eventfd_raise() */ static int eventfd_raise(int evfd) { uint64_t counter; int rv = 0; dbg_puts("raising event level"); counter = 1; if (write(evfd, &counter, sizeof(counter)) < 0) { switch (errno) { case EAGAIN: /* Not considered an error */ break; case EINTR: rv = -EINTR; break; default: dbg_printf("write(2): %s", strerror(errno)); rv = -1; } } return (rv); } /* NOTE: copy+pasted from linux_eventfd_lower() */ static int eventfd_lower(int evfd) { uint64_t cur; ssize_t n; int rv = 0; /* Reset the counter */ dbg_puts("lowering event level"); n = read(evfd, &cur, sizeof(cur)); if (n < 0) { switch (errno) { case EAGAIN: /* Not considered an error */ break; case EINTR: rv = -EINTR; break; default: dbg_printf("read(2): %s", strerror(errno)); rv = -1; } } else if (n != sizeof(cur)) { dbg_puts("short read"); rv = -1; } return (rv); } int linux_evfilt_user_copyout(struct kevent *dst, struct knote *src, void *ptr UNUSED) { memcpy(dst, &src->kev, sizeof(*dst)); dst->fflags &= ~NOTE_FFCTRLMASK; //FIXME: Not sure if needed dst->fflags &= ~NOTE_TRIGGER; if (src->kev.flags & EV_ADD) { /* NOTE: True on FreeBSD but not consistent behavior with other filters. */ dst->flags &= ~EV_ADD; } if (src->kev.flags & EV_CLEAR) src->kev.fflags &= ~NOTE_TRIGGER; if (src->kev.flags & (EV_DISPATCH | EV_CLEAR | EV_ONESHOT)) { if (eventfd_lower(src->kdata.kn_eventfd) < 0) return (-1); } if (src->kev.flags & EV_DISPATCH) src->kev.fflags &= ~NOTE_TRIGGER; return (0); } int linux_evfilt_user_knote_create(struct filter *filt, struct knote *kn) { struct epoll_event ev; int evfd; /* Create an eventfd */ evfd = eventfd(0, 0); if (evfd < 0) { dbg_perror("eventfd"); goto errout; } /* Add the eventfd to the epoll set */ memset(&ev, 0, sizeof(ev)); ev.events = EPOLLIN; ev.data.ptr = kn; if (epoll_ctl(filter_epfd(filt), EPOLL_CTL_ADD, evfd, &ev) < 0) { dbg_perror("epoll_ctl(2)"); goto errout; } kn->kdata.kn_eventfd = evfd; kn->kn_registered = 1; return (0); errout: (void) close(evfd); kn->kdata.kn_eventfd = -1; kn->kn_registered = 0; return (-1); } int linux_evfilt_user_knote_modify(struct filter *filt UNUSED, struct knote *kn, const struct kevent *kev) { unsigned int ffctrl; unsigned int fflags; /* Excerpted from sys/kern/kern_event.c in FreeBSD HEAD */ ffctrl = kev->fflags & NOTE_FFCTRLMASK; fflags = kev->fflags & NOTE_FFLAGSMASK; switch (ffctrl) { case NOTE_FFNOP: break; case NOTE_FFAND: kn->kev.fflags &= fflags; break; case NOTE_FFOR: kn->kev.fflags |= fflags; break; case NOTE_FFCOPY: kn->kev.fflags = fflags; break; default: /* XXX Return error? */ break; } if ((!(kn->kev.flags & EV_DISABLE)) && kev->fflags & NOTE_TRIGGER) { kn->kev.fflags |= NOTE_TRIGGER; if (eventfd_raise(kn->kdata.kn_eventfd) < 0) return (-1); } return (0); } int linux_evfilt_user_knote_delete(struct filter *filt, struct knote *kn) { if (kn->kn_registered && epoll_ctl(filter_epfd(filt), EPOLL_CTL_DEL, kn->kdata.kn_eventfd, NULL) < 0) { dbg_perror("epoll_ctl(2)"); return (-1); } kn->kn_registered = 0; if (close(kn->kdata.kn_eventfd) < 0) { dbg_perror("close(2)"); return (-1); } dbg_printf("removed eventfd %d from the epollfd", kn->kdata.kn_eventfd); kn->kdata.kn_eventfd = -1; return (0); } int linux_evfilt_user_knote_enable(struct filter *filt, struct knote *kn) { /* FIXME: what happens if NOTE_TRIGGER is in fflags? should the event fire? 
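     * (The BSD kqueue implementations appear to keep the triggered state
     * across EV_DISABLE/EV_ENABLE, so re-checking kn->kev.fflags for
     * NOTE_TRIGGER here and re-raising the eventfd may be needed for full
     * compatibility -- unverified.)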
*/ return linux_evfilt_user_knote_create(filt, kn); } int linux_evfilt_user_knote_disable(struct filter *filt, struct knote *kn) { return linux_evfilt_user_knote_delete(filt, kn); } const struct filter evfilt_user = { EVFILT_USER, NULL, NULL, linux_evfilt_user_copyout, linux_evfilt_user_knote_create, linux_evfilt_user_knote_modify, linux_evfilt_user_knote_delete, linux_evfilt_user_knote_enable, linux_evfilt_user_knote_disable, }; libkqueue-2.3.1/src/linux/vnode.c000066400000000000000000000202411342472035000167070ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "private.h" #ifndef NDEBUG static char * inotify_mask_dump(uint32_t mask) { static __thread char buf[1024]; #define INEVT_MASK_DUMP(attrib) \ if (mask & attrib) \ strcat(buf, #attrib" "); snprintf(buf, sizeof(buf), "mask = %d (", mask); INEVT_MASK_DUMP(IN_ACCESS); INEVT_MASK_DUMP(IN_MODIFY); INEVT_MASK_DUMP(IN_ATTRIB); INEVT_MASK_DUMP(IN_CLOSE_WRITE); INEVT_MASK_DUMP(IN_CLOSE_NOWRITE); INEVT_MASK_DUMP(IN_OPEN); INEVT_MASK_DUMP(IN_MOVED_FROM); INEVT_MASK_DUMP(IN_MOVED_TO); INEVT_MASK_DUMP(IN_CREATE); INEVT_MASK_DUMP(IN_DELETE); INEVT_MASK_DUMP(IN_DELETE_SELF); INEVT_MASK_DUMP(IN_MOVE_SELF); buf[strlen(buf) - 1] = ')'; return (buf); } static char * inotify_event_dump(struct inotify_event *evt) { static __thread char buf[1024]; if (evt->len > 0) snprintf(buf, sizeof(buf), "wd=%d mask=%s name=%s", evt->wd, inotify_mask_dump(evt->mask), evt->name); else snprintf(buf, sizeof(buf), "wd=%d mask=%s", evt->wd, inotify_mask_dump(evt->mask)); return (buf); } #endif /* !NDEBUG */ int get_one_event(struct inotify_event *dst, size_t len, int inofd) { ssize_t n; size_t want = sizeof(struct inotify_event); dbg_puts("reading one inotify event"); for (;;) { if (len < want) { dbg_printf("Needed %zu bytes have %zu bytes", want, len); return (-1); } n = read(inofd, dst, want); if (n < 0) { switch (errno) { case EINVAL: want += sizeof(struct inotify_event); /* FALL-THROUGH */ case EINTR: continue; } dbg_perror("read"); return (-1); } break; } dbg_printf("read(2) from inotify wd: %ld bytes", (long)n); return (0); } static int add_watch(struct filter *filt, struct knote *kn) { struct epoll_event ev; int ifd; char path[PATH_MAX]; uint32_t mask; /* Convert the fd to a pathname */ if (linux_fd_to_path(&path[0], sizeof(path), kn->kev.ident) < 0) return (-1); /* Convert the fflags to the inotify mask */ mask = IN_CLOSE; if (kn->kev.fflags & NOTE_DELETE) mask |= IN_ATTRIB | IN_DELETE_SELF; if (kn->kev.fflags & NOTE_WRITE) mask |= IN_MODIFY | IN_ATTRIB; if (kn->kev.fflags & NOTE_EXTEND) mask |= IN_MODIFY | IN_ATTRIB; if ((kn->kev.fflags & NOTE_ATTRIB) || (kn->kev.fflags & NOTE_LINK)) mask |= IN_ATTRIB; if (kn->kev.fflags & NOTE_RENAME) mask |= IN_MOVE_SELF; if (kn->kev.flags & EV_ONESHOT) mask |= IN_ONESHOT; /* Create an inotify 
descriptor */ ifd = inotify_init(); if (ifd < 0) { dbg_perror("inotify_init(2)"); return (-1); } /* Add the watch */ dbg_printf("inotify_add_watch(2); inofd=%d, %s, path=%s", ifd, inotify_mask_dump(mask), path); kn->kev.data = inotify_add_watch(ifd, path, mask); if (kn->kev.data < 0) { dbg_perror("inotify_add_watch(2)"); goto errout; } /* Add the inotify fd to the epoll set */ memset(&ev, 0, sizeof(ev)); ev.events = EPOLLIN; ev.data.ptr = kn; if (epoll_ctl(filter_epfd(filt), EPOLL_CTL_ADD, ifd, &ev) < 0) { dbg_perror("epoll_ctl(2)"); goto errout; } kn->kdata.kn_inotifyfd = ifd; return (0); errout: kn->kdata.kn_inotifyfd = -1; (void) close(ifd); return (-1); } static int delete_watch(struct filter *filt, struct knote *kn) { int ifd = kn->kdata.kn_inotifyfd; if (ifd < 0) return (0); if (epoll_ctl(filter_epfd(filt), EPOLL_CTL_DEL, ifd, NULL) < 0) { dbg_perror("epoll_ctl(2)"); return (-1); } (void) close(ifd); kn->kdata.kn_inotifyfd = -1; return (0); } int evfilt_vnode_copyout(struct kevent *dst, struct knote *src, void *ptr UNUSED) { uint8_t buf[sizeof(struct inotify_event) + NAME_MAX + 1] __attribute__ ((aligned(__alignof__(struct inotify_event)))); struct inotify_event *evt; struct stat sb; evt = (struct inotify_event *)buf; if (get_one_event(evt, sizeof(buf), src->kdata.kn_inotifyfd) < 0) return (-1); dbg_printf("inotify event: %s", inotify_event_dump(evt)); if (evt->mask & IN_IGNORED) { /* TODO: possibly return error when fs is unmounted */ dst->filter = 0; return (0); } /* Check if the watched file has been closed, and XXX-this may not exactly match the kevent() behavior if multiple file de scriptors reference the same file. */ if (evt->mask & IN_CLOSE_WRITE || evt->mask & IN_CLOSE_NOWRITE) { src->kev.flags |= EV_ONESHOT; /* KLUDGE: causes the knote to be deleted */ dst->filter = 0; /* KLUDGE: causes the event to be discarded */ return (0); } memcpy(dst, &src->kev, sizeof(*dst)); dst->data = 0; dst->fflags = 0; /* No error checking because fstat(2) should rarely fail */ //FIXME: EINTR if (fstat(src->kev.ident, &sb) < 0 && errno == ENOENT) { if (src->kev.fflags & NOTE_DELETE) dst->fflags |= NOTE_DELETE; } else { if ((evt->mask & IN_ATTRIB || evt->mask & IN_MODIFY)) { if (sb.st_nlink == 0 && src->kev.fflags & NOTE_DELETE) dst->fflags |= NOTE_DELETE; if (sb.st_nlink != src->data.vnode.nlink && src->kev.fflags & NOTE_LINK) dst->fflags |= NOTE_LINK; #if HAVE_NOTE_TRUNCATE if (sb.st_nsize == 0 && src->kev.fflags & NOTE_TRUNCATE) dst->fflags |= NOTE_TRUNCATE; #endif if (sb.st_size > src->data.vnode.size && src->kev.fflags & NOTE_WRITE) dst->fflags |= NOTE_EXTEND; src->data.vnode.nlink = sb.st_nlink; src->data.vnode.size = sb.st_size; } } if (evt->mask & IN_MODIFY && src->kev.fflags & NOTE_WRITE) dst->fflags |= NOTE_WRITE; if (evt->mask & IN_ATTRIB && src->kev.fflags & NOTE_ATTRIB) dst->fflags |= NOTE_ATTRIB; if (evt->mask & IN_MOVE_SELF && src->kev.fflags & NOTE_RENAME) dst->fflags |= NOTE_RENAME; if (evt->mask & IN_DELETE_SELF && src->kev.fflags & NOTE_DELETE) dst->fflags |= NOTE_DELETE; return (0); } int evfilt_vnode_knote_create(struct filter *filt, struct knote *kn) { struct stat sb; if (fstat(kn->kev.ident, &sb) < 0) { dbg_puts("fstat failed"); return (-1); } kn->data.vnode.nlink = sb.st_nlink; kn->data.vnode.size = sb.st_size; kn->kev.data = -1; return (add_watch(filt, kn)); } int evfilt_vnode_knote_modify(struct filter *filt, struct knote *kn, const struct kevent *kev) { (void)filt; (void)kn; (void)kev; return (-1); /* FIXME - STUB */ } int evfilt_vnode_knote_delete(struct filter *filt, 
struct knote *kn) { return delete_watch(filt, kn); } int evfilt_vnode_knote_enable(struct filter *filt, struct knote *kn) { return add_watch(filt, kn); } int evfilt_vnode_knote_disable(struct filter *filt, struct knote *kn) { return delete_watch(filt, kn); } const struct filter evfilt_vnode = { EVFILT_VNODE, NULL, NULL, evfilt_vnode_copyout, evfilt_vnode_knote_create, evfilt_vnode_knote_modify, evfilt_vnode_knote_delete, evfilt_vnode_knote_enable, evfilt_vnode_knote_disable, }; libkqueue-2.3.1/src/linux/write.c000066400000000000000000000073471342472035000167420ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "private.h" int evfilt_write_copyout(struct kevent *dst, struct knote *src, void *ptr) { int ret; int serr; socklen_t slen = sizeof(serr); struct epoll_event * const ev = (struct epoll_event *) ptr; epoll_event_dump(ev); memcpy(dst, &src->kev, sizeof(*dst)); #if defined(HAVE_EPOLLRDHUP) if (ev->events & EPOLLRDHUP || ev->events & EPOLLHUP) dst->flags |= EV_EOF; #else if (ev->events & EPOLLHUP) dst->flags |= EV_EOF; #endif if (ev->events & EPOLLERR) { if (src->kn_flags & KNFL_SOCKET) { ret = getsockopt(src->kev.ident, SOL_SOCKET, SO_ERROR, &serr, &slen); dst->fflags = ((ret < 0) ? 
errno : serr); } else { dst->fflags = EIO; } } /* On return, data contains the the amount of space remaining in the write buffer */ if (ioctl(dst->ident, SIOCOUTQ, &dst->data) < 0) { /* race condition with socket close, so ignore this error */ dbg_puts("ioctl(2) of socket failed"); dst->data = 0; } return (0); } int evfilt_write_knote_create(struct filter *filt, struct knote *kn) { struct epoll_event ev; if (linux_get_descriptor_type(kn) < 0) return (-1); if (kn->kn_flags & KNFL_FILE) { errno = EBADF; return (-1); } /* Convert the kevent into an epoll_event */ kn->data.events = EPOLLOUT; if (kn->kev.flags & EV_ONESHOT || kn->kev.flags & EV_DISPATCH) kn->data.events |= EPOLLONESHOT; if (kn->kev.flags & EV_CLEAR) kn->data.events |= EPOLLET; memset(&ev, 0, sizeof(ev)); ev.events = kn->data.events; ev.data.ptr = kn; return epoll_update(EPOLL_CTL_ADD, filt, kn, &ev); } int evfilt_write_knote_modify(struct filter *filt, struct knote *kn, const struct kevent *kev) { (void) filt; (void) kn; (void) kev; return (-1); /* STUB */ } int evfilt_write_knote_delete(struct filter *filt, struct knote *kn) { if (kn->kev.flags & EV_DISABLE) return (0); else return epoll_update(EPOLL_CTL_DEL, filt, kn, NULL); } int evfilt_write_knote_enable(struct filter *filt, struct knote *kn) { struct epoll_event ev; memset(&ev, 0, sizeof(ev)); ev.events = kn->data.events; ev.data.ptr = kn; return epoll_update(EPOLL_CTL_ADD, filt, kn, &ev); } int evfilt_write_knote_disable(struct filter *filt, struct knote *kn) { return epoll_update(EPOLL_CTL_DEL, filt, kn, NULL); } const struct filter evfilt_write = { EVFILT_WRITE, NULL, NULL, evfilt_write_copyout, evfilt_write_knote_create, evfilt_write_knote_modify, evfilt_write_knote_delete, evfilt_write_knote_enable, evfilt_write_knote_disable, }; libkqueue-2.3.1/src/posix/000077500000000000000000000000001342472035000154345ustar00rootroot00000000000000libkqueue-2.3.1/src/posix/kevent.c000066400000000000000000000043711342472035000171010ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include "sys/event.h" #include "private.h" const struct filter evfilt_proc = EVFILT_NOTIMPL; int posix_kevent_wait( struct kqueue *kq, const struct timespec *timeout) { int n, nfds; fd_set rfds; nfds = kq->kq_nfds; rfds = kq->kq_fds; dbg_puts("waiting for events"); n = pselect(nfds, &rfds, NULL , NULL, timeout, NULL); if (n < 0) { if (errno == EINTR) { dbg_puts("signal caught"); return (-1); } dbg_perror("pselect(2)"); return (-1); } kq->kq_rfds = rfds; return (n); } int posix_kevent_copyout(struct kqueue *kq, int nready, struct kevent *eventlist, int nevents) { struct filter *filt; int i, rv, nret; nret = 0; for (i = 0; (i < EVFILT_SYSCOUNT && nready > 0 && nevents > 0); i++) { // dbg_printf("eventlist: n = %d nevents = %d", nready, nevents); filt = &kq->kq_filt[i]; // dbg_printf("pfd[%d] = %d", i, filt->kf_pfd); if (FD_ISSET(filt->kf_pfd, &kq->kq_rfds)) { dbg_printf("pending events for filter %d (%s)", filt->kf_id, filter_name(filt->kf_id)); rv = filt->kf_copyout(filt, eventlist, nevents); if (rv < 0) { dbg_puts("kevent_copyout failed"); nret = -1; break; } nret += rv; eventlist += rv; nevents -= rv; nready--; } } return (nret); } libkqueue-2.3.1/src/posix/platform.c000066400000000000000000000042171342472035000174300ustar00rootroot00000000000000/* * Copyright (c) 2011 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "../common/private.h" int posix_kqueue_init(struct kqueue *kq UNUSED) { return (0); } void posix_kqueue_free(struct kqueue *kq UNUSED) { } int posix_eventfd_init(struct eventfd *e) { int sd[2]; if (socketpair(AF_UNIX, SOCK_STREAM, 0, sd) < 0) { return (-1); } if ((fcntl(sd[0], F_SETFL, O_NONBLOCK) < 0) || (fcntl(sd[1], F_SETFL, O_NONBLOCK) < 0)) { close(sd[0]); close(sd[1]); return (-1); } e->ef_wfd = sd[0]; e->ef_id = sd[1]; return (0); } void posix_eventfd_close(struct eventfd *e) { close(e->ef_id); close(e->ef_wfd); e->ef_id = -1; } int posix_eventfd_raise(struct eventfd *e) { dbg_puts("raising event level"); if (write(e->ef_wfd, ".", 1) < 0) { /* FIXME: handle EAGAIN and EINTR */ dbg_printf("write(2) on fd %d: %s", e->ef_wfd, strerror(errno)); return (-1); } return (0); } int posix_eventfd_lower(struct eventfd *e) { char buf[1024]; /* Reset the counter */ dbg_puts("lowering event level"); if (read(e->ef_id, &buf, sizeof(buf)) < 0) { /* FIXME: handle EAGAIN and EINTR */ /* FIXME: loop so as to consume all data.. 
may need mutex */ dbg_printf("read(2): %s", strerror(errno)); return (-1); } return (0); } int posix_eventfd_descriptor(struct eventfd *e) { return (e->ef_id); } libkqueue-2.3.1/src/posix/platform.h000066400000000000000000000050201342472035000174260ustar00rootroot00000000000000/* * Copyright (c) 2011 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _KQUEUE_POSIX_PLATFORM_H #define _KQUEUE_POSIX_PLATFORM_H /* Required by glibc for MAP_ANON */ #define __USE_MISC 1 #include "../../include/sys/event.h" /* * GCC-compatible atomic operations */ #define atomic_inc(p) __sync_add_and_fetch((p), 1) #define atomic_dec(p) __sync_sub_and_fetch((p), 1) #define atomic_cas(p, oval, nval) __sync_val_compare_and_swap(p, oval, nval) #define atomic_ptr_cas(p, oval, nval) __sync_val_compare_and_swap(p, oval, nval) /* * GCC-compatible branch prediction macros */ #define fastpath(x) __builtin_expect((x), 1) #define slowpath(x) __builtin_expect((x), 0) /* * GCC-compatible attributes */ #define VISIBLE __attribute__((visibility("default"))) #define HIDDEN __attribute__((visibility("hidden"))) #define UNUSED __attribute__((unused)) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Additional members of 'struct eventfd' */ #define EVENTFD_PLATFORM_SPECIFIC \ int ef_wfd void posix_kqueue_free(struct kqueue *); int posix_kqueue_init(struct kqueue *); int posix_kevent_wait(struct kqueue *, const struct timespec *); int posix_kevent_copyout(struct kqueue *, int, struct kevent *, int); int posix_eventfd_init(struct eventfd *); void posix_eventfd_close(struct eventfd *); int posix_eventfd_raise(struct eventfd *); int posix_eventfd_lower(struct eventfd *); int posix_eventfd_descriptor(struct eventfd *); #endif /* ! _KQUEUE_POSIX_PLATFORM_H */ libkqueue-2.3.1/src/posix/proc.c000066400000000000000000000115121342472035000165430ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "sys/event.h" #include "private.h" pthread_cond_t wait_cond = PTHREAD_COND_INITIALIZER; pthread_mutex_t wait_mtx = PTHREAD_MUTEX_INITIALIZER; struct evfilt_data { pthread_t wthr_id; }; static void * wait_thread(void *arg) { struct filter *filt = (struct filter *) arg; struct knote *kn; int status, result; pid_t pid; sigset_t sigmask; /* Block all signals */ sigfillset (&sigmask); sigdelset(&sigmask, SIGCHLD); pthread_sigmask(SIG_BLOCK, &sigmask, NULL); for (;;) { /* Wait for a child process to exit(2) */ if ((pid = waitpid(-1, &status, 0)) < 0) { if (errno == ECHILD) { dbg_puts("got ECHILD, waiting for wakeup condition"); pthread_mutex_lock(&wait_mtx); pthread_cond_wait(&wait_cond, &wait_mtx); pthread_mutex_unlock(&wait_mtx); dbg_puts("awoken from ECHILD-induced sleep"); continue; } if (errno == EINTR) continue; dbg_printf("wait(2): %s", strerror(errno)); break; } /* Create a proc_event */ if (WIFEXITED(status)) { result = WEXITSTATUS(status); } else if (WIFSIGNALED(status)) { /* FIXME: probably not true on BSD */ result = WTERMSIG(status); } else { dbg_puts("unexpected code path"); result = 234; /* arbitrary error value */ } /* Scan the wait queue to see if anyone is interested */ pthread_mutex_lock(&filt->kf_mtx); kn = knote_lookup(filt, pid); if (kn != NULL) { kn->kev.data = result; kn->kev.fflags = NOTE_EXIT; LIST_REMOVE(kn, entries); LIST_INSERT_HEAD(&filt->kf_eventlist, kn, entries); /* Indicate read(2) readiness */ /* TODO: error handling */ filter_raise(filt); } pthread_mutex_unlock(&filt->kf_mtx); } /* TODO: error handling */ return (NULL); } int evfilt_proc_init(struct filter *filt) { struct evfilt_data *ed; if ((ed = calloc(1, sizeof(*ed))) == NULL) return (-1); if (filter_socketpair(filt) < 0) goto errout; if (pthread_create(&ed->wthr_id, NULL, wait_thread, filt) != 0) goto errout; return (0); errout: free(ed); return (-1); } void evfilt_proc_destroy(struct filter *filt) { //TODO: pthread_cancel(filt->kf_data->wthr_id); close(filt->kf_pfd); } int evfilt_proc_copyin(struct filter *filt, struct knote *dst, const struct kevent *src) { if (src->flags & EV_ADD && KNOTE_EMPTY(dst)) { memcpy(&dst->kev, src, sizeof(*src)); /* TODO: think about locking the mutex first.. */ pthread_cond_signal(&wait_cond); } if (src->flags & EV_ADD || src->flags & EV_ENABLE) { /* Nothing to do.. */ } return (0); } int evfilt_proc_copyout(struct filter *filt, struct kevent *dst, int maxevents) { struct knote *kn; int nevents = 0; filter_lower(filt); LIST_FOREACH(kn, &filt->kf_eventlist, entries) { kevent_dump(&kn->kev); memcpy(dst, &kn->kev, sizeof(*dst)); dst->fflags = NOTE_EXIT; if (kn->kev.flags & EV_DISPATCH) { KNOTE_DISABLE(kn); } #if FIXME /* XXX - NEED TO use safe foreach instead */ if (kn->kev.flags & EV_ONESHOT) knote_free(kn); #endif if (++nevents > maxevents) break; dst++; } if (!LIST_EMPTY(&filt->kf_eventlist)) filter_raise(filt); return (nevents); } const struct filter evfilt_proc = { EVFILT_PROC, evfilt_proc_init, evfilt_proc_destroy, evfilt_proc_copyin, evfilt_proc_copyout, }; libkqueue-2.3.1/src/posix/signal.c000066400000000000000000000126221342472035000170600ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include "sys/event.h" #include "private.h" /* Highest signal number supported. POSIX standard signals are < 32 */ #define SIGNAL_MAX 32 struct sentry { struct filter *s_filt; struct knote *s_knote; volatile uint32_t s_cnt; }; static pthread_mutex_t sigtbl_mtx = PTHREAD_MUTEX_INITIALIZER; static struct sentry sigtbl[SIGNAL_MAX]; /* XXX-FIXME this will not work with multiple kqueue objects. Need a linked list? Or should signals be delivered to all kqueue objects? */ static struct eventfd * sig_eventfd; static void signal_handler(int sig) { struct sentry *s = &sigtbl[sig]; dbg_printf("caught sig=%d", sig); atomic_inc(&s->s_cnt); #if defined(__sun__) if (port_send(s->s_filt->kf_kqueue->kq_port, X_PORT_SOURCE_SIGNAL, &sigtbl[sig]) < 0) { return; //FIXME: errorhandling } #else kqops.eventfd_raise(sig_eventfd); #endif } static int catch_signal(struct filter *filt, struct knote *kn) { int sig; struct sigaction sa; sig = kn->kev.ident; memset(&sa, 0, sizeof(sa)); sa.sa_handler = signal_handler; sa.sa_flags |= SA_RESTART; sigfillset(&sa.sa_mask); if (sigaction(kn->kev.ident, &sa, NULL) == -1) { dbg_perror("sigaction"); return (-1); } /* FIXME: will clobber previous entry, if any */ pthread_mutex_lock(&sigtbl_mtx); sigtbl[kn->kev.ident].s_filt = filt; sigtbl[kn->kev.ident].s_knote = kn; pthread_mutex_unlock(&sigtbl_mtx); dbg_printf("installed handler for signal %d", sig); return (0); } static int ignore_signal(int sig) { struct sigaction sa; memset(&sa, 0, sizeof(sa)); sa.sa_handler = SIG_IGN; sigemptyset(&sa.sa_mask); if (sigaction(sig, &sa, NULL) == -1) { dbg_perror("sigaction"); return (-1); } pthread_mutex_lock(&sigtbl_mtx); sigtbl[sig].s_filt = NULL; sigtbl[sig].s_knote = NULL; pthread_mutex_unlock(&sigtbl_mtx); dbg_printf("removed handler for signal %d", sig); return (0); } int evfilt_signal_init(struct filter *filt) { if (kqops.eventfd_init(&filt->kf_efd) < 0) return (-1); sig_eventfd = &filt->kf_efd; // XXX - does not work w/ multiple kqueues return (0); } void evfilt_signal_destroy(struct filter *filt) { kqops.eventfd_close(&filt->kf_efd); } int evfilt_signal_knote_create(struct filter *filt, struct knote *kn) { if (kn->kev.ident >= SIGNAL_MAX) { dbg_printf("unsupported signal number %u", (unsigned int) kn->kev.ident); return (-1); } kn->kev.flags |= EV_CLEAR; return catch_signal(filt, kn); } int evfilt_signal_knote_modify(struct filter *filt, struct knote *kn, const struct kevent *kev) { (void) filt; kn->kev.flags = kev->flags | EV_CLEAR; return (0); } int evfilt_signal_knote_delete(struct filter *filt, struct knote *kn) { (void) filt; return ignore_signal(kn->kev.ident); } int evfilt_signal_knote_enable(struct filter *filt, struct knote *kn) { return catch_signal(filt, kn); } int evfilt_signal_knote_disable(struct filter *filt, struct knote *kn) { (void) filt; return ignore_signal(kn->kev.ident); } int evfilt_signal_copyout(struct kevent *dst, struct knote *src, void *ptr UNUSED) { struct sentry 
*s; struct knote *kn; int sig; (void) src; #if defined(__sun__) port_event_t pe; port_event_dequeue(&pe, filt->kf_kqueue); s = (struct sentry *) pe.portev_user; sig = s - &sigtbl[0]; #else kqops.eventfd_lower(sig_eventfd); sig = 1; //XXX-FIXME totally broken, workaround just to compile s = &sigtbl[sig]; #endif kn = s->s_knote; //TODO: READ counter: s->s_knote->kev.data = ?; /* TODO: dst->data should be the number of times the signal occurred */ dst->ident = sig; dst->filter = EVFILT_SIGNAL; dst->udata = kn->kev.udata; dst->flags = kn->kev.flags; dst->fflags = 0; dst->data = 1; #if DEADWOOD if (kn->kev.flags & EV_DISPATCH) { ignore_signal(kn->kev.ident); KNOTE_DISABLE(kn); } else if (kn->kev.flags & EV_ONESHOT) { ignore_signal(kn->kev.ident); knote_free(filt, kn); } #endif return (1); } const struct filter evfilt_signal = { EVFILT_SIGNAL, evfilt_signal_init, evfilt_signal_destroy, evfilt_signal_copyout, evfilt_signal_knote_create, evfilt_signal_knote_modify, evfilt_signal_knote_delete, evfilt_signal_knote_enable, evfilt_signal_knote_disable, }; libkqueue-2.3.1/src/posix/timer.c000066400000000000000000000207511342472035000167250ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "sys/event.h" #include "private.h" /* A request to sleep for a certain time */ struct sleepreq { int pfd; /* fd to poll for ACKs */ int wfd; /* fd to wake up when sleep is over */ uintptr_t ident; /* from kevent */ intptr_t interval; /* sleep time, in milliseconds */ pthread_cond_t cond; pthread_mutex_t mtx; struct sleepstat *stat; }; /* Information about a successful sleep operation */ struct sleepinfo { uintptr_t ident; /* from kevent */ uintptr_t counter; /* number of times the timer expired */ }; static void * sleeper_thread(void *arg) { struct sleepreq *sr = (struct sleepreq *) arg; struct sleepinfo si; struct timeval now; struct timespec req; sigset_t mask; ssize_t cnt; bool cts = true; /* Clear To Send */ char buf[1]; int rv; #if 0 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); #endif /* Initialize the response */ si.ident = sr->ident; si.counter = 0; /* Block all signals */ sigfillset(&mask); (void) pthread_sigmask(SIG_BLOCK, &mask, NULL); for (;;) { pthread_mutex_lock(&sr->mtx); /* Convert the timeout into an absolute time */ /* Convert milliseconds into seconds+nanoseconds */ gettimeofday(&now, NULL); req.tv_sec = now.tv_sec + sr->interval / 1000; req.tv_nsec = now.tv_usec + ((sr->interval % 1000) * 1000000); /* Sleep */ dbg_printf("sleeping for %ld ms", (unsigned long) sr->interval); rv = pthread_cond_timedwait(&sr->cond, &sr->mtx, &req); pthread_mutex_unlock(&sr->mtx); if (rv == 0) { /* _timer_delete() has requested that we terminate */ dbg_puts("terminating sleeper thread"); break; } else if (rv != 0) { dbg_printf("rv=%d %s", rv, strerror(rv)); if (rv == EINTR) abort(); //FIXME should not happen //ASSUME: rv == ETIMEDOUT } si.counter++; dbg_printf(" -------- sleep over (CTS=%d)----------", cts); /* Test if the previous wakeup has been acknowledged */ if (!cts) { cnt = read(sr->wfd, &buf, 1); if (cnt < 0) { if (errno == EAGAIN || errno == EWOULDBLOCK) { ; } else { dbg_perror("read(2)"); break; } } else if (cnt == 0) { dbg_perror("short read(2)"); break; } else { cts = true; } } /* Wake up kevent waiters if they are ready */ if (cts) { cnt = write(sr->wfd, &si, sizeof(si)); if (cnt < 0) { /* FIXME: handle EAGAIN */ dbg_perror("write(2)"); } else if ((size_t)cnt < sizeof(si)) { dbg_puts("FIXME: handle short write"); } cts = false; si.counter = 0; } } dbg_puts("sleeper thread exiting"); return (NULL); } static int _timer_create(struct filter *filt, struct knote *kn) { pthread_attr_t attr; pthread_t tid; struct sleepreq *req; kn->kev.flags |= EV_CLEAR; req = malloc(sizeof(*req)); if (req == NULL) { dbg_perror("malloc"); return (-1); } req->pfd = filt->kf_pfd; req->wfd = filt->kf_wfd; req->ident = kn->kev.ident; req->interval = kn->kev.data; kn->data.sleepreq = req; pthread_cond_init(&req->cond, NULL); pthread_mutex_init(&req->mtx, NULL); pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); if (pthread_create(&tid, &attr, sleeper_thread, req) != 0) { dbg_perror("pthread_create"); pthread_attr_destroy(&attr); free(req); return (-1); } pthread_attr_destroy(&attr); return (0); } static int _timer_delete(struct knote *kn) { if (kn->data.sleepreq != NULL) { dbg_puts("deleting timer"); pthread_mutex_lock(&kn->data.sleepreq->mtx); //FIXME - error check pthread_cond_signal(&kn->data.sleepreq->cond); //FIXME - error check pthread_mutex_unlock(&kn->data.sleepreq->mtx); //FIXME - error check 
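/*
 * Sketch of the interval-to-deadline conversion the sleeper thread needs for
 * pthread_cond_timedwait(); illustrative only, and not lifted from this file.
 * Note that tv_usec is in microseconds, so it must be scaled by 1000, and the
 * nanosecond field must be normalized to stay below one second.
 */
#if 0
static void msec_to_abs_deadline(intptr_t interval_ms, struct timespec *abs)
{
    struct timeval now;

    gettimeofday(&now, NULL);
    abs->tv_sec  = now.tv_sec + interval_ms / 1000;
    abs->tv_nsec = (now.tv_usec * 1000) + (interval_ms % 1000) * 1000000;
    if (abs->tv_nsec >= 1000000000L) {
        abs->tv_sec  += abs->tv_nsec / 1000000000L;
        abs->tv_nsec %= 1000000000L;
    }
}
#endif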
pthread_cond_destroy(&kn->data.sleepreq->cond); //FIXME - error check free(kn->data.sleepreq); kn->data.sleepreq = NULL; } return (0); } int evfilt_timer_init(struct filter *filt) { int fd[2]; if (socketpair(AF_UNIX, SOCK_STREAM, 0, fd) < 0) { dbg_perror("socketpair(3)"); return (-1); } if (fcntl(fd[0], F_SETFL, O_NONBLOCK) < 0 || fcntl(fd[1], F_SETFL, O_NONBLOCK) < 0) { dbg_perror("fcntl(2)"); close(fd[0]); close(fd[1]); return (-1); } filt->kf_wfd = fd[0]; filt->kf_pfd = fd[1]; return (0); } void evfilt_timer_destroy(struct filter *filt) { (void) close(filt->kf_wfd); (void) close(filt->kf_pfd); } int evfilt_timer_copyout(struct kevent *dst, struct knote *src, void *ptr UNUSED) { struct filter *filt; struct sleepinfo si; ssize_t cnt; struct knote *kn; filt = knote_get_filter(src); /* Read the ident */ cnt = read(filt->kf_pfd, &si, sizeof(si)); if (cnt < 0) { if (errno == EINTR) return (-EINTR); /* FIXME: handle EAGAIN */ dbg_printf("read(2): %s", strerror(errno)); return (-1); } else if ((size_t)cnt < sizeof(si)) { dbg_puts("error: short read"); return (-1); } /* Acknowlege receipt */ cnt = write(filt->kf_pfd, ".", 1); if (cnt < 0) { /* FIXME: handle EAGAIN and EINTR */ dbg_printf("write(2): %s", strerror(errno)); return (-1); } else if (cnt < 1) { dbg_puts("error: short write"); return (-1); } kn = knote_lookup(filt, si.ident); /* Race condition: timer events remain queued even after the knote is deleted. Ignore these events */ if (kn == NULL) return (0); dbg_printf("knote=%p", kn); memcpy(dst, &kn->kev, sizeof(*dst)); dst->data = si.counter; #if DEADWOOD if (kn->kev.flags & EV_DISPATCH) { KNOTE_DISABLE(kn); _timer_delete(kn); } else if (kn->kev.flags & EV_ONESHOT) { _timer_delete(kn); knote_free(filt, kn); } #endif return (1); } int evfilt_timer_knote_create(struct filter *filt, struct knote *kn) { return _timer_create(filt, kn); } int evfilt_timer_knote_modify(struct filter *filt, struct knote *kn, const struct kevent *kev) { (void) filt; (void) kn; (void) kev; return (-1); /* STUB */ } int evfilt_timer_knote_delete(struct filter *filt, struct knote *kn) { (void) filt; if (kn->kev.flags & EV_DISABLE) return (0); dbg_printf("deleting timer # %d", (int) kn->kev.ident); return _timer_delete(kn); } int evfilt_timer_knote_enable(struct filter *filt, struct knote *kn) { return evfilt_timer_knote_create(filt, kn); } int evfilt_timer_knote_disable(struct filter *filt, struct knote *kn) { return evfilt_timer_knote_delete(filt, kn); } const struct filter evfilt_timer = { EVFILT_TIMER, evfilt_timer_init, evfilt_timer_destroy, evfilt_timer_copyout, evfilt_timer_knote_create, evfilt_timer_knote_modify, evfilt_timer_knote_delete, evfilt_timer_knote_enable, evfilt_timer_knote_disable, }; libkqueue-2.3.1/src/posix/user.c000066400000000000000000000100631342472035000165560ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include "sys/event.h" #include "private.h" int posix_evfilt_user_init(struct filter *filt) { if (kqops.eventfd_init(&filt->kf_efd) < 0) return (-1); filt->kf_pfd = kqops.eventfd_descriptor(&filt->kf_efd); return (0); } void posix_evfilt_user_destroy(struct filter *filt) { kqops.eventfd_close(&filt->kf_efd); return; } int posix_evfilt_user_copyout(struct kevent *dst, struct knote *src, void *ptr UNUSED) { memcpy(dst, &src->kev, sizeof(*dst)); struct knote *kn; int nevents = 0; dst->fflags &= ~NOTE_FFCTRLMASK; //FIXME: Not sure if needed dst->fflags &= ~NOTE_TRIGGER; if (src->kev.flags & EV_ADD) { /* NOTE: True on FreeBSD but not consistent behavior with other filters. */ dst->flags &= ~EV_ADD; } if (src->kev.flags & EV_CLEAR) src->kev.fflags &= ~NOTE_TRIGGER; if (src->kev.flags & (EV_DISPATCH | EV_CLEAR | EV_ONESHOT)) { kqops.eventfd_raise(&src->kdata.kn_eventfd); } if (src->kev.flags & EV_DISPATCH) src->kev.fflags &= ~NOTE_TRIGGER; return (0); } int posix_evfilt_user_knote_create(struct filter *filt, struct knote *kn) { #if TODO u_int ffctrl; //determine if EV_ADD + NOTE_TRIGGER in the same kevent will cause a trigger */ if ((!(dst->kev.flags & EV_DISABLE)) && src->fflags & NOTE_TRIGGER) { dst->kev.fflags |= NOTE_TRIGGER; eventfd_raise(filt->kf_pfd); } #endif return (0); } int posix_evfilt_user_knote_modify(struct filter *filt, struct knote *kn, const struct kevent *kev) { unsigned int ffctrl; unsigned int fflags; /* Excerpted from sys/kern/kern_event.c in FreeBSD HEAD */ ffctrl = kev->fflags & NOTE_FFCTRLMASK; fflags = kev->fflags & NOTE_FFLAGSMASK; switch (ffctrl) { case NOTE_FFNOP: break; case NOTE_FFAND: kn->kev.fflags &= fflags; break; case NOTE_FFOR: kn->kev.fflags |= fflags; break; case NOTE_FFCOPY: kn->kev.fflags = fflags; break; default: /* XXX Return error? */ break; } if ((!(kn->kev.flags & EV_DISABLE)) && kev->fflags & NOTE_TRIGGER) { kn->kev.fflags |= NOTE_TRIGGER; knote_enqueue(filt, kn); kqops.eventfd_raise(&filt->kf_efd); } return (0); } int posix_evfilt_user_knote_delete(struct filter *filt, struct knote *kn) { return (0); } int posix_evfilt_user_knote_enable(struct filter *filt, struct knote *kn) { /* FIXME: what happens if NOTE_TRIGGER is in fflags? should the event fire? */ return (0); } int posix_evfilt_user_knote_disable(struct filter *filt, struct knote *kn) { return (0); } /* FIXME: this conflicts with the struct in linux/platform.c const struct filter evfilt_user = { EVFILT_USER, evfilt_user_init, evfilt_user_destroy, evfilt_user_copyout, evfilt_user_knote_create, evfilt_user_knote_modify, evfilt_user_knote_delete, evfilt_user_knote_enable, evfilt_user_knote_disable, }; */ libkqueue-2.3.1/src/solaris/000077500000000000000000000000001342472035000157465ustar00rootroot00000000000000libkqueue-2.3.1/src/solaris/platform.c000066400000000000000000000147441342472035000177500ustar00rootroot00000000000000/* * Copyright (c) 2011 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "../common/private.h" const struct filter evfilt_vnode = EVFILT_NOTIMPL; const struct filter evfilt_proc = EVFILT_NOTIMPL; /* * Per-thread port event buffer used to ferry data between * kevent_wait() and kevent_copyout(). */ static __thread port_event_t evbuf[MAX_KEVENT]; #ifndef NDEBUG /* Dump a poll(2) events bitmask */ static char * poll_events_dump(short events) { static __thread char buf[512]; #define _PL_DUMP(attrib) \ if (events == attrib) \ strcat(&buf[0], " "#attrib); snprintf(&buf[0], 512, "events = %hd 0x%o (", events, events); _PL_DUMP(POLLIN); _PL_DUMP(POLLPRI); _PL_DUMP(POLLOUT); _PL_DUMP(POLLRDNORM); _PL_DUMP(POLLRDBAND); _PL_DUMP(POLLWRBAND); _PL_DUMP(POLLERR); _PL_DUMP(POLLHUP); _PL_DUMP(POLLNVAL); strcat(&buf[0], ")"); return (&buf[0]); #undef _PL_DUMP } static char * port_event_dump(port_event_t *evt) { static __thread char buf[512]; if (evt == NULL) { snprintf(&buf[0], sizeof(buf), "NULL ?!?!\n"); goto out; } #define PE_DUMP(attrib) \ if (evt->portev_source == attrib) \ strcat(&buf[0], #attrib); snprintf(&buf[0], 512, " { object = %u, user = %p, %s, source = %d (", (unsigned int) evt->portev_object, evt->portev_user, poll_events_dump(evt->portev_events), evt->portev_source); PE_DUMP(PORT_SOURCE_AIO); PE_DUMP(PORT_SOURCE_FD); PE_DUMP(PORT_SOURCE_TIMER); PE_DUMP(PORT_SOURCE_USER); PE_DUMP(PORT_SOURCE_ALERT); strcat(&buf[0], ") }"); #undef PE_DUMP out: return (&buf[0]); } #endif /* !NDEBUG */ int solaris_kqueue_init(struct kqueue *kq) { if ((kq->kq_id = port_create()) < 0) { dbg_perror("port_create(2)"); return (-1); } dbg_printf("created event port; fd=%d", kq->kq_id); if (filter_register_all(kq) < 0) { close(kq->kq_id); return (-1); } return (0); } void solaris_kqueue_free(struct kqueue *kq) { (void) close(kq->kq_id); dbg_printf("closed event port; fd=%d", kq->kq_id); } int solaris_kevent_wait( struct kqueue *kq, int nevents UNUSED, const struct timespec *ts) { int rv; uint_t nget = 1; reset_errno(); dbg_puts("waiting for events"); rv = port_getn(kq->kq_id, &evbuf[0], 1, &nget, (struct timespec *) ts); dbg_printf("rv=%d errno=%d (%s) nget=%d", rv, errno, strerror(errno), nget); if ((rv < 0) && (nget < 1)) { if (errno == ETIME) { dbg_puts("no events within the given timeout"); return (0); } if (errno == EINTR) { dbg_puts("signal caught"); return (-1); } dbg_perror("port_getn(2)"); return (-1); } return (nget); } int solaris_kevent_copyout(struct kqueue *kq, int nready, struct kevent *eventlist, int nevents UNUSED) { port_event_t *evt; struct knote *kn; struct filter *filt; int i, rv, skip_event, skipped_events = 0; for (i = 0; i < nready; i++) { evt = &evbuf[i]; kn = evt->portev_user; skip_event = 0; dbg_printf("event=%s", port_event_dump(evt)); switch (evt->portev_source) { case PORT_SOURCE_FD: //XXX-FIXME WHAT ABOUT WRITE??? filter_lookup(&filt, kq, EVFILT_READ); rv = filt->kf_copyout(eventlist, kn, evt); /* For sockets, the event port object must be reassociated after each event is retrieved. 
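   Event ports only deliver one notification per association, so the re-arm
   step performed via filt->kn_create() below is conceptually equivalent to
   the following sketch (fd, events and kn stand in for the values used in
   this function):

       // Re-associate the descriptor so the next readiness change is queued.
       if (port_associate(filter_epfd(filt), PORT_SOURCE_FD,
               fd, events, kn) < 0)
           dbg_perror("port_associate(2)");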
*/ if (rv == 0 && !(kn->kev.flags & EV_DISPATCH || kn->kev.flags & EV_ONESHOT)) { rv = filt->kn_create(filt, kn); } if (eventlist->data == 0) // if zero data is returned, we raced with a read of data from the socket, skip event to have proper semantics skip_event = 1; break; case PORT_SOURCE_TIMER: filter_lookup(&filt, kq, EVFILT_TIMER); rv = filt->kf_copyout(eventlist, kn, evt); break; case PORT_SOURCE_USER: switch (evt->portev_events) { case X_PORT_SOURCE_SIGNAL: filter_lookup(&filt, kq, EVFILT_SIGNAL); rv = filt->kf_copyout(eventlist, kn, evt); break; case X_PORT_SOURCE_USER: filter_lookup(&filt, kq, EVFILT_USER); rv = filt->kf_copyout(eventlist, kn, evt); break; default: dbg_puts("unsupported portev_events"); abort(); } break; default: dbg_puts("unsupported source"); abort(); } if (rv < 0) { dbg_puts("kevent_copyout failed"); return (-1); } /* * Certain flags cause the associated knote to be deleted * or disabled. */ if (eventlist->flags & EV_DISPATCH) knote_disable(filt, kn); //TODO: Error checking if (eventlist->flags & EV_ONESHOT) { knote_delete(filt, kn); //TODO: Error checking } if (skip_event) skipped_events++; else eventlist++; } return (nready - skipped_events); } const struct kqueue_vtable kqops = { solaris_kqueue_init, solaris_kqueue_free, solaris_kevent_wait, solaris_kevent_copyout, NULL, NULL, posix_eventfd_init, posix_eventfd_close, posix_eventfd_raise, posix_eventfd_lower, posix_eventfd_descriptor }; libkqueue-2.3.1/src/solaris/platform.h000066400000000000000000000034021342472035000177420ustar00rootroot00000000000000/* * Copyright (c) 2011 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _KQUEUE_SOLARIS_PLATFORM_H #define _KQUEUE_SOLARIS_PLATFORM_H #include /* * Atomic operations that override the ones in posix/platform.h */ #include #undef atomic_inc #define atomic_inc atomic_inc_32_nv #undef atomic_dec #define atomic_dec atomic_dec_32_nv #undef atomic_cas #define atomic_cas atomic_cas_ptr #undef atomic_ptr_cas #define atomic_ptr_cas atomic_cas_ptr /* * Event ports */ #include /* Used to set portev_events for PORT_SOURCE_USER */ #define X_PORT_SOURCE_SIGNAL 101 #define X_PORT_SOURCE_USER 102 /* Convenience macros to access the event port descriptor for the kqueue */ #define kqueue_epfd(kq) ((kq)->kq_id) #define filter_epfd(filt) ((filt)->kf_kqueue->kq_id) void solaris_kqueue_free(struct kqueue *); int solaris_kqueue_init(struct kqueue *); /* * Data structures */ struct event_buf { port_event_t pe; TAILQ_ENTRY(event_buf) entries; }; #endif /* ! 
_KQUEUE_SOLARIS_PLATFORM_H */ libkqueue-2.3.1/src/solaris/signal.c000066400000000000000000000104241342472035000173700ustar00rootroot00000000000000/* * Copyright (c) 2011 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "private.h" /* Highest signal number supported. POSIX standard signals are < 32 */ #define SIGNAL_MAX 32 static struct sentry { int st_signum; int st_port; volatile sig_atomic_t st_count; struct kevent st_kev; } sigtbl[SIGNAL_MAX]; static pthread_mutex_t sigtbl_mtx = PTHREAD_MUTEX_INITIALIZER; static void signal_handler(int sig) { struct sentry *s; if (sig < 0 || sig >= SIGNAL_MAX) // 0..31 are valid { dbg_printf("Received unexpected signal %d", sig); return; } s = &sigtbl[sig]; dbg_printf("sig=%d %d", sig, s->st_signum); atomic_inc((volatile uint32_t *) &s->st_count); port_send(s->st_port, X_PORT_SOURCE_SIGNAL, &sigtbl[sig]); /* TODO: crash if port_send() fails? */ } static int catch_signal(struct filter *filt, struct knote *kn) { int sig; struct sigaction sa; sig = kn->kev.ident; memset(&sa, 0, sizeof(sa)); sa.sa_handler = signal_handler; sa.sa_flags |= SA_RESTART; sigfillset(&sa.sa_mask); if (sigaction(kn->kev.ident, &sa, NULL) == -1) { dbg_perror("sigaction"); return (-1); } pthread_mutex_lock(&sigtbl_mtx); sigtbl[sig].st_signum = sig; sigtbl[sig].st_port = filter_epfd(filt); sigtbl[sig].st_count = 0; memcpy(&sigtbl[sig].st_kev, &kn->kev, sizeof(struct kevent)); pthread_mutex_unlock(&sigtbl_mtx); dbg_printf("installed handler for signal %d", sig); dbg_printf("sigtbl ptr = %p", &sigtbl[sig]); return (0); } static int ignore_signal(int sig) { struct sigaction sa; memset(&sa, 0, sizeof(sa)); sa.sa_handler = SIG_IGN; sigemptyset(&sa.sa_mask); if (sigaction(sig, &sa, NULL) == -1) { dbg_perror("sigaction"); return (-1); } dbg_printf("removed handler for signal %d", sig); return (0); } int evfilt_signal_knote_create(struct filter *filt, struct knote *kn) { if (kn->kev.ident >= SIGNAL_MAX) { dbg_printf("unsupported signal number %u", (unsigned int) kn->kev.ident); return (-1); } kn->kev.flags |= EV_CLEAR; return catch_signal(filt, kn); } int evfilt_signal_knote_modify(struct filter *filt UNUSED, struct knote *kn, const struct kevent *kev) { kn->kev.flags = kev->flags | EV_CLEAR; return (0); } int evfilt_signal_knote_delete(struct filter *filt UNUSED, struct knote *kn) { return ignore_signal(kn->kev.ident); } int evfilt_signal_knote_enable(struct filter *filt, struct knote *kn) { return catch_signal(filt, kn); } int evfilt_signal_knote_disable(struct filter *filt UNUSED, struct knote *kn) { return ignore_signal(kn->kev.ident); } int evfilt_signal_copyout(struct kevent *dst, struct knote *src, void *ptr) { port_event_t *pe = (port_event_t *) ptr; struct sentry *ent = (struct sentry *) pe->portev_user; pthread_mutex_lock(&sigtbl_mtx); dbg_printf("sigtbl ptr = %p sig=%d", ptr, ent->st_signum); dst->ident 
= ent->st_kev.ident; dst->filter = EVFILT_SIGNAL; dst->udata = ent->st_kev.udata; dst->flags = ent->st_kev.flags; dst->fflags = 0; dst->data = 1; pthread_mutex_unlock(&sigtbl_mtx); if (src->kev.flags & EV_DISPATCH || src->kev.flags & EV_ONESHOT) ignore_signal(src->kev.ident); return (1); } const struct filter evfilt_signal = { EVFILT_SIGNAL, NULL, NULL, evfilt_signal_copyout, evfilt_signal_knote_create, evfilt_signal_knote_modify, evfilt_signal_knote_delete, evfilt_signal_knote_enable, evfilt_signal_knote_disable, }; libkqueue-2.3.1/src/solaris/socket.c000066400000000000000000000103541342472035000174050ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include "sys/event.h" #include "private.h" int evfilt_socket_knote_create(struct filter *filt, struct knote *kn) { int rv, events; switch (kn->kev.filter) { case EVFILT_READ: events = POLLIN; break; case EVFILT_WRITE: events = POLLOUT; break; default: dbg_puts("invalid filter"); return (-1); } dbg_printf("port_associate kq fd %d with actual fd %ld", filter_epfd(filt), kn->kev.ident); rv = port_associate(filter_epfd(filt), PORT_SOURCE_FD, kn->kev.ident, events, kn); if (rv < 0) { dbg_perror("port_associate(2)"); return (-1); } return (0); } int evfilt_socket_knote_modify(struct filter *filt, struct knote *kn, const struct kevent *kev) { dbg_puts("XXX-FIXME"); (void)filt; (void)kn; (void)kev; return (-1); /* STUB */ } int evfilt_socket_knote_delete(struct filter *filt, struct knote *kn) { /* FIXME: should be handled at kevent_copyin() if (kn->kev.flags & EV_DISABLE) return (0); */ if (port_dissociate(filter_epfd(filt), PORT_SOURCE_FD, kn->kev.ident) < 0) { dbg_perror("port_dissociate(2)"); return (-1); } return (0); } int evfilt_socket_knote_enable(struct filter *filt, struct knote *kn) { dbg_printf("enabling knote %p", kn); return evfilt_socket_knote_create(filt, kn); } int evfilt_socket_knote_disable(struct filter *filt, struct knote *kn) { dbg_printf("disabling knote %p", kn); return evfilt_socket_knote_delete(filt, kn); } int evfilt_socket_copyout(struct kevent *dst, struct knote *src, void *ptr) { port_event_t *pe = (port_event_t *) ptr; unsigned int pending_data = 0; memcpy(dst, &src->kev, sizeof(*dst)); if (pe->portev_events == 8) //XXX-FIXME Should be POLLHUP) dst->flags |= EV_EOF; else if (pe->portev_events & POLLERR) dst->fflags = 1; /* FIXME: Return the actual socket error */ if (pe->portev_events & POLLIN) { /* On return, data contains the number of bytes of protocol data available to read / the length of the socket backlog. 
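   A caller typically consumes that value roughly as follows (sock, buf and kq
   are placeholders for this sketch; a listening socket would call accept(2)
   instead of recv(2)):

       struct kevent kev;

       if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1 && kev.filter == EVFILT_READ)
           (void) recv(sock, buf, (size_t) kev.data, 0);  // reads without blocking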
*/ if (ioctl(pe->portev_object, FIONREAD, &pending_data) < 0) { /* race condition with socket close, so ignore this error */ dbg_puts("ioctl(2) of socket failed"); dst->data = 0; } else dst->data = pending_data; } /* FIXME: make sure this is in kqops.copyout() if (src->kev.flags & EV_DISPATCH || src->kev.flags & EV_ONESHOT) { socket_knote_delete(filt->kf_kqueue->kq_port, kn->kev.ident); } */ return (0); } const struct filter evfilt_read = { EVFILT_READ, NULL, NULL, evfilt_socket_copyout, evfilt_socket_knote_create, evfilt_socket_knote_modify, evfilt_socket_knote_delete, evfilt_socket_knote_enable, evfilt_socket_knote_disable, }; const struct filter evfilt_write = { EVFILT_WRITE, NULL, NULL, evfilt_socket_copyout, evfilt_socket_knote_create, evfilt_socket_knote_modify, evfilt_socket_knote_delete, evfilt_socket_knote_enable, evfilt_socket_knote_disable, }; libkqueue-2.3.1/src/solaris/timer.c000066400000000000000000000110411342472035000172270ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include "sys/event.h" #include "private.h" #ifndef NDEBUG static char * itimerspec_dump(struct itimerspec *ts) { static __thread char buf[1024]; snprintf(buf, sizeof(buf), "itimer: [ interval=%lu s %lu ns, next expire=%lu s %lu ns ]", ts->it_interval.tv_sec, ts->it_interval.tv_nsec, ts->it_value.tv_sec, ts->it_value.tv_nsec ); return (buf); } #endif /* Convert milliseconds into seconds+nanoseconds */ static void convert_msec_to_itimerspec(struct itimerspec *dst, int src, int oneshot) { time_t sec, nsec; sec = src / 1000; nsec = (src % 1000) * 1000000; /* Set the interval */ if (oneshot) { dst->it_interval.tv_sec = 0; dst->it_interval.tv_nsec = 0; } else { dst->it_interval.tv_sec = sec; dst->it_interval.tv_nsec = nsec; } /* Set the initial expiration */ dst->it_value.tv_sec = sec; dst->it_value.tv_nsec = nsec; dbg_printf("%s", itimerspec_dump(dst)); } int evfilt_timer_init(struct filter *filt UNUSED) { return (0); } void evfilt_timer_destroy(struct filter *filt UNUSED) { return; } int evfilt_timer_copyout(struct kevent *dst, struct knote *src, void *ptr UNUSED) { /* port_event_t *pe = (port_event_t *) ptr; */ memcpy(dst, &src->kev, sizeof(*dst)); //TODO: //if (ev->events & EPOLLERR) // dst->fflags = 1; /* FIXME: Return the actual timer error */ dst->data = timer_getoverrun(src->data.timerid) + 1; #if FIXME timerid = src->data.timerid; //should be done in kqops.copyout() if (src->kev.flags & EV_DISPATCH) { timer_delete(src->data.timerid); } else if (src->kev.flags & EV_ONESHOT) { timer_delete(src->data.timerid); } #endif return (1); } int evfilt_timer_knote_create(struct filter *filt, struct knote *kn) { port_notify_t pn; struct sigevent se; struct itimerspec ts; timer_t timerid; 
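/*
 * Application-level sketch of the timer filter this function implements,
 * using only the public kevent(2) API (the ident and period below are
 * arbitrary values chosen for illustration):
 */
#if 0
static void timer_example(int kq)
{
    struct kevent kev;

    /* Fire every 500 ms; adding EV_ONESHOT would make it fire only once. */
    EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
    if (kevent(kq, &kev, 1, NULL, 0, NULL) < 0)
        dbg_perror("kevent(2)");

    /* On expiry, kevent() returns the note with kev.data holding the number
     * of expirations since the last copyout (timer_getoverrun() + 1 above). */
    (void) kevent(kq, NULL, 0, &kev, 1, NULL);
}
#endif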
kn->kev.flags |= EV_CLEAR; pn.portnfy_port = filter_epfd(filt); pn.portnfy_user = (void *) kn; se.sigev_notify = SIGEV_PORT; se.sigev_value.sival_ptr = &pn; if (timer_create (CLOCK_MONOTONIC, &se, &timerid) < 0) { dbg_perror("timer_create(2)"); return (-1); } convert_msec_to_itimerspec(&ts, kn->kev.data, kn->kev.flags & EV_ONESHOT); if (timer_settime(timerid, 0, &ts, NULL) < 0) { dbg_perror("timer_settime(2)"); (void) timer_delete(timerid); return (-1); } kn->data.timerid = timerid; dbg_printf("created timer with id #%lu", (unsigned long) timerid); return (0); } int evfilt_timer_knote_modify(struct filter *filt, struct knote *kn, const struct kevent *kev) { (void)filt; (void)kn; (void)kev; return (-1); /* STUB */ } int evfilt_timer_knote_delete(struct filter *filt UNUSED, struct knote *kn) { if (kn->kev.flags & EV_DISABLE) return (0); dbg_printf("deleting timer # %d", kn->data.timerid); return timer_delete(kn->data.timerid); } int evfilt_timer_knote_enable(struct filter *filt, struct knote *kn) { return evfilt_timer_knote_create(filt, kn); } int evfilt_timer_knote_disable(struct filter *filt, struct knote *kn) { return evfilt_timer_knote_delete(filt, kn); } const struct filter evfilt_timer = { EVFILT_TIMER, evfilt_timer_init, evfilt_timer_destroy, evfilt_timer_copyout, evfilt_timer_knote_create, evfilt_timer_knote_modify, evfilt_timer_knote_delete, evfilt_timer_knote_enable, evfilt_timer_knote_disable, }; libkqueue-2.3.1/src/solaris/user.c000066400000000000000000000072251342472035000170760ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "private.h" int evfilt_user_copyout(struct kevent *dst, struct knote *src, void *ptr UNUSED) { //port_event_t *pe = (port_event_t *) ptr; memcpy(dst, &src->kev, sizeof(*dst)); dst->fflags &= ~NOTE_FFCTRLMASK; //FIXME: Not sure if needed dst->fflags &= ~NOTE_TRIGGER; if (src->kev.flags & EV_ADD) { /* NOTE: True on FreeBSD but not consistent behavior with other filters. */ dst->flags &= ~EV_ADD; } if (src->kev.flags & EV_CLEAR) src->kev.fflags &= ~NOTE_TRIGGER; /* FIXME: This shouldn't be necessary in Solaris... 
if (src->kev.flags & (EV_DISPATCH | EV_CLEAR | EV_ONESHOT)) eventfd_lower(filt->kf_efd); */ /* FIXME: should move to kqops.copyout() if (src->kev.flags & EV_DISPATCH) { KNOTE_DISABLE(src); src->kev.fflags &= ~NOTE_TRIGGER; } else if (src->kev.flags & EV_ONESHOT) { knote_free(filt, src); } */ return (1); } int evfilt_user_knote_create(struct filter *filt UNUSED, struct knote *kn UNUSED) { #if TODO u_int ffctrl; //determine if EV_ADD + NOTE_TRIGGER in the same kevent will cause a trigger */ if ((!(dst->kev.flags & EV_DISABLE)) && src->fflags & NOTE_TRIGGER) { dst->kev.fflags |= NOTE_TRIGGER; eventfd_raise(filt->kf_pfd); } #endif return (0); } int evfilt_user_knote_modify(struct filter *filt, struct knote *kn, const struct kevent *kev) { unsigned int ffctrl; unsigned int fflags; /* Excerpted from sys/kern/kern_event.c in FreeBSD HEAD */ ffctrl = kev->fflags & NOTE_FFCTRLMASK; fflags = kev->fflags & NOTE_FFLAGSMASK; switch (ffctrl) { case NOTE_FFNOP: break; case NOTE_FFAND: kn->kev.fflags &= fflags; break; case NOTE_FFOR: kn->kev.fflags |= fflags; break; case NOTE_FFCOPY: kn->kev.fflags = fflags; break; default: /* XXX Return error? */ break; } if ((!(kn->kev.flags & EV_DISABLE)) && kev->fflags & NOTE_TRIGGER) { kn->kev.fflags |= NOTE_TRIGGER; return (port_send(filter_epfd(filt), X_PORT_SOURCE_USER, kn)); } return (0); } int evfilt_user_knote_delete(struct filter *filt UNUSED, struct knote *kn UNUSED) { return (0); } int evfilt_user_knote_enable(struct filter *filt UNUSED, struct knote *kn UNUSED) { /* FIXME: what happens if NOTE_TRIGGER is in fflags? should the event fire? */ return (0); } int evfilt_user_knote_disable(struct filter *filt UNUSED, struct knote *kn UNUSED) { return (0); } const struct filter evfilt_user = { EVFILT_USER, NULL, NULL, evfilt_user_copyout, evfilt_user_knote_create, evfilt_user_knote_modify, evfilt_user_knote_delete, evfilt_user_knote_enable, evfilt_user_knote_disable, }; libkqueue-2.3.1/src/windows/000077500000000000000000000000001342472035000157645ustar00rootroot00000000000000libkqueue-2.3.1/src/windows/platform.c000066400000000000000000000134671342472035000177670ustar00rootroot00000000000000/* * Copyright (c) 2011 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "../common/private.h" struct event_buf { DWORD bytes; ULONG_PTR key; OVERLAPPED *overlap; }; /* * Per-thread evt event buffer used to ferry data between * kevent_wait() and kevent_copyout(). 
*/ static __thread struct event_buf iocp_buf; /* FIXME: remove these as filters are implemented */ const struct filter evfilt_proc = EVFILT_NOTIMPL; const struct filter evfilt_vnode = EVFILT_NOTIMPL; const struct filter evfilt_signal = EVFILT_NOTIMPL; const struct filter evfilt_write = EVFILT_NOTIMPL; const struct kqueue_vtable kqops = { windows_kqueue_init, windows_kqueue_free, windows_kevent_wait, windows_kevent_copyout, windows_filter_init, windows_filter_free, }; int windows_kqueue_init(struct kqueue *kq) { kq->kq_iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, (ULONG_PTR) 0, 0); if (kq->kq_iocp == NULL) { dbg_lasterror("CreateIoCompletionPort"); return (-1); } #if DEADWOOD /* Create a handle whose sole purpose is to indicate a synthetic * IO event. */ kq->kq_synthetic_event = CreateSemaphore(NULL, 0, 1, NULL); if (kq->kq_synthetic_event == NULL) { /* FIXME: close kq_iocp */ dbg_lasterror("CreateSemaphore"); return (-1); } kq->kq_loop = evt_create(); if (kq->kq_loop == NULL) { dbg_perror("evt_create()"); return (-1); } #endif if(filter_register_all(kq) < 0) { CloseHandle(kq->kq_iocp); return (-1); } return (0); } void windows_kqueue_free(struct kqueue *kq) { CloseHandle(kq->kq_iocp); free(kq); } int windows_kevent_wait(struct kqueue *kq, int no, const struct timespec *timeout) { int retval; DWORD timeout_ms; BOOL success; if (timeout == NULL) { timeout_ms = INFINITE; } else if ( timeout->tv_sec == 0 && timeout->tv_nsec < 1000000 ) { /* do we need to try high precision timing? */ // TODO: This is currently not possible on windows! timeout_ms = 0; } else { /* Convert timeout to milliseconds */ timeout_ms = 0; if (timeout->tv_sec > 0) timeout_ms += ((DWORD)timeout->tv_sec) * 1000; if (timeout->tv_nsec > 0) timeout_ms += timeout->tv_nsec / 1000000; } dbg_printf("waiting for events (timeout=%u ms)", (unsigned int) timeout_ms); #if 0 if(timeout_ms <= 0) dbg_printf("Woop, not waiting !?"); #endif memset(&iocp_buf, 0, sizeof(iocp_buf)); success = GetQueuedCompletionStatus(kq->kq_iocp, &iocp_buf.bytes, &iocp_buf.key, &iocp_buf.overlap, timeout_ms); if (success) { return (1); } else { if (GetLastError() == WAIT_TIMEOUT) { dbg_puts("no events within the given timeout"); return (0); } dbg_lasterror("GetQueuedCompletionStatus"); return (-1); } return (retval); } int windows_kevent_copyout(struct kqueue *kq, int nready, struct kevent *eventlist, int nevents) { struct filter *filt; struct knote* kn; int rv, nret; //FIXME: not true for EVFILT_IOCP kn = (struct knote *) iocp_buf.overlap; filt = &kq->kq_filt[~(kn->kev.filter)]; rv = filt->kf_copyout(eventlist, kn, &iocp_buf); if (slowpath(rv < 0)) { dbg_puts("knote_copyout failed"); /* XXX-FIXME: hard to handle this without losing events */ abort(); } else { nret = 1; } /* * Certain flags cause the associated knote to be deleted * or disabled. */ if (eventlist->flags & EV_DISPATCH) knote_disable(filt, kn); //TODO: Error checking if (eventlist->flags & EV_ONESHOT) knote_delete(filt, kn); //TODO: Error checking /* If an empty kevent structure is returned, the event is discarded. 
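   Here "empty" means the filter's kf_copyout() left eventlist->filter set to 0;
   in that case the output slot is not advanced and nret drops back to zero.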
*/ if (fastpath(eventlist->filter != 0)) { eventlist++; } else { dbg_puts("spurious wakeup, discarding event"); nret--; } return nret; } int windows_filter_init(struct kqueue *kq, struct filter *kf) { kq->kq_filt_ref[kq->kq_filt_count] = (struct filter *) kf; kq->kq_filt_count++; return (0); } void windows_filter_free(struct kqueue *kq, struct filter *kf) { } int windows_get_descriptor_type(struct knote *kn) { switch (GetFileType((HANDLE)kn->kev.ident)) { case FILE_TYPE_PIPE: { socklen_t slen; int lsock, stype, i; slen = sizeof(lsock); lsock = 0; i = getsockopt(kn->kev.ident, SOL_SOCKET, SO_ACCEPTCONN, (char *)&lsock, &slen); if (i == 0 && lsock) kn->kn_flags |= KNFL_SOCKET_PASSIVE; slen = sizeof(stype); stype = 0; i = getsockopt(kn->kev.ident, SOL_SOCKET, SO_TYPE, (char *)&stype, &slen); if (i < 0) { dbg_perror("getsockopt(3)"); return (-1); } if (stype == SOCK_STREAM) kn->kn_flags |= KNFL_SOCKET_STREAM; break; } default: { struct stat sb; if (fstat((int)kn->kev.ident, &sb) == 0) { dbg_printf("HANDLE %d appears to be a regular file", kn->kev.ident); kn->kn_flags |= KNFL_FILE; } } } return 0; } libkqueue-2.3.1/src/windows/platform.h000066400000000000000000000105071342472035000177640ustar00rootroot00000000000000/* * Copyright (c) 2011 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _KQUEUE_WINDOWS_PLATFORM_H #define _KQUEUE_WINDOWS_PLATFORM_H /* Require Windows XP or later */ #define WINVER 0x0501 #define _WIN32_WINNT 0x0501 /* Reduces build time by omitting extra system headers */ #define WIN32_LEAN_AND_MEAN #include #include #include #include #include #include #include #include #include #define _CRT_SECURE_NO_WARNINGS 1 /* The #define doesn't seem to work, but the #pragma does.. 
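   (C4996 is MSVC's deprecation warning; _CRT_SECURE_NO_WARNINGS is defined above
   only after the CRT headers have already been included, so the #pragma below is
   what actually suppresses the warning.)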
*/ #ifdef _MSC_VER # pragma warning( disable : 4996 ) #endif #include "../../include/sys/event.h" /* * Atomic integer operations */ #define atomic_inc(value) InterlockedIncrement((LONG volatile *)value) #define atomic_dec(value) InterlockedDecrement((LONG volatile *)value) #define atomic_cas(p, oval, nval) InterlockedCompareExchange(p, nval, oval) #define atomic_ptr_cas(p, oval, nval) InterlockedCompareExchangePointer(p, nval, oval) /* * Additional members of struct kqueue */ #define KQUEUE_PLATFORM_SPECIFIC \ HANDLE kq_iocp; \ HANDLE kq_synthetic_event; \ struct filter *kq_filt_ref[EVFILT_SYSCOUNT]; \ size_t kq_filt_count /* * Additional members of struct filter */ /* #define FILTER_PLATFORM_SPECIFIC \ HANDLE kf_event_handle */ /* * Additional members for struct knote */ #define KNOTE_PLATFORM_SPECIFIC \ HANDLE kn_event_whandle /* * Some datatype forward declarations */ struct filter; struct kqueue; struct knote; /* * Hooks and prototypes */ int windows_kqueue_init(struct kqueue *); void windows_kqueue_free(struct kqueue *); int windows_kevent_wait(struct kqueue *, int, const struct timespec *); int windows_kevent_copyout(struct kqueue *, int, struct kevent *, int); int windows_filter_init(struct kqueue *, struct filter *); void windows_filter_free(struct kqueue *, struct filter *); int windows_get_descriptor_type(struct knote *); /* * GCC-compatible branch prediction macros */ #ifdef __GNUC__ # define fastpath(x) __builtin_expect((x), 1) # define slowpath(x) __builtin_expect((x), 0) #else # define fastpath(x) (x) # define slowpath(x) (x) #endif /* Function visibility macros */ #define VISIBLE __declspec(dllexport) #define HIDDEN #if !defined(__func__) && !defined(__GNUC__) #define __func__ __FUNCDNAME__ #endif #define snprintf _snprintf #define ssize_t SSIZE_T #define sleep(x) Sleep((x) * 1000) #define inline __inline /* For POSIX compatibility when compiling, not for actual use */ typedef int socklen_t; typedef int nlink_t; typedef int timer_t; typedef int pthread_t; typedef int sigset_t; typedef int pid_t; #ifndef __GNUC__ # define __thread __declspec(thread) #endif /* Emulation of pthreads mutex functionality */ #define PTHREAD_PROCESS_SHARED 1 #define PTHREAD_PROCESS_PRIVATE 2 typedef CRITICAL_SECTION pthread_mutex_t; typedef CRITICAL_SECTION pthread_spinlock_t; typedef CRITICAL_SECTION pthread_rwlock_t; #define _cs_init(x) InitializeCriticalSection((x)) #define _cs_lock(x) EnterCriticalSection ((x)) #define _cs_unlock(x) LeaveCriticalSection ((x)) #define pthread_mutex_lock _cs_lock #define pthread_mutex_unlock _cs_unlock #define pthread_mutex_init(x,y) _cs_init((x)) #define pthread_spin_lock _cs_lock #define pthread_spin_unlock _cs_unlock #define pthread_spin_init(x,y) _cs_init((x)) #define pthread_mutex_init(x,y) _cs_init((x)) #define pthread_mutex_destroy(x) #define pthread_rwlock_rdlock _cs_lock #define pthread_rwlock_wrlock _cs_lock #define pthread_rwlock_unlock _cs_unlock #define pthread_rwlock_init(x,y) _cs_init((x)) #endif /* ! _KQUEUE_WINDOWS_PLATFORM_H */ libkqueue-2.3.1/src/windows/read.c000066400000000000000000000125011342472035000170420ustar00rootroot00000000000000/* * Copyright (c) 2011 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "../common/private.h" static VOID CALLBACK evfilt_read_callback(void *param, BOOLEAN fired) { WSANETWORKEVENTS events; struct kqueue *kq; struct knote *kn; int rv; assert(param); if (fired) { dbg_puts("called, but event was not triggered(?)"); return; } assert(param); kn = (struct knote*)param; // FIXME: check if knote is pending destroyed kq = kn->kn_kq; assert(kq); /* Retrieve the socket events and update the knote */ rv = WSAEnumNetworkEvents( (SOCKET) kn->kev.ident, kn->data.handle, &events); if (rv != 0) { dbg_wsalasterror("WSAEnumNetworkEvents"); return; //fIXME: should crash or invalidate the knote } /* FIXME: check for errors somehow.. if (events.lNetworkEvents & FD_ACCEPT) kn->kev.flags |= EV */ if (!PostQueuedCompletionStatus(kq->kq_iocp, 1, (ULONG_PTR) 0, (LPOVERLAPPED) param)) { dbg_lasterror("PostQueuedCompletionStatus()"); return; /* FIXME: need more extreme action */ } /* DEADWOOD kn = (struct knote *) param; evt_signal(kn->kn_kq->kq_loop, EVT_WAKEUP, kn); */ } #if FIXME static intptr_t get_eof_offset(int fd) { off_t curpos; struct stat sb; curpos = lseek(fd, 0, SEEK_CUR); if (curpos == (off_t) -1) { dbg_perror("lseek(2)"); curpos = 0; } if (fstat(fd, &sb) < 0) { dbg_perror("fstat(2)"); sb.st_size = 1; } dbg_printf("curpos=%zu size=%zu\n", curpos, sb.st_size); return (sb.st_size - curpos); //FIXME: can overflow } #endif int evfilt_read_copyout(struct kevent *dst, struct knote *src, void *ptr) { unsigned long bufsize; //struct event_buf * const ev = (struct event_buf *) ptr; /* TODO: handle regular files if (src->flags & KNFL_FILE) { ... } */ memcpy(dst, &src->kev, sizeof(*dst)); if (src->kn_flags & KNFL_PASSIVE_SOCKET) { /* TODO: should contains the length of the socket backlog */ dst->data = 1; } else { /* On return, data contains the number of bytes of protocol data available to read. 
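   The ioctlsocket(FIONREAD) call below supplies that byte count for sockets,
   matching the kev.data semantics of EVFILT_READ on BSD kqueue.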
*/ if (ioctlsocket(src->kev.ident, FIONREAD, &bufsize) != 0) { dbg_wsalasterror("ioctlsocket"); return (-1); } dst->data = bufsize; } return (0); } int evfilt_read_knote_create(struct filter *filt, struct knote *kn) { HANDLE evt; int rv; if (windows_get_descriptor_type(kn) < 0) return (-1); /* Create an auto-reset event object */ evt = CreateEvent(NULL, FALSE, FALSE, NULL); if (evt == NULL) { dbg_lasterror("CreateEvent()"); return (-1); } rv = WSAEventSelect( (SOCKET) kn->kev.ident, evt, FD_READ | FD_ACCEPT | FD_CLOSE); if (rv != 0) { dbg_wsalasterror("WSAEventSelect()"); CloseHandle(evt); return (-1); } /* TODO: handle regular files in addition to sockets */ /* TODO: handle in copyout if (kn->kev.flags & EV_ONESHOT || kn->kev.flags & EV_DISPATCH) kn->data.events |= EPOLLONESHOT; if (kn->kev.flags & EV_CLEAR) kn->data.events |= EPOLLET; */ kn->data.handle = evt; if (RegisterWaitForSingleObject(&kn->kn_event_whandle, evt, evfilt_read_callback, kn, INFINITE, 0) == 0) { dbg_puts("RegisterWaitForSingleObject failed"); CloseHandle(evt); return (-1); } return (0); } int evfilt_read_knote_modify(struct filter *filt, struct knote *kn, const struct kevent *kev) { return (-1); /* STUB */ } int evfilt_read_knote_delete(struct filter *filt, struct knote *kn) { if (kn->data.handle == NULL || kn->kn_event_whandle == NULL) return (0); if(!UnregisterWaitEx(kn->kn_event_whandle, INVALID_HANDLE_VALUE)) { dbg_lasterror("UnregisterWait()"); return (-1); } if (!WSACloseEvent(kn->data.handle)) { dbg_wsalasterror("WSACloseEvent()"); return (-1); } kn->data.handle = NULL; return (0); } int evfilt_read_knote_enable(struct filter *filt, struct knote *kn) { return evfilt_read_knote_create(filt, kn); } int evfilt_read_knote_disable(struct filter *filt, struct knote *kn) { return evfilt_read_knote_delete(filt, kn); } const struct filter evfilt_read = { EVFILT_READ, NULL, NULL, evfilt_read_copyout, evfilt_read_knote_create, evfilt_read_knote_modify, evfilt_read_knote_delete, evfilt_read_knote_enable, evfilt_read_knote_disable, }; libkqueue-2.3.1/src/windows/stdint.h000066400000000000000000000170601342472035000174460ustar00rootroot00000000000000// ISO C9x compliant stdint.h for Microsoft Visual Studio // Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 // // Copyright (c) 2006-2008 Alexander Chemeris // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. The name of the author may be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO // EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////////// #ifndef _MSC_VER // [ #error "Use this header only with Microsoft Visual C++ compilers!" #endif // _MSC_VER ] #ifndef _MSC_STDINT_H_ // [ #define _MSC_STDINT_H_ #if _MSC_VER > 1000 #pragma once #endif #include // For Visual Studio 6 in C++ mode and for many Visual Studio versions when // compiling for ARM we should wrap include with 'extern "C++" {}' // or compiler give many errors like this: // error C2733: second C linkage of overloaded function 'wmemchr' not allowed #ifdef __cplusplus extern "C" { #endif # include #ifdef __cplusplus } #endif // Define _W64 macros to mark types changing their size, like intptr_t. #ifndef _W64 # if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 # define _W64 __w64 # else # define _W64 # endif #endif // 7.18.1 Integer types // 7.18.1.1 Exact-width integer types // Visual Studio 6 and Embedded Visual C++ 4 doesn't // realize that, e.g. char has the same size as __int8 // so we give up on __intX for them. #if (_MSC_VER < 1300) typedef signed char int8_t; typedef signed short int16_t; typedef signed int int32_t; typedef unsigned char uint8_t; typedef unsigned short uint16_t; typedef unsigned int uint32_t; #else typedef signed __int8 int8_t; typedef signed __int16 int16_t; typedef signed __int32 int32_t; typedef unsigned __int8 uint8_t; typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; #endif typedef signed __int64 int64_t; typedef unsigned __int64 uint64_t; // 7.18.1.2 Minimum-width integer types typedef int8_t int_least8_t; typedef int16_t int_least16_t; typedef int32_t int_least32_t; typedef int64_t int_least64_t; typedef uint8_t uint_least8_t; typedef uint16_t uint_least16_t; typedef uint32_t uint_least32_t; typedef uint64_t uint_least64_t; // 7.18.1.3 Fastest minimum-width integer types typedef int8_t int_fast8_t; typedef int16_t int_fast16_t; typedef int32_t int_fast32_t; typedef int64_t int_fast64_t; typedef uint8_t uint_fast8_t; typedef uint16_t uint_fast16_t; typedef uint32_t uint_fast32_t; typedef uint64_t uint_fast64_t; // 7.18.1.4 Integer types capable of holding object pointers #ifdef _WIN64 // [ typedef signed __int64 intptr_t; typedef unsigned __int64 uintptr_t; #else // _WIN64 ][ typedef _W64 signed int intptr_t; typedef _W64 unsigned int uintptr_t; #endif // _WIN64 ] // 7.18.1.5 Greatest-width integer types typedef int64_t intmax_t; typedef uint64_t uintmax_t; // 7.18.2 Limits of specified-width integer types #if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 // 7.18.2.1 Limits of exact-width integer types #define INT8_MIN ((int8_t)_I8_MIN) #define INT8_MAX _I8_MAX #define INT16_MIN ((int16_t)_I16_MIN) #define INT16_MAX _I16_MAX #define INT32_MIN ((int32_t)_I32_MIN) #define INT32_MAX _I32_MAX #define INT64_MIN ((int64_t)_I64_MIN) #define INT64_MAX _I64_MAX #define UINT8_MAX _UI8_MAX #define UINT16_MAX _UI16_MAX #define UINT32_MAX _UI32_MAX 
#define UINT64_MAX _UI64_MAX // 7.18.2.2 Limits of minimum-width integer types #define INT_LEAST8_MIN INT8_MIN #define INT_LEAST8_MAX INT8_MAX #define INT_LEAST16_MIN INT16_MIN #define INT_LEAST16_MAX INT16_MAX #define INT_LEAST32_MIN INT32_MIN #define INT_LEAST32_MAX INT32_MAX #define INT_LEAST64_MIN INT64_MIN #define INT_LEAST64_MAX INT64_MAX #define UINT_LEAST8_MAX UINT8_MAX #define UINT_LEAST16_MAX UINT16_MAX #define UINT_LEAST32_MAX UINT32_MAX #define UINT_LEAST64_MAX UINT64_MAX // 7.18.2.3 Limits of fastest minimum-width integer types #define INT_FAST8_MIN INT8_MIN #define INT_FAST8_MAX INT8_MAX #define INT_FAST16_MIN INT16_MIN #define INT_FAST16_MAX INT16_MAX #define INT_FAST32_MIN INT32_MIN #define INT_FAST32_MAX INT32_MAX #define INT_FAST64_MIN INT64_MIN #define INT_FAST64_MAX INT64_MAX #define UINT_FAST8_MAX UINT8_MAX #define UINT_FAST16_MAX UINT16_MAX #define UINT_FAST32_MAX UINT32_MAX #define UINT_FAST64_MAX UINT64_MAX // 7.18.2.4 Limits of integer types capable of holding object pointers #ifdef _WIN64 // [ # define INTPTR_MIN INT64_MIN # define INTPTR_MAX INT64_MAX # define UINTPTR_MAX UINT64_MAX #else // _WIN64 ][ # define INTPTR_MIN INT32_MIN # define INTPTR_MAX INT32_MAX # define UINTPTR_MAX UINT32_MAX #endif // _WIN64 ] // 7.18.2.5 Limits of greatest-width integer types #define INTMAX_MIN INT64_MIN #define INTMAX_MAX INT64_MAX #define UINTMAX_MAX UINT64_MAX // 7.18.3 Limits of other integer types #ifdef _WIN64 // [ # define PTRDIFF_MIN _I64_MIN # define PTRDIFF_MAX _I64_MAX #else // _WIN64 ][ # define PTRDIFF_MIN _I32_MIN # define PTRDIFF_MAX _I32_MAX #endif // _WIN64 ] #define SIG_ATOMIC_MIN INT_MIN #define SIG_ATOMIC_MAX INT_MAX #ifndef SIZE_MAX // [ # ifdef _WIN64 // [ # define SIZE_MAX _UI64_MAX # else // _WIN64 ][ # define SIZE_MAX _UI32_MAX # endif // _WIN64 ] #endif // SIZE_MAX ] // WCHAR_MIN and WCHAR_MAX are also defined in #ifndef WCHAR_MIN // [ # define WCHAR_MIN 0 #endif // WCHAR_MIN ] #ifndef WCHAR_MAX // [ # define WCHAR_MAX _UI16_MAX #endif // WCHAR_MAX ] #define WINT_MIN 0 #define WINT_MAX _UI16_MAX #endif // __STDC_LIMIT_MACROS ] // 7.18.4 Limits of other integer types #if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 // 7.18.4.1 Macros for minimum-width integer constants #define INT8_C(val) val##i8 #define INT16_C(val) val##i16 #define INT32_C(val) val##i32 #define INT64_C(val) val##i64 #define UINT8_C(val) val##ui8 #define UINT16_C(val) val##ui16 #define UINT32_C(val) val##ui32 #define UINT64_C(val) val##ui64 // 7.18.4.2 Macros for greatest-width integer constants #define INTMAX_C INT64_C #define UINTMAX_C UINT64_C #endif // __STDC_CONSTANT_MACROS ] #endif // _MSC_STDINT_H_ ] libkqueue-2.3.1/src/windows/timer.c000066400000000000000000000106011342472035000172460ustar00rootroot00000000000000/* * Copyright (c) 2011 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "../common/private.h" /* Convert milliseconds into negative increments of 100-nanoseconds */ static void convert_msec_to_filetime(LARGE_INTEGER *dst, intptr_t src) { dst->QuadPart = -((int64_t) src * 1000 * 10); } static int ktimer_delete(struct filter *filt, struct knote *kn) { if (kn->data.handle == NULL || kn->kn_event_whandle == NULL) return (0); if(!UnregisterWaitEx(kn->kn_event_whandle, INVALID_HANDLE_VALUE)) { dbg_lasterror("UnregisterWait()"); return (-1); } if (!CancelWaitableTimer(kn->data.handle)) { dbg_lasterror("CancelWaitableTimer()"); return (-1); } if (!CloseHandle(kn->data.handle)) { dbg_lasterror("CloseHandle()"); return (-1); } if( !(kn->kev.flags & EV_ONESHOT) ) knote_release(kn); kn->data.handle = NULL; return (0); } static VOID CALLBACK evfilt_timer_callback(void* param, BOOLEAN fired){ struct knote* kn; struct kqueue* kq; if(fired){ dbg_puts("called, but timer did not fire - this case should never be reached"); return; } assert(param); kn = (struct knote*)param; if(kn->kn_flags & KNFL_KNOTE_DELETED) { dbg_puts("knote marked for deletion, skipping event"); return; } else { kq = kn->kn_kq; assert(kq); if (!PostQueuedCompletionStatus(kq->kq_iocp, 1, (ULONG_PTR) 0, (LPOVERLAPPED) kn)) { dbg_lasterror("PostQueuedCompletionStatus()"); return; /* FIXME: need more extreme action */ } #if DEADWOOD evt_signal(kq->kq_loop, EVT_WAKEUP, kn); #endif } if(kn->kev.flags & EV_ONESHOT) { struct filter* filt; if( filter_lookup(&filt, kq, kn->kev.filter) ) dbg_perror("filter_lookup()"); knote_release(kn); } } int evfilt_timer_init(struct filter *filt) { return (0); } void evfilt_timer_destroy(struct filter *filt) { } int evfilt_timer_copyout(struct kevent* dst, struct knote* src, void* ptr) { memcpy(dst, &src->kev, sizeof(struct kevent)); // TODO: Timer error handling /* We have no way to determine the number of times the timer triggered, thus we assume it was only once */ dst->data = 1; return (0); } int evfilt_timer_knote_create(struct filter *filt, struct knote *kn) { HANDLE th; LARGE_INTEGER liDueTime; kn->kev.flags |= EV_CLEAR; th = CreateWaitableTimer(NULL, FALSE, NULL); if (th == NULL) { dbg_lasterror("CreateWaitableTimer()"); return (-1); } dbg_printf("created timer handle %p", th); convert_msec_to_filetime(&liDueTime, kn->kev.data); // XXX-FIXME add completion routine to this call if (!SetWaitableTimer(th, &liDueTime, (LONG)( (kn->kev.flags & EV_ONESHOT) ? 
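/* lPeriod: a one-shot timer gets no repeat period, otherwise the timer re-fires every kev.data milliseconds */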
0 : kn->kev.data ), NULL, NULL, FALSE)) { dbg_lasterror("SetWaitableTimer()"); CloseHandle(th); return (-1); } kn->data.handle = th; RegisterWaitForSingleObject(&kn->kn_event_whandle, th, evfilt_timer_callback, kn, INFINITE, 0); knote_retain(kn); return (0); } int evfilt_timer_knote_modify(struct filter *filt, struct knote *kn, const struct kevent *kev) { return (0); /* STUB */ } int evfilt_timer_knote_delete(struct filter *filt, struct knote *kn) { return (ktimer_delete(filt,kn)); } int evfilt_timer_knote_enable(struct filter *filt, struct knote *kn) { return evfilt_timer_knote_create(filt, kn); } int evfilt_timer_knote_disable(struct filter *filt, struct knote *kn) { return evfilt_timer_knote_delete(filt, kn); } const struct filter evfilt_timer = { EVFILT_TIMER, evfilt_timer_init, evfilt_timer_destroy, evfilt_timer_copyout, evfilt_timer_knote_create, evfilt_timer_knote_modify, evfilt_timer_knote_delete, evfilt_timer_knote_enable, evfilt_timer_knote_disable, }; libkqueue-2.3.1/src/windows/user.c000066400000000000000000000062121342472035000171070ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "../common/private.h" int evfilt_user_init(struct filter *filt) { return (0); } void evfilt_user_destroy(struct filter *filt) { } int evfilt_user_copyout(struct kevent* dst, struct knote* src, void* ptr) { memcpy(dst, &src->kev, sizeof(struct kevent)); dst->fflags &= ~NOTE_FFCTRLMASK; //FIXME: Not sure if needed dst->fflags &= ~NOTE_TRIGGER; if (src->kev.flags & EV_ADD) { /* NOTE: True on FreeBSD but not consistent behavior with other filters. */ dst->flags &= ~EV_ADD; } if (src->kev.flags & EV_CLEAR) src->kev.fflags &= ~NOTE_TRIGGER; if (src->kev.flags & EV_DISPATCH) src->kev.fflags &= ~NOTE_TRIGGER; return (0); } int evfilt_user_knote_create(struct filter *filt, struct knote *kn) { return (0); } int evfilt_user_knote_modify(struct filter *filt, struct knote *kn, const struct kevent *kev) { unsigned int ffctrl; unsigned int fflags; /* Excerpted from sys/kern/kern_event.c in FreeBSD HEAD */ ffctrl = kev->fflags & NOTE_FFCTRLMASK; fflags = kev->fflags & NOTE_FFLAGSMASK; switch (ffctrl) { case NOTE_FFNOP: break; case NOTE_FFAND: kn->kev.fflags &= fflags; break; case NOTE_FFOR: kn->kev.fflags |= fflags; break; case NOTE_FFCOPY: kn->kev.fflags = fflags; break; default: /* XXX Return error? 
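   In practice NOTE_FFCTRLMASK admits only the four NOTE_FF* control values, so
   this arm should be unreachable and ignoring it silently appears safe.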
*/ break; } if ((!(kn->kev.flags & EV_DISABLE)) && kev->fflags & NOTE_TRIGGER) { kn->kev.fflags |= NOTE_TRIGGER; if (!PostQueuedCompletionStatus(kn->kn_kq->kq_iocp, 1, (ULONG_PTR) 0, (LPOVERLAPPED) kn)) { dbg_lasterror("PostQueuedCompletionStatus()"); return (-1); } } return (0); } int evfilt_user_knote_delete(struct filter *filt, struct knote *kn) { return (0); } int evfilt_user_knote_enable(struct filter *filt, struct knote *kn) { return evfilt_user_knote_create(filt, kn); } int evfilt_user_knote_disable(struct filter *filt, struct knote *kn) { return evfilt_user_knote_delete(filt, kn); } const struct filter evfilt_user = { EVFILT_USER, evfilt_user_init, evfilt_user_destroy, evfilt_user_copyout, evfilt_user_knote_create, evfilt_user_knote_modify, evfilt_user_knote_delete, evfilt_user_knote_enable, evfilt_user_knote_disable, };libkqueue-2.3.1/test/000077500000000000000000000000001342472035000144625ustar00rootroot00000000000000libkqueue-2.3.1/test/CMakeLists.txt000066400000000000000000000042041342472035000172220ustar00rootroot00000000000000# # Copyright (c) 2011 Marius Zwicker # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
# set(LIBKQUEUE_TEST_SOURCES kevent.c main.c read.c test.c timer.c user.c vnode.c) if(UNIX) list(APPEND LIBKQUEUE_TEST_SOURCES proc.c signal.c) endif() add_executable(libkqueue-test ${LIBKQUEUE_TEST_SOURCES}) target_include_directories(libkqueue-test PRIVATE "${CMAKE_SOURCE_DIR}" "${CMAKE_SOURCE_DIR}/include") if(WIN32) target_compile_definitions(libkqueue-test PRIVATE _CRT_SECURE_NO_WARNINGS _CRT_NONSTDC_NO_WARNINGS _WINSOCK_DEPRECATED_NO_WARNINGS) if(CMAKE_C_COMPILER_ID MATCHES Clang) target_compile_options(libkqueue-test PRIVATE -Wno-unused-variable) endif() elseif(UNIX) target_compile_options(libkqueue-test PRIVATE -rdynamic) endif() target_link_libraries(libkqueue-test PRIVATE kqueue Threads::Threads) if(WIN32) target_link_libraries(libkqueue-test PRIVATE Ws2_32) endif() set_target_properties(libkqueue-test PROPERTIES DEBUG_POSTFIX "d") add_test(NAME libkqueue-test COMMAND libkqueue-test) libkqueue-2.3.1/test/benchmark/000077500000000000000000000000001342472035000164145ustar00rootroot00000000000000libkqueue-2.3.1/test/benchmark/abtest000077500000000000000000000012611342472035000176240ustar00rootroot00000000000000#!/usr/bin/perl # # Test using ApacheBench against an HTTP server with lots of idle clients # use IO::Socket; use Getopt::Long; use strict; use warnings; our $NCLIENT = 3000; our $BASELINE = 0; our @CLIENT; sub create_client { my $socket = new IO::Socket::INET ( PeerAddr => '127.0.0.1', PeerPort => 8080, Proto => 'tcp', ) or die $!; push @CLIENT, $socket; } GetOptions("baseline" => \$BASELINE, "idle=i" => \$NCLIENT) or die; for (my $i = 0; $i < $NCLIENT; $i++) { create_client(); } print "====> Created $NCLIENT idle connections <=====\n"; system "ab -n 5000 -c 500 http://localhost:8080/"; libkqueue-2.3.1/test/benchmark/results.txt000066400000000000000000000111221342472035000206530ustar00rootroot00000000000000Test system: Linux voltaire 2.6.31-20-generic #58-Ubuntu SMP Fri Mar 12 04:38:19 UTC 2010 x86_64 GNU/Linux THTTPD 2.25b + POLL(2) NO IDLE CONNECTIONS Time taken for tests: 0.515 seconds Complete requests: 5000 Failed requests: 0 Write errors: 0 Total transferred: 2760000 bytes HTML transferred: 1555000 bytes Requests per second: 9699.79 [#/sec] (mean) Time per request: 51.547 [ms] (mean) Time per request: 0.103 [ms] (mean, across all concurrent requests) Transfer rate: 5228.79 [Kbytes/sec] received Connection Times (ms) min mean[+/-sd] median max Connect: 0 2 3.2 1 20 Processing: 3 12 5.1 10 42 Waiting: 2 11 4.7 9 41 Total: 3 14 6.3 11 61 Percentage of the requests served within a certain time (ms) 50% 11 66% 15 75% 18 80% 19 90% 20 95% 29 98% 37 99% 37 100% 61 (longest request) THTTPD 2.25b + LIBKQUEUE NO IDLE CONNECTIONS Requests per second: 8243.78 [#/sec] (mean) Time per request: 60.652 [ms] (mean) Time per request: 0.121 [ms] (mean, across all concurrent requests) Transfer rate: 4455.47 [Kbytes/sec] received Connection Times (ms) min mean[+/-sd] median max Connect: 0 4 3.2 4 15 Processing: 3 11 8.6 11 265 Waiting: 2 9 8.2 10 260 Total: 7 16 8.9 16 277 Percentage of the requests served within a certain time (ms) 50% 16 66% 19 75% 19 80% 20 90% 21 95% 23 98% 28 99% 29 100% 277 (longest request) ================================================================== THTTPD 2.25b + POLL(2) 3000 IDLE CONNECTIONS Requests per second: 5902.01 [#/sec] (mean) Time per request: 84.717 [ms] (mean) Time per request: 0.169 [ms] (mean, across all concurrent requests) Transfer rate: 3181.55 [Kbytes/sec] received Connection Times (ms) min mean[+/-sd] median max Connect: 0 3 3.7 1 18 
Processing:     7   28  13.3     25     339
Waiting:        7   26  13.4     23     338
Total:          8   30  14.2     27     342

Percentage of the requests served within a certain time (ms)
  50%     27
  66%     32
  75%     35
  80%     38
  90%     51
  95%     54
  98%     59
  99%     62
 100%    342 (longest request)


THTTPD 2.25b + LIBKQUEUE
3000 IDLE CONNECTIONS

Requests per second:    7945.49 [#/sec] (mean)
Time per request:       62.929 [ms] (mean)
Time per request:       0.126 [ms] (mean, across all concurrent requests)
Transfer rate:          4283.11 [Kbytes/sec] received

Connection Times (ms)
              min  mean[+/-sd] median   max
Connect:        0    2   2.6      1      12
Processing:     2   14   5.1     14      28
Waiting:        2   12   4.5     12      27
Total:          9   16   4.9     17      30

Percentage of the requests served within a certain time (ms)
  50%     17
  66%     19
  75%     19
  80%     20
  90%     22
  95%     24
  98%     28
  99%     29
 100%     30 (longest request)

===========================================================================

THTTPD 2.25b + POLL(2)
6000 IDLE CONNECTIONS

Requests per second:    4650.43 [#/sec] (mean)
Time per request:       107.517 [ms] (mean)
Time per request:       0.215 [ms] (mean, across all concurrent requests)
Transfer rate:          2507.37 [Kbytes/sec] received

Connection Times (ms)
              min  mean[+/-sd] median   max
Connect:        0    3   5.5      1      23
Processing:    12   34  18.8     29     542
Waiting:       12   33  18.1     29     542
Total:         13   38  21.4     31     544

Percentage of the requests served within a certain time (ms)
  50%     31
  66%     35
  75%     39
  80%     41
  90%     65
  95%     85
  98%     99
  99%     99
 100%    544 (longest request)


THTTPD 2.25b + LIBKQUEUE
6000 IDLE CONNECTIONS

Requests per second:    9023.58 [#/sec] (mean)
Time per request:       55.410 [ms] (mean)
Time per request:       0.111 [ms] (mean, across all concurrent requests)
Transfer rate:          4864.27 [Kbytes/sec] received

Connection Times (ms)
              min  mean[+/-sd] median   max
Connect:        0    4   4.6      3      18
Processing:     7   22   8.5     23      44
Waiting:        4   21   8.6     21      43
Total:         12   27   8.7     26      49

Percentage of the requests served within a certain time (ms)
  50%     26
  66%     29
  75%     31
  80%     34
  90%     40
  95%     45
  98%     49
  99%     49
 100%     49 (longest request)
libkqueue-2.3.1/test/benchmark/scalability.ods000066400000000000000000000440311342472035000214250ustar00rootroot00000000000000
(binary OpenDocument spreadsheet content omitted)
«ë‰î̘ߴ\…XS.Ï™ÃÀÜ>‚Üá¨òr3£/2;—£Øé`FêФĥf€b¶÷fÌ´~€˜Éc\àR‡£ÊKÍŒ¾Èì\Ž&u:˜‘:4)q©উD?Ϙ_Þ€8.kŒq#HŽ*/53ú"³s9šÔé`FêФĥf€[&ý2c~:ûOˆMYcŒ AêpTy©™‘ãÈì\mÝzð€FîÐäÄåf ;ˆ~1¿¿ q&L¹9gsûr‡£ÊËÍŒ¾Èì\Ž6b§ƒ©C“—š~ÒHôrÆœÜànÄ…YcŒ AêpTy©™Ñ™ËѤN3R‡&%.5<ÐDôVÆœ©ã§ˆK,c\àR‡£ÊKÍŒ¾Èì\Ž&u:˜‘:4)q©à—“‰>ʘÓÎ<Œ¸Ò2ÆŽ u8ª¼ÔÌèÆGv~<Úºµ æ4r‡&'.7=Ù¬¨às)=‚¸ñ¡œ9 Ìí#ÈŽ*/73ú"³s9Úˆf¤MJ\jx®…h׬91ØSˆë-c\àR‡£ÊKÍŒ¾Èì\Ž&u:˜‘:4)q©à¥)DÓ³æ,wÏ!^`ãG:U^jfôEfçr4©ÓÁŒÔ¡I‰KͯîF´OÖœ²ñeÄoYƸÀ¤G•—š}‘Ù¹Mêt0#uhRâR3À[S‰Ëšó¾†x£eŒ AêpTy©™Ñ™ËѤN3R‡&%.5¼7èKYs2Ý·ﱌq#HŽ*/53ú"³s9šÔé`FêФĥf€§•5g†þâc–1.p©ÃQå¥fF7~ä°óãÑví¹`~@#whrâr3Ч3ˆþ6kNwþâï•!9s˜ÛG;U^nfòE—=Úˆf¤MJ\j6dÔîD?P ¹s÷ÿy¦>g¿ Ìí#HŽ*/53ú"³÷”cIf¤MJ\j(Ûƒèά¹EÑúlŒ AêpTy©™É]öhR§ƒ©C“—š ÉÎÒ&ðUUJgé«©¸ÀÜ>‚Ôá¨òR3£/2;—£If¤MJ\j7[›À—ªž­/ ÄÆ¸À¤G•—š™|Ñe&u:˜‘:4)q©ÙÆ9Ú¾ÞÕ®ˆËûæö¤G•ïVfœ:W³ðåÃO°˜9n¡[ÃQÑ­ªÉ ¯nW¹¤›Â¯n÷I¾êÒ 5úB5ŠY]Ûã㬾ÂõY}ùåY}²í3úüÄ7fô)]Èè³`vWëÓ6ÞR­Ïtwtµ>9Xyµ>ŸÒO«ô)gÖWé³t4Wé<_© þJýóÙöJý‹Ãî ý#­­Ð¿k9§Bÿ µB5ýA¹>Ðôžr}lÞùåú0¦Êõ‘£Ëõ—åO”éï¿[¦¿’Y]¦÷bÏ(Ó;þ¶—ê}$?/Õ›•—•ê5ñ¿*Õ+/ÓJõx¿½D¿E,ÑËôнÌW—$B„]Ý.Yt;ÿ¶©Är|J½…±ÿÚZžsŸ@œ‰ú)µÔ³¼¹=äîî^²¤²’g>üo›pÔúïÖþˆeF_dv.÷|ÄJu*Ħƒ™ØÐ¤v¶÷wXj˜€7ØkpÚ ñ\Ë8‚Ôá¨òR3£/2;—£If¤MJ\j˜ŒO•'³æ“v?ÄË8‚Ôá¨òR3“/ºìѤN3R‡&%.52½LÀ«">ží Ìí#HŽ*/53ú"³s9šÔé`FêФĥf€Ùåú§¼ tâ¹–1.p©ÃQå¥fF_dv.G“:ÌHš”¸Ô °°B_ƒ…7èGl±Œq#HŽ*/53¶z"³s;q©ùàh_@#whrâr3Ð"tèyÖ^ª#+sWÛÊ™ÃÀÜ>‚Üá¨òr3£/2;—£Øé`FêФĥf€¶*|n[»\A<ß2ÆŽ u8ª¼ÔÌä‹.{4©ÓÁŒÔ¡I‰K͆,­&z¶Ú|°ÓŒ~€¹}©ÃQå¥fF_dv.G“:ÌHš”¸Ô °Kÿ"ë˰SŸ·Œq#HŽ*/53ú"³s9šÔé`FêФĥf€N¬ŒÎ·¾Ù=±Ë2ÆŽ u8ª¼ÔÌè‹ÌÎåhR§ƒ©C“—šŽ©!z¥Ê¦pb«eŒ AêpÔëÔ9F_dv.Ç[§N³Ö©“—šN¨%úv•9ææ«ˆoTc\àR‡£ÊKÍŒ¾Èì\Ž&u:˜‘:4)q©à”±DíÖd›¯¶Œq#HŽ*/53žê‰ÌÎí¢í¯öö4r‡&'.7m¬#ú¨Ò¹qâE9s˜ÛG;U^nfôEfçr´;ÌHš”¸Ô p®ºfY¥9Ä÷rÄË8‚Ôá¨òR3£/2;—£If¤MJ\jø›qøÜ®4Ç«_…xeŒ AêpTy©™É]öhR§ƒ©C“—š ¹x<Ñ„Jóã‹k×÷Ìí#HŽ*/53rì²b½ÅÎõâRŸã€º€~P#yh’â’3ÐßÕ=Sa~Yt=âL<þ}Ôo°€¹}ÉÃQå%gF_dv.G¹ÓÁŒÔ¡I‰KÍßÁl¿iýLîfuM3Ë8‚Ôá¨òR3£/2;—£If¤MJ\jøÞDluY¿ù¼ ñ:Ë8‚Ôá¨òR3£/2;—£If¤MJ\jøÑ$¢² 󿻳Œq#HŽ*/53r¼Î‰ÌÎõÑö_ÐÈšœ¸Ü ô D¿*7¿Ê¿q2ß–3‡¹}¹ÃQååfF_dv.G±ÓÁŒÔ¡I‰KÍ·6beÔ:ÅÄCˆ¯ZƸÀ¤G•—š}‘Ù¹Mêt0#uhRâR3À]MD‡[çKyq«eŒ AêpTy©™Ñ™ËѤN3R‡&%.5Ü7™hl¹9ùÏSˆ§ZƸÀ¤G•—š}‘Ù¹Mêt0#uhRâR3ÀƒÍD/–™3Y=‡8Ï2ÆŽ u8ª¼ÔÌø'2;·‹¶Áèí häMN\nz´…èú2sz¶—?F|6gsûr‡£ÊËÍL¾è²G±ÓÁŒÔ¡I‰K͆<5Ãu®ÁWè˜ÛG:U^jfôEfçr4©ÓÁŒÔ¡I‰KÍÏïF´À:q曈[,c\àR‡£ÊKÍŒ¾Èì\Ž&u:˜‘:4)q©à·S‰>-5g}±Ã2ÆŽ u8ª¼ÔÌè‹ÌÎåhR§ƒ©C“—š^›FôH©9¥ñˆ–1.p©ÃQå¥fF_dv.G“:ÌHš”¸Ô ðöt¢+­ósÿñmË8‚Ôá¨òR3“/ºìѤN3R‡&%.5ò‡DÇ•š“ÍÍ$º»`nAêpTy©™‘ãûNdv®¶koð€FîÐäÄåf aÈl²»¾‚B âfÄœ9 Ìí#ÈŽ*/73ú"³÷”cØé`FêФĥf€ný©Ä\¤ ñË8‚Ôá¨òR3“/ºìѤN3R‡&%.52zVDK̵mêûæö¤G•—š}‘Ù¹Mêt0#uhRâR3@ùl¢«¬ 5MD|×2ÆŽ u8ª¼ÔÌè‹ÌÎåhR§ƒ©C“—šjæXb®:Ö‚xŸeŒ AêpTy©™É]öhR§ƒ©C“—š ©ŸK´ Ä\Bo&â–~€¹}©ÃQÑ­ªIê/¶»»Wߟ„‚:Íßý£‰.o :¯˜èíFý²MutTGûQ«W.^º¼cÕÁ‹W¶›Å¢þ¶àù{!Ñ<ÜŽ¢Å×wš®ªGÏvwSò°þë.ê¡ÎÝý:é+Ð%9ªnüQWâ-^¢¾žÎD€Ñã ÜÚ¨—×Üä)ê޵ɽ¹ ¯ºfTIŽ·}E›ÝC½«–ù«VÚUj¶µ};`‡:vU½îØw&æcǶâþñõºcÕã{¢+ç&Ý»KRVÝ»?f÷¦ó Õ£=-v¤ÃÛ üU…þª"o•ä|n¶^‚ëçåë|}¶Y‚­©K°õs·ÏÂ#âý7CÈ ù¶ÐE¨­jøÌ nÏ"YãuÇ~½>ß:v*ÚA¦šÏOº¶0¹Ïo‹Bý轤û~ÍXÝ÷÷×åkߟW§û~A¿}¿à3Ü÷WgtßoÏækßÿKV÷}k¿}ßúîû'+tßϬÊ×¾£R÷ýÞýöýÞŸÑ¾ß ™*Õ}ÿ¥²|ë{¬ÑAx`R™š(s+BEIÉ^*âºá_’^<‹FëÅsUq¾.žŽb^<óX£¯ˆ›Oƒwë÷ˉöŘþ3Lm9/ĸ~X~ÏØF,•£k‰¾‡uçëÔ\v˜,ê†NÛ.þªQþªÑþªbÕU‰¿ªÔ_Uæ­êôWµ•û«*üU•þyù«ÚªüUÕþªŒ¿*믪ñWÕú«Æú«êüU»z«$Ç,õŽ|rß<ƒè¾Q4cÖæ!Üù—z„Ç¢Å<¾0·k£‰¦ü¯Ä½ôX° ãÄ&<òeÜ®£3c-¦ GÖÐɨ_‚éŒä¾z5õzÏ`ÚšÛq²d÷bz·b~×àzÞŠÕxÏ}ÓÉxƒí«¾LÆûâULÛð&hÛvÅçAÔQèÒŽj}a·?a:®u` Nƒ=Äô$T¹Nœ½«>÷èضžŠ­×O0=>AQ Y†ù:&væýpÙ9Ô#é—’cbgÞ—mãüðþª¶ñþªzÕÕDÕ$Uƒ¿ªÑ_Õ䯚ì¯jöWµx«$?¾=Ÿh®:éø~X¯¬Ì3Z÷$zϘ³hYÆ4êjO¾»,ÀpH4¶Ïwg=Ÿe+Hµ[šÜªä é@¼âµx馅:æ>©Lìýwäæ+Fuß@';ÿE¨âÙ”ãÙdq©ø+DUÏñÚ\ܤ/BFÔÞÞÕÅ3þ/BÃQUÏÈ|ª„)ÉjaÖÔ˜FÉ×ÖÁÂlËê,U fˆ¾9W<[r<[,._Éê^ç¸-·$£¼ø7çá¨r¿P_‡é’EØŽÍÐP¬!oº5äcq÷˨™úÝn/IjN!½¾û`nû}snû½¯=«WÅŪåÈÖu¿U’‚J¸g°8§íCtG å™pjvJ8µY£ºrÞЧá:Í'lCâ1õè“ÞÓwP¡Þá´óhÆËlÝEµpž“¯‚ ù¦J§¿JR°ûqÓÕDô&6×6Sž öŸ¸ÿ8‹A¦VÄ—c:cÒZló>À¥tôY‹{mxä ü¯ÅVïY»Ti“zµä¹j‡só\gubZ€Ñ O/Åô¿¹}–‡a¼Ó¹müÃrû,Õçá+˜žÊí³¬Ä[óœÜ¶ý°¤*jÕÜ‚(óUü!·ûs¸>²KsÛèlˆKŠ ðþª‚‹5xÇÔ Í1DýŠ›‹zîÒ 
ÅOqÿ.´2µN»˜ÎN†šãÀ¯5¨œžÜÛ‚zï4lÂgí鸧>¡'%¯Ð¸1½î jh¼&·:x&¦GÆèaqrnXT_ç\ˆ7ËÛ˜6äv}ª‹ß5å†Fõ•Ρðñ®Üðx,[ Þ«æ¼Üÿ¿ËFÄÎüøþg€±3?vhv~þË!ß¡Ù9Ôû:;ýUC7œ_–Û7qinôl¢¯ážÚóY˜ì(ìÙóòp ïÇØ¶¿z¼ç©ÿPK`üçî/PKºrt< content.xmlÝXMoã6½÷W*P´Z–?[½hÚí)Af[ôVÐesC‰*IÅv}‡¤DQŽ(H|éÅ‘†oæ=‡C*7Ÿö9<!)/–A4R$<¥ÅfüñåW4>­¾¹áYF§<©rR(”ðBÁßx2¶£Ë E̱¤2.pNd¬’˜—¤h¼b.k‘êÀz»°ï­È^õuÖØŽ/^÷g6`ß;x××Yc!©¾{Æû:ï%C‡¬ç%VôHÅžÑâql•*ã0ÜívÃÝdÈÅ&Œ‹EhFàÄáÊJ0ƒJ“0¢Éd £°ÁæDá¾ú4Ö—TTùšˆÞ©Á ?[ÕR ˜®.Ì~|ŸN}=mzW×ÓæLš“-½ëÌ€»¥2Iû—Ê$õ}s¬¶gÖwÞàù¹¿këJä}¹4¶“ªDв÷4-Ú÷çœ;©ÚÁnv#w<MCûî¡w/Âw‚*"à…ržB$&Z÷—ºϤn.·4qZêw_¾ºÅê„eIHRô– ý ÷NA²Óyꛆ 3yb]³۠XóôÐö>¸$áTn Q«K`~k2+øACª¬Í„CÞb[s) ù6ª:±êsÂÝlÀšÉÜk¨Dp ¬œ&W "µe‚Âò£4õvY¡„OPŠg”€ëò‰º×5êe8"ï«æ9]ØËä5Ì*‚Ô¡„xãXØýšB锫¨´O]¹:Þ;&£‘G¢ß.ÃsÕá¹z+ÏêÆìÝL@îëÅ€ûŒ-(œ¦ð¹ ›bÞ¯³7Mt:Zè.ÚšÆ<šN´Ùÿî )ÑðÚà/·m8p2Ö§ÂÕp2éSÁ›~;^¦×Õ0ƒ3¬!Ò†zF|ýšå+¸¢ÙñU%|JAKÏÀņ´s»½ŽÝÓ ~úÙÙ>;ÛíÜáæçlŸíváp ‡s¶Ï ¸ÐéÏÎx =o Ã߬ܨ±Û—4/õ1amr«¿l¸´1áDU0¡eÀ‹;ŽS½çÌŒáÀÚ“¿h•Iý5ûq¬a[F'ëî¢àÔf‘ šï¦ÝŒ®ÿ©HE.µ ýMx!ޱÇ1¾ ÇlÖrÌfoâ¸t¿ï³Ê%gìûñÊMäå&º Ç|Úŗ—áˆF×- ¼¼•}Ë+w¦ñeïLw#úø©Oþ×SO]ŸÃλy;þ'ùê?PK/­Þ(ePKºrt< styles.xmlÝYmoÛ6þ¾_!(CÑ“%ÙI»±ƒŰKQôeßi‰’¹R¤@R±Ý_¿#)Ê’%«ZÛ¡ë …Èç^øÜñî¤ÞÝ ê=b! gk?žE¾‡YÂSÂòµÿáýoÁ­¿ùéŽgIð*åIU`¦©ŽK„™\Ù͵_ ¶âH¹b¨Àr¥’/1sB«6zeLÙ£lª¸·¥>¨©ÂÛ‘EÛé– ¸- ´Ÿ*¬±Ài[<ãS…’^”H‘3/”°k§T¹ Ãý~?Û/f\äa¼\.C³Û8œ4¸²Ô Ò$Äkc2Œgqè°VhªÛv‰UÅ‹ÉÔ …zQ-–ã꼜¦¨-ÓɯÇ|rv=æhNvHLÎ3î¦Ê"ž*‹´-[ µ»ßÛð6ÍŸ‡?Ny%Š©¶4¶CU"H9ù˜Ý–çœ7®j{Ù»ó(ºís ½…ïQX´àÉ(n«Öèvg¸¯Y—tŠçQmÔ6ûŠ›=£Zt RSÂg'I»ú¹M¥žVr‡R¾ =$VÁAóÇ·1„`hÿØÚŸ_=T).K¤ƒKRÌ-Ñr×PSV,Q•i¥Á¶!ۉ桡Ž0lF0zH×A¹]³aÒ ž‚úèíă°ë*§§H£Dûa¦Í Q‰Û™mòPž:ÕxRV0viºŒñ„Ss‰6é8P€Þ¨æÞk¼÷ÞB¦° EHhÜhhLIò 8‰ç¥š|=(VЃX0Ã’õ½eLëtÙ?¿.Õ7¸3Fe“÷ƒJ¿Á±Sb=,vnŒ-#¯#ÿ äÕOaG ç —’œ(¨n±14 ³Ñ‘TBÀ»ÍqÈT]¿‰Ü99…Ä×/†ìÇbË©ó¦E‡mErósãØ™Š çëWîñ“»Ý\ðªÃ¬%gþEìt‰Õo¯½pÅåwŒôוo˜‘ñÏ6zJrH^ Ýë3ÒÔjx¡ÁH“ W)×|Ç ü¤tí' /g糦÷¼nzC™óuÁ¬¿ˆRŽ)” n?.P¯´û˨ïdx6Ö×:¹áEƽ磀¢#¯TçLeû ¾SÝy‹Š@mO1ÙÁéN°³–q®‹nÀtuÜÕAˆfóåÍ b'“‰ö(ÎôNwQÔøîê–+¥ßÁ£Y´¼½¶ÓixÙ«Úïá)L™ƒnv] {±˜Ãùÿ,†zoËEª¿îÁââú9až©íÞUd~ ¤D©ý‚ ˜èEìQòQ7X–º‚~•Dú·!©…€öã^ÊôücäÔ—¶Ïçxx±‚Õ’ y˜Ì¢Ö46Ò´oÅ@á³îoîÌwû²þWî0¶èÍýýý]x¾X¯”g$œ%€Ž¥káDœxì5AËMcý>Ký Ý¶ÃÜ&vöZk=œªé£.„=?GíÛúå³ó³nÈu[ÓýC²½§§ˆ¢mˆ}~Ö#¢c©³dîÑ™u®ðÅ9 ¦·˜Yæü(Ž‚hÌárD‹p…z¥öB7¿xÎað>ŽVóxu³hœJŸ®ß-§¼° 4oG›å² ´kÿRî…÷=þŸÁÍßPK+_%qYPKºrt<Object 1/content.xmlÕZmoÛ6þ¾_a¸(Ð~Лe;¶‘¸h‹ ÐÚûJK´Í†5’ŽíýúIQ¢lY‘ÓèÀ‰xw¼ãs/¼rûîÑÁæ‚°ünùáp€ó„¥$ßÜ ÿúö›7¾[þrËÖk’àEÊ’]†sé%,—ð{Ò¹XêÝpÇóC‚ˆEŽ2,2Y°çVjár/´.³"ä‘ö×Ì®´ÄÙWXñ6dѪ¿fÍìJ§íû +^Õ_³¾ÂA½5Ô³IrbÅ’üþn¸•²XÁ~¿÷÷±Ïø&ˆæóy ©•ÁIÅWì8Õ\i`Š•2D~XÞ KÔ×>Åëš”ï²潡AyUÒ­Wk“至ÙÅFq<™MÛ®¦šó*NUcÄv¹yÝmSO?wõ*­´ÊÍÓëÜ=Òaÿ$x¯×ãQØÚ Ô”ÿÞô°×£×Ñ>•Ìm ©ÏUv?ÒÒ>g×wÑìDÎÍ.ް%aÅÒcõ  _Þš@ÖßÚ®2 £©N&6 í$5÷£ÉZ,¥(¢„`áöinß¡¦W«F© …!dÆ~N­5MùÑl\oÞÜCM躡(–_a\B+Ba*„Q~@ÉêþŸÞáÛ d¸ mVµØ­ÚcÇŽöÈÃø’ö¸Ö>Éxðû·o8µBŠÁN@š ÞàºÅpžlÏ,±Ê­1opžÜ玄§¡53ùóiäØûóptÉα C((ƒþ¯VFèÐÍ[fÕðz.`Èó4åXùóëcùnå_¿Î­:ÓUêvFvaZEY1¹­lýñ8j@[ŸÀ†×È×Íç°hO :Ī3Ípn^Ö¬)æ(‡±›C›ÝÈôbè9öÍüøbÜÝÔžÿbÞí@Èý‘ÂVYž›ŽWtG^‚$Þ05(<ŽùÇóà5ݰÛa9^KèÇ£‘Ì?5WQÛ³™÷0  H°guϦÆàM&Þvƒ³á$mÝ~~Rd2ôñ ˜1¬½ö„v3†y]àÏ*ðgÕà£ÜåGÅþav¹"ZuæèCÕà¤ÞáT“õ±ÌQú,ºâ`óóLî:XÅþáÔ Ïu°=¢´ýX£º„­)c¼Ëݹªt:¿ôw™kF‚2¸3<½2lpy[Œ`¤WWë.ËE“f•¢ë$Z­=æoÉèbõèf¥võ¦¡I€•µàZ›¡g[ÚÉJõb\TÙÔB›—[•êf¨WïhÛ Ö2t’ýYô-c¸nŸ}Óéc›gȶ…@«ú¸å)@7ªÍ²­éy6dêE¸Kn>ìð¸[%ʲÀiø¦£—Ñ1™8mÝäÊ€x'»•wY0Jߌ޾Ð᣻÷År½Œ³q ÿlü2:¢ÐiààáG²Þ$·»T]Aå,4G« 1vÙ§ÓÿmYþPKÙ–%s#PKºrt<Object 1/styles.xml”Írƒ €ï} ‡žMÚ™†‰æÖ'h€ ¦Ê:€?}û V‡´I†£ú}»Ë.îñ4µM4p¥ÈeIŠ".”BÖ9úüxßЩx:BU ÆI ¬o¹4±6ß ×Ñ,KM–9ê•$@µÐDÒ–kbŽËU">M\ªå ª;Ø· ŸL¨lÙ+—žÃ3;Ø·KEÇPÙ²sO}½‚PyÒM\AÌ í¨ª˜!¿rt1¦#㘌ûT³Ãá€Ý×­`¶q]¯G• ó†ÛdgI†WVö홫àãQCÿMFuðT‡úÎñØ…ªàù:øzDû2|Dûòª|¦Dœz¡}¶v[a¹ø®é»4}Á˳GñQ Õ‡³‡8£ Ûºí­ 2sž‰˜vü+­ì¡ïF~ÅŠw ÌVHþãÏÝÙmYÊŠnifR'.BlE—Ë€[n(¶Ê3Š~ˆ·´v¨X7Ô²˜pqÄ·WVñPKÓp:ªiòPKºrt<Object 1/meta.xml‘Aoƒ0 …ïû(êL ‡•vص—õ<¡àvÙÀFIüüvØ1öç÷žü0¶Mð‰Ö¦BÈ(’æÚÐ¥§—çðQʇœÏg£QÕ¬ûɇ-ú*¸Ž’SK«½%Å•3NQÕ¢S^+îÖµ¥Õl´TÆÆÐG!Þ¼ïÀ0 ÑFl/ ³,ƒ¹»¢µ¾q]o›™ª5`ƒ“ƒIXÙ)áCMì63ߌ&| 
=Û%q¼‡å-‚ïe6çKD¹Þj-óYú‚„¶òlËãUíø£–Frw"3÷õ×Îò;j©Œ[™ížzÓÔa¶Oâ~ æpg}TùPK~,ºùæPKºrt Mark Heily2010-03-20T09:51:542010-03-20T10:21:53Mark HeilyPT00H14M28S3OpenOffice.org/3.1$Unix OpenOffice.org_project/310m19$Build-9420PKºrt<Thumbnails/thumbnail.pngí–é7ˆŵÕeÔÖ–*­ejÉkè (FjKMIDÑNçI ©¶„H-/<íX[¥tAl•…ðhb ÒÅ ¥AH¤¢±Ö–´‚I¢^ßûðþ†÷aÎùsïÇ{î¹n¦¬¦¢§¢¤¤¤æ}Ñ#à»¶*)íQ:tà»SuD))©8{{\€ãKV„?Ü,”ê÷¬_ß÷ãÛüi×ÖAÁºëJU'ãZi¼rsðQmí¿)3Aäsí´`Í=оŠt¢Þ!M-ˬó«êx#0] 0õ3¿d€^=MHXïÙ Ï÷5`“tÑ“‡ç‹ÇVÙËW»t#Í)šqP‰ UVú'ïö뼫ë¨b l`t ’äô †¯ g«‡¼Iá»EW'’Q¸ÁO8s ó8ÛÝìM¬45°kS=`p)0Ù`mO¬ú8ÁìKþªêÒ<ÿ›öÓlûÜç‚!  ¢…¦&ÆSÆ«·øØïµ9’rKh‰K"ƒb£î-ˆÝ^èÖ \¹öüé›…ãWÊ­6M(Ÿ׳4€°*ÔjÄoÛƒ*iX'Zd­©“„:QVë…¯oxÁô±ÀÄ㪶ï}§Ì‘6þ £“tÎx6¢Ž#Dµd­ ¾hÎsHÎŒg¶:þµóðxrX]ÝöZè:”DŸ ÇûÿMZZ$N‘’ÅŠ*5•Õsll~°×슷ǻy£–)‰ iâœÛYÝžó}%Ï{³å;k%àUäãg¯ü<#žE 9­#BãèñqÛÂüºŽ½íådçgî· ÿ+˜o³–¶ÿ‚81%&fšTî?+2÷= 6•jfìÏÂ,€þ…åjŠâäøÎöΙus«¿î'³<Åü}9Áö˜aT‹žXüº-غ¹`€2Í‚)2{Âø tà •õ\Îø¶4€ažà±·C’ײ¿8óE&¾ŠmŠŽûˆ?¦@·ŠlVxÑ.³8Êlrmøô±Ü3ø‘y±ò6Ûze9|ɉÙÁLjƮ•Ã#7tê™÷‚Óæ^ù &!Æ_Ò®®^1„ÝkÑ Æ¼õÅH>üD EcFsÝ,½/k8Ùl ºq·†ýä~=n('dÒasU;…²~ìùÎ4‡9R¦ºëdñˆ$ïR¦ŸxBX7›#;&nb¹àÊÔšÖ(› ¿Wc›‰LòA-:棵ò¯é³¸À.—Ú Åª­Û†Ãî¤Ë” RŽôRlš).ŸO‰nv£9A ¨”e‘ä¬e~¨›æ5ÖÙR3“%ˆœÅÍ]îg¬ƒ¶];>\²Ée¸Y{^›÷jß *¨ÿó@rP´{é$%ÿ¨G:vŸ}Š3Z7±ŽøµbÁÁr )ž]åöâ©ÕP%2‹Ä‹Hð2YvlÎ^!$О(y~é Šœ“_ ¸óV»È…nÅïzÙ‡séû¦‹qn}›«„èÐ:õ¥NDËÄœ^‰ÁpºkÞLDŽ­Îç_kÓI{ù ½Xòã/6‰\4~'K”Ðk·o^j9lÉ ]tÌ7FÿŒZ9 >TÛå%–º-1äȶ© ¾èÖ”=8ÜdL¶YY›-cüüÌŸžšõ¬C¾®Ð¨ 8cç;¨†õt™Þ‚©Çˆ¨ôÃÕ’ªàН6±  ‚,£ü}¾ '„KU©þÓžDX/õ˜ÄûûŒËˆà‰³xþðÛ{¹ú— b9•åºuß]‡ÏõÓG}ÜrìT¼§Ÿ·]NŸM€3C3_Î áÍ ¥hú{¸äL,ï:g†ÖÈrÁÇâíÃsCÆÝ®‹Y6v1pš^ϲ (ÃQ†üÅД_;0Yé±÷ÆO  #nXHÙó­2_.²À†>”-óáÄõSØ–²Í™N™Qû³ž²'ä7Ã&|µý–!ªa.Гµ7Æt"ƺÊ8ø~ÒÛ‚Jó ’=´»s2pú««¿(f4bx1S¸†C(ö*¸F@ Æàuãi§˜—Ê(÷€GÖ!;òÁ@nú•*ØeßýCv~½·¶0UªìfHIMxÃ=&™úúµj<åŸç”Ï?ŽžVÉÌô'%„dÌRâ´òè³SÓTÖª,ü)É9 b£J+wöyzs ssØ!¤©ËFº¨g Σ¡».Âý+;Íz?ƒX¬È3Rÿ’‡àžË]ÞH n5µHª µSï¸{Í`?ÝÛÕ2ágÞôáX½Ã¡¼8Q½P2rw饊„„`ŸÐ*·}É=³ j7Ã+¸'÷åªÇ¥8Ú/~$`¶oà‘ òùFQ¿tè“ðiõâR….Fp:¤ß&F{øp—Y1-*‡ùæ‚ÿƒhU(HÍÙÄ‘ÈÕ6¸Ñß‹Äo³zþ°E¶1.tP´i«;©&AÑÚ{}†mR¡¤+{] š X’D²ÄðÎ;àÂhÕš-vØeWîćÖt~“þÒN+ ŠIa•a üÑRLu”Ácj ër¹©Ó‚á×ìx¾ýZká&âO‡í ¤p„ƒktП›éVº$íî±àNrš¼@«EØ¥¶K_É žÚw<‚ˆ¶&m\¸'y»óLâp ðF8F5þ”ª~)¿ ¬½è„–ˆu·P *îE^h*þ¶BÄ›ª•Ô¶°ïqý˜ðÁWÄÝ£Ä.½¿ø b—0UéVŽèˆ¡ÕÁÿÜIoOˆG½ÛÕ´PK©[þú u PKºrt<'Configurations2/accelerator/current.xmlPKPKºrt<Configurations2/progressbar/PKºrt<Configurations2/floater/PKºrt<Configurations2/popupmenu/PKºrt<Configurations2/menubar/PKºrt<Configurations2/toolbar/PKºrt<Configurations2/images/Bitmaps/PKºrt<Configurations2/statusbar/PKºrt< settings.xmlí™_s›8ÀßïSxxwü'9·fb:Ž{¹øšk3ÆI{}“amk"´Œ$LÜOL'!` 7Ó?yŒ¤ß.«Õîj¹üðä±Ö„¤ÈGFï¬k´€;èR¾÷óëö{ãƒõÛ%.—ÔÓE'ð€«¶¥ôÙÒ˹4“á‘n"‘Tšœx Må˜èO—™Ïg›±°äÉ£üqd¬•òÍN' óðü ŪÓ‡x4ê _ÒUYQÉìç¢ñ§ hA¢L,¬ßí^t’ÿFk§ä3Óô +µCúúÖåN@òÓ¦ ¼È6­ÝãHµ‘¡Eš áO«yë^®y ’.Œ9úF:¨¶¾¤\V0^v^sÞľ…¥Ê…÷4ü+uÕ:—þ®;|w4þèj«ýðâ¢4¾í¿M¹ OàfEA˜¿Uñídb[Fa§nFK©„öÊ{oÒ4‚fôœmCо\b¯T¯„N!QÜ¡¤JŸ‚oyÆTÛÈ—äòÈçÕÈ7(èäŠ0ÛgTý.dÍ¿F¡ùÝŠ.BQ§)zFûÔ@yö©CÿøcGÑ Äôá«óô«ÁS}‹WESì¬(¤É-ˆáGR¯P)ôjGôæšR«GGЂ,5V´×­j²‚(|î¥*Âí5† š Û D„–ä“s>ÏEÃqX/{´â€_Q’È~tèq2¶ b_ ?¹Ø±. 
¸Ò•Ì〨@,r·%a²ÀßʈùcýåÛý¹ ÿ3ª¦ÐÕá!rD C‘A3ŒŽP¯?8×õoźæÅ¾6`•"µêÇgÞqõý¤!q Ñ‘¦úT~ ”¾Ó½õȤ Ù”[‹›Ž3"d7ºŽ“•€§rw#iL ¤ÞïÂò»W9gñ¹5ø±x;X¸tCe¡ú5Áó•¯ê: ~üD¥½åÎZ §?à¿«v×Ñü Tù¶Cò $Úà·ôNyã–7n)¼÷]¢ øÊZõÂ}JI§”tJI¿FJ:$i(œæL‡Šúñwúšªßã³þ“ƒí”fè3d»* "apqE9Û2¨±ï³í½ñ‘(RÇÛ·“¯Q,¨ëŸ¬‰ ŽŠ¢ä±Íå[ÂWyÝb+mÊ ‘¤Ê눠„gceùõW°¢\§¦êoðw÷®?P‚5i xÅÞø¾]±îíŠ}ß_}¯–Ëíšž¼ z¾EéÚ{QSù Km뻀;* 9ýõZZ;dÉçÉ/|ÂP6‘ð“’ñZ 7Ïo&'ŒÓµoÔÿÂÅ„pXõ/w¦‹Sä,{°ê°ÓT~Ü}y¶µ“'æ=" /{W_;Eßã­PK‚ß%ÑPKºrt<META-INF/manifest.xmlµ–MoÜ †ïù–½Ù49UÎz#µUªªJUz®fñØK…2C²ûï I÷#íJÝµÌ Ðð¼ï,î6£.Ñ“²¦-¯ë·eFÚN™¡-¿?ÜWïÊ»åÕb£z$nv"Î3´ï¶eð¦±@Š#Rò±MgeÑpó:¾IJË«âî•Æ*úmqÃNAÅ[‡m Îi%£OñhºúY«>–¨Éy„ŽÖˆ\ GÉÝ ÷AëʯÛR”b²•M•,ؾW«¡S#2¤ù·O*{¢½õ#pZ“öͯ`ùöÓÇÏ_bÐ} z8mêëê'Jþ†NƒÄ”‰—¡âúB»gó/ä2nXÄm=Í—ÖpÚ”0'—x«‘fÇîÖVäò½È@:ègÔ—\ƒçÿ¨ÏšÓÄTNÃÖa\Pšïšµ3ÃÜðYx¬éÕüó~Ð)QcìZ/dð~Ú ½\+‹€óvðH´‚L½¶À˜Ë½uÁÅzyð‰œmaØZ ®FÄ{Å#¸LuðG# ›8L9”_L*Î:¨ZkœgcÞ×™ãWnÿÜ,Ä??¹åoPKÚ"Éæ° PKºrt<…l9Š..mimetypePKºrt<`üçî/TObjectReplacements/Object 1PKºrt&ÿÂS65éèç|o© ÿŸ +%‡‚Þɘï))t,ÃÌ/|üK‘+…YMtÉ'«“wV \i/'HeÆ÷R˜Â›wÊ>#ª.-ü ßuO¤ïh;¾·ˆ8¼v-†e‡ U7fQÕ¥å‘ágô³æ»TËÔÎð$XRÎA?1Sq‘á=l¥÷ÝëÛœ UÅmÆ/Øø“¤À^ä§ù^]eKN4¯Á\Oë*q))À„¾mùÞ<±ú̾K¡²ÅœÑv·®áuƒ†j”YÛBÏâ^†ü Ÿ ¤ÌÜ¡K­°f÷ ß+‡wÌ 2gžèY´QLdÈ¿ðÜã'ÔZf’áù%/Eä_xy~¨M]yªÜi¿$Q¸Â—¦’CA Ÿ}ª+uä_øÒZr(Háå9ô&kpm¾WcÉ¿ðÜ«ï”\«–¦’CA ?Ü{ßUZÈ¿ð¨ùÒIþ…ojWënä3Îþ‡ÎAîJ ù^VrBi*9¤ð9iØ÷؃ýùÞÂ’rßý€þñüŠ'*+wñŸäÒl£o›A~…ýVßSøÂî#w y!'øþÂÓak|×·úžÂ„à%=&·ÃÖ~sOD'þúËFùƒ¹k€>ÀkÿµS'ߺg®ý"è/sŠ¢Wd¢_ÆÔèç‘{.9°}Ü/³no\ekÙ{3Óª£¨ OJÎ1®« ÷N´çÈ$ŸZ oÄ<“JMü@J>jÙqbŸ}Kæ[Úe¤¥”#¹33@˰\’Í EUxîY€ë´?˜¶Ž‰‘cVpžì6™Ë³wA? Ë‘[ÌÿÐ)ŒíÝ›ó!E[ø^¨`á… ^¨»ðØ–V `á… ^¨`á… ^¨`ወ”§gì&4±p$LSÙÌ ~àêjMÙ´’5¶e w.ϲgqjY’U¼Â¼=(™¡l-ØÜ(ªÂ3m#/(¾`šÊf²–®Mtvaœeg_¦²ŸÒVwÙ†ê)ªÂsêb˜£Þ²¾+×PÛ±\£Zoi˜•£¥-#*¢¨ ¯YáE~î-vy‡yËê0­ßGGG'9 t2<:è^wøk «åýd±I†t1ÜI t6k¾j¶á¢ ©LÎÔL AÞÌ%u!s©©EVxsWï°ì&´„¡'åÓÉÿ%W›ã@Ž–’÷ª+9¼ðƒ[/˜Ñ¶òˆýØW.¥ƒ‚þPH ßU’···ç{ó#ŸÂOÞsĪ#M©|êºk(ù¾Û–Ó¢Rô;ç‘Oá¾J›e¯Ï÷–v.„#› akXïÃO*0ù>»ä"?Èʸ¤íÖI¥ÇÛB°x\?c™Ì⸥̤ù>öíz'¾·(ñÙëg€ûd˜¸’Y\¼ŸŸAiò)|?¯§“Ükw,¶hË·vÍLEI>…^?«­Ç)¾Wµ·è+1vñ6`ɧð¯?Ýñ—^QÖh9äÙ­ƒª©ˆG l]ö3vñXž| LCbɽô‹[ùi…bÛÿÀ{KIh–%#ŸÂ«à^’¹ûhøi#sÜâ]%†| ÿý¨¯í×öᎱKºMô;’po™&#‰'à’v{å=¨—S¥…ý†5TW™eÈšVr7‹\É :\sZiÉ!ß¿»åU©¥äöÒªÉÐ=W˜‡·˜™ó8Õªüìݦª‘ÓâŽ]]yݪ¢ ì¼÷Ý iã>|9µÿ¿ç 䦠¾u¹Ð˜OFúº¿öo4¨u ~îÂ0Éÿ³Þü›nFðüù^VrBì½̇““ø‚c¬› wÀ®5ë_íeù¾;;hçä¸ù'¾?WÎÜzÖÙÉÔtî‘ „ýÑG1#“7líPr”ò-|îpzžù'ß_ŒôhY+Åg¶l‘ÜÒÊ%ÊÈ缜OágÞ mjÞ„wG«>=«–| ÿ娉Q±]Ü7ùþþk¡ÏFÎvwp?æ­\»€’E>…_rjãµ$hkÊØ93j:ù¾É“ìS]é#ŸÂ—â’C¾…—Güñô]£žÍJѳÌ| m£gÛ»8[«|ÏfŠ©šN>…wh©ð µ”‘Oá…®yÉõqi$ÿÂË—<8Y•]bµh¡šÆc>|à» @þ…—G…%'¨ªä¾«®ð„6î}$„ï- u)Â;z·Û$­mF«'&vè~rF…zCDÝÚÕ¨÷êšÏî&}¦£Ï“mž>D¾É¯0©.Ewm`yúQ\g{êJ8Ý©.uþ ]î$QboÂ̸HËÏAÒc…|ó&VˆŒ“DÌg±Í!ÎþÎŒ ±u„=L™Îec=ýn3ÃC05YC²Ëޮܢ„ÞÇ7û1uþlÆøøK¼5F~['’Ũ;ÈüþY¦²(³MvGÍŒ€D¾âÈüj8c_~òýG—¢¦Ä^E°¿’ïàtÓôµ¡ƒ" <{(>öUWǾœŽjkä—z¾¯O¯ï ÏЬ¿M‰ìB ‘RG4 ¼º¹p~g®ô µWû)¤ÊþÔGq¯ò Ý"¦ÇzëYõ­å<¨NbÀ©Pº´6¼, ‡BÀY‰Íõ %O—!Ì$%+÷NNT²¿<½ç¤ÿžœ¸þ°… { gåNæÏ5à±¥ÊØ4Nÿ%±Ç/ƒÓR—PX,(ø{«;÷#F=kIÔK-ñ4Œh élWëµÃЖ{¸?}3iJ¾ÉìK•7Q÷hºôTÍŸ?}ÃÂ<mé™Ê‚‚óñrù4¥OÐ1w<ÝÍÚFÃÎå\g¹7X*׋åòÃÙv©FÙÀßcôqY =à¦?}{}÷&¿hC's¸qãF… ä’‹­¬Lûém@œEìˆuW² ¤}ù ü•™8˜³U AK«˜:W6ð3oˆÖµ6¯Í{<§W»r‡ßݤK­[·–O,B:—‡¬LÐÑ… r„\úÄ}ÖÉv•\òèºPõ(ø¶uÍüD «Vfü¤¢eÕx¸È–w\‡š’÷ëpñ£\ŽÒ ÷rRòŠ’}FÎÏ‘Ê~RK2ç*ÀYYйT¨ •ª1—Ósw0“€!Q‡/!l'Ub@h•—iCΰ?tXó뿳þ×н֚Í×ùË Á|ùÕ• |‹^óO°¯,UEÒÕ3¦Ë†ÿçÒ ¦Ž/^&STÙ%iY¢g©ÀvÖ\DÔ¨Z¶Že…Õ+Õ¢Ê7²³hdWYW'G…¬’ C9¹ËçsÍcGq9¢Ê¾j¿Aä÷ec¼q=j¸Œµyç{ù)ÏnÏrØ¿†1Ž„AE¦›iÓö= =ÓqnvÒ ÇùÞŽô Š42Ð ”{ß¾ZPø+btmn׿­ý€v&†…¯œÁ êÀñ×M÷¶]¼cc ý%èٽʄjr÷TakÝ~Éeß¹QYòûòOäþE€ôI–=;bÊ¢>þòçË’¨¾4¾l§|΃ʾ7;NW‡^Ùýò'3!J“kÝuÎÈ7ê |àßïi¬ÞòÖ­æß¸IŒ &Ïÿª8¿>ð4ߥVêºÿÉÞo8A§ÁüDõÀEŠñUGJ šø›"q+sæŠQV¿lW æM`7ðÔ`ìõL Ùþ͸Ìîõzá²e‰¡+œsoé¥û%ÉFÓ õ™|l}Å‹"¦#$Ùf™^¬´\v>>êV†Y$dtûº·Üí䙕enÕp)Ͷ¾Ð@4/ð\ÔA®~™«äM`W®4y¼á é[ãnÞaݲ“¹¨ƒ¤ÚZg¹ k¼^¬ä^:wõëJ>´™n±45ê ‰GT^ `à ^ `à ^ `à ^ `à Ž-+PPñ/P0ð/P0ð/P0ð/P0ð/P0ð/P0ð/P0ð/P0ð/P0ð/P4,ðE‘¹|f ŠòÈzÓf›¹»;C ÷Ó£`â"Ë @zeÛšlÓ¢¼Ù¶õò«ózå.5hTà³>.ßeôµAö ˆ¹OY7b 
0¹ŸÅAvRä3ÀCšKf`…ôhʶ•EÏ?ãOO&)ùÕÉ6‰Aæu.²? æ÷Áz€Û/Ï“óGY"ѨÀk—ýCyfÄå4íi7Éx ¹0ðPä?b…qY8)gDï~²ªuݹLž­HúRTˆ\²Õ=l‡Çè—3;‡èB¶—§d£Q'Q–̰$ «³òŠË}È‹Æ])ˆO¯UÇ“ŽØÕùÏ…ô\ÆqäÅM¢Ñr]Ø¡‘­îörª=u›U³I0ÿ5£‹$kN† Q×2•Å2§!oo<Æ ÒÎöh"2Dén´4¿ž ï°ìÛꮬ“»ÐϹߜ†F QWeb#ûY”„xD^ ”ÐÀwßÿêì°jÿЙÇ:¸÷÷÷–ÍÏDœ”Ý[Ë©›}Ã-ò³é÷¤òóµÁQt3[jÅCšë´nACŠxÈ9>6ÌËÊa@`,-“ëÒŽYùQät@Uw¦£˜+ÿ„¬„2ÚeZU§nFÑu(*”¦;ÛSWÂéNu©óOh—êÌ6I¶ÌWût,\»ÿzÝðÔîÀÄ7tôùF½W×|v/0é3Mß“–©dQBO¢î^ð«u ÌL ´es~¾Üˆ¾ø™àÎKšë˜ÀEhËò™_ÍÈåšë¿{Öƒ‰´gmÆO¹‹€ÄË\Ô!å~];7ŸHšDý’Ü—þå/‚«áÌv.?a¶Ìm“ m`6´Ñø³¡'ÝO¢Ã~ÚxÒýGÌPÏtØÉJ%4ðïÐCdÞ÷!3$©ü\ö(M2Â{ žý|-·µe©½GÿÌ3$©Ü"·®YGI‚a#ÙÅ`'sè4†é×P†ü=­ ½„öéW:ðˆÙN®ƒA–JnàUBi}àª<¥<ðH^¨:ðâ¿DÃK7¢‡µ7ïÝhjþãY#jAÕ×2$³e žj•Íì;óµhÑB!¢nTxî´jÞ™:Ts~RbP}à@©À§¼±oÖ½i·aÇwù*ˆjQ*ð†U¿,]¯.„ƒR‘_ôáß­zÓ®<ˆŸ„”l” ¼¹«Í¸\ž”!%#f÷†7$‹n#áç?2Ê>õ!8òÈ7ˆ{S;ƒè#”­ŸßÁœí°^=:” |<ð}~"ø³u*4ÖÞyгÀ£üd¡qó,`·«Ý¢#`ÝY8ÅÏ£” ¼y'¾‡eâ ú }é™ïH~‚¸r–fŒ†.ð6¶\Ë ü<%¥ÿõÕ=ûæ=uxjÏz™Sò^¤lRÈýë0³;c4p†oaûµ’iJÞ¨Z“hÞÎ¥~'xušÀË0ØuS#"ÍC©ÀsM@Ç‚Ž á§•2¶Îƒã[Áª&Ä>‡9ÛàÀ#~MC©ÀÓÑAâÄÇôÕ“ü„R@z* ª FÌyºA+Øè “~ãçÑd” <èVî¸üzÍ'Y;ûðS4’ð»°dÄÇ1ö´up<’Ÿ¡¡\àA{Ï4“­£ƒøn âîæ ÇŸáÐc…ÔÒ‹’gX~ôšy¯IÄ=‡‘M%ö®M¼4Så/ºÔºÕD¾ 3›Ãw¢JÜœîÕàëÆX²_˜Á–G¹À›»Ò4wrÏ]gÃí—ü¹87nÜHNN ˽ucQSÉggE_¦õ§vý߬¹œ ¦ï“/|WÑ \àÙŒ1彬m"ay ɜ֭[[yÒÓàÇJŒ¡«çãá— Ä,ÇNˆ åoÈ48ýà7¡‚ëv~Rñsd#ì\Äã–â‘<_” ¼¾=™©9êCê3·Ú„s¯aà ~*’Ê>ÑÄé⌭ò½øIE ¹F#Wj„v}à`(?U`|_G=ÊÞ¬™I =7ÎÀ¢aŒ1~™ æŽãÿzõN¶(¾032œžømWßw0ítüª#g®3ñòô3ÇýØ=]d…eѶE|XpMD¥(xéÅ2ä϶_Á{3cx *Þ2îËw±0¡Ô7ø˜œ5öÀ ½2ÆF•ʤ¦D˜˜ès‰ Zzdö1,ôåÖT6ðìíß©*ft…G7¡akX{&®à§"llïís#Æà‰5™eÑï0¦&\×½K™E® ¨s7c¯uP6ðù9E\ÖPÉ(Ù§†ŽèC†®ÁÚѧ¢l?'$§Þ½3&^”‘™ÅϪRÊ™ÚU+WϦ¢ƒuÅz¶ëÛV²­\†Ÿ©„“[Ká¼P2f:?Ô£ºÿvqÙ€ºòÞ¹ÿë¿rîxí¬T(ë|+üuËÙ¯¶šÔ¨xïÅ{ÙbÛºU’ÓïEÎц,‘y•+ÓŽXU4-c¬ßŸIdºšh Ë\<¤¦gÞüt¿ï§È7¢ §ïÂâ>sþºVåžÄ~’ekT½Âý¨u¨2mêVi_¿jËÚÜ·U ;;;¾+?ÜÜÉÜûÑ&Э–’þUKÏø}Pù6¿ôo“âõáæF€òîrÏ#µÌ;Pnt˜›“““X$¾ }£cÐvS-¸ÌüCÉÅiñÞäI]{ÈãçNüçä+÷£ODø=ÿ’’N<-ª…½’eèêTÃÁªbÿ¶ö-äúË+ c[A”ÜÛ£Ë dNƒNÖroõ‡x*ç[Ô~Ä’— õŒIÒðö‹üo,ÉΟÊžÜÇëÛWø‘„™s”éÿƒóåù%Èž¬µï[j.Ö,Êlï@&~‚"‘¯>ýv8ðØµ°çoùi5ª–ü;—wð×MžC‡üïMMYSÌy´™÷eðçí„©ÍË€–â|n(xÑe0ï(þxª@÷ñI ÐËš1Ü'—šxr1o3™ø …„6è/¦e*À1oö¤qdÆD`ßUö æ7Q.ðæL¯!ùF½Úµ#ð¿.à±@°ñ.t-e&¥#çÏå_0^µh5+».RP.ð©!5.5öžA=ɳ$DSP.ðõÚw™w2›LjÊ^tù´¯7D³P.ðæísí¿W‡_OýûHà$&&FF–æ«*ä;à|ÊÄ¡‘1å ù‘§üWdÉà[y¢dàuËåYÃV;Ï8H @¹À³'ÝÿÝ´QÉí´ÉåϾ¨k"Ê ±].oç’rÏãíRòQ.ðy¼CJ>Ê^t Ì;‰?žro{äxèA~*R‚Q.ðl8Zå{å¿ ón›ªkŶ³AJÊ>éúýÕÿ«ÖÔÖ¢×>y·›Û¯>gf<öZUo,¾„¢\àMÛ|øåêð!¿>R¼S)¶ÞØ ^¤$¡\à!³©¾øÑÙU|7RâQ2ð¹×²EJ>Êïã5å¯_ ïã5åÏTóŠb7‡Ž’Ï$±Ç¨k"J¾Qõº!þÐʵÉVVüý-8üXÉE©Àߊxàû×õÌSº7ä§¥F7?†”@” <¡a×±9bÎbÔ\6ü˜µ5Û€)I(ø‚PµjU¾ Q7Åø’ÆãOÁu’B骥w§QrIy“ô³sÏ-Ÿ›¿Ë ~•R¥‰Ìõ ß=OѰ ðó˜¦Õ#ޝ:®ÕrËÈê'KÄ›¤Ô¬ô¶Ö±—Ó‡“ÿgðs¨šâ|ɉ:a™sYæcì ®?ÆÊ%ãM’¶^ï#!\‹|™Q¤Gà‘^ hRàÝ܇yâº| òtwúóž¹q*@‡yQch:n¨Kí=Oÿ>²Àsà2§Ø9l{Ã3Œv¦þ?EQ^Ä6.›>@§-‘—&Ûó=ºïñ !KC7\!jßëJuûÄàj©WÖ’@?ø]–½7EM £»d¯/Aüñ”eÃtìbo9ç:ÜòͱþMwu¥/xH$'ÍÉ[$̨OU=5Ç2€rXKÓ>2ÿWêPŸk&ؤS”íÍhÚFn-ä¼½8-Ž¢Z\¤k’-½¤lÛÅÐÑ:ì6½˜o’½®ä»™¼¦¬ûÑôMâI _Wýg ú°EþyÎ —ÿFˆ2 à‹Šƒ=-7Ô?rÛëìrv±EÝéAgd‚ž>ó?×Ò5 z¤ÿ¨:KhÚ ëÀ—t1èIVcG9]~9ÞVïSÈEÈJáœeʵ #%lE’ï›l ¥ëSÔa¢ð'q¼$iïä̇–BJžäÜ»–¾ååÙöíë¡_m’šÅnUGšL ÏZY÷88 šü‘}û™ô9€Dÿüó ª_T 97DnñM“y³KôXvQ¿ÎLúc°j'h?f3¸ÒìIzw$·å;næR¼%Àrø)š=çI=’Ux‹'çÔià‡.eD=Ú-1õin-s…µ ÇÖòÜûôËôt6A§*MDz–vS.«YšîÀy^HV·–ìNFAò ª_Ê©5ÍŸžÆw5²£RÒ@Á#ˆ€@Á#ˆ€@ÁŽæÝÿ¹}vk¦ÿ¸úþÙÉUô?vntö8&£ÈÏ\¹×WîîÞaìi©¡Aj|MJhÿË­ í 7ÕÊп®û±'…|Q—/Yoݦxxmö©¬ÍOÉ…"çðg¥'ÿ»¨{»åþ °µ¼6…ä ¾0| ¬5 yFòÛÞké³3ÞϘÐE×ê½¼"ÛÜm`àá@I7á,5r‡ËiX¹žà‰Kä·ém‡éµôÖþèþËyïŒè]*{^pë>çòÙÄÓo:nÚÕ‚ýõ³‹G²óìn^wÍÃ'FìÓ:î™ü”Ù‰rØuϸd§2þ Ïu¯;̬£ÇØWízeëi£ÛÐÖýÁK‰Ò:´ÛîÝgqë‹oø»ÛÔõŽ~’¹ùnÕ)-ÙäOgýtº‡è?óü1ÉÎÒß2óßDÅýŒ^®ùb®¨çâÀ­’צ°"@¡@ÁãzÞ<Ñ‚r媖ó^›ïtký¨J[0ÿ!w[ óRZz†—9sE–SûÊzdÞ£¼IΈü÷|µIžÃÆWMJ?¸âßFG6¤G¤‹Ó7ŒžÖpO„|«ò†Ÿ“3>\˜Ñq{)Ÿâå=ٽȯ+ŸÊí·~Uf¿ z6—ú>]â¸9£Õ…ëÌ«òÅ7.´šq³ZFÂÕW)&¾—l&N™Û¼ñÿ–ößÙ‡îÙ§üç´¬—‡ÆÞz•%++–ÌirÐÉJ=>÷Ç ^’~UÙ*ym )(øÂ 
eî7»%ùÜçÉ,ÒŒÌ#ü§~Ãf0w•]šË.D®Heärf#u֙ƬReúqfaÀ.2›sd§Àçˆ=ûxã—Ë3ç?öÅvŸí–C¯ð<òëºÊçg¿·_ö«¦k]s“ñå§’—å­62jgѽ¹±•»Ÿsûj†0å_²¼ê6SÑhíuöAýødÖhäév¤Å‘ûž¿¶A¿ÕÌ—”/¦Â*ym ) (xM¦ø~ñzØJ൦€‚G¡ ‚—{6#ºösZÃnÁЩ›Øç’¶['ÖL溺º +"ˆr899ñ]Ž&ž#9˜ªÕ˜ÇZk×4£ð?&9EªU‡WQ ¥mzB·0ø'f*j<ˆSjõõ]WÎP‡Ÿ"#ñú¯+:pÕ >$jÖ?}ï}ãúÏÒY£¢G3#& Wñ&11122’·6Rú0¹wÙ,èœISñž#ÅÎÑ0òaf™J"—ž‰mú¤»ÿdÊ aÿHÔ&øŸÍXzpã­+³] S-ÜÌ{ngÖf¨£Ÿ1 «?î(>k5ªÇª¿YÃé8ä*ÞØÙÙɯŠh*>pí4\:œíiØÜËšÌ)Úmô—´;æ0dçä¤QŽTH8^Ò«Š ‡å¤Î¡[†¯vIÅ›JÞc*œ –Ì@4•˜§pùß@FšÄS·9<¹ ªBÏ1ÐÃ\Ü`Ó<Q-j<ƒ\%ùô[oTšôƒ? ¢ÁžïÍp_2Ø:ÀË0Æh×úŒ‡Ñ¿2R¼¨Uðr­;õjLúA> ÑnùÁÑ?˜ëm‚…ij=UCïqÐo¬;«˜Q3j<{†·?Üáí0ÿf‰‡ÿ3„gøÊë—pxøü å,à#Û½A[zŒ†þSa}ö˜6H G­‚çÎðãü™qŽˆÚùÉH±r Ní‚ë§œõ[ÂàŸá§MÌ„h8j|¢ÿ©ôzé^ çù.§üõzuÄ3|ñ^Ë%×á2\Ü`Ä\X¼OÁ‰”.Ô*x³½˜îN¾WGÅDD$'×28)é”RB£60f!^‡ µ ^ŽÔ'¿?³›S߀ïÏ&%dÃS»1Z{")çð4…Š7CÔcØ2—¹&—ad `ÊïÌ„ ,%EðuçÔçûÑ1†¤ÄÔÌwƵª?©PñFpÜñ‡-s˜WÙ2j:ÂäU°öL¶ArC­‚O íg±§§1þ›þÌiÓ·ÎØzu~jE>–WTÅ›'Á°i&<˜íiܦ­ƒ=·³=R`Ô*xƒú«*ìjXcñ{j;ý®¹}CíBàó;Øø³ÂrG·vH+® ˆÒ¨Uðé6þìé+ÛÂËuõïݬ‹ÿé@'àðøk)ˆ³$‹†ÆÌ6>!GŠõ ´2ÓÈO>íó«G¯ 0ˆ fõÖN…ð»ŒmZ’>C×á0u5 n#MaÂä'™×é$Òˆ«Ôù¿—ž´ŽyæÇ;/ ›\YáwŒsnFwùf§j¼Iëþ&|Ÿ&‘œë§Áv°7Žv½aæŸÐ_Ú§"$<~lÈw}ñ—.õ©WÕz=x|„œÀßé: âÏŽËøq§Ë“}6]çv7jàOÝ6®¬BQ”çÚ%Înp8&j`K‡"ÛÇ¡GA;}E«Ò²!hyç[;Ç··£ÇÂSßV;¨YðÉw6½¨7½¾†<¬‹¸+=™®‘,í .´´ÁsÌ÷b&)Ü|’y¸ov‚´™Ew¶£7ÇáÑôpIRŸÍti[ ä–¬÷½{ÒaVn iz¡lñ¨UðÆÍ¦çóò]ž¯í—…ìkrèý Ö|=ûëÿ¬¦øTêœ#5=ó]Bò»ÏÉ _R‰ñ))åƒèküç/z:Ú!‘o?%¦ç›_ZÕ¥®>Œ‘­ÕÎÑú?¹ÅÖõ,mCývfùÓÛÙ“šU¾¼¡Ýý‚–;耣Q¥‡:ï˜|»¿Âîßj[–÷Q¶nµ ¦¯>$ÉÍ DÉ©ZZPÖİœ™¡¶–Vc=}r¦LÇ ÜÜÜÄ@G[KWGÛ̈©{d §cl(y‚ÉeÈ—”´Œ¯iÙ#m‘’2ó$æ ósRªX,&ÿ Ö“’•%&ÿb¤g0¯9m+›¿|+’å¤*š‘ ²E.•\nÔ·­T׺BC»Êµ©òÄ62PZ ÛçKºÈÉ„`aI~ñ§B˜£˜{½AÞ£ÃÚ-¢éEà{Q¤ðh}Î&“#Ò.¼Ä_ø¦ýØ©›fdºiŒ;"Ÿó;PºJ‘ÙuäÑãÁO«dÅöí2dà_Õ«S“aõd¸z‚©Äò0€ñtÁÔT›¸‚ŸÑ(È]c 2ñ ¹#»p÷Å£¨wäØñèå»°˜÷܃öGaK˜°œïá‘ù¾Mm*©ö€»™îúìÿ]@MöòøóÚ²¾ ß›×êI_L¾Þ¦jö濇ß<™ªæ„*Öð&†Í‚{˜ Aò€Ü#©W«ÚüUwbéýôªübõÞËèÞË$IrÏól<Ï3.£æ²ÑSYL|ƨà–z¯{ÿÅ Ö0;‘};÷ý\< ¿Ÿä;‘¢^Áû[^]†.XuíÀ‹eêò“ ¾Go¢VÁ§…/þÒ'$„òƒö3ÌÇ…§í´×çgAD…¨Uðúö[˜nnØZ?î¬ÀOFDŨUð©×DÙÏ*ði],ºiYo=½uˆWžoùê||m‹|g^ˆü,˜.nüÞç^ñA|Q«àÍ;±X sWÅ·üŠ7‚ä‹Z Hñ¢VÁË-‡ H1 VÁË-‡ H1 VÁªµ‚ J£VÁªµ‚ J£VÁCzËû|‚",áõÀÖN.îw©Æ‰1¶ü\‚¨õ ^»¢±±®ŽŽ–X[ǸJyëV'ØQü\2‡¶¬u\"oýݳ¾U„™KxàQ~A¾…z¯Ó©ÎÛ¶Ž5¹…‹‰†}F)¤+ sàÖ‹#–h‹ü'Þ ÝÀç’:±OøoÜ`šÜ%''‡……ñÖAeÐÒœît ˆZŸú¨ìÊ€§¦Ç6ßVûóÑ«øß.ûg™»1¦¬ëhݺ5™988H}¢TÛ¾$ VÁg¦êedš¸l³'¶Å€üdATŒZ_¸^kQµ ßÃ#Hñ¢VÁã{x)^Ô*x€ÈoĶ/žô²/«æo‚ B@Í2{óöm¦¶®¾NÐ#¢4j|¯é«×Ù= QáëŽNïÝ„ŸŒ ˆJQ›àžÙ6|ÚÆ!K·Íîe¹"äy!ÉŠ.wê&ö¹¤íÆU¼ fÜ"ÑÕ«W³"¹™™©£ƒu „¹yÁ—šÚïØcâƒ9›k$› P Ÿ¡y‡5µ)OÇØ0¶€“ÓùMPPP‹-r"¹ÿ¨‚ÀŽVTŠP›àsR µ3h_y*ßוüüG¾KÃ)A‚W†Ï7—×ë¿mÒ¾û¿v¨ÄO:âñíë\Nk~ó@oI££Ã½ø6Cb'=ª]çÇáûîO3ÜÁýœ&ç04ýVJ?qÜ}š¦ûÕ ¡‡øiBGkÇÕ§±ÇÇÜ}+mtDç0¤ÍΠú}‚¢¢z»n{'ù9M„tž¡é?°R"øC¡Ì(|Ç5<E‡U¿ÝVÓ­SÙ†$IèyùœÌ¯]™Ã-Ê~N9 Í¥”A‚€‚G‚/*Üít÷¿7ÊF±ܹ¡¿¿Å÷uÚ-º<ár-},™§n¼-°‹»Zä¹ë§{†ÿ£ë¹hˆó­½Ó^vØ4¼zAÇüÊïûþHñ‚‚/2ô댲Ñé>ãÂÙ]8‡Döœ4ðŽ~”tù›^  ˜¤ ÏíghסÔÛヴú¾6°QŸc÷u «Iÿ÷v›-‹n¿ ·-&ø‡é@¦c¯Ã÷UìÓ‘U»Œ¬wÃΈ÷÷²xuv;t—ôîÝ_nkÇÜ—èrDï–—c*5·³­ ùHpÊô¨{å7Çdõ=:­fï¨{’oÛ0ÛN#·©Ã-Ç\6ÙÝ¥\öÞ9Ù幩ݚzq_ ¨ |‘¢MÔ~pTùJ)‘`ç Æu˜dÃ2-l%2&˜š1çØŒÔtC-01áηڦ¢l²³•5e®Y™Y2§Ú©)iäCÏÌTæRÜ‹yǵM<{ÜÛÅ.dt_öèìÿ*K’@,&3ÃÒE[aSâæ:º²¢É5ˆÐ–µ|äoêè‹{̇È׺âå@ñƒ‚/*dÒCöž"ç1l¼”œv]¹TéP\Ò¾žŒM 9Næ]÷2õ… ×üg´2"ÇÐÓÙOгÃ’$[«pl óQ©Ç>ÙŽx[ãøO¢v‚îÙÉÇQöÜ_¥ÿ1}v› ßVqS;e™wa÷Î}‡jymJš¹«|‰b                   BWÌö2ˆ ˆÀ3<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚<‚|Q‘ñú´M³‰²Åýb~¨¨#—ž;îåF»šóý ò£vÑ´wNd,N®Kõ¼•۾Ɏ.ÔÒÇM6Ògú3Ëyl<{…œßÑPðEÅĶñú©Ž>±S­´s™JGlågRsWš(1ÇâÛ„l_Á¹øv‡Ñ?æ8LxÓ4ó!âû _T è`æ9uþ(ŸÕåôÀ°ÁÿèƹËÝnq` c•ëA‡n¿4§ÍÈý/¸ü~¸¡dåÌw”u#‰ G³JŒ£(Šù´ŸF_žÃ?Çr‹až”W7 ü)ô¯.åâþîÚñ漈me<2Öñgßœü’žnKÉÖâ]HÏðÀß»”󳜧ÇyDò‹‚,úOåî£ßž Ͼ:0—»ÈŒ§¬s+Vé»íîŸ= ’G¶/DyPðEEçáç}·»ÚS4¸Iýg~Æ™±‹3bhš\Ù·¤¨»_·wúý:ý;“Yüñ”eÃÍ ¼N%š;µô¦¨[‰Ð…1éhšÖ…LÊ:&c޵$«"æ®Î =ºÐÞ è›KV<\pÕ%;OF,QûKšÖx¾µ³S·Ýd_yÞJd“ËÞOMk6ïóôˆCɽ£w_—M÷ŽºWŽÚÙ¥õÙÜ7t¨wãé—^Ìv0 ¶ EÑssÎ`:,Ù#[AœÉ}þ{ntª±Iê~üEjÁ·òP¹=ï_4d¾¢¬›?‰¥Ëh3KåÊCÒ;0® p7 
@›=wmzNokÒsaÀËñ¶z/Oi0"6{õ¤ÿV\2cOòYõ(«ät10½Ï®›q ® €¼ZòàèÑþözA'æt)ÃØ (˜M>?¸ìË'æMν;ß}x€¢jx¿^î^U§§#ì<ç6Ü2þÊ~n#]Ow5×{ýçéâ ý®o'Ûè¾äAT ¾hЩæ;¿u]+ö¦—åâ³8ÐÕZà¤mÅÝ WîßÌFõª8ÈÅv©$‹±,3˜›ØÉ=3€ÏƒÄ>Íȧ5·®óbù6Zàá@Œ Û™’süZø`c¿x—B]«u®åm¥ˆ*ÈéòØ»Aø ûf?¸Óן¼õ‹m j.hÙ‹æQåÂ_êµog»ÀÔ™ÜÛ%ž1>·Ú¶¤¸·¢Ÿ+HÙÎåAÁŽÐÙoå$L8ù‚¹Ä—Òfýz½Ü²ì©8˜Ëîá¥(>“‡ìÇòÒU$‹¿…пegbî6ްÉv° òz4HÑ#݈”<6ž3ƒY‹e’,zV’ïÌ>~#Ÿµ¦^ §Ê­"ŸGJAò *_jq§˜gïå»om pA|©…Ò.6x’ A A A A A ¾| œöÙé®j÷‹mÝÍ=N…mìjùóñ_¹ÛÕÙú«îîÞal‘Ÿ¸r E$‹,ƒÄÈJÛÞÇbÂÙ l-= ¸;UáÝ¿ñ ø5ä˜;?!WòúRÿǘW¾ Þþ#?CNòÚRPð…àá¡@'i…÷ûà÷‰:.ã|’àFvõr «m½:îÃo~…Ö&Ì’{רäп#éWë ßvãÙƒê†à³Ø}Ü÷t*ú{r¿~î`á¿J–'½ eû Wú†œéêàöržƒÄ3Ò‘•‹T9캇dùGÚÿ.§n_ô>ühñ’¾§—þ¦±]ÓãÆ¶Pª´Œ•åÖ­ƒ_2À]Wä·qÏñ5k½Üw® ý?w—¿£Glü­õ•uÃÖ_Zw%|P¸µeœÇfŸ>[‚6÷¶ÌþžÒB—·n\ùál€³“Òü6î?¿fÅQn•¼6%ÝR Pð…àЕÄzéYÏöŒ°·?ê³!ñh˜êäeKÊ*Éz…}óݸ4§ë”^ó¤Î̯A±4¤…?ô8HÏÕîæò¿sW†¿ö8L/Ö­g£xvM /Ës¡{ƒ ×ï ¨AÜ#üaëÜ<(õä—۲ü’Öïé ¾áqÌAAdžÄÐ_‚fßfOøcÏ?7¯õé?W…MÖÒqæ›N¿|wÓJï›+ú:ô·åFì¦gjqÛ4ï·…ž·“\ ÛòìïéÛÎÃAòÏqYyS¡¾í2;®¦'m`WY‘û¦zc…_þxNæM½(‚å8õ‹žaž¶sg:û’þ›ÈrÊ£gÁÌSh:äó\ÀJHô¯jî@ìÚFÙ¹˜:´òyV9ü·`òœ÷­–mëÁæ0i•íjŸ½&·®|*—_¿ª,CpSãš­džåCÖýõfcUûó•V뙳\ê;€ÔÆNÃÿØ4;C,†Ô8æûpԭµÐSüž"?Y#üÞõCŸ±òIõ«JWÉkSH!AÁ‚€_ZÕZqã徃í§tµ&ŽOÉŠ9¼ÇfB¿qyØüõ•Á¼ÍáÍ·»±lÖß'äìè=+¯t¥œHtO^ŽkQÿÉ­DÅ<'ZÏ»cºgËAÇš£ ×¸ðè$—±Ùž¡ØÊ­›–Úc„â¾aÅ,ã×_Ó÷ ™ÖôXoÖ‘ôpú-z„±bööJ‚VS¯r;5&²n/øú¸ù¬µ ïÞ%Û4k{ãôSW×rÍ»î½û¯cöæä¿çAI3ßÏ1!IC悹CÎ$†¼6…|!pY{“Ì-¦ì#óŽ3öÔ³LB^6‹ä‰0uN%5Ne‹,CvN9¤NýCSœÈGÈYOÆù¿ÈÜÙüO0︽1-é8¦ ù<{G3«=zÎh{z=2s÷0(®+ŸJ¾ž+·_ø‡Ìæì]ù@rf5=9”³¬G"§â]ÇæéBÈÑij“é¯M3ÍsVô¬Mæwýg»MGþ{rþ²Ö/ýÊ9“¸UòÚR(PðHþ¼:¿¼ù˜m­9Ë^Å#š ^ƒ)¶S\µçÓô|¾W†â R’AÁ#ˆ€@Á#ˆ€@Á#ˆ€(é‚ñ_ç<2Žõžïì<÷êù–í6,¶<ÛaßŶï…z÷âçFä›”tÁ[w˜élÊÔ'[ ¦š´8~-Ôݨœ}øaFð!!!éééÄ‹ÅZZlM,Q:::M›6å{5œ’.x9Ä«'ÎñÛ¶z–ó²b*©Ö¸±d”’€€¹áD9ž?Îwi>#ø™ ,„M§_<ÚW—¢¨qGBù9É <÷¶yÝ#ZV郦WË¥#RP4@ð‚¨ <‚<‚5 ^ì³kùê­ûŸÇ'•©Zg䄟gíŽoÕ¤HQ“àÿÛr‹šì¹ÀÍsÌ~nÓ›6ÓÛ3=) R$¨Iðfí&wæûì»MWè‡AU£&Á³ìëÙÐýÔƒ 5-w<‹j^{TvÇ ‚ êü©gµ†ke\2h¨­gcŒ½”!H‘£NÁo\Y…¢ì.†GÈ¿ßÑ­üd)f’Áï œßÏ’%kËZð÷~ G‚·ì³™î³™5;ŒÆqâ'ê1øî¿”À,–³€Oñ’¤ÚšºV’Ë[:P§à‡Öl°÷ù#¼”GŠ{WÁg/üw ÄYÌ¢¾¤¥J’Z÷€.ƒÀ¥»,oÂóç(xUR»JET;R„deÁ³pÖ î^á'™–סÐu8T¯ËO*Õ¨Sð“–´’ ÉàLÓÅÔ%#Rj!7áÁ¹¿!2GcÊjÕ¡ÛHFáe+ò“„„:?}úš¦ùÞx´ìtèóÍåõúo›´ïþdÜñk‡ÒwÁ…’÷¯àÜ?à»âãøIÍàÇaÐyÈ Ð#xÔ)øuË*í»õrXKÛoÔ¨=|îgÓ¡ÍÒ™ÁäèàÖjÎqgÀÍßA®Ç›ÔÔÔÈÈHÞêH)C÷ý+ó'ͯŸÐIüÄKJvl#réõ¥qoÜ+…ÅÂÀ>É+]¨SðUûìÎ÷ñY½o3f ÀëDftFCQÌk\ùoììì$«!¥†WQpvó¤{.O›žà6 šwä–ŒÙI…`7ª#ñ¿mA–;)è3ÜwÓ›ÖyÖ¥X/R”&z^»ï½•œÁÏ”ÞÑŒÂÏxA¿–´ïË܇7i§àG ƒšoÖnb§¬³;—®Þv 2>ɼrÍaãgÌŸ{]z®Ç›ÞGBÈüÁ‰þÌØe¬” ’“àÌn8ý¼‘\µIàÞc44l£àG”@M‚gÐî>n!™øn¤Ô#ÃÅÃpj„ßå'¹¸A÷Ñà”£e¢"Ô(xDH„Þ‚“;àê ¾¿¾3ô ˜Ç4H1 NÁ»;¸Ë†CìPo”ÿ㽠ɈFóþü»“9“+vy,,¡ïè1 Uûˆ )j¼8ÑÒÒ^ «xÓz¶bDùï_ðÞO‚œºzà6zë: ~D¨IðZfq¨rƒ”t?Á±Íp|+¤$+ø¶÷ÉЪ›‚)¨Ið,؆Fò8o„›çœ†Ðg ˜ e]qµä£NÁcÃ…CphÄD(8íêÃYо·‚)Ù¨SðØFÉ%3ƒ¹P?°–_¿\¨wÂHÑLÔ)xì£d‘š ‡6ÂÑMš’ílÐ ê9Á ŸÀ¬l¶ÑXÔ)øxŸÉÛ¾Ž|¸`õŽ£ƒé‰5ዯ_`ÿjæÁ9¥s84ƒè2úOa°#¥ u ~æÜWû9¹/€Š ÜaÈ0ðÜÏÏéipx\i왼NcFífå`ølè3´uøù‘R„:¿~­õ¼búÕfÝ~2¢ZNï¯eÌ‹4»ùˆñ˜–‘ó˜§ëZßh Œ”*Ô)øJ®›~#C“Y ~"¢ }a×bHùoc%}C¦k§Þž £ÎÐ#êBQw§¨Àì¥|º¸z{iE“‘[§¼?^{¼ù&ob`Ç|¸v*ÛS¯´üFÏÇoˆ:ï-WÙ®}Ûer)¹ðç¢+Ñq1][/z›õP¾ÇD‘M°w¥ä¶¼33'"÷\ ¶ŠùA£NÁË‘þáU8ß§HE“ô¬¬ŒÔ2U_Ç09e=Þq]\¥¤¤„……ɯRê1޼oq|“QLXrÍÆÆÏ™þÒ*Y¾í÷SR}IޝûŸ¨¤$Åf?¥‚"x½üëÕ”OkCzš¬ëÎÝ¢…äö? 
ÀÁAg3rß½ÀÓû–¶±s'Ø|ôôõ¬r#ßvq¥bïá¼Âh×=ü«Sðã›ôÚqO®G)àM4ü>‘©'£ë˜ºG,«ÆüÀwågwæÄ^¹ÓÈ ID̉C–ì­Ûs ñ ³c%«×ÒTq…¤3GQ3sâ~¼ƒÞMçº{×r!÷ i9P§à?¼½#ë—>ßÖr%šÐ@X5^¿dnËÉ¥;™{,„!?ó³!¥š9[ò]ysè¿wÝzV¢}þj:¸+‘áÁEsHk|e”ÓËLrº‘½ŸˆJ0kÖfÓÝÃîUR-2”øS"ÀÎudêÀÞ`€¶Öd¯“ê¼|k9äêIøgSO†ÃÈ~ÚVçù.½\FQ'ýûhœ€6sè²êRÛÇ®ý<³GãÃÏÊÖ2ªææQÔ¸*-jV¹5FK)jG…§¶7à:€¥<×.q¶ƒ®{o liwCdû8ô279P§à5•c›açBÈÊ”,ZXÂÜí8Â)’æÌ³:`ŸØ‘ùÌidúƒK‘>¢×¹ös¶Xw&dûèž\Øß“;)Z¹-¤iÉà«rÀôÜŠ”Úù NÁÿÚmÀŠsGùÞ’ ¹Pÿk13ƒŒºNŒÎñ!R4Ñ‹:u ~úT›‹c;9Z•ØÕ²ÒRRÿ˜kä»G扪ÕöT«oÒµ¾¤&î{,J%§’‰,~JLù”$×ÁsLô“¾¦É«U0{õ!Q¶X‹*ÿŒ–¾¢hV«Êgod‹­êZÞ|'[lÛÀêÚ#i¯Uí­¯>Ì\½£õr‹mê[]ÍÎÌÛ”cJ_¼“-6®Y%þó—JeŒ+–1*gjHU0#_»Jy‹²&d±jòµ‰!Ëhê¼E×5üþ’6Õæ\èÚnù<+¿û.¶~-Ô»?‘Þ{Qò»ÏÉï˜éƒè+ñ|LLyó)éí§ä÷ Éß–%¡,¤îÉòé-~ªMnÌYÏNíFS´:§ƒ¼x¡8Xb ÑÑÑ276¨cYžY43ÔÖÒªRÎ$=3Ë̈yuJ<†zºF’n¡É¢Ž¶Vf–XOG۔͠¯§“–.½‰0ÐÓžÒ«™Ü¢ÎàÙ­ÈåH׿vé™™ÜñEWG»¶eyrTú’’ž–‘YÎÔ¨®MÅ”´ –%[‘ÿé ý!‘~/9‘ïLV”¥rßJ¶˜+e­*™ÛT.CæÖæ6el«”!srágEŠu  }î×}7*ô®«³êÜa³o6YwD'ô¯š£ÿ ;>2Âݨœ}øaFð!!!\7­—\6/+ †ú:åL Ê›Ô4üºäÕÁ†"i›s²ñF}Ã:Ž37ѯm¨wËX¯Œq¾õŠÞ?H¡?ézfò‹¼Eœö^”ú†4™£ç×÷¢”7Ÿ“ãRâ¾¾ú˜,;Äççä»r—'ߦrY#ëŠ&6f¶dªdjkajcAŽrêýe2$$(ŽºStHjãHªßíïç<ôx`âÍ%å'7úê7üZÓ Óë´ìwàÖñaü :ÿ­+[·úíÆíçî[ÎnÓrÞõ[yÖ[æ¼Q1äÉU÷úCª›1ççÓªœ¿qãÆœ1«{LóFõɹ”œÉ)™Œ Vº¸HXá‘Ý aÌ"î¥ZcvB”!#3+&^õæst|BÔëbD½aæ¯?J:zûù+™n?¯¸^.Ô¬VŽ\ª0U¾žm%{« äj‚ŸIu¨¸Ç‘ߎS×–ÎÝ;uOðÜ.ˆS8×» ÍÂnQ<¤o«³•(Û¼Õ"öñÞP³aƒaú¡-5·¦Ã0å)˜$Š]íLéÕaV¢A…´¬>=Íš¢øEô‹_JQÔ¸#¡¼ ½šVuq±ç9¿Ed(,÷È Q[&ýƌrCQ£jY2ñrƒž¿úôŒþøŒþDŒˆØdNŽ\*±Ét.ø[O¤ÉQÞÁªb=ÛŠõl*±óŠÕ«h×* c¾GÆeæbáßOš^ñÖ{`|Ö‘cº©™Ž½<Ü'7änÊ“£å‡îKO_I>Çô’ •~x¢NÁϾ$…żý>"îããèw±¹K†¯©÷ž¿!9Œ ôT¯ä`]¡¾-™W$sk‹~ŽïæÂ!Ø0C2*°•áì…ºÍò 9˜¸Ô³$?AåíÚÏüû·%²%CïÛQS¡žîÔ—3Ä´æ!-W3^üSì´[Ù‹ß:¿fi¥É;.mÛ>äÄŠ„Ñÿã'g÷Â3³G>¶ªó½°#g¤D Quþ ÿWÚR6Z·Žåû¾ u ¾Zß[Xéÿ¢ükýç‡ËÂn‹(œèœ¨AEÔ)xÕò¶I—Ê3×Ak~‚ RJàŸ÷žQÕŽ ßD½‚/DÕZA”G‚/TÕZA”G‚/TÕZA”G‚/TÕZA”G‚/Öªµ‚¨WðCk6Øûü‘ò-þ) ê|í* ¥ö¹-m~¸èÑì=Þ ò Ô)øIKZ¼_úôçÛzßžp<Ü6×oRSS##¿ÕRA EñõxSŒ¨SðÓ§?¡ Ü5ýàvË™Oç!Ö¹÷x`g‡}È"*CågJêüºe•öÝz9¬¥mAz­eF­`»þj~gv®=Þ ’/ê|Õ>;‡ó}ß„koÔA¹oDÀ¨Sð‚3(xê¼dP{†¬6 Æ_´‹—AÕ¢NÁË¡jGb@‚/¢áòÉ u AbF½‚Ç0¤XQ§à± )fÔ)xìAŠu ;À@bF‚ìAŠu þåž~À´ß‡·º}tÉš/ÃZžòóé{…j! H¡P§àÿ·.ë`h;b´² Ÿs}Ù%tRŸ½‚äŠ:ÿdžê¿¼¹dPóÛÇ–×ùsÓ³cêŒpãg’òáÆZÇf<2ks½þÛ&í»ÿk‡JüL‚|u ¾bçõ+XÃyàgòÑþ¤bºZÿBÓ¿ôm82ÚøMÓn­æÀÍßù™ù&ê|zì™:-'¤J–òéâŠùþòô+{¶·@lCQ ç ⺸JII “Ï Ê”Ä _ÊP§à'÷Üò‚Žì¶ëí9O«FÝÿá'+òöüÜÿ·wpMcÀ_[ʦlAÜ=p°QÁ­œ¨çÀ î‰ ÷¨§ž{qwîó'@\¸Q7œˆ¢ìÕÒþ“–QÂ8(4¿¯ý`òò’–’×_^šqÏz¡›÷³åóäìäèz¿ÊÊmmme¡¡¡3|\⪌™ifSQ»óñžZíõEB1kÈ©Ò~™›t ûHêç½#= O€ÿö3üœKûÛ8 ¾¸¢‘©©éÆð—ÌÉPÖ~fƒŸÙuÌ…°ƒÔ@l¬/s”ƒŸÙàÇ­~îþÛ¶«}ÉUkàûýÌoÔaE;f”£ŸÙàq><Àö3<·øÁ~fƒÇùð?ØÏlð8àû™ ^˜ƒóá~¨ŸÙà•Dÿz·ï`Ü{åÂaØ[ð#üÌÏQ©ºíÜ=B$ø´Üð©ïÓÀÌP¦~fƒÆßíÙÁÓzÜö¹[¯ŒcN€²÷3<ÑÿåØí¼’r„„‡Ë[”¯ŸÙàù<"ÉŽ_8vè©„zÇö¬4æ1+@ÙúI >'q¶wï Iõ¶nZuÿŠÊõ¨UÌ % ôlæºë\K¯+»1§@©~Rƒ'D"‘p¸\÷«Oœ9ü¤†§šnƒ'û ¡|dd¤ìŠ7III!!!ŒÊP”X,ærqóËÎήS§³´’ûI ž§³èϳÔÿ’¬x+çL‡ÞSíZn¤üEßL+“ú™©i"µ¶¶– „††:::æWƒ’àúBÌ¢Êï'5ø<£¹[ƒæéN;òE;íü7455õ9ð9þËOnð¾x½ž«l¬?³”sssfoÔRÈ7ªÂ4øïV­Z5foÔ21Éí6*ÅiððŸ£Á‹;7ªöTËñIøA|—Ïr{ƒsÇ쌩¶UvǞѪ[¸‡ÌŽá­æ„é¼yp´[îê´¿È@¥_Á¢Á§ßêwþmOá_‘¤™s"Ëi5óÒÃÁ}ë‡Ì²;ö$H¢¸‡-ëá›QÁ±[TIúÜÕ)!o½Ê¨ü+˜B4xQ²@•«¤Z%QHH%ÿ{”޲ õsü;‘2‘Þ±ça0ê³TÖ»¨áÇ´ãê9å®NiÉ•Sˆ¯ÙäÏ?×ì°Râ½(Yk¶ƒÃ¼k!ºæ {«…ÈîØ3Ÿ¿„1Àœ‡´ZpÕ>±HY«ÉŸ»¥«Ó -£ÿ.‡ßmí‘wF^³—µÃÍÇBúíù×µ[Ó[ߌÓ{{‡ˆݬªÅ¶»w1€OHlðÒCÖß>§“¹gƒ.IÙwÖFÅ6T¡—ôêóGlõÙ>×9*$ãS­}ccå[šp‹Õ¹Õ,ÄÚùEòK“•\nó~¤ôź×ÎNÈ!úÜÌ^ö–·I³¨ë”“ƒ7º°dÎ ë] w7+´(qRˆ µûDîmžnjêi/ t³—”°¨îUlLk¼'ªKNGl\üç”+4ørÃÑZ®· A¼¢Ø{h=«356vAçê̓^¿¢? 
’ Ivêé[%’cÓûÈ}Nã?Œõ‹?Þ?^¼›d¿?÷"6wNáó©Y#bcçÆêõ¯ÆA{=#Wõ±yÂ%9æÕzç‰ãå—f$½ÚÍöК­ófáØS/rƒ«õ¾ëÏy$§q·=÷wÄÙ,ˆõ§>žH„wÁp×ò‹:ÓÕå|T¬ìº%²WBסÕ¦„E¹ú´ÙywEºÿÓ Á—#ÝÖ+Fu™µq3s!n4©ŸªêÕ Š4[Hÿ¼ZT­pßA¦¾ôøŸT»T‘;1;#z°›;õ¿±ÛàsiÅùöݯÑþŸ×‚~©Ô{òKs“&«…Þç¼É¹Bâ,GÓÿóêEŸ dˆSmÕüIÃ…ô¢ö€üJrJ\”À­ÊžŽ¦îM9óÏ+u¹9àAƒ/__vó—_ J^ÒWè"Å\Í+-RL•Ì/l­fÎkoï÷4Îz°èeA»“Ò°úóèûžUâÎ4üµCái´M_åPMˆïä·´ÉÝŽ½u7¡W€ØCCôzt¯óL:cΓšóæ,¢ð¢Üª¿äý.òW',eQ“NM–vø' Ãÿ3 Á—3¥j= þ˯Dàêgjêcl×Dµ˜ÆNS¯×¾‘é¿U»Ý;»‘[¡¶ÀÔtK—9Ç7[®Æ«º\i¶©iÀuWw®f§>mžŒ5n¤¦—[ÄÑ-º4ÕF~V'ýëù¬Í6´8sõ¬)!ýNÜðl^ó6§éãˆ@’œ·¼Â /ªÁÉ NuLÓ-½n_6}˜Ä´¶—=½÷°”EI&v¶9ï±^/W) Á—;½Ù-Ugä©:ôÿü;±yýpBdSs»ßÒÊîONçO¥Të4'6vŽl˜ÑK¯Ù}al÷…E'å ó¶^xœ_({"ù¥å«Ýiê³Ø©rª7_ææ½~Ù2å‡ -Š«{-:÷—²™t8vRnq)‹Z¹:¿üphð,‚À"hð,‚À"hð,‚À"hð,‚À"hð,‚À"hð,‚À"hð,‚À"hð,‚À"hð,‚À"hð,‚À"hð,‚À"hð,‚À"hð,‚À"hð,‚À"hð,‚À"hð,‚À"hð,‚À"hð,‚À"hð,‚À"J‰„Y•¶è@!à@!à@!à@!à@!à@!à@!à@!à@!à@!à@!à@!à@!à@!à@!à@!à@!à@!à@!à@!à¡R§<=°}_ÐÅ›ÏâREÔŠ¬SÝÊ®­Çï~-«©r˜µ¿Yr°§Åpbè.`N,ޗ̨Sú(!9ÉŽm¿R{ÄÈ_Ô™K*sÙoN.Ÿ¶d÷•W©„h5›ù¿C£ê*ËM.ý¥yåÅø’:PðPÙˆN³œFÌ:Í\ø×rÛzU4¹Ù‰±¯®Û×nÑöؾ®µvÙ¥|Ù¸ÆÆ2 ó1¦&÷–%â°‘…å$åÂ`gŸ‘qÿ7çµ2âåeùt/ªô_~*qûþÕö5tùYŸ^Ü8aôZŸ“‡n¯wÐÈ_T.ÉWÌîíàëµfíe“ÄÐ-“GûuÞã÷Ë¢°ãƒ«]“£ºÈQ̼±œvbÓCÛ‘]{Õ»~ÔËŒZ¢øã©™¿¿ J-Oµ.>ÝEÿîûÍ~r¸ˆ˜u™·!¤WsòîÆ%“¦v˜ªì¸:l_/i_¼Ølûj_ü» ßœÚÝç`ŒVëEgž ªŸ»…$|±±ƒóâ(¢b3zãE{­7§ü½l½*<ë89ÔÑçlš±ûÔ%»–:ÖÓç%½¼~tÍÌΦ©º¬ Ûùk•/Kç²ZÀÆlqW×aÚñ¦ÉF$ÙŸ^ܽvíòö‘3úòˆøÓ¹QŽƒþ—LÔë8»9XY:Ø×ªÛÐìõì.3"©‰ÌÚR_1 O‰Wð¼qýbxÊ%ï§þZ*¹ÝøÀÞ½,÷v9°'èx.R»`Æ‘HÄô ãÊ¿Øï÷%¿{BŠë‰Çó.÷s™q”µÛëK§ÇÕËýâ„£¤B,Q³Iÿ€R\eâÈÉ–Û¨ÊI‰O-£žX$¤§6ZzãÔÓúØÉ²ÿ¾{9*Mç•;zßì{`¡c€¶c& êÒʺ¶‘'ãËÈK×.Üvƒsï©ôîe»~ÖA3#g´îýa³¿·“qöó 'Ùú@däuìÚ²æ_¢>à5[O]µ²_¬77Ž®›4æÐ3úi>½ý,$‚"iù³ÜŸîÒûãÖUC›©½9·v„¿¢ˆ’Ý\?ÛoéêñøT¬äÄݙҮ‰VALæwãç÷}@Åbóù3KYº¦Ýœùög&‡Ïsp¼=ÿw?é.ú›–Nœ÷¿X¢ÔbÖŒRfýz_ò»[ èQ_[P?è©íR·Žë—·nörßÕß[Ò‡FjØÎ^ârrLÈl§öÑkÖLhc’t}Ûä‘ëÈÏ­bfmFÂcnPå:˶0HrvÛœi› ÕÑt˜·Ðá¬o˜_‹—ýü§öq¬-ÈùüêNÐú³_ðš- ;ìeVVËÁç(THX1¡ráh·úýnìŠÄ'vl?0å¯éïRéþ•’n­¦­:/:àÑX'·—¥d>(èmï—§×/ýcrÛÕñYDÅĺ³ß¡ÃŒ¤«½†Ó¦»!mÏø}E·¦ó¨êzµš·é¹õ\»ì€þ#÷-ö=Ôi_Ó‚ç•úŠYì·‡Ì}·pPó~ÏR5ª;^2Õ£®ú7u“­Vm4|ÆÎ5¬1ìºÿÚ&çü}é*–“÷Í>Gï»6»´«Qiû”Lû¾îù1b×Ê5o9÷³ˆ¨W±tî¾âÄè^MõËøcàk~wõ&~W;NnÕgŸÆo–];àUM‰gä±çEÇ·çþ˜·tVë1|s×QËÏný£¥OÞ1€ô/>3ì~› —n_û›Ã2¢lÒÌsüâKÛçµVP‡(UýíЫžŸîì_»æ¯É—ÆgJW“æíúì¾1¬µ©t€R- B*ã– ðCðt¬<|×zø2Ë™8j5;NÙÚq ³\†£V×sÑ!ÏEŒâ•·»®Ì,rØ×ͲéÒMyUä1[ú(áWí´øˆ3Sˆ¦zöÛËF]ÿ”hvØzbŠÅ$’íàeû/c–çc>u¥¿ÔoøÝ ½ÍÖrÕÝØU…ʈJµvSv´ËÿÃ%ÿ!?•ÂÓ·óYsÜg|sÉžžM¿ù;ûÍg–ç+«åT<x€Ê&ãÎ|·ßÆä(éÕwp}‚{µ/w`<@e£á°:âÍjf©B+Úï€ÿ‚€P@x„€P@x„€‡ò"Šù«£íÌG¹W8—¤ÜÝ<~ìêàiJÜM[0ÆÕ˜¾>Ì×–Hö´ÝæQøòé²ÂóÞÛZäßCåËÙÜ(2£†Y³.£Ìõj"(í|óŸF’ùêüÙlç®õT™SÊF±oò7<Ñ¿ÙÈp‡Í7ww1(ãwók_g¡»ìÐ8ZÕíº]0ë·†š%œ»ÿ ¾öU”<”QÜùC‰½Žü³Ú¶ÐMÌsÞíóófιgôMQ„/6¸9ÿ–~ãpgÁW–ëÊ/´dZrG_—ò![tRrð6}@áÊ⸽]‡M p)³Ë¶—”Ë“ÇíôŽ(·€§o{ãÎ,ü&9±{‡ÎSÙ|uãÏ¡{mŽôÿé×-ú‡Ž?ÐÍ~Ðİ¿]µ *<”*ß÷?ÍáÏsª~ÿ½Ø°©×Ü?æzÔP!$=êô‹*½jæÞòŒ¯WCO‰Ãã}}ù&¦¼zyãßiçÙ0®êJm4ò¢×Ö£óìÄï9Õêj½ýsÌÁäÙ]jiJ®/ûm6oõÉYMÔˆðŸ }ü2'­Ù\ãßÓ‹ÚýÊ.à±ô†°òÛ›—Å.'ùB?› Zþ§×ýjÊIÿÀô¡Ûkï¼8ÛªÐK*¡Žü½_Û4ù£¤„y³ò+пï};ßÍ+‡µ¬®wll‡))kîìi#¾þ{°ïûëÆØj}¸à?p莊»+ðÅ–Áëêl¸Ð¾–¦Óú³moq89ª?÷m 7ÿmÃßs;YÖwâËÉ—v{‰¢–»õ}8 `ÕoVº’÷7wŒ´ÍìïëØk_ŒÝ¨1fj—úúÜÏwwM¶·É¾à) èß%|Ô€å›Ww©­ñ^îåÿ~~u$'óÓ«–nÓ™´Õ™N÷æ’¾Úbß„¯|UŒ§(Kx(JfƒÎDÊ¿ßïÑjÂŒ‹kl©‡—¿[–Ãáp•rׯ-ÿ‘8|-ãFwfNÒ²[4·SuêÚz¤\œ3/è¢(h߸¼©Ü%ׯhǹ23|ÕÁD:]—Í~ì´_~L)W‹_ŽÀeõZÇÖ>¶6ûFLëg©järÕJ›z²äæ-©Î—(i^ùåkÙ­Þ1ÉU›4kãQŸ¿MDè<û÷ÌÁ—è_°zûùëï9í‘›G&ëÑŠ¡‡Ûïð4:úñÝÛiŸ¹É9ôÓÍ]“Ð÷Z##z‘QSïÝ‘ÞÒgȤ{äëÿžÖ®ÈË+þýÌ“‘._«J+uÛö<í;»QvÉs•ð&|Ý«(O?á£X!-lLËÖGƒ†šÓë˜$GHÔôÕ©ž·ªeçBú¾,Úô÷èÂÏo’T 4¨rµ¯,¯X8·OS®Ò –•CÀÿFÖ,| €°z ÓŒ·‰B¢C—gÇÜ‹Í.4]FøîQœP:TÒrÏ¨ÓæG±›¥Ãâä›KÜëΟIu¬‰ÜmóJ¨Sr±àyKšW®2õTJE¿žV5³6Î|—$":ô_<óõ7™Ì*é7ç 9ãp®~ޯį?ú·¶ƒç´»´¼•þ1‘1™= éNµ(õC†š!ý‡N½2Ìq©óÉ 13{RåÙQË]:ߤgU6nX…“%”P/&ùù¸EËJz?so(—‹ÃSÖ2ªß²W÷Ú{o§å”>WqoÂ×¾*€ò„€‡ò¡á°.dÅŸÓÜš¢\­Õˆ¥ÛÎyIoòRÅsÃÄ壜j…ÆdUã&îö,µ£wzmyE¥b9óìŽSKú4 'â›Øœ¿zf§jÊ„_gÜ©½··ŽnõûÉhq=±îú²ëË \×,ù_W Ó!Tk¬b;pb{]Y””´"I‹Ú=kÊïG#㥉\Åcý W*¹Å¶ú¬jaš¢êºëîNÛ˜âêÈ+éyKZ~á ,¿ÞÄ“;Î/ïÓtkXœjýÎC] 
4Tå#P’2ejâÂãË,å¿)P±œvlžÏ¯SBöt!Ê:üÓ#šw9ÿ/ߴů>ó‚×´ ÿÐŽKV;ŽèRcF¼X¥ªm/ogìCK¥¡ß©µgý³Ý~-&[©J³>3VÍíQG­Ä -ñý¤wž±(¸¯ßÐÂ¥·ïÆ««¥Kž«X_ûªÊÊ GËjÈÆ‹C62Ëy.3FÌ`uybÿ*ZX´$_ÑIEKJR´&ß´ãÜÀŽs •ѸÚMGl½¹Å/‡£aáµ:È‹y©Z®Žãì '³óF‹­SHIÏ[Âò ~Í¿o~yZèH»1ܹû.D×UKþçü†É+;Ï´•;H#pÙpÉ¥`<W¿Ãö˨7d=WÏnÌßs™iž‘ë¬#‘³ |&æðMÜfp›Y0…ÆøsÈû~þçEpKš«ÄgùÊWPnð?ƒâ}Äk8nz™;¬ß Óœ N…&ÿÅ{C~6<€BÀ( <€BÀ—áÛÀជ{ŸÛ'= )'áÚêq“7†¼ÍR®æ:aͺ±v‚ä°%ý}6E~&êæŽƒo˜îjXáÎóÅ‚€ÿ>)W§Ž:Ùd¸MbrîIÄ„ðô&ï¹>™’|:Ñ£õx«s£ƒ|oô;ÿ6Ș+zЭ÷ÊÈ Ë›1/ P–ðßGËÙ—3I>Êœ@r>^˜7)rü¥?[)]Ü–¢,P¥/$¦¤UE5ù\"µ1 ðBaþöIOO‹Åššðrç´ìììªU«6h@_„*,|™½?3kÈ:Þø€y w¶¡ ÄMz[>ÿóðcg/³·;¢ëõ±*œÝ¶¶ôõ[óQy/‰å *ŽèèhêcŠY  ¾,:…W©Jûe'ÛËMåv?Ù]:h9ìȽar“Ê@!à@!àË^’¾ãê Ѻöîî®NÍ­Ô©a¬«©ÊÍNMüôþõ“û‘·BÏŸ<};Óò×áãÇyµ¬¦Š=@%IJ€ç¨˜: \L=˜_ÇHCǨZýæízúø­aN¨\Xð$i·t³àVW8åÁ_Ì>O•WñØpbuw3>³>@eÂÖ€O½2®P‡³G]$ýú$»·½C^í¨Ë'ÉÁž¶.ºJ¯*PY±5àÅ¢ÌÔBß:ê.Ýr5ÇyÁ€:|"ÉxzpóÓÚ]-Ô™ÕXM˜M^E‘èûäùýÀPßô]OúŽP‘±5àmöD?ù®sñÍ+U«?ìè\iX(>&7¿éÇC"Ð%Oî0ëäð”â<Æ1K¡‚akÀa̱‰]G}Ï,§ØDa=(–ôTòü>Õù–vÁï“—Q¤jMòæ)³Z¾_m=’“CêýBj7&u¬Hm+b^ðèÔx-Áµè+<¶|ò%ß™qK¢bÛ#È@1P]ðîÑ*¿©Ÿÿ%:$ñ#³Z>*Ý«×'ʪ¤Nc:¼©¯Õˆhà3Qq°5஫æìõì=;Ó¬[C#us:@Å"ûüŸ»t~SñIFQV!TOZœÃ¬,C¥{SWÂçÓ^· ýÓ¸:³(.¶|æã¿‡ê÷Øf]]W éAR‚´ó}WÚ @bŸ‰„.×1$‰˜•errˆS:æëýBwÁë4Aò±5à9<¾jU§NöÕßwÆ»$óÍ¥ Ó|×^ùØ"à‘ô›{q|`¯ŽGÚïXßÏüÍöþý/û\>`©wá’CÝôq<Š%ÿDÒùýLšâŸòŽªZ“þλXZºÄ©QÓ¢ó›Jq³:„‹N |¶|VÌèèp·Zë™È×dÇQ5wõÝw¹‘§í¶Ü’Ôû‡ŸÔðÚÑDOèyj°ewdœ³$¥[[¹åGDD…ÂüÑôôt.—UP* 屪oŸª¾}¢ú†~ðÒ“eå™fõTcž®›K¨[%µNSQS÷Ìjõ©j">³F¾41yZüB~°ÔÔT---f)T0l x{`l,³°LðµÍ´2?¦æm^NZB¦¦‰@µHIáwÝÖÖV~”Ê{‘Hdaa!_EܺÿýìyIïNOIÌ-·hN¢nª™¯jMÕ®¤}_º N=¨N¹>!…Æ+ƒèèhŽ¢¯ðXðôÍfömø}ãáçzŽ:¸:5³ª_Ë¼Šž¦W˜šø)îõ“қ͜º‘ZßcøÄñ]ª«}í¾tµ¦~‹öhmî—FmF4öÙ¶·™¾vF .¤P‘%%§Òü–ýü—[ÞØÜ+TS†Ã%fµ‰E3Òª;ßu›uônáçcYÀÓ7›´„z0'PoCumC³úÍÚzzO_ÍœúîQîy#\=WÿKÏüå§-€ŸI˜M…þä6ßÔÏ7ψDL—7q"÷®1+Ë›óúÄÎÔ³¡#\S›Y ‚aYÀ{$~ Q·èü–=R“èÂR"\ GôªþSH=kÒÀ†è›0+T*x¨´Äbúj-²ü¦²üåc’#"MɽPfMžKHçA¤¾ iДTo ».€BbëÊMß5n›wãhyqêݼ¢‡ok©)_?0‹>£ŒÊïGôÏøÒØ‰Ü/¡NyýŒØ¸ÐùmÑŒþIuÊX‰­_<®æ/Ó®ÊîA?Õó~þŽðÇ7ȃ0RÅœ<¼Î¬“ïa8ýux+:¿©GÝ&ôõV 0¶¼ÀíïCoÆzúð·º Ä)þš0`ö™xjB 'Vw7û¾Ëß@ b¢é§Â;5‰\9ž{¥¶¢>üK_¾MC‹>÷¬a º/^»v§|¶6˜Ô+ãúu8{ÔU@Ò¯O²ëqÛ;äÕŽº|é®û ]¿øB7PTRÝù˜Dœ¥Of1+Èh¤ô.t*ÂÙ’Föt_\ [×eƒ­/e¦¾¸úÖQ7pé–«9Î ÔáIÆÓƒ›ŸÖîjóÔþ“XL^G‘»×èî8õøø/³Űjnº«k’͈¥-±lA,íèQ(gl xA›=Ѳ/Û}×¹øæ•ªÕvôÁ°üJ =Æíéú/÷Cé ÏH+4ULj$Ò_nÑ8\R£±²'숕12+T~,¶d71¨TØðž7'3+GB ”ŠSß¾LQ©§ŽÏ1Pb1}sñÈúª/oÜ¥¨¦­é^8Õ#ÿʼnÔhH¸Ø… ØðÊ ¦^¹ññÆ®}wú1›D´°kâÜ©ÿèå·/ë"ß¡’Iˆ#wBÈËt¿üCq·I”]ŸUMƒþjügbÝ =r…ÇÖ€§)´¼lïàeÌr€ŠJ$¤oHJ9çQ·è‹Ã䣒ûAÞΨä®Ó˜4u¡/èfiKTð…±9à ÉÉJú÷Á©?ü¶dO8ø{'£oïÚ¤…sža¾ãØøÆêé÷×t÷Ž]2#fPÇ#íw¬ïgþf{ÿþ—}.ê¦ÿµw¡¶ÉJ§Ï@»qŽÜ¾DÞþC—Ô·¡fgÐ3–y+:Ë ª2§‹!àsñT´«5ë³â°®§ýØ ÎܾùB7¶óVÚ´ëÜPzËYã¾{/´ßZû¤†×Ž&zjDÏkTƒ-»#Sºµ•[~dd¤P(ÌMIIár¹ÏŸ?/¨ —š¨uCýQ˜ò»*/2'KIxü4 Ýì.Ã3,l³jYIŠî]OÊ IXgàIJJÒÐÐ`–BÃÖ€Oö´Î,%ô¥jןhýÍéNI¾8züÛÅQ±í©…$žôtyñâ$3­Ì©9D›—“–©i"(ü®[[[ËFDDˆD¢ÚµkË‚‚ˆ{Cnž§;åwBHZ2sj½_èŸjtw¼y[úØ·ª5eS8„àÒ0PqDGGSSÌR¨`Øð÷ÀØâŽEú~‚6Û¼õíRwhtºz.3noc¤f³¸aÖæ~iÔÔÆ>Ûö6ÕòØ@–åÔãÖEúÌ4™ÆŽô‰æùôªÐYÞ‚ÊrW¢cPPðÝØð$'þä„ã"[øÎÕÕ®ž±º89æÞ¹€E3wf šÐ@…YÿËq4Þ|yðf¹"=WÿKÏüå @±$¼£/ºqŽÜ¾H_Mý~ÞÁnòŒÍéN9õXrˆî£”3¶|rÈÄi1sn\-8ÚM¯¦]ï…ÿkÛì×ÖKB‡îtÕ*T WÊgr÷*¹Dn] ‰™S)On³:ĺ%åTïY? 
[^à²zùáö­<îΙîåÚ¸š.?;áåíÓÛ¯yÚqÏþÖHw ³ésǯŸ!ágHÜë‚róúäÍÓÜaÒ¢iáF𷡝ûPa°5à Ï¨ÓÆ; ÆùFõZ\Ñr`A °Ëó‡$"˜„¢/ÃneWÌnvUúú0ví‰;14eN¨`Xð7›a¯ÄôAìWŽÑ‡¿1î&óà:qêLê4!öíIm+ú†+• [7›a qyNïc§ºæ±r§‰«k‘ìLúÂpª;NuÊ:Ò×pÅÑ*;éGºwD » äáRfQ l xÜlF!%¼£³üZýݹy=úV+ T~Û¸ût×\ߘ9ŒÀ=0ÊYÈl xÜl¦²£ºæo«ÿ#¡'É»WÌ©*Ý»ÓZtœ7l;¤Tg|“À,ý/§÷ªo¦Ï,et͓ÇX¹ûl\7¾qRØ–‰ã5Úqf~ -Æ—pY¯OœVê2¤™²$9ÌÏÍ'º×úUÞvZ1§ýGMØõ,Ç>àq ;a,9T’|}vû!w;¯^;º•QRÄß±ël>³ÄAÀa¾˜/˜Eþe}=¶|ú­™žþFþãf3•CR½=ü4}rZVf^)—>ˆ=1žTQ£OK³s§ãWŒ¨Ìb>&¿xWòÝK-3‹ŠÒl´ä©ft'NÇuÊ¡3Uºv™vu£cîdaLÐ̬·üÕgäiAZØô1FŸð2¥g±ðXvΦ¶{»à¼Å'-|Ƙ»£.ïoBÏ¢ÝjÂþ`Ãn]f„_^ïÀ¬š§”Y¾ï4[¶¼ºõø±ÕGlü«áˆÎÍë™h«*}熔¥çÉ5ªkDŸ¨öæs*Å´6}œCGtÍÏý-ØEe†+ÿ!çH”ø<Ù§røhï}çv-?`²°[—ßÿ82©¡*ýe.ŸGè¯róp”ÕUŠK ‰0S$«Æå«)så«ðLúÝêG¾8õÍò}Øð„gÔaÅ‘ÌRøÑ¨¼Lgyè)òYÚgø¥á+‡Nı#Ñ7aNøri÷§»ôþ¸uÕ0;¸‹Lô m»ç¨º4zö¶ ¨©LHÓ9gwŸŸèÖËfמÕíüwwêݵÛÛE«Çµ1N ßîë³ò^š½tYJ*§G/ĸv3J~rrÕ8ß›©6#©bµæóÖ7kß¶û«•«F·©!~qzÕ˜y¯†ýý›©Ò·Ìò]¾sv€¯‘øÞÓz’¾£yvÞžöª5É¿/s‡µõé~¹c'ú>+8 ¾üuŒá˜‚[huœÔ1o¤ð±x\½¶k¯´• «ÔuìÎ(Ù°¶ËÄÀ[ é³®}Û‘=Ñdåú =–œ÷X"!DÃfÎÕGy# º->ß-wøfù>x(7/Ó{Úï‡Ñ'«†åÎ]H]é]Ô ì°5àÓ® w^ÑöÌÑžFø·,ÈN7¿z‚>EíC‘Ûô5q"n_œˆSâÐá«ö´g sÒ³è³Õ3³EYôí)©QªHËfKR3³…"úøšäô¬1ý¥VRZ&‡p¨ájžz’(G’’ž%[fJF¶º ?Y:J Ë*ú j™Ô€‰¾æ»„Ôü'Ê—’NWn\Ëèþ‹‚o¨g‘-JÆ¡¡YØã˜üQ†–VÕ®ÓU?%lIŸM‘Ÿ‰º¹ã Å¦»þø3ñ$’˜š)‹©”æä¤fÐéHåY–P”ž%¢â“JÄÉiÙB1•ŽTR?eáJç¨(Ç@[ýŸ˜O‰iY‰$+%¥­ð·Œ‡%Ñ’M-ü¹ŠIí¬‚dJ$*Aœ:‡9õÏrj¦~ãtÃû'IHä{²î¯üj2uMuÿ‰ýÌ(Ìg ­ö1)ï^«Eh¨)§eÐ/àkUÕ×ú7!…Yš§nºÞ?±Ÿ˜¥y>$¥QaÉ,- BQNIK¦Ê« J9´ØÁÒ,ìQ‰ÔÆÁͧÅo:P×2¼ÿâ³4Oé *ºï?w5@)ºJþÖµ¹ ¡ª¬£©¢©¦Lm@hJ‡©m-ueM5>5@maPÛô¨ª2µ¡­¡¢­¡*ÐP¦6C /¾[¾üîŸõpqûá ÿÙ’÷rú¾7úd̽ èÖ{eä…åÍJ¾RÞ½×IS÷Þ—¬ˆïæ+½ƒèܨÚÕ‡%v]›‡ÜÃ,ÍãÒÄ<ä^1S HzIôpÍ×ÖÉO”I¡­L G+LÐhp‚k(1¾ª+õ9®«Ä3ÔÖ¨e"¤:‹ÔÇ7‡Ã¡>Ö©IÔÇ7—á>ñ«èªSÝlªoÉçñ¨~.—šD¿u:*TeêóÇ¡»ªÔ羬Ï*›‘® ©Bm”Pÿò»ªð è-¿4z«"9Þÿ!Ûž£7û2E¢qJF¶¬BF–03;G¶Gäs ½ H Sc™B‘¹‘€ÚLÍȦÙ"qbÞ6 •èòë0U'K˜ñ)%wsÍXO#îS‰[fZ1KÜkPMÿÉÛÏ“¶07xû!™Z¨- Uú§¦*µ’P+ õ“Ú"¡WKM5ª\O‹.WUfëÇ È+íšw’䫾íý-öñ®AïÒÆ]\;}Þ¶sÏS¹º :ø,X<ÊÑZ‰„¯¶yôšvf•ó÷ž¿^VX»fçÄŸ×qZL—y35›Ô~§Oľ¦¯¯¿ðYŸ#½kû~ɬwbâ%QŸ,,Å·–ü:SmË¡¦ïR”ªt8)iUQM>—($D.à#""„Bé5S¥$ÂŒ¤ô‚Q†ÏɹŸzê*JJ\Ž€Ž=ކВ ½C•kª£ÔÞÚT ÆWVâª*ó4U骦º2ª§£©<Ä¥º=ÌU•ê¨(q©¼ÅÛðÞiݽ$¸¢öò!‘œWš^ÅZ9™N÷,ãš)¿¸¤üÒ:Ó¬^þÔfÒG9PO˜7˜Aè=ôD˜”[ð>¯7›»ÛÊ‚’ô¡Nm6ÉVQMY±Šôñ#P›éÙ¢Ô QJ¦05S”JýÌ ÐÃ)ôƒêñ«)‘dj“"SD¦K¿»É§Â˶6ŠÝDhZKÿö‹7šÕ6¸õ¼¸»KÙÖ5xõ!M[¯£¡¬­N=¨ •üQ=MªéQÃÔƒWè¤'”ššª¥Å–Ûn _lìé§¾áœ4Ý…ÿüá1U{sà_W&˦fÞ[àêpbÝååMÕjxÚ0«]ÏM;ƒFÕúö )Cl ø”˾Óÿ}åX7=µ±&m‹|+Ïeü_[ͽòÛî6ß¼êjµÞ±ÊV5 %NŠXÄUÕPÖjÒÛòùŸ‡;{™½ Ü]¯Uî‡f.[[[ùQ>?âÚ\GǼK/”«×OÈ•ãôãÅ#ú·WQ…¦R]ç†ÍéëÁ9wU76—•É>éq)¨¼2²DŸR2>§f~JÉüL=R3dT¡º __Wœ!{0ö¢©ª©¿Oüø>±øïŒJ?À¢UóËEöh«Ô©ŸÖµ3²…F:ù%úµ*ô¨º†j…ˆ †èèh‘¨ÐvUYÚ<‹Ä—øN–hÄbbdFPýï†C¹uû¯Ù8­ƒYrø¦ ãN4]±ŽêÚvó}ßoãêáŽz ×6Ms¸Þ–³Ë[ê”x4–äã©i;ªÎ±–mòòëŽ :*?§¦¥¦®$íÁQ=7ë©óŒ[M;Õë`7ƒ °ÇÖ€×jõûÊã;^Ÿ5ÇÇ)KB$Ù‰¯#Nn¿à|½µA.ßœî޶ëÆPWé ŠÓ¢Óô@í‘Ý¥%–ÃŽÜVP÷G{z‡Îò«'Hì æ$JF*iæJœ»§NDÇ9@Q¨©(™ªh™|O;gÊæ|JÎü˜œ.Û2ø”;’»­Àçñ´Ô”S 2ò1)ƒ>Ää-¡6/½.qÏUM£/‹»J„”euƒq‰†ÚêUt5¨ŸÔ6t˜þI=¨#uCõÊt¼ä­‹äeþIc_Ìk*!Ò€§¨7˜w.Ø»:pz®ÓWíÖ;0\NȤ)¯'] ½2 Í´#çMºvžzu«³Ü’ I}pô™y×&ů-—\¦µ9xbݼ=\ZMºVvôAj·ÖÅÏñC±5à ÏÐ}íͼþêᨮôæ‹OõY,W§2ç{¡¹‡µ'ä]Õĉ¾ ‹ŒŠ*iÑŽŽsûöô­Õà;¨ðy&úÔƒ9á›$¦fÆ'Ò›Ôµð)%ãß„ÔÔvCbz|b5‰Ú€ÈÌÎí@?—H ¿‰O¦…C+ý@TUe¥üåéA3Ôµ¡` P3ÐV3ÑÓ¤6¨ajû@¶¡` P§6äP>¶‡1K¾GYM¹ -‹$»øÓÙÑv“[:2ؼ"¿1é©äú:Îoœ#Y…¿40¡¶7‰ž1}©v箤‰#áâ#–ÒRWÖR׫kªÇœð•>Ó{Òd[ ñ‰éñŸÓˆ°Ä3 *}@ÄAÙ1ðšn3N¸åOà꺮¹ý8¯–딣QSòFJ¸þG¿Ã2ïõý–Þlåß‚Žmžžã´Àó¹™Òn,YøiÄ>w½"ÉÿSTä+Oâ¬ô¬¬”Ôl1õç*(å(©ª c¿’ ðÉŸèË»^>F_ÞU”]h7»ŒI Ò²iÕÔ³¦Ó LéJÏ3¬Ÿ÷7)ïƒì*~­Q‡–NèÐgû¾Ãê—r$ƒðõö>…ËÏøHÏ¥«*NŽýXÙqß~ am.»'`aÝëì•þÿ½W¼ù6z®…'óÒߣEå½Ïúëóš ™“ ì/6ÇÑvYæÂ,eâWvâ»(Sl øò»ÐÍwK«ZGûŽܑú6ôç-»Ówdøbl ø ,K×8tÁ©t<((<€bmÀ—Û¥j*¶|ù]ª `kÀ—ߥj*¶¼Â_ªص/Œ96±ëè£ï™åägþP†ØðÉ—|gÆ-‰Šm_^A.þxz„ÓˆOkPÛ âİ%ý}6E~&êæŽƒo˜îjˆëÇ@¹bkÀ \WÍÙëÙ{v¦ÿX·†Fêeœ·÷–u›–æ^G=œK¿µÀ÷F¿óoƒŒ¹¢·Ýz¯Œ¼°¼nø剭ŸùxÇï¡ú=¶YW×U+ÛtÅîØ7lБS¶·zJ^”ü.EY JߣPI«Šjò¹D!!r! 
óGÓÓÓ¹\nTTÞLjjª–ŽF®èØð_µªS'ûꂲ>ã]É´ÏžË}ˆÜÍ5›ô¶|þçáÇÎ^fowD×ëc¥)WŸ[[[ùQ*ïE"‘………|!@ÅÁª›ÍT^l ø¬˜;ÑÑánµÖ3'2;È®à††ÝDv—Y;ro˜|%€rÁÖ€¯À7›ø~, øä`OÛmÞî$ØÓbˆôò¢Ê¨ðó°,àów›'SÃöÌ ¾ØèÞ冲ôÒµ•Ë^’rg…G×µQéX¸…)c:á9ùþÙ¼ðAp•Ëž£e3õ|ÌTf1€‚aYÀ°@±6àsâOŽë8-¦Ë¼™ƒšÇMj¿Ó'b_Ó×ÇWŒ_ø¬Ï‘ƒÞµËúò7?[>å²ïôg_9ÖMC’ƒ¥GÍó ¬<—òm5÷Êo»Ûà"ŒP™±5àµZý¾òxGÇŽ×gÍñqÊ’Ivâ눓[ç/8_om Ò*9¶<ắ½™w?—ÃQ]éÿÌŸê³X®@eÅÖ€O.åJv|6nƒgÌÜL§lo4ð£°5à•«4430Xpq£»>}×\’¤ .S5.,¶|±´]ëàçfZªL¨,Øð\U-5~’Š㢴.‡¯i¨Á#<ÝjZÉ!…„ à bkÀ+7˜ñéÎþùCöœ¼vÿß4 Q1²lõ«÷¤×/ ¸ÉgûwÞë°yŸó7\³63rVëQY³÷Ïm£ófÿè^›êì¹8ꟴ߱¾Ÿù›íýû_ö¹|¨›>.w剭OãéÙô›Ðo³œÛîÛnÌÂ/¤j½(\öå¾Ä¬¦‘©­©ðþ²'5¼v4ÑS#z^£lÙ™Ò­­ÜMn"##…BaþhJJ —Ë}þüyA €Š$))ICCƒY  [>'þp¿Ök-6š¨³­óXÞ† ñfŸŸ†ü5gú!“µ—6·7ûbþH2ßžÿcɹšÓ—oT&7Í´2?¦æm^NZB¦¦‰ ð»nmm-?!‰j×®-_PqDGGSSÌR¨`Øði÷ZLÛܲЀŒ_dÑaÝí1[]¬=æw´èî6íªÓŽVß°s^&ý†¯­ÇþOÒá{&Ðw—?è·¸aÖæ~i„ûlÛÛL1@ckÀ Z-ù{O¯EdÅèv]ê†Nœxh™»rĆ ÿÔZ[•Yý+¨·Xõ v£ÐÕÿÒ3F@ùakÀ~Ýѧ wúóQ'ÙP»ù!y¿¨ÄØði׆;¯h{æhO£ïû¶ BbkÀç¤%¼¿5ÁºÚæŠ}@T »ÜQî•[^àË,Pl xÜ[÷ƒ…ÆÖ€Çýà@¡±5àq?xPhl ø´°I]fÅÛöî×ß³­¥>¾qÃÖ€×pøýâEéðÃÝ£»¶oÿ+è¥aëÞý½úöhUWÀcÔ¨dØðùÄY©IÉ)©éY¾Šªªšª*W¾€Ê­Ÿrqˆë¼l7¯^=][ œK=˜5*1¶¼–kÀMWù‰ðãã cÇŽŸºo¾üôZ§o¾›@ÀÒ€—dÆÝ½ð¿ãTžŸ{(©U3-ZuæåãÛ ²l7h&³î÷'†-éï³)ò3Q7w´xÃtWC|ËåŠeŸìi9äyËI«üǺvò¶îä=OVh»ÍD_Ez¹›r~kï~çßsEoºõ^yay35f-€2IJ€¸¾¥/A/É|yòÇŽ;qþ1©]35M%.!["P.—Œ%¿KQ¨Òï)iUQM>—($D.à#""„BaþhZZšH$:wî\A €ïMýTVVfNøVÔg”@€[rUt, ø<UëN>Ôc~^‰$ûã³ôwð§Öð?µÆ± ¿ƒ×lÒÛòùŸ‡;{™½ Ü]¯Uá…ÛÚÚÊRyO5GGGùB€o†5 Êœl¥b–BÃÒ€/Š£lØÈm0õ˜Åœòݸ†ÝDv—Z;roc2@ÙCÀW8Œ=ÀwÂe+U¥€€P@x„€¯0Äñ½:i¿c}?ó7Ûû÷¿ìsùP7ýr9¬HÖã¥í¼³ge÷j‰çöôËZseVìàÂ+ÒûK½ÿ«+ȸ;ßmwùÁ)ÖY¦v_ Ø~iüóÿµ -ÁJõ³!à+ŒÔû‡ŸÔðÚÑDOèyj°ewdJ·¶8J§ÒÐïÊ-qüá>]¯¥¨9. n*¹ÿcEŠÓ`®ZEK°²A¾”ðUxvÝW»× ûhè2eÛâ&¢þÿ¹ -ÁJõ³!à+ ¾¶™VæÇÔ¢ÍËIKÈÔ4à_†kÔãÀ“äsPçYW:Mb®HªEV­¢%XÙ OÓHÇÀÎ÷à²%’tÒÓqDË™+LÑU¨h VªŸ  C­©ßâ†=Z›û¥"hì³mo3uf€"ÄŸÃ׎¿&$V¤Û¤ÿ“·5R²a¬HúÚuþ³+äSoábÜÊ‘MÍCß+Õèà·k#u¬T•¾âàê¹ú_zæÏ,( W×~âž勊®H_RgÐrÚÁ;ÓäJŠ®0_R?@!à@!à*•ä`OËaé3/Q‡Ï(·ÝæèþýçÝ–á¢þ›0æØÄ®£¾W²^tõØ`ó¼öXúkÈŸJJ­VѧÈ#[6íùþnÃ!׫÷þ}ÓôÎuɽõý{¯yªÖhવÝëk¾?6¶Ã””5wö´¡g båî³qÝø6ÆIa[&Ž?ÖhÇ™ù-´8D’|}vû!w;¯^;º•QRÄß±ël>³ÄAÀ¡žÅbÈ“Îk-sÊz%©km"×Ή?9ÚÍ÷}¿«‡;ê%\Û4aÌáz[Î.o©Ã-¨S Iê­½÷X»bˆ­öûKë'ŒÜ™mï]¤Z‰¯M¾VfäBŸÓöŸíS½ÐBÔO¿ö$w¸äœL¿‡·-½×¯›èVW]îM+¡\Pʫʉ?6¤Íb ¿€E=êŠîš4lo“}ÁSÈ¿¨BÊoxUúÙLÐZqz‡)'ýýýÓ‡n¯½óâl+ùçPXx¨”8ÇÅÇëOì>Õc×2—/LJzÓ`õÑ•=èêöÃ}[l|`A'zŠYúüm"Y5ÍFK.œhFo:è¸N9t¦J×.Ó®nt$á3ÆÜuáxz’v« ûƒ »u™~y½5®Õ|ñ’ut9D7ïéd’C&My=éBÐSz.A›iGΛtí<5ôêVç‹—î7â¦÷Éã^ÒžµVû™Ç.wh{ºhµ_›†\5áç7©ºž†Œt/¬ô¬Ù|ÍÞ¹¤/ü¦W^Ê«"ás$L;ÐW:©©÷îHÙ6K¦lqE|ëҴñî¦<<`ëÆ¸Ú5›ØÙ÷ý3r0®,‚€‡J‹kà¾öhãÀ‘ÝøìÏœ˜O"ÌIòF¸ªUù®Rñ›\®\¹Dœ#Qâó¨hàðÕ”¹òÁ3ét«=”LMURå—<M•BO#‘ˆÅ„WhA¥àPè ”ÕŠynɯMžV‹‘­cæzÕqDMfÈ ãÃO>4ïäjZú .é×,¶¼”W•*KH‘Jö ¯Š«ë²êöSzH˜ø22t‡Ï¼=Æ[/,´VeVPHx¨Ô”L<·~õç ŸfçwT•T„O^ˆqíf”üääªq¾7SmFÊÏôŸÒîOwéýqëªav:qÿ˜èÚvÏQ;úº›Íç­oÖ¾m÷W+WnSCüâôª1ó^ úû7ÓÒ’†ÃÒUuÛ¶ë¿þwG½O¡[&ŽÞ[wÝ{ú 2fÝb¨ÛûÿÕ¦{§î1þkƺTIŒØ1ÙÛÿ^š=³šÚ¾6M‡5—–¯îÝÈZ³ï”ñýÚ5©®ÃMzs7äà†?B-ü÷ú™R±Ïÿ¾,¯”W¥é¸`–®{O?­¿fw¨–ùhÿÔ¡7ÜsfJž¦¡Ê‡û/’ª&Šäùû†·1;jy»~Qƒ¶-íÕĸ¦u^æûŸ² AÀ;”ö¹PáwÔ¿Æà=çŒ Úì‰~ Ôoè±ä¼Ç’Ü …çe,ª`Ô=0&6¿X«ãì ŽùcDÃfÎÕGy# º->ß-w¸¸–‡«ëºæöãÜ딣QSòFJ˜«p¹ªå¤3÷&åNi9þàÓüý…ª•øÚ8š¿L:ù4wy4ƒÚöž~öž_ü‚åß´âËKyU<ã»îõ ký2ðÏÛ¥ƒÊŽ›î^– 1—[ì7¼*‹i—ïä›yl~à‘? øð  €ð  €ð  €ð  €ð  €ð  €ð  €ð  €ð  €ð  €ð  €ð  €ð  €ð  €ð  €ð  €ð  €ð  €ð  €ð èÿ¯^ü4¹4ŽQIEND®B`‚libkqueue-2.3.1/test/common.h000066400000000000000000000076161342472035000161350ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _COMMON_H #define _COMMON_H #if HAVE_ERR_H # include #else # define err(rc,msg,...) do { perror(msg); exit(rc); } while (0) # define errx(rc,msg,...) do { puts(msg); exit(rc); } while (0) #endif #define die(str) do { \ fprintf(stderr, "%s(): %s: %s\n", __func__,str, strerror(errno));\ abort();\ } while (0) #include #include #include #include #include #include #include #include #ifndef _WIN32 #include #include #include #include #include #include #include #include #include "config.h" #include #else # include "include/sys/event.h" # include "src/windows/platform.h" #endif struct test_context; struct unit_test { const char *ut_name; int ut_enabled; void (*ut_func)(struct test_context *); }; #define MAX_TESTS 50 struct test_context { struct unit_test tests[MAX_TESTS]; char *cur_test_id; int iterations; int iteration; int kqfd; /* EVFILT_READ and EVFILT_WRITE */ int client_fd; int server_fd; /* EVFILT_VNODE */ int vnode_fd; char testfile[1024]; }; void test_evfilt_read(struct test_context *); void test_evfilt_signal(struct test_context *); void test_evfilt_vnode(struct test_context *); void test_evfilt_timer(struct test_context *); void test_evfilt_proc(struct test_context *); #ifdef EVFILT_USER void test_evfilt_user(struct test_context *); #endif #define test(f,ctx,...) do { \ assert(ctx != NULL); \ test_begin(ctx, "test_"#f"()\t"__VA_ARGS__); \ test_##f(ctx); \ test_end(ctx); \ } while (/*CONSTCOND*/0) extern const char * kevent_to_str(struct kevent *); void kevent_get(struct kevent *, int); void kevent_get_hires(struct kevent *, int); void kevent_update(int kqfd, struct kevent *kev); #define kevent_cmp(a,b) _kevent_cmp(a,b, __FILE__, __LINE__) void _kevent_cmp(struct kevent *, struct kevent *, const char *, int); void kevent_add(int kqfd, struct kevent *kev, uintptr_t ident, short filter, u_short flags, u_int fflags, intptr_t data, void *udata); /* DEPRECATED: */ #define KEV_CMP(kev,_ident,_filter,_flags) do { \ if (kev.ident != (_ident) || \ kev.filter != (_filter) || \ kev.flags != (_flags)) \ err(1, "kevent mismatch: got [%d,%d,%d] but expecting [%d,%d,%d]", \ (int)_ident, (int)_filter, (int)_flags,\ (int)kev.ident, kev.filter, kev.flags);\ } while (0); /* Checks if any events are pending, which is an error. */ #define test_no_kevents(a) _test_no_kevents(a, __FILE__, __LINE__) void _test_no_kevents(int, const char *, int); /* From test.c */ void test_begin(struct test_context *, const char *); void test_end(struct test_context *); void test_atexit(void); void testing_begin(void); void testing_end(void); int testing_make_uid(void); #endif /* _COMMON_H */ libkqueue-2.3.1/test/kevent.c000066400000000000000000000115561342472035000161320ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "common.h" extern int kqfd; /* Checks if any events are pending, which is an error. */ void _test_no_kevents(int kqfd, const char *file, int line) { int nfds; struct timespec timeo; struct kevent kev; memset(&timeo, 0, sizeof(timeo)); nfds = kevent(kqfd, NULL, 0, &kev, 1, &timeo); if (nfds < 0) err(1, "kevent(2)"); if (nfds > 0) { printf("\n[%s:%d]: Unexpected event:", file, line); err(1, kevent_to_str(&kev)); } } /* Retrieve a single kevent */ void kevent_get(struct kevent *kev, int kqfd) { struct kevent buf; int nfds; if (kev == NULL) kev = &buf; nfds = kevent(kqfd, NULL, 0, kev, 1, NULL); if (nfds < 1) err(1, "kevent(2)"); } /* In Linux, a kevent() call with less than 1ms resolution will perform a pselect() call to obtain the higer resolution. This test exercises that codepath. */ void kevent_get_hires(struct kevent *kev, int kqfd) { int nfds; struct timespec timeo; timeo.tv_sec = 0; timeo.tv_nsec = 500000; nfds = kevent(kqfd, NULL, 0, kev, 1, &timeo); if (nfds < 1) die("kevent(2)"); } char * kevent_fflags_dump(struct kevent *kev) { char *buf; #define KEVFFL_DUMP(attrib) \ if (kev->fflags & attrib) \ strncat(buf, #attrib" ", 64); if ((buf = calloc(1, 1024)) == NULL) abort(); /* Not every filter has meaningful fflags */ if (kev->filter != EVFILT_VNODE) { snprintf(buf, 1024, "fflags = %d", kev->fflags); return (buf); } snprintf(buf, 1024, "fflags = %d (", kev->fflags); KEVFFL_DUMP(NOTE_DELETE); KEVFFL_DUMP(NOTE_WRITE); KEVFFL_DUMP(NOTE_EXTEND); #if HAVE_NOTE_TRUNCATE KEVFFL_DUMP(NOTE_TRUNCATE); #endif KEVFFL_DUMP(NOTE_ATTRIB); KEVFFL_DUMP(NOTE_LINK); KEVFFL_DUMP(NOTE_RENAME); #if HAVE_NOTE_REVOKE KEVFFL_DUMP(NOTE_REVOKE); #endif buf[strlen(buf) - 1] = ')'; return (buf); } char * kevent_flags_dump(struct kevent *kev) { char *buf; #define KEVFL_DUMP(attrib) \ if (kev->flags & attrib) \ strncat(buf, #attrib" ", 64); if ((buf = calloc(1, 1024)) == NULL) abort(); snprintf(buf, 1024, "flags = %d (", kev->flags); KEVFL_DUMP(EV_ADD); KEVFL_DUMP(EV_ENABLE); KEVFL_DUMP(EV_DISABLE); KEVFL_DUMP(EV_DELETE); KEVFL_DUMP(EV_ONESHOT); KEVFL_DUMP(EV_CLEAR); KEVFL_DUMP(EV_EOF); KEVFL_DUMP(EV_ERROR); #ifdef EV_DISPATCH KEVFL_DUMP(EV_DISPATCH); #endif #ifdef EV_RECEIPT KEVFL_DUMP(EV_RECEIPT); #endif buf[strlen(buf) - 1] = ')'; return (buf); } /* TODO - backport changes from src/common/kevent.c kevent_dump() */ const char * kevent_to_str(struct kevent *kev) { char buf[512]; snprintf(&buf[0], sizeof(buf), "[ident=%d, filter=%d, %s, %s, data=%d, udata=%p]", (u_int) kev->ident, kev->filter, kevent_flags_dump(kev), kevent_fflags_dump(kev), (int) kev->data, kev->udata); return (strdup(buf)); } void kevent_update(int kqfd, struct kevent *kev) { if (kevent(kqfd, kev, 1, NULL, 0, NULL) < 0) { printf("Unable to add the following kevent:\n%s\n", kevent_to_str(kev)); die("kevent"); } } void kevent_add(int kqfd, struct kevent *kev, uintptr_t ident, short filter, u_short flags, u_int fflags, intptr_t data, void *udata) { EV_SET(kev, ident, filter, flags, fflags, data, NULL); if (kevent(kqfd, kev, 1, NULL, 0, NULL) < 0) { printf("Unable to add the following kevent:\n%s\n", kevent_to_str(kev)); die("kevent"); } } void _kevent_cmp(struct kevent *k1, struct kevent *k2, const char *file, int 
line) { /* XXX- Workaround for inconsistent implementation of kevent(2) */ #if defined (__FreeBSD_kernel__) || defined (__FreeBSD__) if (k1->flags & EV_ADD) k2->flags |= EV_ADD; #endif if (memcmp(k1, k2, sizeof(*k1)) != 0) { printf("[%s:%d]: kevent_cmp() failed:\n expected %s\n but got %s\n", file, line, kevent_to_str(k1), kevent_to_str(k2)); abort(); } } libkqueue-2.3.1/test/libdispatch/000077500000000000000000000000001342472035000167505ustar00rootroot00000000000000libkqueue-2.3.1/test/libdispatch/Makefile000066400000000000000000000003671342472035000204160ustar00rootroot00000000000000CFLAGS=`pkg-config --cflags libkqueue` LDADD=`pkg-config --libs libkqueue` -ldispatch all: disptest disptest: main.o $(CC) -o disptest $(CFLAGS) main.c $(LDADD) check: disptest ./disptest clean: rm -f *.o distclean: clean rm -f disptest libkqueue-2.3.1/test/libdispatch/main.c000066400000000000000000000043721342472035000200460ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER; int testnum; void test_countdown(void); void say_hello(void *arg) { puts("hello"); test_countdown(); } void final_countdown(void *arg, size_t count) { static int europe = 10; if (europe == 0) { printf("It's the final countdown..\n"); exit(0); } else { printf("%d.. ", europe); fflush(stdout); } pthread_mutex_lock(&mtx); europe--; pthread_mutex_unlock(&mtx); } /* Adapted from: http://developer.apple.com/mac/articles/cocoa/introblocksgcd.html */ void test_timer() { dispatch_source_t timer; dispatch_time_t now; timer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, dispatch_get_current_queue()); //NOTE: q_default doesn't work now = dispatch_walltime(DISPATCH_TIME_NOW, 0); dispatch_source_set_timer(timer, now, 1, 1); dispatch_source_set_event_handler_f(timer, say_hello); puts("starting timer\n"); } void test_countdown(void) { dispatch_apply_f(15, dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT,0), NULL, final_countdown); } int main(int argc, char **argv) { while (argc) { #if TODO if (strcmp(argv[0], "--no-proc") == 0) test_proc = 0; #endif argv++; argc--; } test_timer(); dispatch_main(); printf("\n---\n" "+OK All %d tests completed.\n", testnum - 1); return (0); } libkqueue-2.3.1/test/lockstat.c000066400000000000000000000043701342472035000164560ustar00rootroot00000000000000/* * Copyright (c) 2011 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include "src/common/private.h" int DEBUG_KQUEUE = 1; char * KQUEUE_DEBUG_IDENT = "lockstat"; struct foo { tracing_mutex_t foo_lock; }; void *test_assert_unlocked(void *_x) { struct foo *x = (struct foo *) _x; tracing_mutex_assert(&x->foo_lock, MTX_UNLOCKED); pthread_exit(NULL); } void *test_assert_locked(void *_x) { struct foo *x = (struct foo *) _x; puts("The following assertion should fail"); tracing_mutex_assert(&x->foo_lock, MTX_LOCKED); pthread_exit(NULL); } /* * Test the lockstat.h API */ int main() { struct foo x; pthread_t tid; void *rv; tracing_mutex_init(&x.foo_lock, NULL); tracing_mutex_lock(&x.foo_lock); tracing_mutex_assert(&x.foo_lock, MTX_LOCKED); tracing_mutex_unlock(&x.foo_lock); tracing_mutex_assert(&x.foo_lock, MTX_UNLOCKED); /* * Ensure that the assert() function works when there * are multiple threads contenting for the mutex. */ tracing_mutex_lock(&x.foo_lock); if (pthread_create(&tid, NULL, test_assert_unlocked, &x) != 0) err(1, "pthread_create"); pthread_join(tid, &rv); tracing_mutex_unlock(&x.foo_lock); tracing_mutex_lock(&x.foo_lock); if (pthread_create(&tid, NULL, test_assert_locked, &x) != 0) err(1, "pthread_create"); sleep(3); // Crude way to ensure the other thread is scheduled pthread_join(tid, &rv); tracing_mutex_unlock(&x.foo_lock); puts("+OK"); return (0); } libkqueue-2.3.1/test/main.c000066400000000000000000000203561342472035000155600ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #if defined(__linux__) #include #include #endif #include "common.h" /* Maximum number of threads that can be created */ #define MAX_THREADS 100 void test_kqueue_descriptor_is_pollable(void) { int kq, rv; struct kevent kev; fd_set fds; struct timeval tv; if ((kq = kqueue()) < 0) die("kqueue()"); test_no_kevents(kq); kevent_add(kq, &kev, 2, EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, 1000, NULL); test_no_kevents(kq); FD_ZERO(&fds); FD_SET(kq, &fds); tv.tv_sec = 5; tv.tv_usec = 0; rv = select(1, &fds, NULL, NULL, &tv); if (rv < 0) die("select() error"); if (rv == 0) die("select() no events"); if (!FD_ISSET(kq, &fds)) { die("descriptor is not ready for reading"); } close(kq); } /* * Test the method for detecting when one end of a socketpair * has been closed. 
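* A zero-byte return from recv(2) with MSG_PEEK | MSG_DONTWAIT on a readable
* descriptor indicates that the remote end has shut down.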
This technique is used in kqueue_validate() */ static void test_peer_close_detection(void *unused) { #ifdef _WIN32 return; //FIXME #else int sockfd[2]; char buf[1]; struct pollfd pfd; if (socketpair(AF_UNIX, SOCK_STREAM, 0, sockfd) < 0) die("socketpair"); pfd.fd = sockfd[0]; pfd.events = POLLIN | POLLHUP; pfd.revents = 0; if (poll(&pfd, 1, 0) > 0) die("unexpected data"); if (close(sockfd[1]) < 0) die("close"); if (poll(&pfd, 1, 0) > 0) { if (recv(sockfd[0], buf, sizeof(buf), MSG_PEEK | MSG_DONTWAIT) != 0) die("failed to detect peer shutdown"); } #endif } void test_kqueue(void *unused) { int kqfd; if ((kqfd = kqueue()) < 0) die("kqueue()"); test_no_kevents(kqfd); if (close(kqfd) < 0) die("close()"); } void test_kevent(void *unused) { struct kevent kev; memset(&kev, 0, sizeof(kev)); /* Provide an invalid kqueue descriptor */ if (kevent(-1, &kev, 1, NULL, 0, NULL) == 0) die("invalid kq parameter"); } #if defined(__linux__) /* Maximum number of FD for current process */ #define MAX_FDS 32 /* * Test the cleanup process for Linux */ void test_cleanup(void *unused) { int i; int max_fds = MAX_FDS; struct rlimit curr_rlim, rlim; int kqfd1, kqfd2; struct kevent kev; /* Remeber current FD limit */ if (getrlimit(RLIMIT_NOFILE, &curr_rlim) < 0) { die("getrlimit failed"); } /* lower FD limit to 32 */ if (max_fds < rlim.rlim_cur) { /* Set FD limit to MAX_FDS */ rlim = curr_rlim; rlim.rlim_cur = 32; if (setrlimit(RLIMIT_NOFILE, &rlim) < 0) { die("setrlimit failed"); } } else { max_fds = rlim.rlim_cur; } /* Create initial kqueue to avoid cleanup thread being destroyed on each close */ if ((kqfd1 = kqueue()) < 0) die("kqueue()"); /* Create and close 2 * max fd number of kqueues */ for (i=0; i < 2 * max_fds + 1; i++) { if ((kqfd2 = kqueue()) < 0) die("kqueue()"); kevent_add(kqfd2, &kev, 1, EVFILT_TIMER, EV_ADD, 0, 1000,NULL); if (close(kqfd2) < 0) die("close()"); } if (close(kqfd1) < 0) die("close()"); /* * Run same test again but without extra kqueue * Cleanup thread will be destroyed * Create and close 2 * max fd number of kqueues */ for (i=0; i < 2 * max_fds + 1; i++) { if ((kqfd2 = kqueue()) < 0) die("kqueue()"); kevent_add(kqfd2, &kev, 1, EVFILT_TIMER, EV_ADD, 0, 1000,NULL); if (close(kqfd2) < 0) die("close()"); } /* Restore FD limit */ if (setrlimit(RLIMIT_NOFILE, &curr_rlim) < 0) { die("setrlimit failed"); } } #endif void test_ev_receipt(void *unused) { int kq; struct kevent kev; if ((kq = kqueue()) < 0) die("kqueue()"); #if !defined(_WIN32) EV_SET(&kev, SIGUSR2, EVFILT_SIGNAL, EV_ADD | EV_RECEIPT, 0, 0, NULL); if (kevent(kq, &kev, 1, &kev, 1, NULL) < 0) die("kevent"); /* TODO: check the receipt */ close(kq); #else memset(&kev, 0, sizeof(kev)); puts("Skipped -- EV_RECEIPT is not available or running on Win32"); #endif } void run_iteration(struct test_context *ctx) { struct unit_test *test; for (test = &ctx->tests[0]; test->ut_name != NULL; test++) { if (test->ut_enabled) test->ut_func(ctx); } free(ctx); } void test_harness(struct unit_test tests[MAX_TESTS], int iterations) { int i, n, kqfd; struct test_context *ctx; printf("Running %d iterations\n", iterations); testing_begin(); ctx = calloc(1, sizeof(*ctx)); test(peer_close_detection, ctx); test(kqueue, ctx); test(kevent, ctx); #if defined(__linux__) test(cleanup, ctx); #endif if ((kqfd = kqueue()) < 0) die("kqueue()"); test(ev_receipt, ctx); /* TODO: this fails now, but would be good later test(kqueue_descriptor_is_pollable); */ free(ctx); n = 0; for (i = 0; i < iterations; i++) { ctx = calloc(1, sizeof(*ctx)); if (ctx == NULL) abort(); 
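/* Each iteration gets a fresh test_context; the kqueue descriptor created above is shared across all iterations. */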
ctx->iteration = n++; ctx->kqfd = kqfd; memcpy(&ctx->tests, tests, sizeof(ctx->tests)); ctx->iterations = iterations; run_iteration(ctx); } testing_end(); close(kqfd); } void usage(void) { printf("usage: [-hn] [testclass ...]\n" " -h This message\n" " -n Number of iterations (default: 1)\n" " testclass Tests suites to run: [socket signal timer vnode user]\n" " All tests are run by default\n" "\n" ); exit(1); } int main(int argc, char **argv) { struct unit_test tests[MAX_TESTS] = { { "socket", 1, test_evfilt_read }, #if !defined(_WIN32) && !defined(__ANDROID__) // XXX-FIXME -- BROKEN ON LINUX WHEN RUN IN A SEPARATE THREAD { "signal", 1, test_evfilt_signal }, #endif #if FIXME { "proc", 1, test_evfilt_proc }, #endif { "timer", 1, test_evfilt_timer }, #ifndef _WIN32 { "vnode", 1, test_evfilt_vnode }, #endif #ifdef EVFILT_USER { "user", 1, test_evfilt_user }, #endif { NULL, 0, NULL }, }; struct unit_test *test; int c, i, iterations; char *arg; int match; #ifdef _WIN32 /* Initialize the Winsock library */ WSADATA wsaData; if (WSAStartup(MAKEWORD(2,2), &wsaData) != 0) err(1, "WSAStartup failed"); #endif iterations = 1; /* Windows does not provide a POSIX-compatible getopt */ #ifndef _WIN32 while ((c = getopt (argc, argv, "hn:")) != -1) { switch (c) { case 'h': usage(); break; case 'n': iterations = atoi(optarg); break; default: usage(); } } /* If specific tests are requested, disable all tests by default */ if (optind < argc) { for (test = &tests[0]; test->ut_name != NULL; test++) { test->ut_enabled = 0; } } for (i = optind; i < argc; i++) { match = 0; arg = argv[i]; for (test = &tests[0]; test->ut_name != NULL; test++) { if (strcmp(arg, test->ut_name) == 0) { test->ut_enabled = 1; match = 1; break; } } if (!match) { printf("ERROR: invalid option: %s\n", arg); exit(1); } else { printf("enabled test: %s\n", arg); } } #endif test_harness(tests, iterations); return (0); } libkqueue-2.3.1/test/proc.c000066400000000000000000000132421342472035000155730ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include "common.h" static int sigusr1_caught = 0; static pid_t pid; static int kqfd; static void sig_handler(int signum) { sigusr1_caught = 1; } static void test_kevent_proc_add(struct test_context *ctx) { struct kevent kev; test_no_kevents(kqfd); kevent_add(kqfd, &kev, pid, EVFILT_PROC, EV_ADD, 0, 0, NULL); test_no_kevents(kqfd); } static void test_kevent_proc_delete(struct test_context *ctx) { struct kevent kev; test_no_kevents(kqfd); kevent_add(kqfd, &kev, pid, EVFILT_PROC, EV_DELETE, 0, 0, NULL); if (kill(pid, SIGKILL) < 0) die("kill"); sleep(1); test_no_kevents(kqfd); } static void test_kevent_proc_get(struct test_context *ctx) { struct kevent kev, buf; /* Create a child that waits to be killed and then exits */ pid = fork(); if (pid == 0) { pause(); printf(" -- child caught signal, exiting\n"); exit(2); } printf(" -- child created (pid %d)\n", (int) pid); test_no_kevents(kqfd); kevent_add(kqfd, &kev, pid, EVFILT_PROC, EV_ADD, 0, 0, NULL); /* Cause the child to exit, then retrieve the event */ printf(" -- killing process %d\n", (int) pid); if (kill(pid, SIGUSR1) < 0) die("kill"); kevent_get(&buf, kqfd); kevent_cmp(&kev, &buf); test_no_kevents(kqfd); } #ifdef TODO void test_kevent_signal_disable(struct test_context *ctx) { const char *test_id = "kevent(EVFILT_SIGNAL, EV_DISABLE)"; struct kevent kev; test_begin(test_id); EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_DISABLE, 0, 0, NULL); if (kevent(kqfd, &kev, 1, NULL, 0, NULL) < 0) die("%s", test_id); /* Block SIGUSR1, then send it to ourselves */ sigset_t mask; sigemptyset(&mask); sigaddset(&mask, SIGUSR1); if (sigprocmask(SIG_BLOCK, &mask, NULL) == -1) die("sigprocmask"); if (kill(getpid(), SIGKILL) < 0) die("kill"); test_no_kevents(); success(); } void test_kevent_signal_enable(struct test_context *ctx) { const char *test_id = "kevent(EVFILT_SIGNAL, EV_ENABLE)"; struct kevent kev; test_begin(test_id); EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ENABLE, 0, 0, NULL); if (kevent(kqfd, &kev, 1, NULL, 0, NULL) < 0) die("%s", test_id); /* Block SIGUSR1, then send it to ourselves */ sigset_t mask; sigemptyset(&mask); sigaddset(&mask, SIGUSR1); if (sigprocmask(SIG_BLOCK, &mask, NULL) == -1) die("sigprocmask"); if (kill(getpid(), SIGUSR1) < 0) die("kill"); kev.flags = EV_ADD | EV_CLEAR; #if LIBKQUEUE kev.data = 1; /* WORKAROUND */ #else kev.data = 2; // one extra time from test_kevent_signal_disable() #endif kevent_cmp(&kev, kevent_get(kqfd)); /* Delete the watch */ kev.flags = EV_DELETE; if (kevent(kqfd, &kev, 1, NULL, 0, NULL) < 0) die("%s", test_id); success(); } void test_kevent_signal_del(struct test_context *ctx) { const char *test_id = "kevent(EVFILT_SIGNAL, EV_DELETE)"; struct kevent kev; test_begin(test_id); /* Delete the kevent */ EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_DELETE, 0, 0, NULL); if (kevent(kqfd, &kev, 1, NULL, 0, NULL) < 0) die("%s", test_id); /* Block SIGUSR1, then send it to ourselves */ sigset_t mask; sigemptyset(&mask); sigaddset(&mask, SIGUSR1); if (sigprocmask(SIG_BLOCK, &mask, NULL) == -1) die("sigprocmask"); if (kill(getpid(), SIGUSR1) < 0) die("kill"); test_no_kevents(); success(); } void test_kevent_signal_oneshot(struct test_context *ctx) { const char *test_id = "kevent(EVFILT_SIGNAL, EV_ONESHOT)"; struct kevent kev; test_begin(test_id); EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD | EV_ONESHOT, 0, 0, NULL); if (kevent(kqfd, &kev, 1, NULL, 0, NULL) < 0) die("%s", test_id); /* Block SIGUSR1, then send it to ourselves */ sigset_t mask; sigemptyset(&mask); sigaddset(&mask, SIGUSR1); if (sigprocmask(SIG_BLOCK, &mask, 
NULL) == -1) die("sigprocmask"); if (kill(getpid(), SIGUSR1) < 0) die("kill"); kev.flags |= EV_CLEAR; kev.data = 1; kevent_cmp(&kev, kevent_get(kqfd)); /* Send another one and make sure we get no events */ if (kill(getpid(), SIGUSR1) < 0) die("kill"); test_no_kevents(); success(); } #endif void test_evfilt_proc(struct test_context *ctx) { signal(SIGUSR1, sig_handler); /* Create a child that waits to be killed and then exits */ pid = fork(); if (pid == 0) { pause(); exit(2); } printf(" -- child created (pid %d)\n", (int) pid); test(kevent_proc_add, ctx); test(kevent_proc_delete, ctx); test(kevent_proc_get, ctx); signal(SIGUSR1, SIG_DFL); #if TODO test_kevent_signal_add(); test_kevent_signal_del(); test_kevent_signal_get(); test_kevent_signal_disable(); test_kevent_signal_enable(); test_kevent_signal_oneshot(); #endif } libkqueue-2.3.1/test/read.c000066400000000000000000000310741342472035000155460ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "common.h" /* * Create a connected TCP socket. */ static void create_socket_connection(int *client, int *server) { struct sockaddr_in sain; socklen_t sa_len = sizeof(sain); int one = 1; int clnt, srvr, accepted; short port; /* Create a passive socket */ memset(&sain, 0, sizeof(sain)); sain.sin_family = AF_INET; sain.sin_port = 0; if ((srvr = socket(PF_INET, SOCK_STREAM, 0)) < 0) err(1, "socket"); if (setsockopt(srvr, SOL_SOCKET, SO_REUSEADDR, (char *) &one, sizeof(one)) != 0) err(1, "setsockopt"); if (bind(srvr, (struct sockaddr *) &sain, sa_len) < 0) { printf("unable to bind to auto-assigned port\n"); err(1, "bind-1"); } if (getsockname(srvr, (struct sockaddr *) &sain, &sa_len) < 0) err(1, "getsockname-1"); port = ntohs(sain.sin_port); if (listen(srvr, 100) < 0) err(1, "listen"); /* Simulate a client connecting to the server */ sain.sin_family = AF_INET; sain.sin_port = htons(port); sain.sin_addr.s_addr = inet_addr("127.0.0.1"); if ((clnt = socket(AF_INET, SOCK_STREAM, 0)) < 0) err(1, "clnt: socket"); if (connect(clnt, (struct sockaddr *) &sain, sa_len) < 0) err(1, "clnt: connect"); if ((accepted = accept(srvr, NULL, 0)) < 0) err(1, "srvr: accept"); *client = clnt; *server = accepted; } static void kevent_socket_drain(struct test_context *ctx) { char buf[1]; /* Drain the read buffer, then make sure there are no more events. 
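* Only one byte is read because kevent_socket_fill() writes a single byte per call.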
*/ if (recv(ctx->client_fd, &buf[0], 1, 0) < 1) die("recv(2)"); } static void kevent_socket_fill(struct test_context *ctx) { if (send(ctx->server_fd, ".", 1, 0) < 1) die("send(2)"); } void test_kevent_socket_add(struct test_context *ctx) { struct kevent kev; kevent_add(ctx->kqfd, &kev, ctx->client_fd, EVFILT_READ, EV_ADD, 0, 0, &ctx->client_fd); } void test_kevent_socket_add_without_ev_add(struct test_context *ctx) { struct kevent kev; /* Try to add a kevent without specifying EV_ADD */ EV_SET(&kev, ctx->client_fd, EVFILT_READ, 0, 0, 0, &ctx->client_fd); if (kevent(ctx->kqfd, &kev, 1, NULL, 0, NULL) == 0) die("kevent should have failed"); kevent_socket_fill(ctx); test_no_kevents(ctx->kqfd); kevent_socket_drain(ctx); /* Try to delete a kevent which does not exist */ kev.flags = EV_DELETE; if (kevent(ctx->kqfd, &kev, 1, NULL, 0, NULL) == 0) die("kevent should have failed"); } void test_kevent_socket_get(struct test_context *ctx) { struct kevent kev, ret; EV_SET(&kev, ctx->client_fd, EVFILT_READ, EV_ADD, 0, 0, &ctx->client_fd); if (kevent(ctx->kqfd, &kev, 1, NULL, 0, NULL) < 0) die("kevent:1"); kevent_socket_fill(ctx); kev.data = 1; kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); kevent_socket_drain(ctx); test_no_kevents(ctx->kqfd); kev.flags = EV_DELETE; if (kevent(ctx->kqfd, &kev, 1, NULL, 0, NULL) < 0) die("kevent:2"); } void test_kevent_socket_clear(struct test_context *ctx) { struct kevent kev, ret; test_no_kevents(ctx->kqfd); kevent_socket_drain(ctx); EV_SET(&kev, ctx->client_fd, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, &ctx->client_fd); if (kevent(ctx->kqfd, &kev, 1, NULL, 0, NULL) < 0) die("kevent1"); kevent_socket_fill(ctx); kevent_socket_fill(ctx); /* Solaris does not offer a way to get the amount of data pending */ #if defined(__sun__) kev.data = 1; #else kev.data = 2; #endif kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); /* We filled twice, but drain once. Edge-triggered would not generate additional events. */ kevent_socket_drain(ctx); test_no_kevents(ctx->kqfd); kevent_socket_drain(ctx); EV_SET(&kev, ctx->client_fd, EVFILT_READ, EV_DELETE, 0, 0, &ctx->client_fd); if (kevent(ctx->kqfd, &kev, 1, NULL, 0, NULL) < 0) die("kevent2"); } void test_kevent_socket_disable_and_enable(struct test_context *ctx) { struct kevent kev, ret; /* Add an event, then disable it. 
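* While the knote is disabled it remains registered but must not fire, so the fill below should produce no event until EV_ENABLE is applied.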
*/ EV_SET(&kev, ctx->client_fd, EVFILT_READ, EV_ADD, 0, 0, &ctx->client_fd); if (kevent(ctx->kqfd, &kev, 1, NULL, 0, NULL) < 0) die("kevent"); EV_SET(&kev, ctx->client_fd, EVFILT_READ, EV_DISABLE, 0, 0, &ctx->client_fd); if (kevent(ctx->kqfd, &kev, 1, NULL, 0, NULL) < 0) die("kevent"); kevent_socket_fill(ctx); test_no_kevents(ctx->kqfd); /* Re-enable the knote, then see if an event is generated */ kev.flags = EV_ENABLE; if (kevent(ctx->kqfd, &kev, 1, NULL, 0, NULL) < 0) die("kevent"); kev.flags = EV_ADD; kev.data = 1; kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); kevent_socket_drain(ctx); kev.flags = EV_DELETE; if (kevent(ctx->kqfd, &kev, 1, NULL, 0, NULL) < 0) die("kevent"); } void test_kevent_socket_del(struct test_context *ctx) { struct kevent kev; EV_SET(&kev, ctx->client_fd, EVFILT_READ, EV_DELETE, 0, 0, &ctx->client_fd); if (kevent(ctx->kqfd, &kev, 1, NULL, 0, NULL) < 0) die("kevent"); kevent_socket_fill(ctx); test_no_kevents(ctx->kqfd); kevent_socket_drain(ctx); } void test_kevent_socket_oneshot(struct test_context *ctx) { struct kevent kev, ret; /* Re-add the watch and make sure no events are pending */ kevent_add(ctx->kqfd, &kev, ctx->client_fd, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, &ctx->client_fd); test_no_kevents(ctx->kqfd); kevent_socket_fill(ctx); kev.data = 1; kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); test_no_kevents(ctx->kqfd); /* Verify that the kernel watch has been deleted */ kevent_socket_fill(ctx); test_no_kevents(ctx->kqfd); kevent_socket_drain(ctx); /* Verify that the kevent structure does not exist. */ kev.flags = EV_DELETE; if (kevent(ctx->kqfd, &kev, 1, NULL, 0, NULL) == 0) die("kevent() should have failed"); } /* * Test if the data field returns 1 when a listen(2) socket has * a pending connection. */ void test_kevent_socket_listen_backlog(struct test_context *ctx) { struct kevent kev, ret; struct sockaddr_in sain; socklen_t sa_len = sizeof(sain); int one = 1; short port; int clnt, srvr; /* Create a passive socket */ memset(&sain, 0, sizeof(sain)); sain.sin_family = AF_INET; sain.sin_port = 0; if ((srvr = socket(PF_INET, SOCK_STREAM, 0)) < 0) err(1, "socket()"); if (setsockopt(srvr, SOL_SOCKET, SO_REUSEADDR, (char *) &one, sizeof(one)) != 0) err(1, "setsockopt()"); if (bind(srvr, (struct sockaddr *) &sain, sa_len) < 0) err(1, "bind-2"); if (getsockname(srvr, (struct sockaddr *) &sain, &sa_len) < 0) err(1, "getsockname-2"); port = ntohs(sain.sin_port); if (listen(srvr, 100) < 0) err(1, "listen()"); /* Watch for events on the socket */ test_no_kevents(ctx->kqfd); kevent_add(ctx->kqfd, &kev, srvr, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, NULL); test_no_kevents(ctx->kqfd); /* Simulate a client connecting to the server */ sain.sin_family = AF_INET; sain.sin_port = htons(port); sain.sin_addr.s_addr = inet_addr("127.0.0.1"); if ((clnt = socket(AF_INET, SOCK_STREAM, 0)) < 0) err(1, "socket()"); if (connect(clnt, (struct sockaddr *) &sain, sa_len) < 0) err(1, "connect()"); /* Verify that data=1 */ kev.data = 1; kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); test_no_kevents(ctx->kqfd); } #ifdef EV_DISPATCH void test_kevent_socket_dispatch(struct test_context *ctx) { struct kevent kev, ret; /* Re-add the watch and make sure no events are pending */ kevent_add(ctx->kqfd, &kev, ctx->client_fd, EVFILT_READ, EV_ADD | EV_DISPATCH, 0, 0, &ctx->client_fd); test_no_kevents(ctx->kqfd); /* The event will occur only once, even though EV_CLEAR is not specified. 
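* EV_DISPATCH disables the knote as soon as the event is delivered, so it must be re-enabled with EV_ENABLE before it can fire again.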
*/ kevent_socket_fill(ctx); kev.data = 1; kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); test_no_kevents(ctx->kqfd); /* Re-enable the kevent */ /* FIXME- is EV_DISPATCH needed when rearming ? */ kevent_add(ctx->kqfd, &kev, ctx->client_fd, EVFILT_READ, EV_ENABLE | EV_DISPATCH, 0, 0, &ctx->client_fd); kev.data = 1; kev.flags = EV_ADD | EV_DISPATCH; /* FIXME: may not be portable */ kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); test_no_kevents(ctx->kqfd); /* Since the knote is disabled, the EV_DELETE operation succeeds. */ kevent_add(ctx->kqfd, &kev, ctx->client_fd, EVFILT_READ, EV_DELETE, 0, 0, &ctx->client_fd); kevent_socket_drain(ctx); } #endif /* EV_DISPATCH */ #if BROKEN_ON_LINUX void test_kevent_socket_lowat(struct test_context *ctx) { struct kevent kev; test_begin(test_id); /* Re-add the watch and make sure no events are pending */ puts("-- re-adding knote, setting low watermark to 2 bytes"); EV_SET(&kev, ctx->client_fd, EVFILT_READ, EV_ADD | EV_ONESHOT, NOTE_LOWAT, 2, &ctx->client_fd); if (kevent(ctx->kqfd, &kev, 1, NULL, 0, NULL) < 0) die("%s", test_id); test_no_kevents(); puts("-- checking that one byte does not trigger an event.."); kevent_socket_fill(ctx); test_no_kevents(); puts("-- checking that two bytes triggers an event.."); kevent_socket_fill(ctx); if (kevent(ctx->kqfd, NULL, 0, &kev, 1, NULL) != 1) die("%s", test_id); KEV_CMP(kev, ctx->client_fd, EVFILT_READ, 0); test_no_kevents(); kevent_socket_drain(ctx); kevent_socket_drain(ctx); } #endif void test_kevent_socket_eof(struct test_context *ctx) { struct kevent kev, ret; /* Re-add the watch and make sure no events are pending */ kevent_add(ctx->kqfd, &kev, ctx->client_fd, EVFILT_READ, EV_ADD, 0, 0, &ctx->client_fd); test_no_kevents(ctx->kqfd); //if (shutdown(ctx->server_fd, SHUT_RDWR) < 0) // die("close(2)"); if (close(ctx->server_fd) < 0) die("close(2)"); kev.flags |= EV_EOF; kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); /* Delete the watch */ kevent_add(ctx->kqfd, &kev, ctx->client_fd, EVFILT_READ, EV_DELETE, 0, 0, &ctx->client_fd); } /* Test if EVFILT_READ works with regular files */ void test_kevent_regular_file(struct test_context *ctx) { struct kevent kev, ret; off_t curpos; int fd; fd = open("/etc/hosts", O_RDONLY); if (fd < 0) abort(); EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, &fd); if (kevent(ctx->kqfd, &kev, 1, NULL, 0, NULL) < 0) die("kevent"); kevent_get(&ret, ctx->kqfd); /* Set file position to EOF-1 */ ret.data--; if ((curpos = lseek(fd, ret.data, SEEK_SET)) != ret.data) { printf("seek to %u failed with rv=%lu\n", (unsigned int) ret.data, (unsigned long) curpos); abort(); } /* Set file position to EOF */ kevent_get(NULL, ctx->kqfd); ret.data = curpos + 1; if ((curpos = lseek(fd, ret.data, SEEK_SET)) != ret.data) { printf("seek to %u failed with rv=%lu\n", (unsigned int) ret.data, (unsigned long) curpos); abort(); } test_no_kevents(ctx->kqfd); kev.flags = EV_DELETE; if (kevent(ctx->kqfd, &kev, 1, NULL, 0, NULL) < 0) die("kevent"); close(fd); } void test_evfilt_read(struct test_context *ctx) { create_socket_connection(&ctx->client_fd, &ctx->server_fd); test(kevent_socket_add, ctx); test(kevent_socket_del, ctx); test(kevent_socket_add_without_ev_add, ctx); test(kevent_socket_get, ctx); test(kevent_socket_disable_and_enable, ctx); test(kevent_socket_oneshot, ctx); test(kevent_socket_clear, ctx); #ifdef EV_DISPATCH test(kevent_socket_dispatch, ctx); #endif test(kevent_socket_listen_backlog, ctx); test(kevent_socket_eof, ctx); test(kevent_regular_file, ctx); close(ctx->client_fd); 
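/* Tear down the socket pair created by create_socket_connection() at the start of this suite. */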
close(ctx->server_fd); } libkqueue-2.3.1/test/signal.c000066400000000000000000000117121342472035000161050ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "common.h" void test_kevent_signal_add(struct test_context *ctx) { struct kevent kev; kevent_add(ctx->kqfd, &kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL); } void test_kevent_signal_get(struct test_context *ctx) { struct kevent kev, ret; kevent_add(ctx->kqfd, &kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL); if (kill(getpid(), SIGUSR1) < 0) die("kill"); kev.flags |= EV_CLEAR; kev.data = 1; kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); } void test_kevent_signal_disable(struct test_context *ctx) { struct kevent kev; kevent_add(ctx->kqfd, &kev, SIGUSR1, EVFILT_SIGNAL, EV_DISABLE, 0, 0, NULL); if (kill(getpid(), SIGUSR1) < 0) die("kill"); test_no_kevents(ctx->kqfd); } void test_kevent_signal_enable(struct test_context *ctx) { struct kevent kev, ret; kevent_add(ctx->kqfd, &kev, SIGUSR1, EVFILT_SIGNAL, EV_ENABLE, 0, 0, NULL); if (kill(getpid(), SIGUSR1) < 0) die("kill"); kev.flags = EV_ADD | EV_CLEAR; #if LIBKQUEUE kev.data = 1; /* WORKAROUND */ #else kev.data = 2; // one extra time from test_kevent_signal_disable() #endif kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); /* Delete the watch */ kev.flags = EV_DELETE; if (kevent(ctx->kqfd, &kev, 1, NULL, 0, NULL) < 0) die("kevent"); } void test_kevent_signal_del(struct test_context *ctx) { struct kevent kev; /* Delete the kevent */ kevent_add(ctx->kqfd, &kev, SIGUSR1, EVFILT_SIGNAL, EV_DELETE, 0, 0, NULL); signal(SIGUSR1, SIG_IGN); if (kill(getpid(), SIGUSR1) < 0) die("kill"); test_no_kevents(ctx->kqfd); } void test_kevent_signal_oneshot(struct test_context *ctx) { struct kevent kev, ret; kevent_add(ctx->kqfd, &kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD | EV_ONESHOT, 0, 0, NULL); if (kill(getpid(), SIGUSR1) < 0) die("kill"); kev.flags |= EV_CLEAR; kev.data = 1; kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); /* Send another one and make sure we get no events */ test_no_kevents(ctx->kqfd); if (kill(getpid(), SIGUSR1) < 0) die("kill"); test_no_kevents(ctx->kqfd); } void test_kevent_signal_modify(struct test_context *ctx) { struct kevent kev, ret; kevent_add(ctx->kqfd, &kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL); kevent_add(ctx->kqfd, &kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, ((void *)-1)); if (kill(getpid(), SIGUSR1) < 0) die("kill"); kev.flags |= EV_CLEAR; kev.data = 1; kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); test_kevent_signal_del(ctx); } #ifdef EV_DISPATCH void test_kevent_signal_dispatch(struct test_context *ctx) { struct kevent kev, ret; test_no_kevents(ctx->kqfd); kevent_add(ctx->kqfd, &kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD | EV_CLEAR | EV_DISPATCH, 0, 0, NULL); /* Get one event */ if (kill(getpid(), SIGUSR1) < 0) die("kill"); 
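/* Exactly one occurrence of the signal should be reported; EV_DISPATCH then leaves the knote disabled until it is re-enabled below. */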
kev.data = 1; kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); /* Confirm that the knote is disabled */ if (kill(getpid(), SIGUSR1) < 0) die("kill"); test_no_kevents(ctx->kqfd); /* Enable the knote and make sure no events are pending */ kevent_add(ctx->kqfd, &kev, SIGUSR1, EVFILT_SIGNAL, EV_ENABLE | EV_DISPATCH, 0, 0, NULL); test_no_kevents(ctx->kqfd); /* Get the next event */ if (kill(getpid(), SIGUSR1) < 0) die("kill"); kev.flags = EV_ADD | EV_CLEAR | EV_DISPATCH; kev.data = 1; kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); /* Remove the knote and ensure the event no longer fires */ kevent_add(ctx->kqfd, &kev, SIGUSR1, EVFILT_SIGNAL, EV_DELETE, 0, 0, NULL); if (kill(getpid(), SIGUSR1) < 0) die("kill"); test_no_kevents(ctx->kqfd); } #endif /* EV_DISPATCH */ void test_evfilt_signal(struct test_context *ctx) { signal(SIGUSR1, SIG_IGN); test(kevent_signal_add, ctx); test(kevent_signal_del, ctx); test(kevent_signal_get, ctx); test(kevent_signal_disable, ctx); test(kevent_signal_enable, ctx); test(kevent_signal_oneshot, ctx); test(kevent_signal_modify, ctx); #ifdef EV_DISPATCH test(kevent_signal_dispatch, ctx); #endif } libkqueue-2.3.1/test/stress/000077500000000000000000000000001342472035000160055ustar00rootroot00000000000000libkqueue-2.3.1/test/stress/Makefile000066400000000000000000000013201342472035000174410ustar00rootroot00000000000000CFLAGS=-I../../include -O0 -g LDADD=-lpthread PROGRAM=stresstest SOURCES=main.c ../timer.c ../user.c ../kevent.c ../read.c ../vnode.c ../test.c all: $(PROGRAM) $(PROGRAM): $(SOURCES) $(CC) -o $(PROGRAM) $(CFLAGS) $(SOURCES) ../../libkqueue.a $(LDADD) check: $(PROGRAM) @echo "ERROR: The stresstest is currently not usable" ; false rm -f core 2>/dev/null ulimit -c 999999 ; ./$(PROGRAM) || true if [ -f core ] ; then gdb ./$(PROGRAM) core ; fi valgrind: $(PROGRAM) valgrind --tool=memcheck --leak-check=full --show-reachable=yes \ --num-callers=20 --track-fds=yes ./$(PROGRAM) clean: rm -f $(PROGRAM) core tags *.o edit: ctags $(SOURCES) $(EDITOR) $(SOURCES) distclean: clean rm -f $(PROGRAM) libkqueue-2.3.1/test/stress/main.c000066400000000000000000000045621342472035000171040ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include "config.h" #include #include #include #include #include #include /* Number of threads to create */ static const int nthreads = 64; /* Number of iterations performed by each thread */ static const int nrounds = 1000000; //pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER; void test_kqueue_conc(void) { int i, fd; for (i = 0; i < 256; i++) { fd = kqueue(); if (i < 0) err(1, "kqueue"); close(fd); } } void * test_harness(void *arg) { int id = (long) arg; int i; int kqfd; kqfd = kqueue(); if (kqfd < 0) err(1, "kqueue"); printf("thread %d runs %d\n", id, id % 4); //test_kqueue_conc(); for (i = 0; i < nrounds; i++) { switch (id % 4) { case 0: test_evfilt_user(kqfd); break; case 1: test_evfilt_read(kqfd); break; case 2: test_evfilt_timer(kqfd); break; case 3: test_evfilt_vnode(kqfd); break; } printf("thread %d round %d / %d\n", id, i, nrounds); } printf("thread %d done\n", id); } int main(int argc, char **argv) { pthread_t tid[nthreads]; long i; for (i=0; i * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #if defined(__linux__) && (defined(__GLIBC__) && !defined(__UCLIBC__)) #include #endif #include #include #include #include "common.h" static int testnum = 1; static int error_flag = 1; /* FIXME: not portable beyond linux */ #ifndef _WIN32 static void error_handler(int signum) { #if defined(__linux__) && (defined(__GLIBC__) && !defined(__UCLIBC__)) void *buf[32]; /* FIXME: the symbols aren't printing */ printf("***** ERROR: Program received signal %d *****\n", signum); backtrace_symbols_fd(buf, sizeof(buf) / sizeof(void *), 2); #else printf("***** ERROR: Program received signal %d *****\n", signum); #endif exit(1); } #endif /* ! 
_WIN32 */ static void testing_atexit(void) { if (error_flag) { printf(" *** TEST FAILED ***\n"); //TODO: print detailed log } else { printf("\n---\n" "+OK All %d tests completed.\n", testnum - 1); } } void test_begin(struct test_context *ctx, const char *func) { if (ctx->cur_test_id) free(ctx->cur_test_id); ctx->cur_test_id = strdup(func); printf("%d: %s\n", testnum++, ctx->cur_test_id); //TODO: redirect stdout/err to logfile } void test_end(struct test_context *ctx) { free(ctx->cur_test_id); ctx->cur_test_id = NULL; } void testing_begin(void) { #ifndef _WIN32 struct sigaction sa; /* Install a signal handler for crashes and hangs */ memset(&sa, 0, sizeof(sa)); sa.sa_handler = error_handler; sigemptyset(&sa.sa_mask); sigaction(SIGSEGV, &sa, NULL); sigaction(SIGABRT, &sa, NULL); sigaction(SIGINT, &sa, NULL); #endif atexit(testing_atexit); } void testing_end(void) { error_flag = 0; } /* Generate a unique ID */ int testing_make_uid(void) { static int id = 0; if (id == INT_MAX) abort(); id++; return (id); } libkqueue-2.3.1/test/timer.c000066400000000000000000000106231342472035000157500ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include "common.h" void test_kevent_timer_add(struct test_context *ctx) { struct kevent kev; kevent_add(ctx->kqfd, &kev, 1, EVFILT_TIMER, EV_ADD, 0, 1000, NULL); } void test_kevent_timer_del(struct test_context *ctx) { struct kevent kev; kevent_add(ctx->kqfd, &kev, 1, EVFILT_TIMER, EV_DELETE, 0, 0, NULL); test_no_kevents(ctx->kqfd); } void test_kevent_timer_get(struct test_context *ctx) { struct kevent kev, ret; kevent_add(ctx->kqfd, &kev, 1, EVFILT_TIMER, EV_ADD, 0, 1000, NULL); kev.flags |= EV_CLEAR; kev.data = 1; kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); kevent_add(ctx->kqfd, &kev, 1, EVFILT_TIMER, EV_DELETE, 0, 0, NULL); } static void test_kevent_timer_oneshot(struct test_context *ctx) { struct kevent kev, ret; test_no_kevents(ctx->kqfd); kevent_add(ctx->kqfd, &kev, 2, EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, 500,NULL); /* Retrieve the event */ kev.flags = EV_ADD | EV_CLEAR | EV_ONESHOT; kev.data = 1; kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); /* Check if the event occurs again */ sleep(3); test_no_kevents(ctx->kqfd); } static void test_kevent_timer_periodic(struct test_context *ctx) { struct kevent kev, ret; test_no_kevents(ctx->kqfd); kevent_add(ctx->kqfd, &kev, 3, EVFILT_TIMER, EV_ADD, 0, 1000,NULL); /* Retrieve the event */ kev.flags = EV_ADD | EV_CLEAR; kev.data = 1; kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); /* Check if the event occurs again */ sleep(1); kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); /* Delete the event */ kev.flags = EV_DELETE; kevent_update(ctx->kqfd, &kev); } static void test_kevent_timer_disable_and_enable(struct test_context *ctx) { struct kevent kev, ret; test_no_kevents(ctx->kqfd); /* Add the watch and immediately disable it */ kevent_add(ctx->kqfd, &kev, 4, EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, 2000,NULL); kev.flags = EV_DISABLE; kevent_update(ctx->kqfd, &kev); test_no_kevents(ctx->kqfd); /* Re-enable and check again */ kev.flags = EV_ENABLE; kevent_update(ctx->kqfd, &kev); kev.flags = EV_ADD | EV_CLEAR | EV_ONESHOT; kev.data = 1; kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); } #ifdef EV_DISPATCH void test_kevent_timer_dispatch(struct test_context *ctx) { struct kevent kev, ret; test_no_kevents(ctx->kqfd); kevent_add(ctx->kqfd, &kev, 4, EVFILT_TIMER, EV_ADD | EV_DISPATCH, 0, 800, NULL); /* Get one event */ kev.flags = EV_ADD | EV_CLEAR | EV_DISPATCH; kev.data = 1; kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); /* Confirm that the knote is disabled */ sleep(1); test_no_kevents(ctx->kqfd); /* Enable the knote and make sure no events are pending */ kevent_add(ctx->kqfd, &kev, 4, EVFILT_TIMER, EV_ENABLE | EV_DISPATCH, 0, 800, NULL); test_no_kevents(ctx->kqfd); /* Get the next event */ sleep(1); kev.flags = EV_ADD | EV_CLEAR | EV_DISPATCH; kev.data = 1; kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); /* Remove the knote and ensure the event no longer fires */ kevent_add(ctx->kqfd, &kev, 4, EVFILT_TIMER, EV_DELETE, 0, 0, NULL); sleep(1); test_no_kevents(ctx->kqfd); } #endif /* EV_DISPATCH */ void test_evfilt_timer(struct test_context *ctx) { test(kevent_timer_add, ctx); test(kevent_timer_del, ctx); test(kevent_timer_get, ctx); test(kevent_timer_oneshot, ctx); test(kevent_timer_periodic, ctx); test(kevent_timer_disable_and_enable, ctx); #ifdef EV_DISPATCH test(kevent_timer_dispatch, ctx); #endif } libkqueue-2.3.1/test/user.c000066400000000000000000000130531342472035000156060ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for 
any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "common.h" static void test_kevent_user_add_and_delete(struct test_context *ctx) { struct kevent kev; kevent_add(ctx->kqfd, &kev, 1, EVFILT_USER, EV_ADD, 0, 0, NULL); test_no_kevents(ctx->kqfd); kevent_add(ctx->kqfd, &kev, 1, EVFILT_USER, EV_DELETE, 0, 0, NULL); test_no_kevents(ctx->kqfd); } static void test_kevent_user_get(struct test_context *ctx) { struct kevent kev, ret; test_no_kevents(ctx->kqfd); /* Add the event, and then trigger it */ kevent_add(ctx->kqfd, &kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL); kevent_add(ctx->kqfd, &kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL); kev.fflags &= ~NOTE_FFCTRLMASK; kev.fflags &= ~NOTE_TRIGGER; kev.flags = EV_CLEAR; kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); test_no_kevents(ctx->kqfd); } static void test_kevent_user_get_hires(struct test_context *ctx) { struct kevent kev, ret; test_no_kevents(ctx->kqfd); /* Add the event, and then trigger it */ kevent_add(ctx->kqfd, &kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL); kevent_add(ctx->kqfd, &kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL); kev.fflags &= ~NOTE_FFCTRLMASK; kev.fflags &= ~NOTE_TRIGGER; kev.flags = EV_CLEAR; kevent_get_hires(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); test_no_kevents(ctx->kqfd); } static void test_kevent_user_disable_and_enable(struct test_context *ctx) { struct kevent kev, ret; test_no_kevents(ctx->kqfd); kevent_add(ctx->kqfd, &kev, 1, EVFILT_USER, EV_ADD, 0, 0, NULL); kevent_add(ctx->kqfd, &kev, 1, EVFILT_USER, EV_DISABLE, 0, 0, NULL); /* Trigger the event, but since it is disabled, nothing will happen. 
*/ kevent_add(ctx->kqfd, &kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL); test_no_kevents(ctx->kqfd); kevent_add(ctx->kqfd, &kev, 1, EVFILT_USER, EV_ENABLE, 0, 0, NULL); kevent_add(ctx->kqfd, &kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL); kev.flags = EV_CLEAR; kev.fflags &= ~NOTE_FFCTRLMASK; kev.fflags &= ~NOTE_TRIGGER; kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); } static void test_kevent_user_oneshot(struct test_context *ctx) { struct kevent kev, ret; test_no_kevents(ctx->kqfd); kevent_add(ctx->kqfd, &kev, 2, EVFILT_USER, EV_ADD | EV_ONESHOT, 0, 0, NULL); kevent_add(ctx->kqfd, &kev, 2, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL); kev.flags = EV_ONESHOT; kev.fflags &= ~NOTE_FFCTRLMASK; kev.fflags &= ~NOTE_TRIGGER; kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); test_no_kevents(ctx->kqfd); } static void test_kevent_user_multi_trigger_merged(struct test_context *ctx) { struct kevent kev, ret; int i; test_no_kevents(ctx->kqfd); kevent_add(ctx->kqfd, &kev, 2, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL); for (i = 0; i < 10; i++) kevent_add(ctx->kqfd, &kev, 2, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL); kev.flags = EV_CLEAR; kev.fflags &= ~NOTE_FFCTRLMASK; kev.fflags &= ~NOTE_TRIGGER; kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); test_no_kevents(ctx->kqfd); } #ifdef EV_DISPATCH void test_kevent_user_dispatch(struct test_context *ctx) { struct kevent kev, ret; test_no_kevents(ctx->kqfd); /* Add the event, and then trigger it */ kevent_add(ctx->kqfd, &kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR | EV_DISPATCH, 0, 0, NULL); kevent_add(ctx->kqfd, &kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL); /* Retrieve one event */ kev.fflags &= ~NOTE_FFCTRLMASK; kev.fflags &= ~NOTE_TRIGGER; kev.flags = EV_CLEAR; kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); /* Confirm that the knote is disabled automatically */ test_no_kevents(ctx->kqfd); /* Re-enable the kevent */ /* FIXME- is EV_DISPATCH needed when rearming ? */ kevent_add(ctx->kqfd, &kev, 1, EVFILT_USER, EV_ENABLE | EV_CLEAR | EV_DISPATCH, 0, 0, NULL); test_no_kevents(ctx->kqfd); /* Trigger the event */ kevent_add(ctx->kqfd, &kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL); kev.fflags &= ~NOTE_FFCTRLMASK; kev.fflags &= ~NOTE_TRIGGER; kev.flags = EV_CLEAR; kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); test_no_kevents(ctx->kqfd); /* Delete the watch */ kevent_add(ctx->kqfd, &kev, 1, EVFILT_USER, EV_DELETE, 0, 0, NULL); test_no_kevents(ctx->kqfd); } #endif /* EV_DISPATCH */ void test_evfilt_user(struct test_context *ctx) { test(kevent_user_add_and_delete, ctx); test(kevent_user_get, ctx); test(kevent_user_get_hires, ctx); test(kevent_user_disable_and_enable, ctx); test(kevent_user_oneshot, ctx); test(kevent_user_multi_trigger_merged, ctx); #ifdef EV_DISPATCH test(kevent_user_dispatch, ctx); #endif /* TODO: try different fflags operations */ } libkqueue-2.3.1/test/vnode.c000066400000000000000000000172771342472035000157570ustar00rootroot00000000000000/* * Copyright (c) 2009 Mark Heily * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "common.h" /* Create an empty file */ static void testfile_create(const char *path) { int fd; if ((fd = open(path, O_CREAT | O_WRONLY, 0600)) < 0) die("open"); close(fd); } static void testfile_touch(const char *path) { char buf[1024]; snprintf(&buf[0], sizeof(buf), "touch %s", path); if (system(buf) != 0) die("system"); } static void testfile_write(const char *path) { char buf[1024]; snprintf(&buf[0], sizeof(buf), "echo hi >> %s", path); if (system(buf) != 0) die("system"); } static void testfile_rename(const char *path, int step) { char buf[1024]; snprintf(&buf[0], sizeof(buf), "%s.tmp", path); /* XXX-FIXME use of 'step' conceals a major memory corruption when the file is renamed twice. To replicate, remove "if step" conditional so two renames occur in this function. */ if (step == 0) { if (rename(path, buf) != 0) err(1,"rename"); } else { if (rename(buf, path) != 0) err(1,"rename"); } } void test_kevent_vnode_add(struct test_context *ctx) { struct kevent kev; testfile_create(ctx->testfile); ctx->vnode_fd = open(ctx->testfile, O_RDWR); if (ctx->vnode_fd < 0) err(1, "open of %s", ctx->testfile); kevent_add(ctx->kqfd, &kev, ctx->vnode_fd, EVFILT_VNODE, EV_ADD, NOTE_WRITE | NOTE_ATTRIB | NOTE_RENAME | NOTE_DELETE, 0, NULL); } void test_kevent_vnode_note_delete(struct test_context *ctx) { struct kevent kev, ret; kevent_add(ctx->kqfd, &kev, ctx->vnode_fd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, NOTE_DELETE, 0, NULL); if (unlink(ctx->testfile) < 0) die("unlink"); kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); } void test_kevent_vnode_note_write(struct test_context *ctx) { struct kevent kev, ret; kevent_add(ctx->kqfd, &kev, ctx->vnode_fd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, NOTE_WRITE, 0, NULL); testfile_write(ctx->testfile); /* BSD kqueue adds NOTE_EXTEND even though it was not requested */ /* BSD kqueue removes EV_ENABLE */ kev.flags &= ~EV_ENABLE; // XXX-FIXME compatibility issue kev.fflags |= NOTE_EXTEND; // XXX-FIXME compatibility issue kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); } void test_kevent_vnode_note_attrib(struct test_context *ctx) { struct kevent kev; int nfds; kevent_add(ctx->kqfd, &kev, ctx->vnode_fd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, NOTE_ATTRIB, 0, NULL); testfile_touch(ctx->testfile); nfds = kevent(ctx->kqfd, NULL, 0, &kev, 1, NULL); if (nfds < 1) die("kevent"); if (kev.ident != ctx->vnode_fd || kev.filter != EVFILT_VNODE || kev.fflags != NOTE_ATTRIB) err(1, "%s - incorrect event (sig=%u; filt=%d; flags=%d)", test_id, (unsigned int)kev.ident, kev.filter, kev.flags); } void test_kevent_vnode_note_rename(struct test_context *ctx) { struct kevent kev; int nfds; kevent_add(ctx->kqfd, &kev, ctx->vnode_fd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, NOTE_RENAME, 0, NULL); testfile_rename(ctx->testfile, 0); nfds = kevent(ctx->kqfd, NULL, 0, &kev, 1, NULL); if (nfds < 1) die("kevent"); if (kev.ident != ctx->vnode_fd || kev.filter != EVFILT_VNODE || kev.fflags != NOTE_RENAME) err(1, "%s - incorrect event (sig=%u; filt=%d; flags=%d)", test_id, (unsigned int)kev.ident, kev.filter, kev.flags); testfile_rename(ctx->testfile, 1); test_no_kevents(ctx->kqfd); } void test_kevent_vnode_del(struct test_context *ctx) { struct kevent kev; kevent_add(ctx->kqfd, 
&kev, ctx->vnode_fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL); } void test_kevent_vnode_disable_and_enable(struct test_context *ctx) { struct kevent kev; int nfds; test_no_kevents(ctx->kqfd); /* Add the watch and immediately disable it */ kevent_add(ctx->kqfd, &kev, ctx->vnode_fd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, NOTE_ATTRIB, 0, NULL); kev.flags = EV_DISABLE; kevent_update(ctx->kqfd, &kev); /* Confirm that the watch is disabled */ testfile_touch(ctx->testfile); test_no_kevents(ctx->kqfd); /* Re-enable and check again */ kev.flags = EV_ENABLE; kevent_update(ctx->kqfd, &kev); testfile_touch(ctx->testfile); nfds = kevent(ctx->kqfd, NULL, 0, &kev, 1, NULL); if (nfds < 1) die("kevent"); if (kev.ident != ctx->vnode_fd || kev.filter != EVFILT_VNODE || kev.fflags != NOTE_ATTRIB) err(1, "%s - incorrect event (sig=%u; filt=%d; flags=%d)", test_id, (unsigned int)kev.ident, kev.filter, kev.flags); } #ifdef EV_DISPATCH void test_kevent_vnode_dispatch(struct test_context *ctx) { struct kevent kev, ret; int nfds; test_no_kevents(ctx->kqfd); kevent_add(ctx->kqfd, &kev, ctx->vnode_fd, EVFILT_VNODE, EV_ADD | EV_DISPATCH, NOTE_ATTRIB, 0, NULL); testfile_touch(ctx->testfile); nfds = kevent(ctx->kqfd, NULL, 0, &kev, 1, NULL); if (nfds < 1) die("kevent"); if (kev.ident != ctx->vnode_fd || kev.filter != EVFILT_VNODE || kev.fflags != NOTE_ATTRIB) err(1, "%s - incorrect event (sig=%u; filt=%d; flags=%d)", test_id, (unsigned int)kev.ident, kev.filter, kev.flags); /* Confirm that the watch is disabled automatically */ testfile_touch(ctx->testfile); test_no_kevents(ctx->kqfd); /* Re-enable the kevent */ /* FIXME- is EV_DISPATCH needed when rearming ? */ kevent_add(ctx->kqfd, &kev, ctx->vnode_fd, EVFILT_VNODE, EV_ENABLE | EV_DISPATCH, 0, 0, NULL); kev.flags = EV_ADD | EV_DISPATCH; /* FIXME: may not be portable */ kev.fflags = NOTE_ATTRIB; testfile_touch(ctx->testfile); kevent_get(&ret, ctx->kqfd); kevent_cmp(&kev, &ret); test_no_kevents(ctx->kqfd); /* Delete the watch */ kevent_add(ctx->kqfd, &kev, ctx->vnode_fd, EVFILT_VNODE, EV_DELETE, NOTE_ATTRIB, 0, NULL); } #endif /* EV_DISPATCH */ void test_evfilt_vnode(struct test_context *ctx) { #if (defined(__sun) && !defined(HAVE_PORT_SOURCE_FILE)) puts("**NOTE** EVFILT_VNODE is not supported on this version of Solaris"); return; #endif char *tmpdir = getenv("TMPDIR"); if (tmpdir == NULL) #ifdef __ANDROID__ tmpdir = "/data/local/tmp"; #else tmpdir = "/tmp"; #endif snprintf(ctx->testfile, sizeof(ctx->testfile), "%s/kqueue-test%d.tmp", tmpdir, testing_make_uid()); test(kevent_vnode_add, ctx); test(kevent_vnode_del, ctx); test(kevent_vnode_disable_and_enable, ctx); #ifdef EV_DISPATCH test(kevent_vnode_dispatch, ctx); #endif test(kevent_vnode_note_write, ctx); test(kevent_vnode_note_attrib, ctx); test(kevent_vnode_note_rename, ctx); test(kevent_vnode_note_delete, ctx); /* TODO: test r590 corner case where a descriptor is closed and the associated knote is automatically freed. */ unlink(ctx->testfile); }