==> aml-0.2.1/.gitignore <==
build*
.ycm_extra_conf.py

==> aml-0.2.1/COPYING <==
Copyright (c) 2020 Andri Yngvason

Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.

==> aml-0.2.1/FUNDING.yml <==
patreon: andriyngvason

==> aml-0.2.1/README.md <==
# Andri's Main Loop

Goals:
 * Portability
 * Utility
 * Simplicity

Non-goals:
 * MS Windows (TM) support
 * Solving the C10K problem

Features:
 * File descriptor event handlers
 * Timers
 * Tickers
 * Signal handlers
 * Idle dispatch callbacks
 * Thread pool
 * Interoperability with other event loops

==> aml-0.2.1/examples/meson.build <==
executable(
    'ticker',
    [
        'ticker.c',
    ],
    dependencies: [
        aml_dep,
        threads,
    ]
)

executable(
    'reader',
    [
        'reader.c',
    ],
    dependencies: [
        aml_dep,
        threads,
    ]
)

executable(
    'nested-ticker',
    [
        'nested-ticker.c',
    ],
    dependencies: [
        aml_dep,
        threads,
    ]
)

==> aml-0.2.1/examples/nested-ticker.c <==
#include <stdio.h>
#include <signal.h>
#include <assert.h>
#include <poll.h>
#include <aml.h>

static int do_exit = 0;

static void on_tick(void* ticker)
{
    int* count_ptr = aml_get_userdata(ticker);
    *count_ptr += 1;

    printf("tick %d!\n", *count_ptr);

    if (*count_ptr >= 10)
        aml_exit(aml_get_default());
}

static void on_sigint(void* sig)
{
    do_exit = 1;
}

int main()
{
    struct aml* aml = aml_new();
    if (!aml)
        return 1;

    aml_set_default(aml);

    int fd = aml_get_fd(aml);
    assert(fd >= 0);

    int count = 0;

    struct aml_signal* sig = aml_signal_new(SIGINT, on_sigint, NULL, NULL);
    if (!sig)
        goto failure;

    aml_start(aml, sig);
    aml_unref(sig);

    struct aml_ticker* ticker = aml_ticker_new(1000, on_tick, &count, NULL);
    if (!ticker)
        goto failure;

    aml_start(aml, ticker);
    aml_unref(ticker);

    struct pollfd pollfd = {
        .fd = fd,
        .events = POLLIN,
    };

    while (!do_exit) {
        aml_poll(aml, 0);
        aml_dispatch(aml);

        int nfds = poll(&pollfd, 1, -1);
        if (nfds != 1)
            continue;
    }

    printf("Exiting...\n");

    aml_unref(aml);
    return 0;

failure:
    aml_unref(aml);
    return 1;
}
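The bundled examples cover tickers, stdin handlers and nesting, but not the
thread pool. The following sketch, a hypothetical examples/work.c that is not
part of the release, shows how aml_work_new() and aml_require_workers() could
be combined; the doubled-integer payload is an arbitrary stand-in for real
work.

==> examples/work.c (illustrative sketch, not part of the release) <==
#include <stdio.h>
#include <aml.h>

struct job {
    int input, result;
};

static void do_work(void* work)
{
    /* Runs on a thread-pool thread. */
    struct job* job = aml_get_userdata(work);
    job->result = job->input * 2;
}

static void on_done(void* work)
{
    /* Runs on the main loop thread after do_work() has returned. */
    struct job* job = aml_get_userdata(work);
    printf("result: %d\n", job->result);
    aml_exit(aml_get_default());
}

int main()
{
    struct aml* aml = aml_new();
    if (!aml)
        return 1;

    aml_set_default(aml);
    aml_require_workers(aml, -1); /* -1: one worker per CPU */

    struct job job = { .input = 21 };
    struct aml_work* work = aml_work_new(do_work, on_done, &job, NULL);
    if (!work)
        goto failure;

    aml_start(aml, work);
    aml_unref(work);

    aml_run(aml);
    aml_unref(aml);
    return 0;

failure:
    aml_unref(aml);
    return 1;
}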
==> aml-0.2.1/examples/reader.c <==
/*
 * Copyright (c) 2020 Andri Yngvason
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <aml.h>

static void on_line(void* handler)
{
    char line[256];

    /* Bound the conversion to the buffer size and bail out if no token
     * could be read; an unbounded "%s" would overflow on long input. */
    if (fscanf(stdin, "%255s", line) != 1)
        return;

    printf("Got line: %s\n", line);

    if (strncmp(line, "exit", sizeof(line)) == 0)
        aml_exit(aml_get_default());
}

static void on_sigint(void* sig)
{
    aml_exit(aml_get_default());
}

int main()
{
    struct aml* aml = aml_new();
    if (!aml)
        return 1;

    aml_set_default(aml);

    struct aml_signal* sig = aml_signal_new(SIGINT, on_sigint, NULL, NULL);
    if (!sig)
        goto failure;

    aml_start(aml, sig);
    aml_unref(sig);

    struct aml_handler* handler = aml_handler_new(fileno(stdin), on_line,
            NULL, NULL);
    if (!handler)
        goto failure;

    aml_start(aml, handler);
    aml_unref(handler);

    aml_run(aml);

    printf("Exiting...\n");

    aml_unref(aml);
    return 0;

failure:
    aml_unref(aml);
    return 1;
}

==> aml-0.2.1/examples/ticker.c <==
#include <stdio.h>
#include <signal.h>
#include <aml.h>

static void on_tick(void* ticker)
{
    int* count_ptr = aml_get_userdata(ticker);
    *count_ptr += 1;

    printf("tick %d!\n", *count_ptr);

    if (*count_ptr >= 10)
        aml_exit(aml_get_default());
}

static void on_sigint(void* sig)
{
    aml_exit(aml_get_default());
}

int main()
{
    struct aml* aml = aml_new();
    if (!aml)
        return 1;

    aml_set_default(aml);

    int count = 0;

    struct aml_signal* sig = aml_signal_new(SIGINT, on_sigint, NULL, NULL);
    if (!sig)
        goto failure;

    aml_start(aml, sig);
    aml_unref(sig);

    struct aml_ticker* ticker = aml_ticker_new(1000, on_tick, &count, NULL);
    if (!ticker)
        goto failure;

    aml_start(aml, ticker);
    aml_unref(ticker);

    aml_run(aml);

    printf("Exiting...\n");

    aml_unref(aml);
    return 0;

failure:
    aml_unref(aml);
    return 1;
}
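No example exercises idle callbacks. A minimal sketch, a hypothetical
examples/idle.c that is not part of the release: idle callbacks fire on every
dispatch, so the loop is driven manually with a zero timeout instead of
blocking in aml_run().

==> examples/idle.c (illustrative sketch, not part of the release) <==
#include <stdio.h>
#include <aml.h>

static void on_idle(void* idle)
{
    int* count = aml_get_userdata(idle);
    *count += 1;
    printf("idle %d\n", *count);
}

int main()
{
    struct aml* aml = aml_new();
    if (!aml)
        return 1;

    int count = 0;
    struct aml_idle* idle = aml_idle_new(on_idle, &count, NULL);
    if (!idle) {
        aml_unref(aml);
        return 1;
    }
    aml_start(aml, idle);

    /* Idle callbacks run on each dispatch, so poll with a zero timeout
     * rather than blocking until an fd, timer or signal event arrives. */
    while (count < 3) {
        aml_poll(aml, 0);
        aml_dispatch(aml);
    }

    aml_stop(aml, idle);
    aml_unref(idle);
    aml_unref(aml);
    return 0;
}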
==> aml-0.2.1/include/aml.h <==
/*
 * Copyright (c) 2020 Andri Yngvason
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#pragma once

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

struct aml;
struct aml_handler;
struct aml_timer;
struct aml_ticker;
struct aml_signal;
struct aml_work;
struct aml_idle;

enum aml_event {
    AML_EVENT_NONE = 0,
    AML_EVENT_READ = 1 << 0,
    AML_EVENT_WRITE = 1 << 1,
};

typedef void (*aml_callback_fn)(void* obj);
typedef void (*aml_free_fn)(void*);

extern const char aml_version[];

/* Create a new main loop instance */
struct aml* aml_new(void);

/* The backend should supply a minimum of n worker threads in its thread pool.
 *
 * If n == -1, the backend should supply as many workers as there are available
 * CPU cores/threads on the system.
 */
int aml_require_workers(struct aml*, int n);

/* Get/set the default main loop instance */
void aml_set_default(struct aml*);
struct aml* aml_get_default(void);

/* Check if there are pending events. The user should call aml_dispatch()
 * afterwards if there are any pending events.
 *
 * This function behaves like poll(): it will wait for either a timeout (in ms)
 * or a signal.
 *
 * Returns: -1 on timeout or signal; otherwise the number of pending events.
 */
int aml_poll(struct aml*, int timeout);

/* This is a convenience function that calls aml_poll() and aml_dispatch() in
 * a loop until aml_exit() is called.
 */
int aml_run(struct aml*);

/* Instruct the main loop to exit. */
void aml_exit(struct aml*);

/* Dispatch pending events */
void aml_dispatch(struct aml* self);

/* Trigger an immediate return from aml_poll(). */
void aml_interrupt(struct aml*);

/* Increment the reference count by one.
 *
 * Returns how many references there were BEFORE the call.
 */
int aml_ref(void* obj);

/* Decrement the reference count by one.
 *
 * Returns how many references there are AFTER the call.
 */
int aml_unref(void* obj);

/* Get the global object id.
 *
 * This can be used to break reference loops.
 *
 * Returns an id that can be used to access the object using aml_try_ref().
 */
unsigned long long aml_get_id(const void* obj);

/* Try to reference an object with an id returned by aml_get_id().
 *
 * This increments the reference count by one.
 *
 * Returns the aml object if found; otherwise NULL.
 */
void* aml_try_ref(unsigned long long id);

/* The following calls create event handler objects.
 *
 * An object will have a reference count of 1 upon creation and must be freed
 * using aml_unref().
 */
struct aml_handler* aml_handler_new(int fd, aml_callback_fn, void* userdata,
        aml_free_fn);
struct aml_timer* aml_timer_new(uint32_t timeout, aml_callback_fn,
        void* userdata, aml_free_fn);
struct aml_ticker* aml_ticker_new(uint32_t period, aml_callback_fn,
        void* userdata, aml_free_fn);
struct aml_signal* aml_signal_new(int signo, aml_callback_fn, void* userdata,
        aml_free_fn);
struct aml_work* aml_work_new(aml_callback_fn work_fn, aml_callback_fn done_fn,
        void* userdata, aml_free_fn);
struct aml_idle* aml_idle_new(aml_callback_fn done_fn, void* userdata,
        aml_free_fn);

/* Get the file descriptor associated with either a handler or the main loop.
 *
 * Calling this on objects of other types is illegal and may cause SIGABRT to
 * be raised.
 *
 * The fd returned from the main loop object can be used in other main loops to
 * monitor events on an aml main loop.
 */
int aml_get_fd(const void* obj);

/* Associate arbitrary data with an object.
 *
 * If a free function is defined, it will be called to free the assigned
 * userdata when the object is freed as a result of aml_unref().
 */
void aml_set_userdata(void* obj, void* userdata, aml_free_fn);
void* aml_get_userdata(const void* obj);

void aml_set_event_mask(struct aml_handler* obj, enum aml_event mask);
enum aml_event aml_get_event_mask(const struct aml_handler* obj);

/* Check which events are pending on an fd event handler. */
enum aml_event aml_get_revents(const struct aml_handler* obj);

/* Set the timeout/period of a timer/ticker.
 *
 * Calling this on a started timer/ticker yields undefined behaviour.
 */
void aml_set_duration(void* obj, uint32_t value);

/* Start an event handler.
 *
 * This increases the reference count on the handler object.
 *
 * Returns: 0 on success, -1 if the handler is already started.
 */
int aml_start(struct aml*, void* obj);

/* Stop an event handler.
 *
 * This decreases the reference count on a handler object.
 *
 * Returns: 0 on success, -1 if the handler is already stopped.
 */
int aml_stop(struct aml*, void* obj);

/* Check if an event handler is started.
 *
 * Returns: true if it has been started, false otherwise.
 */
bool aml_is_started(struct aml*, void* obj);

/* Get the signal assigned to a signal handler. */
int aml_get_signo(const struct aml_signal* sig);
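The id/try-ref pair above is the escape hatch for reference cycles: store the
id instead of a counted pointer, then look the object up on demand. A minimal
sketch of the intended round trip (the function name is illustrative):

#include <assert.h>
#include <aml.h>

static void remember_and_recover(struct aml_timer* timer)
{
    /* Store the id instead of a pointer to avoid a reference cycle. */
    unsigned long long id = aml_get_id(timer);

    /* Later: the lookup returns NULL if the object has been freed. */
    struct aml_timer* ref = aml_try_ref(id);
    if (ref) {
        assert(ref == timer);
        aml_unref(ref); /* aml_try_ref() took a reference. */
    }
}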
==> aml-0.2.1/include/backend.h <==
/*
 * Copyright (c) 2020 Andri Yngvason
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#pragma once

#include <stdint.h>

struct aml;
struct aml_handler;
struct aml_signal;
struct aml_work;

typedef void (*aml_callback_fn)(void* obj);

enum {
    AML_BACKEND_EDGE_TRIGGERED = 1 << 0,
};

struct aml_backend {
    uint32_t flags;
    uint32_t clock;
    void* (*new_state)(struct aml*);
    void (*del_state)(void* state);
    int (*get_fd)(const void* state);
    int (*poll)(void* state, int timeout);
    void (*exit)(void* state);
    int (*add_fd)(void* state, struct aml_handler*);
    int (*mod_fd)(void* state, struct aml_handler*);
    int (*del_fd)(void* state, struct aml_handler*);
    int (*add_signal)(void* state, struct aml_signal*);
    int (*del_signal)(void* state, struct aml_signal*);
    int (*set_deadline)(void* state, uint64_t deadline);
    void (*post_dispatch)(void* state);
    void (*interrupt)(void* state);
    int (*thread_pool_acquire)(struct aml*, int n_threads);
    void (*thread_pool_release)(struct aml*);
    int (*thread_pool_enqueue)(struct aml*, struct aml_work*);
};

/* These are for setting arbitrary data required by the backend
 * implementation.
 *
 * The backend implementation shall NOT use aml_set_userdata() or
 * aml_get_userdata().
 */
void aml_set_backend_data(void* ptr, void* data);
void* aml_get_backend_data(const void* ptr);

void* aml_get_backend_state(const struct aml*);

/* Get the work function pointer assigned to a work object. */
aml_callback_fn aml_get_work_fn(const struct aml_work*);

/* revents is only used for fd events; zero otherwise.
 *
 * This function may be called inside a signal handler.
 */
void aml_emit(struct aml* self, void* obj, uint32_t revents);

/* Get the time in milliseconds until the next timeout event.
 *
 * If timeout is -1, this returns:
 *  -1 if no event is pending
 *   0 if a timer has already expired
 *   the time until the next event, otherwise
 *
 * Otherwise, if timeout is less than the time until the next event, timeout
 * is returned; if it is greater, then the time until the next event is
 * returned.
 */
int aml_get_next_timeout(struct aml* self, int timeout);
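For orientation, here is a poll()-shaped backend stub wired up the way the
in-tree src/epoll.c and src/kqueue.c backends plug in: the loop never sleeps
past aml_get_next_timeout() and reports readiness through aml_emit(). All
names prefixed my_ are illustrative; this is a non-functional skeleton, not a
working backend.

#include <stdlib.h>
#include <time.h>

#include "backend.h"

struct my_state {
    struct aml* aml;
    /* fd and signal registries omitted */
};

static void* my_new_state(struct aml* aml)
{
    struct my_state* state = calloc(1, sizeof(*state));
    if (state)
        state->aml = aml;
    return state;
}

static void my_del_state(void* state)
{
    free(state);
}

static int my_poll(void* ptr, int timeout)
{
    struct my_state* state = ptr;

    /* Never sleep past the earliest timer deadline. */
    timeout = aml_get_next_timeout(state->aml, timeout);

    /* A real backend would block on its registered fds here for at most
     * `timeout` ms, then call aml_emit(state->aml, handler, revents) for
     * each ready fd and return the number of events. */
    (void)timeout;
    return 0;
}

/* src/aml.c copies this symbol into each loop instance. */
struct aml_backend implementation = {
    .clock = CLOCK_MONOTONIC,
    .new_state = my_new_state,
    .del_state = my_del_state,
    .poll = my_poll,
    /* .add_fd, .mod_fd, .del_fd, .add_signal, .del_signal, .set_deadline
     * and friends are required by src/aml.c but omitted from this stub. */
};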
==> aml-0.2.1/include/sys/queue.h <==
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)queue.h	8.5 (Berkeley) 8/20/94
 * $FreeBSD$
 */

#ifndef _SYS_QUEUE_H_
#define _SYS_QUEUE_H_

#include <sys/cdefs.h>

/*
 * This file defines four types of data structures: singly-linked lists,
 * singly-linked tail queues, lists and tail queues.
 *
 * A singly-linked list is headed by a single forward pointer. The elements
 * are singly linked for minimum space and pointer manipulation overhead at
 * the expense of O(n) removal for arbitrary elements. New elements can be
 * added to the list after an existing element or at the head of the list.
 * Elements being removed from the head of the list should use the explicit
 * macro for this purpose for optimum efficiency. A singly-linked list may
 * only be traversed in the forward direction. Singly-linked lists are ideal
 * for applications with large datasets and few or no removals or for
 * implementing a LIFO queue.
 *
 * A singly-linked tail queue is headed by a pair of pointers, one to the
 * head of the list and the other to the tail of the list. The elements are
 * singly linked for minimum space and pointer manipulation overhead at the
 * expense of O(n) removal for arbitrary elements. New elements can be added
 * to the list after an existing element, at the head of the list, or at the
 * end of the list. Elements being removed from the head of the tail queue
 * should use the explicit macro for this purpose for optimum efficiency.
 * A singly-linked tail queue may only be traversed in the forward direction.
 * Singly-linked tail queues are ideal for applications with large datasets
 * and few or no removals or for implementing a FIFO queue.
 *
 * A list is headed by a single forward pointer (or an array of forward
 * pointers for a hash table header). The elements are doubly linked
 * so that an arbitrary element can be removed without a need to
 * traverse the list. New elements can be added to the list before
 * or after an existing element or at the head of the list. A list
 * may be traversed in either direction.
 *
 * A tail queue is headed by a pair of pointers, one to the head of the
 * list and the other to the tail of the list. The elements are doubly
 * linked so that an arbitrary element can be removed without a need to
 * traverse the list. New elements can be added to the list before or
 * after an existing element, at the head of the list, or at the end of
 * the list. A tail queue may be traversed in either direction.
 *
 * For details on the use of these macros, see the queue(3) manual page.
 *
 * Below is a summary of implemented functions where:
 *  + means the macro is available
 *  - means the macro is not available
 *  s means the macro is available but is slow (runs in O(n) time)
 *
 *				SLIST	LIST	STAILQ	TAILQ
 * _HEAD			+	+	+	+
 * _CLASS_HEAD			+	+	+	+
 * _HEAD_INITIALIZER		+	+	+	+
 * _ENTRY			+	+	+	+
 * _CLASS_ENTRY			+	+	+	+
 * _INIT			+	+	+	+
 * _EMPTY			+	+	+	+
 * _FIRST			+	+	+	+
 * _NEXT			+	+	+	+
 * _PREV			-	+	-	+
 * _LAST			-	-	+	+
 * _LAST_FAST			-	-	-	+
 * _FOREACH			+	+	+	+
 * _FOREACH_FROM		+	+	+	+
 * _FOREACH_SAFE		+	+	+	+
 * _FOREACH_FROM_SAFE		+	+	+	+
 * _FOREACH_REVERSE		-	-	-	+
 * _FOREACH_REVERSE_FROM	-	-	-	+
 * _FOREACH_REVERSE_SAFE	-	-	-	+
 * _FOREACH_REVERSE_FROM_SAFE	-	-	-	+
 * _INSERT_HEAD			+	+	+	+
 * _INSERT_BEFORE		-	+	-	+
 * _INSERT_AFTER		+	+	+	+
 * _INSERT_TAIL			-	-	+	+
 * _CONCAT			s	s	+	+
 * _REMOVE_AFTER		+	-	+	-
 * _REMOVE_HEAD			+	-	+	-
 * _REMOVE			s	+	s	+
 * _SWAP			+	+	+	+
 */
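/*
 * Illustrative usage sketch (not part of the original header): a LIST with
 * an intrusive entry, the pattern used throughout src/aml.c.
 *
 *	struct item {
 *		int value;
 *		LIST_ENTRY(item) link;
 *	};
 *	LIST_HEAD(item_list, item);
 *
 *	struct item_list list = LIST_HEAD_INITIALIZER(list);
 *	struct item a = { .value = 42 };
 *	LIST_INSERT_HEAD(&list, &a, link);
 *
 *	struct item* it;
 *	LIST_FOREACH(it, &list, link)
 *		printf("%d\n", it->value);
 */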
#ifdef QUEUE_MACRO_DEBUG
#warn Use QUEUE_MACRO_DEBUG_TRACE and/or QUEUE_MACRO_DEBUG_TRASH
#define QUEUE_MACRO_DEBUG_TRACE
#define QUEUE_MACRO_DEBUG_TRASH
#endif

#ifdef QUEUE_MACRO_DEBUG_TRACE
/* Store the last 2 places the queue element or head was altered */
struct qm_trace {
	unsigned long lastline;
	unsigned long prevline;
	const char *lastfile;
	const char *prevfile;
};

#define TRACEBUF	struct qm_trace trace;
#define TRACEBUF_INITIALIZER	{ __LINE__, 0, __FILE__, NULL } ,

#define QMD_TRACE_HEAD(head) do {				\
	(head)->trace.prevline = (head)->trace.lastline;	\
	(head)->trace.prevfile = (head)->trace.lastfile;	\
	(head)->trace.lastline = __LINE__;			\
	(head)->trace.lastfile = __FILE__;			\
} while (0)

#define QMD_TRACE_ELEM(elem) do {				\
	(elem)->trace.prevline = (elem)->trace.lastline;	\
	(elem)->trace.prevfile = (elem)->trace.lastfile;	\
	(elem)->trace.lastline = __LINE__;			\
	(elem)->trace.lastfile = __FILE__;			\
} while (0)

#else /* !QUEUE_MACRO_DEBUG_TRACE */
#define QMD_TRACE_ELEM(elem)
#define QMD_TRACE_HEAD(head)
#define TRACEBUF
#define TRACEBUF_INITIALIZER
#endif /* QUEUE_MACRO_DEBUG_TRACE */

#ifdef QUEUE_MACRO_DEBUG_TRASH
#define QMD_SAVELINK(name, link)	void **name = (void *)&(link)
#define TRASHIT(x)	do {(x) = (void *)-1;} while (0)
#define QMD_IS_TRASHED(x)	((x) == (void *)(intptr_t)-1)
#else /* !QUEUE_MACRO_DEBUG_TRASH */
#define
QMD_SAVELINK(name, link) #define TRASHIT(x) #define QMD_IS_TRASHED(x) 0 #endif /* QUEUE_MACRO_DEBUG_TRASH */ #ifdef __cplusplus /* * In C++ there can be structure lists and class lists: */ #define QUEUE_TYPEOF(type) type #else #define QUEUE_TYPEOF(type) struct type #endif /* * Singly-linked List declarations. */ #define SLIST_HEAD(name, type) \ struct name { \ struct type *slh_first; /* first element */ \ } #define SLIST_CLASS_HEAD(name, type) \ struct name { \ class type *slh_first; /* first element */ \ } #define SLIST_HEAD_INITIALIZER(head) \ { NULL } #define SLIST_ENTRY(type) \ struct { \ struct type *sle_next; /* next element */ \ } #define SLIST_CLASS_ENTRY(type) \ struct { \ class type *sle_next; /* next element */ \ } /* * Singly-linked List functions. */ #if (defined(_KERNEL) && defined(INVARIANTS)) #define QMD_SLIST_CHECK_PREVPTR(prevp, elm) do { \ if (*(prevp) != (elm)) \ panic("Bad prevptr *(%p) == %p != %p", \ (prevp), *(prevp), (elm)); \ } while (0) #else #define QMD_SLIST_CHECK_PREVPTR(prevp, elm) #endif #define SLIST_CONCAT(head1, head2, type, field) do { \ QUEUE_TYPEOF(type) *curelm = SLIST_FIRST(head1); \ if (curelm == NULL) { \ if ((SLIST_FIRST(head1) = SLIST_FIRST(head2)) != NULL) \ SLIST_INIT(head2); \ } else if (SLIST_FIRST(head2) != NULL) { \ while (SLIST_NEXT(curelm, field) != NULL) \ curelm = SLIST_NEXT(curelm, field); \ SLIST_NEXT(curelm, field) = SLIST_FIRST(head2); \ SLIST_INIT(head2); \ } \ } while (0) #define SLIST_EMPTY(head) ((head)->slh_first == NULL) #define SLIST_FIRST(head) ((head)->slh_first) #define SLIST_FOREACH(var, head, field) \ for ((var) = SLIST_FIRST((head)); \ (var); \ (var) = SLIST_NEXT((var), field)) #define SLIST_FOREACH_FROM(var, head, field) \ for ((var) = ((var) ? (var) : SLIST_FIRST((head))); \ (var); \ (var) = SLIST_NEXT((var), field)) #define SLIST_FOREACH_SAFE(var, head, field, tvar) \ for ((var) = SLIST_FIRST((head)); \ (var) && ((tvar) = SLIST_NEXT((var), field), 1); \ (var) = (tvar)) #define SLIST_FOREACH_FROM_SAFE(var, head, field, tvar) \ for ((var) = ((var) ? 
(var) : SLIST_FIRST((head))); \ (var) && ((tvar) = SLIST_NEXT((var), field), 1); \ (var) = (tvar)) #define SLIST_FOREACH_PREVPTR(var, varp, head, field) \ for ((varp) = &SLIST_FIRST((head)); \ ((var) = *(varp)) != NULL; \ (varp) = &SLIST_NEXT((var), field)) #define SLIST_INIT(head) do { \ SLIST_FIRST((head)) = NULL; \ } while (0) #define SLIST_INSERT_AFTER(slistelm, elm, field) do { \ SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \ SLIST_NEXT((slistelm), field) = (elm); \ } while (0) #define SLIST_INSERT_HEAD(head, elm, field) do { \ SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \ SLIST_FIRST((head)) = (elm); \ } while (0) #define SLIST_NEXT(elm, field) ((elm)->field.sle_next) #define SLIST_REMOVE(head, elm, type, field) do { \ QMD_SAVELINK(oldnext, (elm)->field.sle_next); \ if (SLIST_FIRST((head)) == (elm)) { \ SLIST_REMOVE_HEAD((head), field); \ } \ else { \ QUEUE_TYPEOF(type) *curelm = SLIST_FIRST(head); \ while (SLIST_NEXT(curelm, field) != (elm)) \ curelm = SLIST_NEXT(curelm, field); \ SLIST_REMOVE_AFTER(curelm, field); \ } \ TRASHIT(*oldnext); \ } while (0) #define SLIST_REMOVE_AFTER(elm, field) do { \ SLIST_NEXT(elm, field) = \ SLIST_NEXT(SLIST_NEXT(elm, field), field); \ } while (0) #define SLIST_REMOVE_HEAD(head, field) do { \ SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \ } while (0) #define SLIST_REMOVE_PREVPTR(prevp, elm, field) do { \ QMD_SLIST_CHECK_PREVPTR(prevp, elm); \ *(prevp) = SLIST_NEXT(elm, field); \ TRASHIT((elm)->field.sle_next); \ } while (0) #define SLIST_SWAP(head1, head2, type) do { \ QUEUE_TYPEOF(type) *swap_first = SLIST_FIRST(head1); \ SLIST_FIRST(head1) = SLIST_FIRST(head2); \ SLIST_FIRST(head2) = swap_first; \ } while (0) /* * Singly-linked Tail queue declarations. */ #define STAILQ_HEAD(name, type) \ struct name { \ struct type *stqh_first;/* first element */ \ struct type **stqh_last;/* addr of last next element */ \ } #define STAILQ_CLASS_HEAD(name, type) \ struct name { \ class type *stqh_first; /* first element */ \ class type **stqh_last; /* addr of last next element */ \ } #define STAILQ_HEAD_INITIALIZER(head) \ { NULL, &(head).stqh_first } #define STAILQ_ENTRY(type) \ struct { \ struct type *stqe_next; /* next element */ \ } #define STAILQ_CLASS_ENTRY(type) \ struct { \ class type *stqe_next; /* next element */ \ } /* * Singly-linked Tail queue functions. */ #define STAILQ_CONCAT(head1, head2) do { \ if (!STAILQ_EMPTY((head2))) { \ *(head1)->stqh_last = (head2)->stqh_first; \ (head1)->stqh_last = (head2)->stqh_last; \ STAILQ_INIT((head2)); \ } \ } while (0) #define STAILQ_EMPTY(head) ((head)->stqh_first == NULL) #define STAILQ_FIRST(head) ((head)->stqh_first) #define STAILQ_FOREACH(var, head, field) \ for((var) = STAILQ_FIRST((head)); \ (var); \ (var) = STAILQ_NEXT((var), field)) #define STAILQ_FOREACH_FROM(var, head, field) \ for ((var) = ((var) ? (var) : STAILQ_FIRST((head))); \ (var); \ (var) = STAILQ_NEXT((var), field)) #define STAILQ_FOREACH_SAFE(var, head, field, tvar) \ for ((var) = STAILQ_FIRST((head)); \ (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \ (var) = (tvar)) #define STAILQ_FOREACH_FROM_SAFE(var, head, field, tvar) \ for ((var) = ((var) ? 
(var) : STAILQ_FIRST((head))); \ (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \ (var) = (tvar)) #define STAILQ_INIT(head) do { \ STAILQ_FIRST((head)) = NULL; \ (head)->stqh_last = &STAILQ_FIRST((head)); \ } while (0) #define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \ if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\ (head)->stqh_last = &STAILQ_NEXT((elm), field); \ STAILQ_NEXT((tqelm), field) = (elm); \ } while (0) #define STAILQ_INSERT_HEAD(head, elm, field) do { \ if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \ (head)->stqh_last = &STAILQ_NEXT((elm), field); \ STAILQ_FIRST((head)) = (elm); \ } while (0) #define STAILQ_INSERT_TAIL(head, elm, field) do { \ STAILQ_NEXT((elm), field) = NULL; \ *(head)->stqh_last = (elm); \ (head)->stqh_last = &STAILQ_NEXT((elm), field); \ } while (0) #define STAILQ_LAST(head, type, field) \ (STAILQ_EMPTY((head)) ? NULL : \ __containerof((head)->stqh_last, \ QUEUE_TYPEOF(type), field.stqe_next)) #define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next) #define STAILQ_REMOVE(head, elm, type, field) do { \ QMD_SAVELINK(oldnext, (elm)->field.stqe_next); \ if (STAILQ_FIRST((head)) == (elm)) { \ STAILQ_REMOVE_HEAD((head), field); \ } \ else { \ QUEUE_TYPEOF(type) *curelm = STAILQ_FIRST(head); \ while (STAILQ_NEXT(curelm, field) != (elm)) \ curelm = STAILQ_NEXT(curelm, field); \ STAILQ_REMOVE_AFTER(head, curelm, field); \ } \ TRASHIT(*oldnext); \ } while (0) #define STAILQ_REMOVE_AFTER(head, elm, field) do { \ if ((STAILQ_NEXT(elm, field) = \ STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL) \ (head)->stqh_last = &STAILQ_NEXT((elm), field); \ } while (0) #define STAILQ_REMOVE_HEAD(head, field) do { \ if ((STAILQ_FIRST((head)) = \ STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \ (head)->stqh_last = &STAILQ_FIRST((head)); \ } while (0) #define STAILQ_SWAP(head1, head2, type) do { \ QUEUE_TYPEOF(type) *swap_first = STAILQ_FIRST(head1); \ QUEUE_TYPEOF(type) **swap_last = (head1)->stqh_last; \ STAILQ_FIRST(head1) = STAILQ_FIRST(head2); \ (head1)->stqh_last = (head2)->stqh_last; \ STAILQ_FIRST(head2) = swap_first; \ (head2)->stqh_last = swap_last; \ if (STAILQ_EMPTY(head1)) \ (head1)->stqh_last = &STAILQ_FIRST(head1); \ if (STAILQ_EMPTY(head2)) \ (head2)->stqh_last = &STAILQ_FIRST(head2); \ } while (0) /* * List declarations. */ #define LIST_HEAD(name, type) \ struct name { \ struct type *lh_first; /* first element */ \ } #define LIST_CLASS_HEAD(name, type) \ struct name { \ class type *lh_first; /* first element */ \ } #define LIST_HEAD_INITIALIZER(head) \ { NULL } #define LIST_ENTRY(type) \ struct { \ struct type *le_next; /* next element */ \ struct type **le_prev; /* address of previous next element */ \ } #define LIST_CLASS_ENTRY(type) \ struct { \ class type *le_next; /* next element */ \ class type **le_prev; /* address of previous next element */ \ } /* * List functions. */ #if (defined(_KERNEL) && defined(INVARIANTS)) /* * QMD_LIST_CHECK_HEAD(LIST_HEAD *head, LIST_ENTRY NAME) * * If the list is non-empty, validates that the first element of the list * points back at 'head.' */ #define QMD_LIST_CHECK_HEAD(head, field) do { \ if (LIST_FIRST((head)) != NULL && \ LIST_FIRST((head))->field.le_prev != \ &LIST_FIRST((head))) \ panic("Bad list head %p first->prev != head", (head)); \ } while (0) /* * QMD_LIST_CHECK_NEXT(TYPE *elm, LIST_ENTRY NAME) * * If an element follows 'elm' in the list, validates that the next element * points back at 'elm.' 
*/ #define QMD_LIST_CHECK_NEXT(elm, field) do { \ if (LIST_NEXT((elm), field) != NULL && \ LIST_NEXT((elm), field)->field.le_prev != \ &((elm)->field.le_next)) \ panic("Bad link elm %p next->prev != elm", (elm)); \ } while (0) /* * QMD_LIST_CHECK_PREV(TYPE *elm, LIST_ENTRY NAME) * * Validates that the previous element (or head of the list) points to 'elm.' */ #define QMD_LIST_CHECK_PREV(elm, field) do { \ if (*(elm)->field.le_prev != (elm)) \ panic("Bad link elm %p prev->next != elm", (elm)); \ } while (0) #else #define QMD_LIST_CHECK_HEAD(head, field) #define QMD_LIST_CHECK_NEXT(elm, field) #define QMD_LIST_CHECK_PREV(elm, field) #endif /* (_KERNEL && INVARIANTS) */ #define LIST_CONCAT(head1, head2, type, field) do { \ QUEUE_TYPEOF(type) *curelm = LIST_FIRST(head1); \ if (curelm == NULL) { \ if ((LIST_FIRST(head1) = LIST_FIRST(head2)) != NULL) { \ LIST_FIRST(head2)->field.le_prev = \ &LIST_FIRST((head1)); \ LIST_INIT(head2); \ } \ } else if (LIST_FIRST(head2) != NULL) { \ while (LIST_NEXT(curelm, field) != NULL) \ curelm = LIST_NEXT(curelm, field); \ LIST_NEXT(curelm, field) = LIST_FIRST(head2); \ LIST_FIRST(head2)->field.le_prev = &LIST_NEXT(curelm, field); \ LIST_INIT(head2); \ } \ } while (0) #define LIST_EMPTY(head) ((head)->lh_first == NULL) #define LIST_FIRST(head) ((head)->lh_first) #define LIST_FOREACH(var, head, field) \ for ((var) = LIST_FIRST((head)); \ (var); \ (var) = LIST_NEXT((var), field)) #define LIST_FOREACH_FROM(var, head, field) \ for ((var) = ((var) ? (var) : LIST_FIRST((head))); \ (var); \ (var) = LIST_NEXT((var), field)) #define LIST_FOREACH_SAFE(var, head, field, tvar) \ for ((var) = LIST_FIRST((head)); \ (var) && ((tvar) = LIST_NEXT((var), field), 1); \ (var) = (tvar)) #define LIST_FOREACH_FROM_SAFE(var, head, field, tvar) \ for ((var) = ((var) ? (var) : LIST_FIRST((head))); \ (var) && ((tvar) = LIST_NEXT((var), field), 1); \ (var) = (tvar)) #define LIST_INIT(head) do { \ LIST_FIRST((head)) = NULL; \ } while (0) #define LIST_INSERT_AFTER(listelm, elm, field) do { \ QMD_LIST_CHECK_NEXT(listelm, field); \ if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\ LIST_NEXT((listelm), field)->field.le_prev = \ &LIST_NEXT((elm), field); \ LIST_NEXT((listelm), field) = (elm); \ (elm)->field.le_prev = &LIST_NEXT((listelm), field); \ } while (0) #define LIST_INSERT_BEFORE(listelm, elm, field) do { \ QMD_LIST_CHECK_PREV(listelm, field); \ (elm)->field.le_prev = (listelm)->field.le_prev; \ LIST_NEXT((elm), field) = (listelm); \ *(listelm)->field.le_prev = (elm); \ (listelm)->field.le_prev = &LIST_NEXT((elm), field); \ } while (0) #define LIST_INSERT_HEAD(head, elm, field) do { \ QMD_LIST_CHECK_HEAD((head), field); \ if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \ LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\ LIST_FIRST((head)) = (elm); \ (elm)->field.le_prev = &LIST_FIRST((head)); \ } while (0) #define LIST_NEXT(elm, field) ((elm)->field.le_next) #define LIST_PREV(elm, head, type, field) \ ((elm)->field.le_prev == &LIST_FIRST((head)) ? 
NULL : \ __containerof((elm)->field.le_prev, \ QUEUE_TYPEOF(type), field.le_next)) #define LIST_REMOVE(elm, field) do { \ QMD_SAVELINK(oldnext, (elm)->field.le_next); \ QMD_SAVELINK(oldprev, (elm)->field.le_prev); \ QMD_LIST_CHECK_NEXT(elm, field); \ QMD_LIST_CHECK_PREV(elm, field); \ if (LIST_NEXT((elm), field) != NULL) \ LIST_NEXT((elm), field)->field.le_prev = \ (elm)->field.le_prev; \ *(elm)->field.le_prev = LIST_NEXT((elm), field); \ TRASHIT(*oldnext); \ TRASHIT(*oldprev); \ } while (0) #define LIST_SWAP(head1, head2, type, field) do { \ QUEUE_TYPEOF(type) *swap_tmp = LIST_FIRST(head1); \ LIST_FIRST((head1)) = LIST_FIRST((head2)); \ LIST_FIRST((head2)) = swap_tmp; \ if ((swap_tmp = LIST_FIRST((head1))) != NULL) \ swap_tmp->field.le_prev = &LIST_FIRST((head1)); \ if ((swap_tmp = LIST_FIRST((head2))) != NULL) \ swap_tmp->field.le_prev = &LIST_FIRST((head2)); \ } while (0) /* * Tail queue declarations. */ #define TAILQ_HEAD(name, type) \ struct name { \ struct type *tqh_first; /* first element */ \ struct type **tqh_last; /* addr of last next element */ \ TRACEBUF \ } #define TAILQ_CLASS_HEAD(name, type) \ struct name { \ class type *tqh_first; /* first element */ \ class type **tqh_last; /* addr of last next element */ \ TRACEBUF \ } #define TAILQ_HEAD_INITIALIZER(head) \ { NULL, &(head).tqh_first, TRACEBUF_INITIALIZER } #define TAILQ_ENTRY(type) \ struct { \ struct type *tqe_next; /* next element */ \ struct type **tqe_prev; /* address of previous next element */ \ TRACEBUF \ } #define TAILQ_CLASS_ENTRY(type) \ struct { \ class type *tqe_next; /* next element */ \ class type **tqe_prev; /* address of previous next element */ \ TRACEBUF \ } /* * Tail queue functions. */ #if (defined(_KERNEL) && defined(INVARIANTS)) /* * QMD_TAILQ_CHECK_HEAD(TAILQ_HEAD *head, TAILQ_ENTRY NAME) * * If the tailq is non-empty, validates that the first element of the tailq * points back at 'head.' */ #define QMD_TAILQ_CHECK_HEAD(head, field) do { \ if (!TAILQ_EMPTY(head) && \ TAILQ_FIRST((head))->field.tqe_prev != \ &TAILQ_FIRST((head))) \ panic("Bad tailq head %p first->prev != head", (head)); \ } while (0) /* * QMD_TAILQ_CHECK_TAIL(TAILQ_HEAD *head, TAILQ_ENTRY NAME) * * Validates that the tail of the tailq is a pointer to pointer to NULL. */ #define QMD_TAILQ_CHECK_TAIL(head, field) do { \ if (*(head)->tqh_last != NULL) \ panic("Bad tailq NEXT(%p->tqh_last) != NULL", (head)); \ } while (0) /* * QMD_TAILQ_CHECK_NEXT(TYPE *elm, TAILQ_ENTRY NAME) * * If an element follows 'elm' in the tailq, validates that the next element * points back at 'elm.' */ #define QMD_TAILQ_CHECK_NEXT(elm, field) do { \ if (TAILQ_NEXT((elm), field) != NULL && \ TAILQ_NEXT((elm), field)->field.tqe_prev != \ &((elm)->field.tqe_next)) \ panic("Bad link elm %p next->prev != elm", (elm)); \ } while (0) /* * QMD_TAILQ_CHECK_PREV(TYPE *elm, TAILQ_ENTRY NAME) * * Validates that the previous element (or head of the tailq) points to 'elm.' 
*/ #define QMD_TAILQ_CHECK_PREV(elm, field) do { \ if (*(elm)->field.tqe_prev != (elm)) \ panic("Bad link elm %p prev->next != elm", (elm)); \ } while (0) #else #define QMD_TAILQ_CHECK_HEAD(head, field) #define QMD_TAILQ_CHECK_TAIL(head, headname) #define QMD_TAILQ_CHECK_NEXT(elm, field) #define QMD_TAILQ_CHECK_PREV(elm, field) #endif /* (_KERNEL && INVARIANTS) */ #define TAILQ_CONCAT(head1, head2, field) do { \ if (!TAILQ_EMPTY(head2)) { \ *(head1)->tqh_last = (head2)->tqh_first; \ (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \ (head1)->tqh_last = (head2)->tqh_last; \ TAILQ_INIT((head2)); \ QMD_TRACE_HEAD(head1); \ QMD_TRACE_HEAD(head2); \ } \ } while (0) #define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) #define TAILQ_FIRST(head) ((head)->tqh_first) #define TAILQ_FOREACH(var, head, field) \ for ((var) = TAILQ_FIRST((head)); \ (var); \ (var) = TAILQ_NEXT((var), field)) #define TAILQ_FOREACH_FROM(var, head, field) \ for ((var) = ((var) ? (var) : TAILQ_FIRST((head))); \ (var); \ (var) = TAILQ_NEXT((var), field)) #define TAILQ_FOREACH_SAFE(var, head, field, tvar) \ for ((var) = TAILQ_FIRST((head)); \ (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \ (var) = (tvar)) #define TAILQ_FOREACH_FROM_SAFE(var, head, field, tvar) \ for ((var) = ((var) ? (var) : TAILQ_FIRST((head))); \ (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \ (var) = (tvar)) #define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ for ((var) = TAILQ_LAST((head), headname); \ (var); \ (var) = TAILQ_PREV((var), headname, field)) #define TAILQ_FOREACH_REVERSE_FROM(var, head, headname, field) \ for ((var) = ((var) ? (var) : TAILQ_LAST((head), headname)); \ (var); \ (var) = TAILQ_PREV((var), headname, field)) #define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \ for ((var) = TAILQ_LAST((head), headname); \ (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \ (var) = (tvar)) #define TAILQ_FOREACH_REVERSE_FROM_SAFE(var, head, headname, field, tvar) \ for ((var) = ((var) ? 
(var) : TAILQ_LAST((head), headname)); \ (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \ (var) = (tvar)) #define TAILQ_INIT(head) do { \ TAILQ_FIRST((head)) = NULL; \ (head)->tqh_last = &TAILQ_FIRST((head)); \ QMD_TRACE_HEAD(head); \ } while (0) #define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ QMD_TAILQ_CHECK_NEXT(listelm, field); \ if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\ TAILQ_NEXT((elm), field)->field.tqe_prev = \ &TAILQ_NEXT((elm), field); \ else { \ (head)->tqh_last = &TAILQ_NEXT((elm), field); \ QMD_TRACE_HEAD(head); \ } \ TAILQ_NEXT((listelm), field) = (elm); \ (elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \ QMD_TRACE_ELEM(&(elm)->field); \ QMD_TRACE_ELEM(&(listelm)->field); \ } while (0) #define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ QMD_TAILQ_CHECK_PREV(listelm, field); \ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ TAILQ_NEXT((elm), field) = (listelm); \ *(listelm)->field.tqe_prev = (elm); \ (listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \ QMD_TRACE_ELEM(&(elm)->field); \ QMD_TRACE_ELEM(&(listelm)->field); \ } while (0) #define TAILQ_INSERT_HEAD(head, elm, field) do { \ QMD_TAILQ_CHECK_HEAD(head, field); \ if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \ TAILQ_FIRST((head))->field.tqe_prev = \ &TAILQ_NEXT((elm), field); \ else \ (head)->tqh_last = &TAILQ_NEXT((elm), field); \ TAILQ_FIRST((head)) = (elm); \ (elm)->field.tqe_prev = &TAILQ_FIRST((head)); \ QMD_TRACE_HEAD(head); \ QMD_TRACE_ELEM(&(elm)->field); \ } while (0) #define TAILQ_INSERT_TAIL(head, elm, field) do { \ QMD_TAILQ_CHECK_TAIL(head, field); \ TAILQ_NEXT((elm), field) = NULL; \ (elm)->field.tqe_prev = (head)->tqh_last; \ *(head)->tqh_last = (elm); \ (head)->tqh_last = &TAILQ_NEXT((elm), field); \ QMD_TRACE_HEAD(head); \ QMD_TRACE_ELEM(&(elm)->field); \ } while (0) #define TAILQ_LAST(head, headname) \ (*(((struct headname *)((head)->tqh_last))->tqh_last)) /* * The FAST function is fast in that it causes no data access other * then the access to the head. The standard LAST function above * will cause a data access of both the element you want and * the previous element. FAST is very useful for instances when * you may want to prefetch the last data element. */ #define TAILQ_LAST_FAST(head, type, field) \ (TAILQ_EMPTY(head) ? NULL : __containerof((head)->tqh_last, QUEUE_TYPEOF(type), field.tqe_next)) #define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) #define TAILQ_PREV(elm, headname, field) \ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) #define TAILQ_PREV_FAST(elm, head, type, field) \ ((elm)->field.tqe_prev == &(head)->tqh_first ? 
NULL : \ __containerof((elm)->field.tqe_prev, QUEUE_TYPEOF(type), field.tqe_next)) #define TAILQ_REMOVE(head, elm, field) do { \ QMD_SAVELINK(oldnext, (elm)->field.tqe_next); \ QMD_SAVELINK(oldprev, (elm)->field.tqe_prev); \ QMD_TAILQ_CHECK_NEXT(elm, field); \ QMD_TAILQ_CHECK_PREV(elm, field); \ if ((TAILQ_NEXT((elm), field)) != NULL) \ TAILQ_NEXT((elm), field)->field.tqe_prev = \ (elm)->field.tqe_prev; \ else { \ (head)->tqh_last = (elm)->field.tqe_prev; \ QMD_TRACE_HEAD(head); \ } \ *(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \ TRASHIT(*oldnext); \ TRASHIT(*oldprev); \ QMD_TRACE_ELEM(&(elm)->field); \ } while (0) #define TAILQ_SWAP(head1, head2, type, field) do { \ QUEUE_TYPEOF(type) *swap_first = (head1)->tqh_first; \ QUEUE_TYPEOF(type) **swap_last = (head1)->tqh_last; \ (head1)->tqh_first = (head2)->tqh_first; \ (head1)->tqh_last = (head2)->tqh_last; \ (head2)->tqh_first = swap_first; \ (head2)->tqh_last = swap_last; \ if ((swap_first = (head1)->tqh_first) != NULL) \ swap_first->field.tqe_prev = &(head1)->tqh_first; \ else \ (head1)->tqh_last = &(head1)->tqh_first; \ if ((swap_first = (head2)->tqh_first) != NULL) \ swap_first->field.tqe_prev = &(head2)->tqh_first; \ else \ (head2)->tqh_last = &(head2)->tqh_first; \ } while (0) #endif /* !_SYS_QUEUE_H_ */ aml-0.2.1/include/thread-pool.h000066400000000000000000000017411413255142200162770ustar00rootroot00000000000000/* * Copyright (c) 2020 Andri Yngvason * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. 
 */

#pragma once

struct aml;
struct aml_work;

int thread_pool_acquire_default(struct aml*, int n_threads);
void thread_pool_release_default(struct aml*);
int thread_pool_enqueue_default(struct aml*, struct aml_work*);

==> aml-0.2.1/meson.build <==
project(
    'aml',
    'c',
    version: '0.2.1',
    license: 'ISC',
    default_options: [
        'c_std=c11',
    ]
)

buildtype = get_option('buildtype')
default_library = get_option('default_library')

is_static_subproject = meson.is_subproject() and default_library == 'static'

c_args = [
    '-DPROJECT_VERSION="@0@"'.format(meson.project_version()),
    '-D_POSIX_C_SOURCE=200809L',
    '-fvisibility=hidden',
    '-Wmissing-prototypes',
]

git = find_program('git', native: true, required: false)
if git.found()
    git_describe = run_command([git, 'describe', '--tags', '--long'])
    git_branch = run_command([git, 'rev-parse', '--abbrev-ref', 'HEAD'])
    if git_describe.returncode() == 0 and git_branch.returncode() == 0
        c_args += '-DGIT_VERSION="@0@ (@1@)"'.format(
            git_describe.stdout().strip(),
            git_branch.stdout().strip(),
        )
    endif
endif

if buildtype != 'debug' and buildtype != 'debugoptimized'
    c_args += '-DNDEBUG'
endif

add_project_arguments(c_args, language: 'c')

cc = meson.get_compiler('c')

librt = cc.find_library('rt', required: true)
threads = dependency('threads')

inc = include_directories('include')

sources = [
    'src/aml.c',
    'src/thread-pool.c',
]

have_epoll = cc.has_header_symbol('sys/epoll.h', 'epoll_create')
have_kqueue = cc.has_header_symbol('sys/event.h', 'kqueue')

if have_epoll
    sources += 'src/epoll.c'
    message('epoll backend chosen')
elif have_kqueue
    sources += 'src/kqueue.c'
    message('kqueue backend chosen')
else
    error('Unsupported system')
endif

dependencies = [
    librt,
    threads,
]

aml = library(
    'aml',
    sources,
    version: '0.0.0',
    dependencies: dependencies,
    include_directories: inc,
    install: not is_static_subproject,
)

aml_dep = declare_dependency(
    include_directories: inc,
    link_with: aml,
)

if get_option('examples')
    subdir('examples')
endif

if not is_static_subproject
    install_headers('include/aml.h')

    pkgconfig = import('pkgconfig')
    pkgconfig.generate(
        aml,
        version: meson.project_version(),
        filebase: meson.project_name(),
        name: meson.project_name(),
        description: 'Another main loop library',
    )
endif

==> aml-0.2.1/meson_options.txt <==
option(
    'examples',
    type: 'boolean',
    value: false,
    description: 'Build examples',
)
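Since the build above generates a pkg-config file under the name aml, a
consumer outside Meson can compile against an installed copy directly, for
example with `cc hello.c $(pkg-config --cflags --libs aml)`. A minimal
hypothetical consumer, hello.c:

#include <stdio.h>
#include <aml.h>

int main(void)
{
    /* aml_version carries the library's version string. */
    printf("aml %s\n", aml_version);

    struct aml* aml = aml_new();
    if (!aml)
        return 1;

    aml_unref(aml);
    return 0;
}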
==> aml-0.2.1/src/aml.c <==
/*
 * Copyright (c) 2020 Andri Yngvason
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>
#include <signal.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdatomic.h>
#include <assert.h>
#include <time.h>
#include <errno.h>

#include "aml.h"
#include "backend.h"
#include "sys/queue.h"
#include "thread-pool.h"

#define EXPORT __attribute__((visibility("default")))

#define EVENT_MASK_DEFAULT AML_EVENT_READ

#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif

enum aml_obj_type {
    AML_OBJ_UNSPEC = 0,
    AML_OBJ_AML,
    AML_OBJ_HANDLER,
    AML_OBJ_TIMER,
    AML_OBJ_TICKER,
    AML_OBJ_SIGNAL,
    AML_OBJ_WORK,
    AML_OBJ_IDLE,
};

struct aml_obj {
    enum aml_obj_type type;
    int ref;
    void* userdata;
    aml_free_fn free_fn;
    aml_callback_fn cb;
    unsigned long long id;

    void* backend_data;

    LIST_ENTRY(aml_obj) link;
    LIST_ENTRY(aml_obj) global_link;
    TAILQ_ENTRY(aml_obj) event_link;
};

LIST_HEAD(aml_obj_list, aml_obj);
TAILQ_HEAD(aml_obj_queue, aml_obj);

struct aml_handler {
    struct aml_obj obj;

    int fd;
    enum aml_event event_mask;
    atomic_uint revents;

    struct aml* parent;
};

struct aml_timer {
    struct aml_obj obj;

    uint32_t timeout;
    uint64_t deadline;

    LIST_ENTRY(aml_timer) link;
};

LIST_HEAD(aml_timer_list, aml_timer);

struct aml_signal {
    struct aml_obj obj;

    int signo;
};

struct aml_work {
    struct aml_obj obj;

    aml_callback_fn work_fn;
};

struct aml_idle {
    struct aml_obj obj;

    LIST_ENTRY(aml_idle) link;
};

LIST_HEAD(aml_idle_list, aml_idle);

struct aml {
    struct aml_obj obj;

    void* state;
    struct aml_backend backend;

    int self_pipe_rfd, self_pipe_wfd;

    bool do_exit;

    struct aml_obj_list obj_list;
    pthread_mutex_t obj_list_mutex;

    struct aml_timer_list timer_list;
    pthread_mutex_t timer_list_mutex;

    struct aml_idle_list idle_list;

    struct aml_obj_queue event_queue;
    pthread_mutex_t event_queue_mutex;

    bool have_thread_pool;
};

static struct aml* aml__default = NULL;

static unsigned long long aml__obj_id = 0;
static struct aml_obj_list aml__obj_list =
    LIST_HEAD_INITIALIZER(aml__obj_list);
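/* Every public handle type above embeds struct aml_obj as its first member,
 * so a pointer to any of them can be treated as a struct aml_obj* (which is
 * what aml_ref() and aml_unref() do), and the obj.type tag recovers the
 * concrete type. */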
static pthread_mutex_t aml__ref_mutex = PTHREAD_MUTEX_INITIALIZER;

extern struct aml_backend implementation;

static struct aml_timer* aml__get_timer_with_earliest_deadline(
        struct aml* self);

#if defined(GIT_VERSION)
EXPORT const char aml_version[] = GIT_VERSION;
#elif defined(PROJECT_VERSION)
EXPORT const char aml_version[] = PROJECT_VERSION;
#else
EXPORT const char aml_version[] = "UNKNOWN";
#endif

EXPORT void aml_set_default(struct aml* aml)
{
    aml__default = aml;
}

EXPORT struct aml* aml_get_default(void)
{
    return aml__default;
}

static int aml__poll(struct aml* self, int timeout)
{
    return self->backend.poll(self->state, timeout);
}

static int aml__add_fd(struct aml* self, struct aml_handler* handler)
{
    return self->backend.add_fd(self->state, handler);
}

static int aml__del_fd(struct aml* self, struct aml_handler* handler)
{
    return self->backend.del_fd(self->state, handler);
}

static int aml__mod_fd(struct aml* self, struct aml_handler* handler)
{
    if (!self->backend.mod_fd) {
        aml__del_fd(self, handler);
        return aml__add_fd(self, handler);
    }

    return self->backend.mod_fd(self->state, handler);
}

static int aml__set_deadline(struct aml* self, uint64_t deadline)
{
    return self->backend.set_deadline(self->state, deadline);
}

static void aml__post_dispatch(struct aml* self)
{
    if (self->backend.post_dispatch)
        self->backend.post_dispatch(self->state);
}

static void aml__dont_block(int fd)
{
    fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);
}

static uint64_t aml__gettime_ms(struct aml* self)
{
    struct timespec ts = { 0 };
    clock_gettime(self->backend.clock, &ts);
    return ts.tv_sec * 1000ULL + ts.tv_nsec / 1000000ULL;
}

static void aml__ref_lock(void)
{
    pthread_mutex_lock(&aml__ref_mutex);
}

static void aml__ref_unlock(void)
{
    pthread_mutex_unlock(&aml__ref_mutex);
}

static void aml__obj_global_ref(struct aml_obj* obj)
{
    aml__ref_lock();
    obj->id = aml__obj_id++;
    LIST_INSERT_HEAD(&aml__obj_list, obj, global_link);
    aml__ref_unlock();
}

static void on_self_pipe_read(void* obj)
{
    struct aml* self = aml_get_userdata(obj);
    assert(self);
    assert(self->self_pipe_rfd == aml_get_fd(obj));

    /* Drain the pipe so that the next wake-up is visible again. */
    char dummy[256];
    while (read(self->self_pipe_rfd, dummy, sizeof(dummy)) > 0);
}

static void aml__destroy_self_pipe(void* userdata)
{
    struct aml* self = userdata;

    close(self->self_pipe_rfd);
    close(self->self_pipe_wfd);
}

static int aml__init_self_pipe(struct aml* self)
{
    if (self->backend.interrupt)
        return 0;

    int fds[2];
    if (pipe(fds) < 0)
        return -1;

    aml__dont_block(fds[0]);
    aml__dont_block(fds[1]);

    self->self_pipe_rfd = fds[0];
    self->self_pipe_wfd = fds[1];

    struct aml_handler* handler = aml_handler_new(self->self_pipe_rfd,
            on_self_pipe_read, self, aml__destroy_self_pipe);
    if (!handler)
        goto failure;

    aml_start(self, handler);
    aml_unref(handler);

    return 0;

failure:
    close(fds[1]);
    close(fds[0]);
    return -1;
}

EXPORT void aml_interrupt(struct aml* self)
{
    if (self->backend.interrupt) {
        self->backend.interrupt(self->state);
        return;
    }

    char one = 1;
    write(self->self_pipe_wfd, &one, sizeof(one));
}
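/* Illustrative sketch (not part of the original source): aml_interrupt() is
 * the intended way to make a blocking aml_poll() return early from outside
 * the loop, e.g. from a helper thread:
 *
 *	static void* producer(void* aml)
 *	{
 *		// ... publish some state for the loop to pick up ...
 *		aml_interrupt(aml);
 *		return NULL;
 *	}
 *
 *	pthread_t thread;
 *	pthread_create(&thread, NULL, producer, aml);
 */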
EXPORT struct aml* aml_new(void)
{
    struct aml* self = calloc(1, sizeof(*self));
    if (!self)
        return NULL;

    self->obj.type = AML_OBJ_AML;
    self->obj.ref = 1;

    LIST_INIT(&self->obj_list);
    LIST_INIT(&self->timer_list);
    LIST_INIT(&self->idle_list);
    TAILQ_INIT(&self->event_queue);

    pthread_mutex_init(&self->event_queue_mutex, NULL);
    pthread_mutex_init(&self->obj_list_mutex, NULL);
    pthread_mutex_init(&self->timer_list_mutex, NULL);

    memcpy(&self->backend, &implementation, sizeof(self->backend));

    if (!self->backend.thread_pool_acquire)
        self->backend.thread_pool_acquire = thread_pool_acquire_default;
    if (!self->backend.thread_pool_release)
        self->backend.thread_pool_release = thread_pool_release_default;
    if (!self->backend.thread_pool_enqueue)
        self->backend.thread_pool_enqueue = thread_pool_enqueue_default;

    self->state = self->backend.new_state(self);
    if (!self->state)
        goto failure;

    if (aml__init_self_pipe(self) < 0)
        goto pipe_failure;

    aml__obj_global_ref(&self->obj);

    return self;

pipe_failure:
    self->backend.del_state(self->state);
failure:
    free(self);
    return NULL;
}

static int get_n_processors(void)
{
#ifdef _SC_NPROCESSORS_ONLN
    return sysconf(_SC_NPROCESSORS_ONLN);
#else
    return 4; /* Guess */
#endif
}

EXPORT int aml_require_workers(struct aml* self, int n)
{
    if (n < 0)
        n = get_n_processors();

    if (self->backend.thread_pool_acquire(self, n) < 0)
        return -1;

    self->have_thread_pool = true;
    return 0;
}

EXPORT struct aml_handler* aml_handler_new(int fd, aml_callback_fn callback,
        void* userdata, aml_free_fn free_fn)
{
    struct aml_handler* self = calloc(1, sizeof(*self));
    if (!self)
        return NULL;

    self->obj.type = AML_OBJ_HANDLER;
    self->obj.ref = 1;
    self->obj.userdata = userdata;
    self->obj.free_fn = free_fn;
    self->obj.cb = callback;

    self->fd = fd;
    self->event_mask = EVENT_MASK_DEFAULT;

    aml__obj_global_ref(&self->obj);

    return self;
}

EXPORT struct aml_timer* aml_timer_new(uint32_t timeout,
        aml_callback_fn callback, void* userdata, aml_free_fn free_fn)
{
    struct aml_timer* self = calloc(1, sizeof(*self));
    if (!self)
        return NULL;

    self->obj.type = AML_OBJ_TIMER;
    self->obj.ref = 1;
    self->obj.userdata = userdata;
    self->obj.free_fn = free_fn;
    self->obj.cb = callback;

    self->timeout = timeout;

    aml__obj_global_ref(&self->obj);

    return self;
}

EXPORT struct aml_ticker* aml_ticker_new(uint32_t period,
        aml_callback_fn callback, void* userdata, aml_free_fn free_fn)
{
    struct aml_timer* timer =
        aml_timer_new(period, callback, userdata, free_fn);
    /* Guard against allocation failure before retagging the object. */
    if (timer)
        timer->obj.type = AML_OBJ_TICKER;
    return (struct aml_ticker*)timer;
}

EXPORT struct aml_signal* aml_signal_new(int signo, aml_callback_fn callback,
        void* userdata, aml_free_fn free_fn)
{
    struct aml_signal* self = calloc(1, sizeof(*self));
    if (!self)
        return NULL;

    self->obj.type = AML_OBJ_SIGNAL;
    self->obj.ref = 1;
    self->obj.userdata = userdata;
    self->obj.free_fn = free_fn;
    self->obj.cb = callback;

    self->signo = signo;

    aml__obj_global_ref(&self->obj);

    return self;
}

EXPORT struct aml_work* aml_work_new(aml_callback_fn work_fn,
        aml_callback_fn callback, void* userdata, aml_free_fn free_fn)
{
    struct aml_work* self = calloc(1, sizeof(*self));
    if (!self)
        return NULL;

    self->obj.type = AML_OBJ_WORK;
    self->obj.ref = 1;
    self->obj.userdata = userdata;
    self->obj.free_fn = free_fn;
    self->obj.cb = callback;

    self->work_fn = work_fn;

    aml__obj_global_ref(&self->obj);

    return self;
}

EXPORT struct aml_idle* aml_idle_new(aml_callback_fn callback, void* userdata,
        aml_free_fn free_fn)
{
    struct aml_idle* self = calloc(1, sizeof(*self));
    if (!self)
        return NULL;

    self->obj.type = AML_OBJ_IDLE;
    self->obj.ref = 1;
    self->obj.userdata = userdata;
    self->obj.free_fn = free_fn;
    self->obj.cb = callback;

    aml__obj_global_ref(&self->obj);

    return self;
}

static bool aml__obj_is_started_unlocked(struct aml* self, void* obj)
{
    struct aml_obj* elem;
    LIST_FOREACH(elem, &self->obj_list, link)
        if (elem == obj)
            return true;

    return false;
}

EXPORT bool aml_is_started(struct aml* self, void* obj)
{
    pthread_mutex_lock(&self->obj_list_mutex);
    bool result = aml__obj_is_started_unlocked(self, obj);
    pthread_mutex_unlock(&self->obj_list_mutex);
    return result;
}
static int aml__obj_try_add(struct aml* self, void* obj)
{
    int rc = -1;

    pthread_mutex_lock(&self->obj_list_mutex);

    if (!aml__obj_is_started_unlocked(self, obj)) {
        aml_ref(obj);
        LIST_INSERT_HEAD(&self->obj_list, (struct aml_obj*)obj, link);
        rc = 0;
    }

    pthread_mutex_unlock(&self->obj_list_mutex);

    return rc;
}

static void aml__obj_remove_unlocked(struct aml* self, void* obj)
{
    LIST_REMOVE((struct aml_obj*)obj, link);
    aml_unref(obj);
}

static void aml__obj_remove(struct aml* self, void* obj)
{
    pthread_mutex_lock(&self->obj_list_mutex);
    aml__obj_remove_unlocked(self, obj);
    pthread_mutex_unlock(&self->obj_list_mutex);
}

static int aml__obj_try_remove(struct aml* self, void* obj)
{
    int rc = -1;

    pthread_mutex_lock(&self->obj_list_mutex);

    if (aml__obj_is_started_unlocked(self, obj)) {
        aml__obj_remove_unlocked(self, obj);
        rc = 0;
    }

    pthread_mutex_unlock(&self->obj_list_mutex);

    return rc;
}

static int aml__start_handler(struct aml* self, struct aml_handler* handler)
{
    if (aml__add_fd(self, handler) < 0)
        return -1;

    handler->parent = self;
    return 0;
}

static int aml__start_timer(struct aml* self, struct aml_timer* timer)
{
    timer->deadline = aml__gettime_ms(self) + timer->timeout;

    pthread_mutex_lock(&self->timer_list_mutex);
    LIST_INSERT_HEAD(&self->timer_list, timer, link);
    pthread_mutex_unlock(&self->timer_list_mutex);

    if (timer->timeout == 0) {
        assert(timer->obj.type != AML_OBJ_TICKER);
        aml_stop(self, timer);
        aml_emit(self, timer, 0);
        aml_interrupt(self);
        return 0;
    }

    struct aml_timer* earliest =
        aml__get_timer_with_earliest_deadline(self);
    if (earliest == timer)
        aml__set_deadline(self, timer->deadline);

    return 0;
}

static int aml__start_signal(struct aml* self, struct aml_signal* sig)
{
    return self->backend.add_signal(self->state, sig);
}

static int aml__start_work(struct aml* self, struct aml_work* work)
{
    return self->backend.thread_pool_enqueue(self, work);
}

static int aml__start_idle(struct aml* self, struct aml_idle* idle)
{
    LIST_INSERT_HEAD(&self->idle_list, idle, link);
    return 0;
}

static int aml__start_unchecked(struct aml* self, void* obj)
{
    struct aml_obj* head = obj;

    switch (head->type) {
    case AML_OBJ_AML: return -1;
    case AML_OBJ_HANDLER: return aml__start_handler(self, obj);
    case AML_OBJ_TIMER: /* fallthrough */
    case AML_OBJ_TICKER: return aml__start_timer(self, obj);
    case AML_OBJ_SIGNAL: return aml__start_signal(self, obj);
    case AML_OBJ_WORK: return aml__start_work(self, obj);
    case AML_OBJ_IDLE: return aml__start_idle(self, obj);
    case AML_OBJ_UNSPEC: break;
    }

    abort();
    return -1;
}

EXPORT int aml_start(struct aml* self, void* obj)
{
    if (aml__obj_try_add(self, obj) < 0)
        return -1;

    if (aml__start_unchecked(self, obj) == 0)
        return 0;

    aml__obj_remove(self, obj);
    return -1;
}
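/* Note (not part of the original source): as aml__start_timer() above shows,
 * starting a timer with a timeout of zero fires it exactly once on the next
 * dispatch, which makes it usable as a "post this callback" primitive:
 *
 *	struct aml_timer* t = aml_timer_new(0, on_deferred, data, NULL);
 *	aml_start(aml, t);
 *	aml_unref(t);
 */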
static int aml__stop_handler(struct aml* self, struct aml_handler* handler)
{
	if (aml__del_fd(self, handler) < 0)
		return -1;

	handler->parent = NULL;
	return 0;
}

static int aml__stop_timer(struct aml* self, struct aml_timer* timer)
{
	pthread_mutex_lock(&self->timer_list_mutex);
	LIST_REMOVE(timer, link);
	pthread_mutex_unlock(&self->timer_list_mutex);
	return 0;
}

static int aml__stop_signal(struct aml* self, struct aml_signal* sig)
{
	return self->backend.del_signal(self->state, sig);
}

static int aml__stop_work(struct aml* self, struct aml_work* work)
{
	/* Note: The cb may be executed anyhow */
	return 0;
}

static int aml__stop_idle(struct aml* self, struct aml_idle* idle)
{
	LIST_REMOVE(idle, link);
	return 0;
}

static int aml__stop_unchecked(struct aml* self, void* obj)
{
	struct aml_obj* head = obj;

	switch (head->type) {
	case AML_OBJ_AML: return -1;
	case AML_OBJ_HANDLER: return aml__stop_handler(self, obj);
	case AML_OBJ_TIMER: /* fallthrough */
	case AML_OBJ_TICKER: return aml__stop_timer(self, obj);
	case AML_OBJ_SIGNAL: return aml__stop_signal(self, obj);
	case AML_OBJ_WORK: return aml__stop_work(self, obj);
	case AML_OBJ_IDLE: return aml__stop_idle(self, obj);
	case AML_OBJ_UNSPEC: break;
	}

	abort();
	return -1;
}

EXPORT
int aml_stop(struct aml* self, void* obj)
{
	aml_ref(obj);

	if (aml__obj_try_remove(self, obj) >= 0)
		aml__stop_unchecked(self, obj);

	aml_unref(obj);

	return 0;
}

static struct aml_timer* aml__get_timer_with_earliest_deadline(struct aml* self)
{
	uint64_t deadline = UINT64_MAX;
	struct aml_timer* result = NULL;

	struct aml_timer* timer;

	pthread_mutex_lock(&self->timer_list_mutex);
	LIST_FOREACH(timer, &self->timer_list, link)
		if (timer->deadline < deadline) {
			deadline = timer->deadline;
			result = timer;
		}
	pthread_mutex_unlock(&self->timer_list_mutex);

	return result;
}

static bool aml__handle_timeout(struct aml* self, uint64_t now)
{
	struct aml_timer* timer = aml__get_timer_with_earliest_deadline(self);
	if (!timer || timer->deadline > now)
		return false;

	aml_emit(self, timer, 0);

	switch (timer->obj.type) {
	case AML_OBJ_TIMER:
		aml_stop(self, timer);
		break;
	case AML_OBJ_TICKER:
		timer->deadline += timer->timeout;
		break;
	default:
		abort();
		break;
	}

	return true;
}

static void aml__handle_idle(struct aml* self)
{
	struct aml_idle* idle;

	LIST_FOREACH(idle, &self->idle_list, link)
		if (idle->obj.cb)
			idle->obj.cb(idle);
}

static void aml__handle_event(struct aml* self, struct aml_obj* obj)
{
	/* A reference is kept here in case an object is stopped inside the
	 * callback. We want the object to live until we're done with it.
	 */
	aml_ref(obj);

	if (obj->cb)
		obj->cb(obj);

	if (obj->type == AML_OBJ_HANDLER) {
		struct aml_handler* handler = (struct aml_handler*)obj;
		handler->revents = 0;

		if (self->backend.flags & AML_BACKEND_EDGE_TRIGGERED)
			aml__mod_fd(self, handler);
	}

	aml_unref(obj);
}
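/* Since aml__handle_event() holds a reference across the callback, a
 * callback may safely stop, or even drop the last user reference to, its
 * own object. A sketch, assuming on_readable was registered on a handler:
 *
 *	static void on_readable(void* obj)
 *	{
 *		// ... read from aml_get_fd(obj) ...
 *		aml_stop(aml_get_default(), obj); // safe: obj outlives the call
 *	}
 */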
/* Might exit earlier than timeout. It's up to the user to check. */
EXPORT
int aml_poll(struct aml* self, int timeout)
{
	return aml__poll(self, timeout);
}

static struct aml_obj* aml__event_dequeue(struct aml* self)
{
	pthread_mutex_lock(&self->event_queue_mutex);
	struct aml_obj* obj = TAILQ_FIRST(&self->event_queue);
	if (obj)
		TAILQ_REMOVE(&self->event_queue, obj, event_link);
	pthread_mutex_unlock(&self->event_queue_mutex);
	return obj;
}

EXPORT
void aml_dispatch(struct aml* self)
{
	uint64_t now = aml__gettime_ms(self);
	while (aml__handle_timeout(self, now));

	struct aml_timer* earliest =
		aml__get_timer_with_earliest_deadline(self);
	if (earliest) {
		assert(earliest->deadline > now);
		aml__set_deadline(self, earliest->deadline);
	}

	sigset_t sig_old, sig_new;
	sigfillset(&sig_new);
	pthread_sigmask(SIG_BLOCK, &sig_new, &sig_old);

	struct aml_obj* obj;
	while ((obj = aml__event_dequeue(self)) != NULL) {
		aml__handle_event(self, obj);
		aml_unref(obj);
	}

	pthread_sigmask(SIG_SETMASK, &sig_old, NULL);

	aml__handle_idle(self);
	aml__post_dispatch(self);
}

EXPORT
int aml_run(struct aml* self)
{
	self->do_exit = false;

	do {
		aml_poll(self, -1);
		aml_dispatch(self);
	} while (!self->do_exit);

	return 0;
}

EXPORT
void aml_exit(struct aml* self)
{
	self->do_exit = true;

	if (self->backend.exit)
		self->backend.exit(self->state);
}

EXPORT
int aml_ref(void* obj)
{
	struct aml_obj* self = obj;

	aml__ref_lock();
	int ref = self->ref++;
	aml__ref_unlock();

	return ref;
}

static void aml__free(struct aml* self)
{
	while (!LIST_EMPTY(&self->obj_list)) {
		struct aml_obj* obj = LIST_FIRST(&self->obj_list);

		aml__stop_unchecked(self, obj);
		aml__obj_remove_unlocked(self, obj);
	}

	if (self->have_thread_pool)
		self->backend.thread_pool_release(self);

	self->backend.del_state(self->state);

	while (!TAILQ_EMPTY(&self->event_queue)) {
		struct aml_obj* obj = TAILQ_FIRST(&self->event_queue);
		TAILQ_REMOVE(&self->event_queue, obj, event_link);
		aml_unref(obj);
	}

	pthread_mutex_destroy(&self->timer_list_mutex);
	pthread_mutex_destroy(&self->obj_list_mutex);
	pthread_mutex_destroy(&self->event_queue_mutex);

	free(self);
}

static void aml__free_handler(struct aml_handler* self)
{
	if (self->obj.free_fn)
		self->obj.free_fn(self->obj.userdata);

	free(self);
}

static void aml__free_timer(struct aml_timer* self)
{
	if (self->obj.free_fn)
		self->obj.free_fn(self->obj.userdata);

	free(self);
}

static void aml__free_signal(struct aml_signal* self)
{
	if (self->obj.free_fn)
		self->obj.free_fn(self->obj.userdata);

	free(self);
}

static void aml__free_work(struct aml_work* self)
{
	if (self->obj.free_fn)
		self->obj.free_fn(self->obj.userdata);

	free(self);
}

static void aml__free_idle(struct aml_idle* self)
{
	if (self->obj.free_fn)
		self->obj.free_fn(self->obj.userdata);

	free(self);
}

EXPORT
int aml_unref(void* obj)
{
	struct aml_obj* self = obj;

	aml__ref_lock();
	int ref = --self->ref;
	if (ref == 0)
		LIST_REMOVE(self, global_link);
	aml__ref_unlock();
	assert(ref >= 0);

	if (ref > 0)
		goto done;

	switch (self->type) {
	case AML_OBJ_AML:
		aml__free(obj);
		break;
	case AML_OBJ_HANDLER:
		aml__free_handler(obj);
		break;
	case AML_OBJ_TIMER: /* fallthrough */
	case AML_OBJ_TICKER:
		aml__free_timer(obj);
		break;
	case AML_OBJ_SIGNAL:
		aml__free_signal(obj);
		break;
	case AML_OBJ_WORK:
		aml__free_work(obj);
		break;
	case AML_OBJ_IDLE:
		aml__free_idle(obj);
		break;
	default:
		abort();
		break;
	}

done:
	return ref;
}
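/* The id functions below act as weak references: store the id, then
 * resolve it later with aml_try_ref(), which succeeds only while the
 * object is still alive. This is how the default thread pool safely
 * reaches back to a loop that may already be gone. A sketch:
 *
 *	unsigned long long id = aml_get_id(obj);
 *	// ... later, possibly from another thread ...
 *	void* tmp = aml_try_ref(id);
 *	if (tmp) {
 *		// we now hold a strong reference
 *		aml_unref(tmp);
 *	}
 */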
EXPORT
unsigned long long aml_get_id(const void* obj)
{
	const struct aml_obj* aml_obj = obj;
	return aml_obj->id;
}

EXPORT
void* aml_try_ref(unsigned long long id)
{
	struct aml_obj* obj = NULL;

	aml__ref_lock();
	LIST_FOREACH(obj, &aml__obj_list, global_link)
		if (obj->id == id)
			break;

	if (obj && obj->id == id)
		obj->ref++;
	else
		obj = NULL;
	aml__ref_unlock();

	return obj;
}

EXPORT
void* aml_get_userdata(const void* obj)
{
	const struct aml_obj* aml_obj = obj;
	return aml_obj->userdata;
}

EXPORT
void aml_set_userdata(void* obj, void* userdata, aml_free_fn free_fn)
{
	struct aml_obj* aml_obj = obj;
	aml_obj->userdata = userdata;
	aml_obj->free_fn = free_fn;
}

void aml_emit(struct aml* self, void* ptr, uint32_t revents)
{
	struct aml_obj* obj = ptr;

	if (obj->type == AML_OBJ_HANDLER) {
		struct aml_handler* handler = ptr;
		uint32_t old = atomic_fetch_or(&handler->revents, revents);
		if (old != 0)
			return;
	}

	sigset_t sig_old, sig_new;
	sigfillset(&sig_new);
	pthread_sigmask(SIG_BLOCK, &sig_new, &sig_old);

	pthread_mutex_lock(&self->event_queue_mutex);
	TAILQ_INSERT_TAIL(&self->event_queue, obj, event_link);
	aml_ref(obj);
	pthread_mutex_unlock(&self->event_queue_mutex);

	pthread_sigmask(SIG_SETMASK, &sig_old, NULL);
}

EXPORT
enum aml_event aml_get_event_mask(const struct aml_handler* handler)
{
	return handler->event_mask;
}

EXPORT
void aml_set_event_mask(struct aml_handler* handler, enum aml_event mask)
{
	handler->event_mask = mask;

	if (handler->parent && aml_is_started(handler->parent, handler))
		aml__mod_fd(handler->parent, handler);
}

EXPORT
enum aml_event aml_get_revents(const struct aml_handler* handler)
{
	return handler->revents;
}

EXPORT
int aml_get_fd(const void* ptr)
{
	const struct aml_obj* obj = ptr;

	switch (obj->type) {
	case AML_OBJ_AML:;
		const struct aml* aml = ptr;
		return aml->backend.get_fd ?
			aml->backend.get_fd(aml->state) : -1;
	case AML_OBJ_HANDLER:
		return ((struct aml_handler*)ptr)->fd;
	default:
		break;
	}

	return -1;
}

EXPORT
int aml_get_signo(const struct aml_signal* sig)
{
	return sig->signo;
}

aml_callback_fn aml_get_work_fn(const struct aml_work* work)
{
	return work->work_fn;
}

void* aml_get_backend_data(const void* ptr)
{
	const struct aml_obj* obj = ptr;
	return obj->backend_data;
}

void aml_set_backend_data(void* ptr, void* data)
{
	struct aml_obj* obj = ptr;
	obj->backend_data = data;
}

void* aml_get_backend_state(const struct aml* self)
{
	return self->state;
}

EXPORT
void aml_set_duration(void* ptr, uint32_t duration)
{
	struct aml_obj* obj = ptr;

	switch (obj->type) {
	case AML_OBJ_TIMER: /* fallthrough */
	case AML_OBJ_TICKER:
		((struct aml_timer*)ptr)->timeout = duration;
		return;
	default:
		break;
	}

	abort();
}
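/* aml_set_event_mask() may be called while a handler is started; the
 * backend registration is then updated in place via aml__mod_fd(). A
 * sketch for enabling write interest only while a hypothetical output
 * buffer is non-empty:
 *
 *	enum aml_event mask = AML_EVENT_READ;
 *	if (have_pending_output)
 *		mask |= AML_EVENT_WRITE;
 *	aml_set_event_mask(handler, mask);
 */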
aml-0.2.1/src/epoll.c000066400000000000000000000141701413255142200143330ustar00rootroot00000000000000/*
 * Copyright (c) 2020 Andri Yngvason
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "aml.h"
#include "backend.h"

#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <assert.h>
#include <signal.h>
#include <pthread.h>
#include <sys/epoll.h>
#include <sys/timerfd.h>
#include <sys/signalfd.h>

struct epoll_state {
	struct aml* aml;
	int epoll_fd;
	int timer_fd;
};

struct epoll_signal {
	struct epoll_state* state;
	int fd;
	unsigned long long ref; /* object id of the associated aml_signal */
};

static void* epoll_new_state(struct aml* aml)
{
	struct epoll_state* self = calloc(1, sizeof(*self));
	if (!self)
		return NULL;

	self->aml = aml;

	self->epoll_fd = epoll_create(16);
	if (self->epoll_fd < 0)
		goto epoll_failure;

	self->timer_fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
	if (self->timer_fd < 0)
		goto timer_fd_failure;

	/* data.ptr is left NULL so the timerfd can be told apart from
	 * handler events in epoll_emit_event(). */
	struct epoll_event event = {
		.events = EPOLLIN,
	};
	if (epoll_ctl(self->epoll_fd, EPOLL_CTL_ADD, self->timer_fd,
				&event) < 0)
		goto timer_add_failure;

	return self;

timer_add_failure:
	close(self->timer_fd);
timer_fd_failure:
	close(self->epoll_fd);
epoll_failure:
	free(self);
	return NULL;
}

static void epoll_del_state(void* state)
{
	struct epoll_state* self = state;

	close(self->timer_fd);
	close(self->epoll_fd);
	free(self);
}

static int epoll_get_fd(const void* state)
{
	const struct epoll_state* self = state;
	return self->epoll_fd;
}

static void epoll_emit_event(struct epoll_state* self,
		struct epoll_event* event)
{
	if (event->data.ptr == NULL) {
		// Must be the timerfd
		uint64_t count = 0;
		(void)read(self->timer_fd, &count, sizeof(count));
		return;
	}

	enum aml_event aml_events = AML_EVENT_NONE;
	if (event->events & (EPOLLIN | EPOLLPRI))
		aml_events |= AML_EVENT_READ;
	if (event->events & EPOLLOUT)
		aml_events |= AML_EVENT_WRITE;

	aml_emit(self->aml, event->data.ptr, aml_events);
}

static int epoll_poll(void* state, int timeout)
{
	struct epoll_state* self = state;

	struct epoll_event events[16];
	size_t max_events = sizeof(events) / sizeof(events[0]);

	int nfds = epoll_wait(self->epoll_fd, events, max_events, timeout);
	for (int i = 0; i < nfds; ++i)
		epoll_emit_event(self, &events[i]);

	return nfds;
}

static void epoll_event_from_aml_handler(struct epoll_event* event,
		struct aml_handler* handler)
{
	enum aml_event in = aml_get_event_mask(handler);

	event->events = 0;
	if (in & AML_EVENT_READ)
		event->events |= EPOLLIN | EPOLLPRI;
	if (in & AML_EVENT_WRITE)
		event->events |= EPOLLOUT;

	event->data.ptr = handler;
}

static int epoll_add_fd(void* state, struct aml_handler* handler)
{
	struct epoll_state* self = state;

	struct epoll_event event;
	epoll_event_from_aml_handler(&event, handler);

	return epoll_ctl(self->epoll_fd, EPOLL_CTL_ADD, aml_get_fd(handler),
			&event);
}

static int epoll_mod_fd(void* state, struct aml_handler* handler)
{
	struct epoll_state* self = state;

	struct epoll_event event;
	epoll_event_from_aml_handler(&event, handler);

	return epoll_ctl(self->epoll_fd, EPOLL_CTL_MOD, aml_get_fd(handler),
			&event);
}

static int epoll_del_fd(void* state, struct aml_handler* handler)
{
	struct epoll_state* self = state;

	// Dummy event to appease valgrind
	struct epoll_event event = { 0 };

	return epoll_ctl(self->epoll_fd, EPOLL_CTL_DEL, aml_get_fd(handler),
			&event);
}
static void epoll_signal_cleanup(void* userdata)
{
	struct epoll_signal* sig = userdata;
	close(sig->fd);
	free(sig);
}

static void epoll_on_signal(void* obj)
{
	struct aml_handler* handler = obj;
	struct epoll_signal* ctx = aml_get_userdata(handler);

	struct signalfd_siginfo fdsi;
	(void)read(ctx->fd, &fdsi, sizeof(fdsi));

	struct aml_signal* sig = aml_try_ref(ctx->ref);
	if (!sig)
		return;

	aml_emit(ctx->state->aml, sig, 0);

	aml_unref(sig);
}

static int epoll_add_signal(void* state, struct aml_signal* sig)
{
	struct epoll_state* self = state;

	struct epoll_signal* ctx = calloc(1, sizeof(*ctx));
	if (!ctx)
		return -1;

	int signo = aml_get_signo(sig);

	sigset_t ss;
	sigemptyset(&ss);
	sigaddset(&ss, signo);

	ctx->state = self;
	ctx->ref = aml_get_id(sig);
	ctx->fd = signalfd(-1, &ss, SFD_NONBLOCK | SFD_CLOEXEC);
	if (ctx->fd < 0)
		goto signalfd_failure;

	struct aml_handler* handler = aml_handler_new(ctx->fd,
			epoll_on_signal, ctx, epoll_signal_cleanup);
	if (!handler)
		goto handler_failure;

	if (aml_start(self->aml, handler) < 0)
		goto start_failure;

	aml_set_backend_data(sig, handler);

	pthread_sigmask(SIG_BLOCK, &ss, NULL);
	return 0;

start_failure:
	aml_unref(handler);
handler_failure:
	close(ctx->fd);
signalfd_failure:
	free(ctx);
	return -1;
}

static int epoll_del_signal(void* state, struct aml_signal* sig)
{
	struct epoll_state* self = state;

	struct aml_handler* handler = aml_get_backend_data(sig);
	assert(handler);

	int rc = aml_stop(self->aml, handler);
	if (rc >= 0)
		aml_unref(handler);

	return rc;
}

static int epoll_set_deadline(void* state, uint64_t deadline)
{
	struct epoll_state* self = state;

	struct itimerspec it = {
		.it_value = {
			.tv_sec = (uint32_t)(deadline / UINT64_C(1000)),
			.tv_nsec = (uint32_t)((deadline % UINT64_C(1000))
					* UINT64_C(1000000)),
		},
	};

	return timerfd_settime(self->timer_fd, TFD_TIMER_ABSTIME, &it, NULL);
}

const struct aml_backend implementation = {
	.new_state = epoll_new_state,
	.del_state = epoll_del_state,
	.clock = CLOCK_MONOTONIC,
	.get_fd = epoll_get_fd,
	.poll = epoll_poll,
	.add_fd = epoll_add_fd,
	.mod_fd = epoll_mod_fd,
	.del_fd = epoll_del_fd,
	.add_signal = epoll_add_signal,
	.del_signal = epoll_del_signal,
	.set_deadline = epoll_set_deadline,
};
aml-0.2.1/src/kqueue.c000066400000000000000000000113651413255142200145220ustar00rootroot00000000000000/*
 * Copyright (c) 2020 Andri Yngvason
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "aml.h"
#include "backend.h"

#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <assert.h>
#include <signal.h>
#include <pthread.h>
#include <sys/event.h>

struct kq_state {
	struct aml* aml;
	int fd;
};

static void* kq_new_state(struct aml* aml)
{
	struct kq_state* self = calloc(1, sizeof(*self));
	if (!self)
		return NULL;

	self->aml = aml;

	self->fd = kqueue();
	if (self->fd < 0)
		goto kqueue_failure;

	return self;

kqueue_failure:
	free(self);
	return NULL;
}

static void kq_del_state(void* state)
{
	struct kq_state* self = state;
	close(self->fd);
	free(self);
}

static int kq_get_fd(const void* state)
{
	const struct kq_state* self = state;
	return self->fd;
}

static void kq_emit_event(struct kq_state* self, struct kevent* event)
{
	// TODO: Maybe join read/write into one event for fds?
	switch (event->filter) {
	case EVFILT_READ:
		aml_emit(self->aml, event->udata, AML_EVENT_READ);
		break;
	case EVFILT_WRITE:
		aml_emit(self->aml, event->udata, AML_EVENT_WRITE);
		break;
	case EVFILT_SIGNAL:
		aml_emit(self->aml, event->udata, 0);
		break;
	case EVFILT_TIMER:
		assert(event->ident == 0);
		break;
	}
}

static int kq_poll(void* state, int timeout)
{
	struct kq_state* self = state;

	struct timespec ts = {
		.tv_sec = timeout / 1000UL,
		.tv_nsec = (timeout % 1000UL) * 1000000UL,
	};

	struct kevent events[16];
	size_t max_events = sizeof(events) / sizeof(events[0]);

	/* A negative timeout means: block until something happens */
	int nfds = kevent(self->fd, NULL, 0, events, max_events,
			timeout < 0 ? NULL : &ts);
	for (int i = 0; i < nfds; ++i)
		kq_emit_event(self, &events[i]);

	return nfds;
}
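/* kqueue tracks read and write interest as two separate kevent filters.
 * The mask that was last registered is stashed in the handler's backend
 * data so that kq_add_fd() below, which also serves as mod_fd, can diff
 * the old and new masks and submit only the EV_ADD/EV_DELETE changes
 * that are actually required.
 */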
static int kq_add_fd(void* state, struct aml_handler* handler)
{
	struct kq_state* self = state;

	int fd = aml_get_fd(handler);
	enum aml_event last_mask = (intptr_t)aml_get_backend_data(handler);
	enum aml_event mask = aml_get_event_mask(handler);
	aml_set_backend_data(handler, (void*)(intptr_t)mask);

	struct kevent events[2];
	int n = 0;

	if ((mask ^ last_mask) & AML_EVENT_READ)
		EV_SET(&events[n++], fd, EVFILT_READ,
				mask & AML_EVENT_READ ? EV_ADD : EV_DELETE,
				0, 0, handler);
	if ((mask ^ last_mask) & AML_EVENT_WRITE)
		EV_SET(&events[n++], fd, EVFILT_WRITE,
				mask & AML_EVENT_WRITE ? EV_ADD : EV_DELETE,
				0, 0, handler);

	return kevent(self->fd, events, n, NULL, 0, NULL);
}

static int kq_del_fd(void* state, struct aml_handler* handler)
{
	struct kq_state* self = state;

	int fd = aml_get_fd(handler);
	enum aml_event last_mask = (intptr_t)aml_get_backend_data(handler);

	struct kevent events[2];
	int n = 0;

	if (last_mask & AML_EVENT_READ)
		EV_SET(&events[n++], fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
	if (last_mask & AML_EVENT_WRITE)
		EV_SET(&events[n++], fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);

	return kevent(self->fd, events, n, NULL, 0, NULL);
}

static int kq_add_signal(void* state, struct aml_signal* sig)
{
	struct kq_state* self = state;
	int signo = aml_get_signo(sig);

	struct kevent event;
	EV_SET(&event, signo, EVFILT_SIGNAL, EV_ADD, 0, 0, sig);

	int rc = kevent(self->fd, &event, 1, NULL, 0, NULL);

	sigset_t ss;
	sigemptyset(&ss);
	sigaddset(&ss, signo);
	pthread_sigmask(SIG_BLOCK, &ss, NULL);

	return rc;
}

static int kq_del_signal(void* state, struct aml_signal* sig)
{
	struct kq_state* self = state;
	int signo = aml_get_signo(sig);

	struct kevent event;
	EV_SET(&event, signo, EVFILT_SIGNAL, EV_DELETE, 0, 0, NULL);

	// TODO: Restore signal mask

	return kevent(self->fd, &event, 1, NULL, 0, NULL);
}

static int kq_set_deadline(void* state, uint64_t deadline)
{
	struct kq_state* self = state;

	struct kevent event;
	EV_SET(&event, 0, EVFILT_TIMER, EV_ADD | EV_ONESHOT,
			NOTE_MSECONDS | NOTE_ABSTIME, deadline, NULL);

	return kevent(self->fd, &event, 1, NULL, 0, NULL);
}

const struct aml_backend implementation = {
	.new_state = kq_new_state,
	.del_state = kq_del_state,
	.clock = CLOCK_REALTIME,
	.get_fd = kq_get_fd,
	.poll = kq_poll,
	.add_fd = kq_add_fd,
	.mod_fd = kq_add_fd, // Same as add_fd
	.del_fd = kq_del_fd,
	.add_signal = kq_add_signal,
	.del_signal = kq_del_signal,
	.set_deadline = kq_set_deadline,
};
aml-0.2.1/src/posix.c000066400000000000000000000312451413255142200143640ustar00rootroot00000000000000/*
 * Copyright (c) 2020 Andri Yngvason
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <pthread.h>
#include <poll.h>
#include <fcntl.h>
#include <time.h>

#include "aml.h"
#include "backend.h"
#include "sys/queue.h"

struct posix_state;

typedef void (*fd_op_fn)(struct posix_state*, struct aml_handler*);

struct posix_fd_op {
	struct aml_handler* handler;
	fd_op_fn call;

	TAILQ_ENTRY(posix_fd_op) link;
};

TAILQ_HEAD(posix_fd_op_queue, posix_fd_op);

struct posix_state {
	struct aml* aml;

	struct pollfd* fds;
	struct aml_handler** handlers;

	uint32_t max_fds;
	uint32_t num_fds;

	pthread_t poller_thread;

	int event_pipe_rfd, event_pipe_wfd;

	struct posix_fd_op_queue fd_ops;
	pthread_mutex_t fd_ops_mutex;

	int nfds;
	pthread_mutex_t wait_mutex;
	pthread_cond_t wait_cond;

	bool waiting_for_dispatch;
	pthread_mutex_t dispatch_mutex;
	pthread_cond_t dispatch_cond;
};

struct signal_handler {
	struct posix_state* state;
	struct aml_signal* sig;

	LIST_ENTRY(signal_handler) link;
};

LIST_HEAD(signal_handler_list, signal_handler);

static int posix_spawn_poller(struct posix_state* self);
static void posix_post_dispatch(void* state);
static void posix_interrupt(void* state);

static struct signal_handler_list signal_handlers =
	LIST_HEAD_INITIALIZER(NULL);

static int posix__enqueue_fd_op(struct posix_state* self, fd_op_fn call,
		struct aml_handler* handler)
{
	struct posix_fd_op* op = calloc(1, sizeof(*op));
	if (!op)
		return -1;

	aml_ref(handler);

	op->call = call;
	op->handler = handler;

	pthread_mutex_lock(&self->fd_ops_mutex);
	TAILQ_INSERT_TAIL(&self->fd_ops, op, link);
	pthread_mutex_unlock(&self->fd_ops_mutex);

	posix_interrupt(self);

	return 0;
}

static struct posix_fd_op* posix__dequeue_fd_op(struct posix_state* self)
{
	pthread_mutex_lock(&self->fd_ops_mutex);
	struct posix_fd_op* op = TAILQ_FIRST(&self->fd_ops);
	if (op)
		TAILQ_REMOVE(&self->fd_ops, op, link);
	pthread_mutex_unlock(&self->fd_ops_mutex);
	return op;
}

static struct signal_handler* signal_handler_find_by_signo(int signo)
{
	struct signal_handler* handler;

	LIST_FOREACH(handler, &signal_handlers, link)
		if (aml_get_signo(handler->sig) == signo)
			return handler;

	return NULL;
}

static struct signal_handler* signal_handler_find_by_obj(struct aml_signal* obj)
{
	struct signal_handler* handler;

	LIST_FOREACH(handler, &signal_handlers, link)
		if (handler->sig == obj)
			return handler;

	return NULL;
}

static void posix__signal_handler(int signo)
{
	struct signal_handler* handler;

	LIST_FOREACH(handler, &signal_handlers, link)
		if (aml_get_signo(handler->sig) == signo)
			aml_emit(handler->state->aml, handler->sig, 0);
}

static void dont_block(int fd)
{
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);
}

static int posix_init_event_pipe(struct posix_state* self)
{
	int fds[2];
	if (pipe(fds) < 0)
		return -1;

	dont_block(fds[0]);
	dont_block(fds[1]);

	self->event_pipe_rfd = fds[0];
	self->event_pipe_wfd = fds[1];

	return 0;
}
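/* Architecture note: this backend runs poll() on a dedicated poller
 * thread. When events arrive, the poller writes a byte to the event pipe
 * (whose read end is what aml_get_fd() reports, for embedding into other
 * loops), wakes the main thread, and then blocks until aml_dispatch()
 * has run. Changes to the fd list are queued as fd ops and applied on
 * the poller thread itself, so the pollfd array needs no locking.
 */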
static void* posix_new_state(struct aml* aml)
{
	struct posix_state* self = calloc(1, sizeof(*self));
	if (!self)
		return NULL;

	self->aml = aml;
	self->max_fds = 128;
	self->fds = malloc(sizeof(*self->fds) * self->max_fds);
	self->handlers = malloc(sizeof(*self->handlers) * self->max_fds);
	if (!self->fds || !self->handlers) {
		free(self->fds);
		free(self->handlers);
		goto failure;
	}

	TAILQ_INIT(&self->fd_ops);
	pthread_mutex_init(&self->fd_ops_mutex, NULL);
	pthread_mutex_init(&self->wait_mutex, NULL);
	pthread_cond_init(&self->wait_cond, NULL);
	pthread_mutex_init(&self->dispatch_mutex, NULL);
	pthread_cond_init(&self->dispatch_cond, NULL);

	if (posix_init_event_pipe(self) < 0)
		goto pipe_failure;

	if (posix_spawn_poller(self) < 0)
		goto thread_failure;

	return self;

thread_failure:
	close(self->event_pipe_rfd);
	close(self->event_pipe_wfd);
pipe_failure:
	pthread_mutex_destroy(&self->fd_ops_mutex);
failure:
	free(self);
	return NULL;
}

static int posix__find_handler(struct posix_state* self,
		struct aml_handler* handler)
{
	for (uint32_t i = 0; i < self->num_fds; ++i)
		if (self->handlers[i] == handler)
			return i;

	return -1;
}

static void posix_del_state(void* state)
{
	struct posix_state* self = state;

	posix_post_dispatch(self);
	pthread_cancel(self->poller_thread);
	pthread_join(self->poller_thread, NULL);

	struct posix_fd_op* op;
	while ((op = posix__dequeue_fd_op(self))) {
		aml_unref(op->handler);
		free(op);
	}

	close(self->event_pipe_rfd);
	close(self->event_pipe_wfd);

	pthread_cond_destroy(&self->dispatch_cond);
	pthread_mutex_destroy(&self->dispatch_mutex);
	pthread_cond_destroy(&self->wait_cond);
	pthread_mutex_destroy(&self->wait_mutex);
	pthread_mutex_destroy(&self->fd_ops_mutex);

	free(self->handlers);
	free(self->fds);
	free(self);
}

static int posix_get_fd(const void* state)
{
	const struct posix_state* self = state;
	return self->event_pipe_rfd;
}

static void posix__apply_fd_ops(struct posix_state* self)
{
	while (1) {
		struct posix_fd_op* op = posix__dequeue_fd_op(self);
		if (!op)
			break;

		op->call(self, op->handler);
		aml_unref(op->handler);
		free(op);
	}
}

static enum aml_event posix_poll_events_to_aml_events(uint32_t poll_events)
{
	enum aml_event aml_events = 0;

	if (poll_events & (POLLIN | POLLPRI))
		aml_events |= AML_EVENT_READ;
	/* POLLOUT maps to write readiness, not read */
	if (poll_events & POLLOUT)
		aml_events |= AML_EVENT_WRITE;

	return aml_events;
}

static int posix_do_poll(struct posix_state* self, int timeout)
{
	int nfds = poll(self->fds, self->num_fds, timeout);
	if (nfds <= 0)
		return nfds;

	for (uint32_t i = 0; i < self->num_fds; ++i)
		if (self->fds[i].revents) {
			struct pollfd* pfd = &self->fds[i];
			struct aml_handler* handler = self->handlers[i];
			assert(pfd->fd == aml_get_fd(handler));

			enum aml_event events =
				posix_poll_events_to_aml_events(pfd->revents);
			aml_emit(self->aml, handler, events);
		}

	return nfds;
}

static void posix_wake_up_main(struct posix_state* self, int nfds)
{
	pthread_mutex_lock(&self->dispatch_mutex);
	self->waiting_for_dispatch = true;
	pthread_mutex_unlock(&self->dispatch_mutex);

	pthread_mutex_lock(&self->wait_mutex);
	self->nfds = nfds;
	pthread_cond_signal(&self->wait_cond);
	pthread_mutex_unlock(&self->wait_mutex);

	pthread_mutex_lock(&self->dispatch_mutex);
	while (self->waiting_for_dispatch)
		pthread_cond_wait(&self->dispatch_cond, &self->dispatch_mutex);
	pthread_mutex_unlock(&self->dispatch_mutex);
}
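/* dummy_handler below gives SIGUSR1 a no-op disposition so that
 * posix_interrupt() can knock the poller thread out of a blocking poll()
 * with pthread_kill() without terminating the process.
 */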
static void dummy_handler()
{
}

static void* posix_poll_thread(void* state)
{
	struct posix_state* self = state;

	while (1) {
		posix__apply_fd_ops(self);

		int nfds = posix_do_poll(self, -1);
		if (nfds > 0) {
			char one = 1;
			write(self->event_pipe_wfd, &one, sizeof(one));
		}

		if (nfds != 0)
			posix_wake_up_main(self, nfds);
	}

	return NULL;
}

static int posix_spawn_poller(struct posix_state* self)
{
	/* The dummy SIGUSR1 disposition must stay installed for as long as
	 * the poller runs, so it is not restored here. (A restore placed
	 * after the return would have been unreachable anyway.)
	 */
	struct sigaction sa = { .sa_handler = dummy_handler };
	sigaction(SIGUSR1, &sa, NULL);

	return pthread_create(&self->poller_thread, NULL, posix_poll_thread,
			self);
}

static int posix_poll(void* state, int timeout)
{
	struct posix_state* self = state;
	int nfds;

	if (timeout == 0) {
		pthread_mutex_lock(&self->wait_mutex);
		nfds = self->nfds;
		self->nfds = 0;
		pthread_mutex_unlock(&self->wait_mutex);
	} else if (timeout < 0) {
		pthread_mutex_lock(&self->wait_mutex);
		while (self->nfds == 0)
			pthread_cond_wait(&self->wait_cond, &self->wait_mutex);
		nfds = self->nfds;
		self->nfds = 0;
		pthread_mutex_unlock(&self->wait_mutex);
	} else {
		struct timespec ts = { 0 };
		clock_gettime(CLOCK_REALTIME, &ts);

		uint32_t ms = timeout + ts.tv_nsec / 1000000UL;
		ts.tv_sec += ms / 1000UL;
		ts.tv_nsec = (ms % 1000UL) * 1000000UL;

		pthread_mutex_lock(&self->wait_mutex);
		while (self->nfds == 0) {
			int rc = pthread_cond_timedwait(&self->wait_cond,
					&self->wait_mutex, &ts);
			if (rc == ETIMEDOUT)
				break;
		}
		nfds = self->nfds;
		self->nfds = 0;
		pthread_mutex_unlock(&self->wait_mutex);
	}

	if (nfds > 0) {
		char dummy[256];
		while (read(self->event_pipe_rfd, dummy, sizeof(dummy))
				== sizeof(dummy));
	} else if (nfds < 0) {
		errno = EINTR;
	}

	return nfds;
}

static uint32_t posix_get_event_mask(struct aml_handler* handler)
{
	uint32_t poll_events = 0;
	enum aml_event aml_events = aml_get_event_mask(handler);

	if (aml_events & AML_EVENT_READ)
		poll_events |= POLLIN | POLLPRI;
	if (aml_events & AML_EVENT_WRITE)
		poll_events |= POLLOUT;

	return poll_events;
}

static void posix_add_fd_op(struct posix_state* self,
		struct aml_handler* handler)
{
	if (self->num_fds >= self->max_fds) {
		uint32_t new_max = self->max_fds * 2;
		struct pollfd* fds = realloc(self->fds, sizeof(*fds) * new_max);
		struct aml_handler** hds =
			realloc(self->handlers, sizeof(*hds) * new_max);
		assert(fds && hds);
		self->fds = fds;
		self->handlers = hds;
		self->max_fds = new_max;
	}

	struct pollfd* event = &self->fds[self->num_fds];
	event->events = posix_get_event_mask(handler);
	event->revents = 0;
	event->fd = aml_get_fd(handler);

	self->handlers[self->num_fds] = handler;

	self->num_fds++;
}

static void posix_mod_fd_op(struct posix_state* self,
		struct aml_handler* handler)
{
	int index = posix__find_handler(self, handler);
	if (index < 0)
		return;

	self->fds[index].fd = aml_get_fd(handler);
	self->fds[index].events = posix_get_event_mask(handler);
}

static void posix_del_fd_op(struct posix_state* self,
		struct aml_handler* handler)
{
	int index = posix__find_handler(self, handler);
	if (index < 0)
		return;

	self->num_fds--;

	self->fds[index] = self->fds[self->num_fds];
	self->handlers[index] = self->handlers[self->num_fds];
}

static int posix_add_fd(void* state, struct aml_handler* handler)
{
	return posix__enqueue_fd_op(state, posix_add_fd_op, handler);
}

static int posix_mod_fd(void* state, struct aml_handler* handler)
{
	return posix__enqueue_fd_op(state, posix_mod_fd_op, handler);
}

static int posix_del_fd(void* state, struct aml_handler* handler)
{
	return posix__enqueue_fd_op(state, posix_del_fd_op, handler);
}
static int posix_add_signal(void* state, struct aml_signal* sig)
{
	int signo = aml_get_signo(sig);

	struct signal_handler* handler = calloc(1, sizeof(*handler));
	if (!handler)
		return -1;

	handler->state = state;
	handler->sig = sig;

	if (!signal_handler_find_by_signo(signo)) {
		sigset_t set;
		sigemptyset(&set);
		sigaddset(&set, signo);
		pthread_sigmask(SIG_BLOCK, &set, NULL);

		struct sigaction sa = {
			.sa_handler = posix__signal_handler,
		};

		if (sigaction(aml_get_signo(sig), &sa, NULL) < 0)
			goto failure;
	}

	LIST_INSERT_HEAD(&signal_handlers, handler, link);
	return 0;

failure:
	free(handler);
	return -1;
}

static int posix_del_signal(void* state, struct aml_signal* sig)
{
	struct signal_handler* handler = signal_handler_find_by_obj(sig);
	if (!handler)
		return -1;

	LIST_REMOVE(handler, link);

	if (!signal_handler_find_by_signo(aml_get_signo(sig))) {
		struct sigaction sa = {
			.sa_handler = SIG_DFL,
		};

		int signo = aml_get_signo(sig);
		sigaction(signo, &sa, NULL);

		sigset_t set;
		sigemptyset(&set);
		sigaddset(&set, signo);
		pthread_sigmask(SIG_UNBLOCK, &set, NULL);
	}

	free(handler);

	return 0;
}

static void posix_post_dispatch(void* state)
{
	struct posix_state* self = state;

	pthread_mutex_lock(&self->dispatch_mutex);
	self->waiting_for_dispatch = false;
	pthread_cond_signal(&self->dispatch_cond);
	pthread_mutex_unlock(&self->dispatch_mutex);
}

static void posix_interrupt(void* state)
{
	struct posix_state* self = state;
	pthread_kill(self->poller_thread, SIGUSR1);
}

const struct aml_backend posix_backend = {
	.new_state = posix_new_state,
	.del_state = posix_del_state,
	.get_fd = posix_get_fd,
	.poll = posix_poll,
	.exit = NULL,
	.add_fd = posix_add_fd,
	.mod_fd = posix_mod_fd,
	.del_fd = posix_del_fd,
	.add_signal = posix_add_signal,
	.del_signal = posix_del_signal,
	.post_dispatch = posix_post_dispatch,
	.interrupt = posix_interrupt,
};
aml-0.2.1/src/thread-pool.c000066400000000000000000000104571413255142200154420ustar00rootroot00000000000000/*
 * Copyright (c) 2020 Andri Yngvason
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <stdlib.h>
#include <unistd.h>
#include <pthread.h>
#include <stdatomic.h>
#include <signal.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>

#include "aml.h"
#include "backend.h"
#include "thread-pool.h"
#include "sys/queue.h"

struct default_work {
	unsigned long long aml_id;
	struct aml_work* work;

	TAILQ_ENTRY(default_work) link;
};

TAILQ_HEAD(default_work_queue, default_work);

static struct default_work_queue default_work_queue =
	TAILQ_HEAD_INITIALIZER(default_work_queue);

static atomic_int n_thread_pool_users = 0;

static pthread_t* thread_pool = NULL;
static pthread_mutex_t work_queue_mutex;
static pthread_cond_t work_queue_cond;
static int n_threads = 0;

static int enqueue_work(struct aml* aml, struct aml_work* work, int broadcast);

static void reap_threads(void)
{
	enqueue_work(NULL, NULL, 1);

	for (int i = 0; i < n_threads; ++i)
		pthread_join(thread_pool[i], NULL);

	free(thread_pool);
	thread_pool = NULL;
	n_threads = 0; /* allow the pool to be re-acquired later */

	pthread_mutex_destroy(&work_queue_mutex);
	pthread_cond_destroy(&work_queue_cond);

	while (!TAILQ_EMPTY(&default_work_queue)) {
		struct default_work* work = TAILQ_FIRST(&default_work_queue);
		TAILQ_REMOVE(&default_work_queue, work, link);
		free(work);
	}
}
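/* Shutdown is signalled by a sentinel entry whose work pointer is NULL,
 * broadcast to all workers by reap_threads(). dequeue_work() deliberately
 * leaves the sentinel at the head of the queue so that every worker sees
 * it and exits; the sentinel itself is freed by the final queue drain in
 * reap_threads().
 */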
static struct default_work* dequeue_work(void)
{
	struct default_work* work;

	pthread_mutex_lock(&work_queue_mutex);
	while ((work = TAILQ_FIRST(&default_work_queue)) == NULL)
		pthread_cond_wait(&work_queue_cond, &work_queue_mutex);
	if (work->work)
		TAILQ_REMOVE(&default_work_queue, work, link);
	pthread_mutex_unlock(&work_queue_mutex);

	return work;
}

static void* worker_fn(void* context)
{
	(void)context;

	sigset_t ss;
	sigfillset(&ss);
	sigdelset(&ss, SIGCHLD);
	pthread_sigmask(SIG_BLOCK, &ss, NULL);

	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);

	while (1) {
		struct default_work* work = dequeue_work();
		assert(work);

		if (!work->work)
			break;

		aml_callback_fn cb = aml_get_work_fn(work->work);
		if (cb)
			cb(work->work);

		struct aml* aml = aml_try_ref(work->aml_id);
		if (aml) {
			aml_emit(aml, work->work, 0);
			aml_stop(aml, work->work);
			aml_interrupt(aml);
			aml_unref(aml);
		}

		free(work);
	}

	return NULL;
}

int thread_pool_acquire_default(struct aml* aml, int n)
{
	(void)aml;

	int rc = 0;

	if (n_threads == 0) {
		pthread_mutex_init(&work_queue_mutex, NULL);
		pthread_cond_init(&work_queue_cond, NULL);
	}

	if (n > n_threads) {
		pthread_t* new_pool =
			realloc(thread_pool, n * sizeof(pthread_t));
		if (!new_pool)
			return -1;

		thread_pool = new_pool;
	}

	int i;
	for (i = n_threads; i < n; ++i) {
		/* pthread_create() returns 0 on success or a positive errno
		 * value on failure; it never returns a negative value. */
		rc = pthread_create(&thread_pool[i], NULL, worker_fn, NULL);
		if (rc != 0)
			break;
	}

	n_threads = i;

	if (rc != 0)
		goto failure;

	++n_thread_pool_users;

	return rc;

failure:
	errno = rc;
	reap_threads();
	return -1;
}

static int enqueue_work(struct aml* aml, struct aml_work* work, int broadcast)
{
	struct default_work* default_work = calloc(1, sizeof(*default_work));
	if (!default_work)
		return -1;

	default_work->work = work;
	if (aml)
		default_work->aml_id = aml_get_id(aml);
	else
		default_work->aml_id = ULLONG_MAX;

	pthread_mutex_lock(&work_queue_mutex);
	TAILQ_INSERT_TAIL(&default_work_queue, default_work, link);
	if (broadcast)
		pthread_cond_broadcast(&work_queue_cond);
	else
		pthread_cond_signal(&work_queue_cond);
	pthread_mutex_unlock(&work_queue_mutex);

	return 0;
}

int thread_pool_enqueue_default(struct aml* aml, struct aml_work* work)
{
	return enqueue_work(aml, work, 0);
}

void thread_pool_release_default(struct aml* aml)
{
	if (--n_thread_pool_users == 0)
		reap_threads();
}
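/* End-to-end usage sketch for the default thread pool (illustrative only;
 * do_heavy_lifting and on_done are hypothetical application callbacks):
 *
 *	aml_require_workers(aml, -1); // one worker per CPU
 *
 *	struct aml_work* work =
 *		aml_work_new(do_heavy_lifting, on_done, data, free);
 *	if (work) {
 *		aml_start(aml, work); // do_heavy_lifting() runs on a worker
 *		aml_unref(work);      // on_done() is dispatched on the loop
 *	}
 */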