pax_global_header00006660000000000000000000000064141176111100014503gustar00rootroot0000000000000052 comment=4e7aa64539541905baf2e007694210c8d34f43b6 libatomic_ops-7.6.12/000077500000000000000000000000001411761111000144245ustar00rootroot00000000000000libatomic_ops-7.6.12/.gitattributes000066400000000000000000000004141411761111000173160ustar00rootroot00000000000000# Git repo attributes. # Ensure all text files have normalized (LF) line endings in the repository. * text=auto # Note: "core.eol" configuration variable controls which line endings to use # for the normalized files in the working directory (the default is native). libatomic_ops-7.6.12/.gitignore000066400000000000000000000021541411761111000164160ustar00rootroot00000000000000# Ignored files in libatomic_ops Git repo. Makefile /pkgconfig/atomic_ops.pc /pkgconfig/atomic_ops-uninstalled.pc /autom4te.cache/ /config.cache /config.log /config.status /libatomic_ops-* *.a *.dll *.exe *.gcda *.gch *.gcno *.la *.lib *.lo *.o *.obj *.so /out/ /src/.deps/ /src/.dirstamp /src/.libs/ /src/config.h /src/config.h.in~ /src/stamp-h1 /tests/.deps/ /tests/.dirstamp /tests/.libs/ /tests/core /tests/list_atomic.i /tests/test_atomic /tests/test_atomic_generalized /tests/test_atomic_pthreads /tests/test_malloc /tests/test_stack # External library (without trailing slash to allow symlinks): /pthreads-w32* # These files are generated by autoreconf: /aclocal.m4 /compile /config.guess /config.sub /configure /depcomp /install-sh /missing /mkinstalldirs /src/config.h.in /test-driver Makefile.in # Generated by libtoolize: /libtool /ltmain.sh /m4/*.m4 # These files are generated by make check: /tests/list_atomic.c /tests/test_atomic_include.h /tests/test*.log /tests/test*.trs # Code analysis tools: *.c.gcov *.h.gcov *.sancov /.sv*-dir /cov-int /coverage.info /pvs-project.log /pvs-project.tasks /strace_out libatomic_ops-7.6.12/.travis.yml000066400000000000000000000302621411761111000165400ustar00rootroot00000000000000dist: xenial language: c 
os: linux jobs: include: - compiler: clang - compiler: gcc - os: osx - env: - MAKEFILE_TARGET=dist - compiler: clang env: - CFLAGS_EXTRA="-O3 -march=native" - CONF_OPTIONS="--enable-assertions" - compiler: gcc env: - CFLAGS_EXTRA="-O3 -march=native" - CONF_OPTIONS="--enable-assertions" - os: osx env: - CFLAGS_EXTRA="-O3 -march=native -D AO_USE_ALMOST_LOCK_FREE" - CONF_OPTIONS="--enable-assertions" - compiler: clang env: - CFLAGS_EXTRA="-march=native -std=c11" - compiler: clang env: - CFLAGS_EXTRA="-O3 -march=native -std=c11 -D AO_BL_SIZE=4 -D DEFAULT_NTHREADS=32" - compiler: gcc env: - CFLAGS_EXTRA="-march=native -D _FORTIFY_SOURCE=2 -std=c89" - addons: apt: packages: - gcc-multilib compiler: clang env: - CFLAGS_EXTRA="-m32" - CONF_OPTIONS="--enable-assertions" - addons: apt: packages: - gcc-multilib compiler: clang env: - CFLAGS_EXTRA="-m32 -O3 -march=native" - addons: apt: packages: - gcc-multilib compiler: clang env: - CFLAGS_EXTRA="-m32 -march=native" - CONF_OPTIONS="--disable-atomic-intrinsics" - addons: apt: packages: - gcc-multilib compiler: gcc env: - CFLAGS_EXTRA="-m32 -march=native -D AO_USE_ALMOST_LOCK_FREE" - CONF_OPTIONS="--enable-assertions" - addons: apt: packages: - gcc-multilib compiler: gcc env: - CFLAGS_EXTRA="-m32 -march=native" - CONF_OPTIONS="--disable-atomic-intrinsics" - os: osx env: - CFLAGS_EXTRA="-m32 -march=native -D _FORTIFY_SOURCE=2" - CONF_OPTIONS="--enable-assertions" - compiler: gcc env: - CFLAGS_EXTRA="-D AO_GENERALIZE_ASM_BOOL_CAS -D AO_USE_NANOSLEEP -D AO_USE_NO_SIGNALS" - CONF_OPTIONS="--enable-assertions" - compiler: clang env: - CFLAGS_EXTRA="-D AO_STACK_PREFER_CAS_DOUBLE -D AO_USE_PTHREAD_DEFS" - CONF_OPTIONS="--enable-assertions" - compiler: clang env: - CFLAGS_EXTRA="-D DONT_USE_MMAP -O3" - CONF_OPTIONS="--enable-assertions --enable-shared" - addons: apt: packages: - gcc-8 sources: - ubuntu-toolchain-r-test compiler: gcc-8 env: - CFLAGS_EXTRA="-O3 -march=native" - addons: apt: packages: - gcc-8 sources: - 
ubuntu-toolchain-r-test compiler: gcc-8 env: - CFLAGS_EXTRA="-O3 -march=native" - addons: apt: packages: - gcc-8 - gcc-8-multilib sources: - ubuntu-toolchain-r-test compiler: gcc-8 env: - CFLAGS_EXTRA="-m32 -O3 -march=native" - addons: apt: packages: - gcc-8 - gcc-8-multilib sources: - ubuntu-toolchain-r-test compiler: gcc-8 env: - CFLAGS_EXTRA="-mx32 -march=native -D _FORTIFY_SOURCE=2" - CONF_OPTIONS="--enable-assertions --enable-shared" - addons: apt: packages: - gcc-8 - gcc-8-multilib sources: - ubuntu-toolchain-r-test compiler: gcc-8 env: - CFLAGS_EXTRA="-mx32 -march=native" - CONF_OPTIONS="--disable-atomic-intrinsics --disable-docs" - compiler: clang env: - CFLAGS_EXTRA="-fsanitize=address -D AO_USE_ALMOST_LOCK_FREE -fno-omit-frame-pointer" - TESTS_CUSTOM_RUN=true - compiler: clang env: - CFLAGS_EXTRA="-fsanitize=address -march=native -fno-common -fno-omit-frame-pointer" - CONF_OPTIONS="--enable-assertions" - compiler: gcc env: - CFLAGS_EXTRA="-fsanitize=address -fno-omit-frame-pointer -D AO_USE_ALMOST_LOCK_FREE -D USE_STANDARD_MALLOC" - CONF_OPTIONS="--enable-assertions" - addons: apt: packages: - gcc-8 - gcc-8-multilib sources: - ubuntu-toolchain-r-test compiler: gcc-8 env: - CFLAGS_EXTRA="-fsanitize=address -m32 -march=native -fno-omit-frame-pointer" - LDFLAGS="-fuse-ld=gold" - os: osx env: - CFLAGS_EXTRA="-fsanitize=address -m32 -fno-omit-frame-pointer" - compiler: clang env: - CFLAGS_EXTRA="-fsanitize=memory,undefined -march=native -fno-omit-frame-pointer" - TESTS_CUSTOM_RUN=true - compiler: clang env: - CFLAGS_EXTRA="-fsanitize=thread -D AO_USE_ALMOST_LOCK_FREE -fno-omit-frame-pointer" - compiler: clang env: - CFLAGS_EXTRA="-fsanitize=thread -march=native -fno-omit-frame-pointer" - CONF_OPTIONS="--enable-assertions" - compiler: clang env: - CONF_OPTIONS="--disable-atomic-intrinsics" - CFLAGS_EXTRA="-march=native" - addons: apt: packages: - lcov compiler: gcc env: - CONF_OPTIONS="--enable-gcov --enable-shared" - CC_FOR_CHECK=gcc - MAKEFILE_TARGET=all - 
REPORT_COVERAGE=true - CFLAGS_EXTRA="-march=native -D DEBUG_RUN_ONE_TEST -D VERBOSE" - addons: apt: packages: - cppcheck env: - CPPCHECK_ENABLE="-j16 -q --enable=information,performance,portability,style,warning" - MAKEFILE_TARGET=all - addons: apt: packages: - cppcheck env: - CPPCHECK_ENABLE="-q --enable=unusedFunction -D AO_TEST_EMULATION" - MAKEFILE_TARGET=all - compiler: clang env: - CSA_CHECK=true - MAKEFILE_TARGET=all - CFLAGS_EXTRA="-D AO_TRACE_MALLOC -D HAVE_MMAP -D VERBOSE" - compiler: clang env: - CFLAGS_EXTRA="-x c++ -march=native -D VERBOSE" - CONF_OPTIONS="--enable-assertions" - MAKEFILE_TARGET=all - compiler: gcc env: - CC_FOR_CHECK=g++ - MAKEFILE_TARGET=all - addons: apt: packages: - musl-tools compiler: musl-gcc env: - CFLAGS_EXTRA="-march=native" - CONF_OPTIONS="--enable-assertions" - addons: apt: packages: - gcc-mingw-w64 compiler: x86_64-w64-mingw32-gcc env: - CONF_OPTIONS="--host=x86_64-w64-mingw32 --enable-shared" - MAKEFILE_TARGET=all - addons: apt: packages: - gcc-mingw-w64 compiler: i686-w64-mingw32-gcc env: - CONF_OPTIONS="--host=i686-w64-mingw32" - MAKEFILE_TARGET=all - addons: apt: packages: - gcc-multilib env: - CROSS_GCC_VER=4.9.0 - NOLIBC_ARCH_ABI=aarch64-linux - addons: apt: packages: - gcc-multilib env: - CROSS_GCC_VER=4.9.0 - NOLIBC_ARCH_ABI=alpha-linux - addons: apt: packages: - gcc-multilib env: - CROSS_GCC_VER=4.9.0 - NOLIBC_ARCH_ABI=arm-unknown-linux-gnueabi - addons: apt: packages: - gcc-4.6 - gcc-multilib sources: - ubuntu-toolchain-r-test compiler: gcc-4.6 env: - CROSS_GCC_VER=4.2.4 - NOLIBC_ARCH_ABI=avr32-linux - CFLAGS_EXTRA="-fno-strict-aliasing" - addons: apt: packages: - gcc-4.6 - gcc-multilib sources: - ubuntu-toolchain-r-test compiler: gcc-4.6 env: - CROSS_GCC_VER=4.6.3 - NOLIBC_ARCH_ABI=cris-linux - addons: apt: packages: - gcc-multilib env: - CROSS_GCC_VER=4.9.0 - NOLIBC_ARCH_ABI=hppa-linux - addons: apt: packages: - gcc-multilib env: - CROSS_GCC_VER=4.9.0 - NOLIBC_ARCH_ABI=ia64-linux - addons: apt: packages: - 
gcc-multilib env: - CROSS_GCC_VER=4.9.0 - NOLIBC_ARCH_ABI=m68k-linux - addons: apt: packages: - gcc-multilib env: - CROSS_GCC_VER=4.9.0 - NOLIBC_ARCH_ABI=mips-linux - addons: apt: packages: - gcc-multilib env: - CROSS_GCC_VER=4.9.0 - NOLIBC_ARCH_ABI=mips64-linux - addons: apt: packages: - gcc-multilib env: - CROSS_GCC_VER=4.9.0 - NOLIBC_ARCH_ABI=powerpc-linux - addons: apt: packages: - gcc-multilib env: - CROSS_GCC_VER=4.9.0 - NOLIBC_ARCH_ABI=powerpc64-linux - addons: apt: packages: - gcc-multilib env: - CROSS_GCC_VER=4.9.0 - NOLIBC_ARCH_ABI=ppc64le-linux - addons: apt: packages: - gcc-multilib env: - CROSS_GCC_VER=4.9.0 - NOLIBC_ARCH_ABI=s390x-linux - addons: apt: packages: - gcc-4.6 - gcc-multilib sources: - ubuntu-toolchain-r-test compiler: gcc-4.6 env: - CROSS_GCC_VER=4.6.3 - NOLIBC_ARCH_ABI=sh4-linux - addons: apt: packages: - gcc-multilib env: - CROSS_GCC_VER=4.9.0 - NOLIBC_ARCH_ABI=sparc-linux - CFLAGS_EXTRA="-D AO_NO_SPARC_V9" - addons: apt: packages: - gcc-multilib env: - CROSS_GCC_VER=4.9.0 - NOLIBC_ARCH_ABI=sparc64-linux - addons: apt: packages: - gcc-4.6 - gcc-multilib sources: - ubuntu-toolchain-r-test compiler: gcc-4.6 env: - CROSS_GCC_VER=4.6.2 - NOLIBC_ARCH_ABI=tilegx-linux - env: - MAKEFILE_TARGET=distcheck - AUTOMAKE_VER=1.15 - M4_VER=1.4.18 - LIBTOOL_VER=2.4.6 before_install: - if [[ "$CROSS_GCC_VER" != "" ]]; then BUILD_ARCH=x86_64; TAR_FOLDER_URL=https://www.kernel.org/pub/tools/crosstool/files/bin/$BUILD_ARCH/$CROSS_GCC_VER; TARFILE=$BUILD_ARCH-gcc-$CROSS_GCC_VER-nolibc_$NOLIBC_ARCH_ABI.tar.xz; wget -O - $TAR_FOLDER_URL/$TARFILE | tar xf - --xz --directory ~; CROSS_CC=~/gcc-$CROSS_GCC_VER-nolibc/$NOLIBC_ARCH_ABI/bin/$NOLIBC_ARCH_ABI-gcc; export C_INCLUDE_PATH=/usr/include; MAKEFILE_TARGET=check-nolink; fi - if [[ "$AUTOMAKE_VER" != "" || "$LIBTOOL_VER" != "" || "$M4_VER" != "" ]]; then GNUTOOLS_ROOT=`pwd`/../gnu-tools; export PATH=$GNUTOOLS_ROOT/bin:$PATH; GNU_DOWNLOAD_SITE=https://ftp.gnu.org/gnu; fi - if [[ "$M4_VER" != "" ]]; then 
M4_XZ_URL=$GNU_DOWNLOAD_SITE/m4/m4-$M4_VER.tar.xz; wget -O - $M4_XZ_URL | tar xf - --xz --directory ~; (cd ~/m4-$M4_VER && ./configure --prefix=$GNUTOOLS_ROOT && make -j check && make install); fi - if [[ "$LIBTOOL_VER" != "" ]]; then LIBTOOL_XZ_URL=$GNU_DOWNLOAD_SITE/libtool/libtool-$LIBTOOL_VER.tar.xz; wget -O - $LIBTOOL_XZ_URL | tar xf - --xz --directory ~; (cd ~/libtool-$LIBTOOL_VER && ./configure --prefix=$GNUTOOLS_ROOT && make -j && make install); fi - if [[ "$AUTOMAKE_VER" != "" ]]; then AUTOMAKE_XZ_URL=$GNU_DOWNLOAD_SITE/automake/automake-$AUTOMAKE_VER.tar.xz; wget -O - $AUTOMAKE_XZ_URL | tar xf - --xz --directory ~; (cd ~/automake-$AUTOMAKE_VER && ./configure --prefix=$GNUTOOLS_ROOT && make -j && make install); fi - if [[ "$MAKEFILE_TARGET" == "dist"* ]]; then autoconf --version; automake --version; m4 --version; libtool --version || true; fi - if [[ "$MAKEFILE_TARGET" == "" ]]; then MAKEFILE_TARGET=check; fi install: - "./autogen.sh" - if [[ "$REPORT_COVERAGE" == true ]]; then gem install coveralls-lcov; fi script: - ./configure $CONF_OPTIONS --enable-werror - if [[ "$CSA_CHECK" != true && "$CPPCHECK_ENABLE" == "" ]]; then cat src/config.h; fi - if [[ "$CROSS_GCC_VER" != "" ]]; then CC=$CROSS_CC; fi - make -j $MAKEFILE_TARGET CC=$CC CFLAGS_EXTRA="$CFLAGS_EXTRA" LDFLAGS="$LDFLAGS" - if [[ "$CC_FOR_CHECK" != "" ]]; then make check CC=$CC_FOR_CHECK CFLAGS_EXTRA="$CFLAGS_EXTRA"; fi - if [ -f tests/test_atomic.log ]; then cat tests/test_atomic*.log; fi - if [[ "$CSA_CHECK" == true ]]; then ${CC} --analyze -Xanalyzer -analyzer-output=text -Werror -I src $CFLAGS_EXTRA tests/*.c src/*.c; fi - if [[ "$CPPCHECK_ENABLE" != "" ]]; then cppcheck -f --error-exitcode=2 -D CPPCHECK -I src $CPPCHECK_ENABLE tests/*.c src/*.c; fi - if [[ "$TESTS_CUSTOM_RUN" == true ]]; then ASAN_OPTIONS="detect_leaks=1" UBSAN_OPTIONS="halt_on_error=1" make -C tests check-without-test-driver; fi after_success: - if [[ "$REPORT_COVERAGE" == true ]]; then lcov --capture --directory src 
--directory tests --output-file coverage.info; lcov --remove coverage.info '/usr/*' 'tests/*' --output-file coverage.info; lcov --list coverage.info; coveralls-lcov --repo-token ${COVERALLS_TOKEN} coverage.info; fi deploy: provider: releases edge: true file: libatomic_ops-*.tar.gz file_glob: true on: condition: $MAKEFILE_TARGET = distcheck repo: ivmai/libatomic_ops tags: true libatomic_ops-7.6.12/AUTHORS000066400000000000000000000042121411761111000154730ustar00rootroot00000000000000Originally written by Hans Boehm, with some platform-dependent code imported from the Boehm-Demers-Weiser GC, where it was contributed by many others. Currently maintained by Ivan Maidanski. Alexey Pavlov Andreas Tobler Andrew Agno Andy Li Bradley Smith Bruce Mitchener Carlos O'Donell Chris Metcalf Daniel Grayson Doug Lea Earl Chew Emmanuel Stapf Fabrizio Fabbri Frank Schaefer Frederic Recoules George Koehler Gilles Talis Gregory Farnum H.J. Lu Hans Boehm Hans-Peter Nilsson Ian Wienand Ivan Maidanski James Cowgill Jean Girardet Jeremy Huddleston Jim Marshall Joerg Wagner Linas Vepstas Luca Barbato Kochin Chang Maged Michael Manuel Serrano Marek Vasut Max Horn Michael Hope Mikael Urankar Patrick Marlier Pavel Raiskup Petter Urkedal Philipp Zambelli Ranko Zivojnovic Roger Hoover Sebastian Siewior Shea Levy Steve Capper Takashi Yoshii Tautvydas Zilys Thiemo Seufer Thorsten Glaser Tobias Leich Tony Mantler YunQiang Su Yvan Roux libatomic_ops-7.6.12/COPYING000066400000000000000000000432541411761111000154670ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. 
By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. 
Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. 
You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. 
But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. 
For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. 
Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. 
Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. 
Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. 
Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. 
If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. libatomic_ops-7.6.12/ChangeLog000066400000000000000000000716561411761111000162150ustar00rootroot00000000000000 == [7.6.12] 2021-09-13 == * Allow to generalize bool-CAS for sparc (gcc) * Declare argument of AO_load_next with const in atomic_ops_stack * Describe double_compare_and_swap operation in README_details * Document CAS operations better in README_details * Fix gcc/sunc x86 AO_compare_double_and_swap_double missing side effect * Fix library name in README_details * Fix link fail caused by missing GCC char/short atomic primitives on riscv64 * Fix size of local variable passed to cas[x] (gcc/sparc) * Implement fetch-CAS for sparc (gcc) * Refactor gcc x86 memory constraints * Refine and reformat description of size prefix in README_details * Remove outdated notes in README_details * Replace x86 setz instruction by asm flag output operand (gcc) * Support MSYS host (configure) * Turn off compare_double_and_swap_double_full PIC hack for GCC 5+ (x86) * Update README_win32 to match Makefile.msft * Use GCC atomic intrinsics for s390x (clang 8.0+ and gcc 5.4+) * Use __alignof__ instead of sizeof in atomic variable alignment assertions * Workaround assertion violation in AO_load/store on m68k == [7.6.10] 2019-03-01 == * Eliminate 'my_chunk_ptr-AO_initial_heap out of bounds' cppcheck warning * Fix 'AO_*_TS_T is not defined' compiler warnings (GCC-8) * Fix 'duplicate symbol' error for test_malloc/stack with static libs (OS X) * Workaround 'argument to function assert is always 1' cppcheck warnings == [7.6.8] 2018-12-11 == * Eliminate 'casting signed to bigger unsigned int' CSA warning (test_stack) * Eliminate 'redundant blank line at start/end of block' CodeFactor warning * Fix 'Cannot implement CAS_full on this architecture' build error 
(nios2) * Fix a typo in arm_v6.h * Support aarch64-ilp32 (GCC) and UWP/arm64 (MS VC) targets * Undefine AO_ARM_HAVE_* private macros after their usage * Use standalone private macro to guard against AO_GCC_BARRIER redefinition * Workaround 'condition my_chunk_ptr is always false' cppcheck false positive == [7.6.6] 2018-08-07 == * COPYING: sync with FSF's gpl-2.0.txt * Fix 'undefined reference to __atomic_load/store/cas_16' error (gcc-7/x64) * Fix a typo in the overview section of README * Fix comments style in configure.ac and Makefile.am * Update copyright information in README and some header files == [7.6.4] 2018-03-27 == * Add RISC-V support * Convert atomic_ops_malloc.c and tests to valid C++ code * Eliminate 'function is never used' cppcheck warning for load_before_cas * Eliminate 'using argument that points at uninitialized var' cppcheck error * Fix 'AO_pt_lock undefined' error if cross-compiling manually (MinGW) * Fix public headers inclusion from clients C++ code * Remove gcc/nios2.h file (include gcc/generic.h directly for nios2) * Support MIPS rel6 == [7.6.2] 2017-12-24 == * Allow to alter DEFAULT/MAX_NTHREADS values in test_malloc/stack * Allow to select almost-non-blocking stack implementation explicitly * Annotate AO_malloc with 'alloc_size' and 'malloc' attributes * Avoid misleading 'AO_t undefined' error if wrong atomic_ops.h included * Define AO_TS_SET to 1 (true) if GCC atomic_test_and_set is used * Disable workaround in stack_pop_acquire that was needed for ancient Clang * Do not define AO_GCC_FORCE_HAVE_CAS for Clang 3.8+ (Aarch64) * Do not disallow to define double_load using built-in atomics (Aarch64) * Do not expose AO_GCC_FORCE_HAVE_CAS macro to client code (GCC) * Do not install documentation if configure --disable-docs (new option) * Do not produce .tar.bz2 distribution file (configure) * Eliminate '-pedantic is not an option that controls warnings' GCC message * Eliminate data race in cons() of test_malloc * Eliminate GCC-5 ASan 
global-buffer-overflow false positive for AO_stack_bl * Fill in allocated memory with values depending on thread id (test_malloc) * Fix 'bad register name %sil' assembler error (GCC-4.4/x86) * Fix 'unknown attribute no_sanitize' compiler warning for GCC * Fix AO_malloc for sizes near CHUNK_SIZE * Fix memory leak in test_malloc * Fix test failures for Clang-3.8 and older (Aarch64) * Fix test_stack failure if AO_PREFER_BUILTIN_ATOMICS (GCC/Aarch64) * Fix typo in AO_REAL_NEXT_PTR comment * Increase the default number of threads to 16 in test_malloc/stack * Mark unallocated/freed memory as inaccessible using ASan functionality * New macro (DONT_USE_MMAP) to support testing as if mmap() is unavailable * New macro to select stack implementation based on CAS-double * Place no_sanitize attributes in a GCC-compliant way * Prevent too long run of test_atomic_generalized (especially with TSan) * Simplify '#if' expressions in gcc/x86.h (code refactoring) * Test smallest allocation of large type (test_malloc) * Use __builtin_expect in atomic_ops_malloc * Use built-in atomics for load/store/CAS for Clang by default (Aarch64) * Use double-word atomic intrinsics for recent Clang versions (gcc/x86.h) * Use GCC atomic intrinsics for Hexagon (clang 3.9+) * Use generalized double-wide load/store if AO_PREFER_GENERALIZED (Aarch64) * Workaround 'unused result' code defects in atomic_ops.c, list_atomic * Workaround Thread Sanitizer (TSan) false positive warnings Also, includes 7.4.8 changes == [7.6.0] 2017-05-19 == * Add *_and/or/xor* and *_[fetch_]compare_and_swap* tests to test_atomic * Add asm-based and/or/xor implementation for char/short/int (gcc/x86) * Add asm-based char/short/int CAS implementation for gcc/x86[_64] * Add configure '--disable-atomic-intrinsics' option * Add dd_acquire_read case to test_atomic * Add initial nios2 architecture support * Add Makefile target (check-nolink) to compile all source without linking * Add Makefile target to run all tests without test-driver 
* Add test_atomic_generalized to Makefile and Makefile.msft * Allow alternate CC (CROSS_CC) for AC_TRY_COMPILE (configure) * Always define word-wide CAS for x86 (MS VC++ 8+) * Avoid atomic_compare_exchange_n if no __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n * Avoid extra nop_full in stack_pop_acquire if atomic intrinsics used (x86) * Basic support of TILE-Gx and TILEPro CPUs * Code refactoring of int-wide primitives in gcc/x86.h * Define AO_TS_SET as __GCC_ATOMIC_TEST_AND_SET_TRUEVAL if applicable * Define CLANG/GNUC_PREREQ macros to check gcc/clang minimum version * Do not define print_list() unless used (tests) * Eliminate 'condition sizeof(long)>4 is always true' cppcheck style warning * Eliminate 'ISO C90 does not support long long' compiler pedantic warning * Eliminate 'scope of variable can be reduced' cppcheck warnings * Eliminate redundant lwsync 2nd call in CAS_full on fail (gcc/PowerPC) * Fix 'unknown attribute no_sanitize' compiler warning (clang prior to v3.8) * Fix 'variable new value is never used' cppcheck style warning * Fix missing double_compare_and_swap_dd_acquire_read * Fix reporting about missing and/or/xor_dd_acquire_read (test_atomic) * Hide AO_locks symbol * Implement AO_CLEAR using C11 atomic intrinsic (GCC) * Implement CAS_acquire/release/full using __atomic_compare_exchange_n (gcc) * Implement char and/or/xor and short CAS for msftc ARM and X86[_64] * Implement char CAS and char/short add for msftc X86[_64] (VS 2013+) * Implement compiler_barrier using C11 __atomic_signal_fence (GCC) * Implement int CAS/inc/dec for msftc/x86_64 * Implement short inc/dec directly for msftc ARM and X86[_64] * Initial ibmc/powerpc (xlc) support * New configure option (--enable-werror) to treat warnings as compiler errors * New macro AO_PREFER_BUILTIN_ATOMICS to rely on C11 atomics fully (AArch64) * Refine AO_nop_write comment for ARM big.LITTLE architecture * Refine configure messages when checking for compiler options * Refine documentation about _full memory 
ordering suffix * Refine README how to build the library source from the repository * Relax shareability domain for dmb st in AO_nop_write (ARM/AArch64) * Remove redundant include windows.h from headers (msftc/x86[_64]) * Remove spaces at EOLn in asm code * Report gcc/clang pedantic warnings (configure) * Support NaCl/arm * Suppress 'ISO C does not support __int128 type' GCC/Clang pedantic warning * Test store/CAS emulation explicitly * Update shared libraries version info to 2:0:1 * Use GCC atomic intrinsics for PowerPC 32/64 (GCC 4.8+ and clang 3.8+) * Use GCC atomic intrinsics for x86, x64, ARM, MIPS (gcc 4.9+, clang 3.5+) * Use generalized double-wide load/store if AO_PREFER_GENERALIZED (gcc/x86) * Workaround '#error' cppcheck error messages * Workaround 'condition always true', 'unused stored value' cppcheck warnings * Workaround 'function is never used' cppcheck style warnings * Workaround 'obsolescent ftime called' cppcheck style warning (POSIX) * Workaround 'overflow in pointer subtraction' cppcheck warning * Workaround 'shifting 32-bit value by 32 bits undefined' cppcheck warning * Workaround 'uninitialized memory use' code analyzer false warning (tests) * Workaround 'uninitialized variable' cppcheck error in hpc/hppa.h * Workaround 'value of macro is unknown' cppcheck information messages * Workaround a bug in double-wide intrinsics of Clang/x64 with ASan enabled * Workaround MSan warning about uninitialized data read by generalized store Also, includes 7.4.6 changes == [7.4.16] 2021-09-13 == * Fix gcc/sunc x86 AO_compare_double_and_swap_double missing side effect * Fix library name in README_details * Fix size of local variable passed to cas[x] (gcc/sparc) * Workaround assertion violation in AO_load/store on m68k == [7.4.14] 2019-03-01 == * Fix 'AO_*_TS_T is not defined' compiler warnings (GCC-8) == [7.4.12] 2018-12-11 == * COPYING: sync with FSF's gpl-2.0.txt * Fix a typo in arm_v6.h * Fix a typo in the overview section of README * Support ILP32 in 
AArch64 assembly routines (GCC) * Support UWP/arm64 target == [7.4.10] 2017-12-22 == * Fix AO_malloc for sizes near CHUNK_SIZE * Fix memory leak in test_malloc * Fix typo in AO_REAL_NEXT_PTR comment == [7.4.8] 2017-10-18 == * Fix 'missing select.h', 'undefined sigprocmask' compiler errors (Hexagon) * Update README about bugs reporting and new releases notification * Workaround misspelling in GCC to detect ARMv6KZ platform == [7.4.6] 2017-05-18 == * Add assertion that double-wide CAS target is aligned (msftc/x86[_64]) * Add configure --enable-gcov option (enable code coverage analysis) * Code refactoring of gcc/powerpc.h to avoid code duplication * Eliminate 'cast to long from void*' compiler warning in test_atomic * Eliminate 'implicit declaration of close' warning in 'strict ANSI' mode * Eliminate 'missing braces around initializer' gcc warning (hppa) * Eliminate 'printf format specifies type void*' GCC pedantic warnings * Eliminate 'value shift followed by expansion' false code defect warning * Enable limited testing in Makefile.msft without Cygwin * Fix (delete) comment for AO_and_full (x86) * Fix block_all_signals compilation in 'strict ANSI' mode * Fix missing .exe for test filenames in Makefile (MinGW) * Fix missing printed value names (test_stack) * Implement fetch-CAS for s390[x] (gcc) * Move libraries version info to the beginning of Makefile.am * Refine documentation in Makefile.msft how to run all tests (MS VC) * Refine README about library downloading * Rename doc/README.txt to doc/README_details.txt * Support AIX/ppc (gcc) * Support CFLAGS_EXTRA to pass extra user-defined compiler flags (make) * Support n32 ABI for mips64 * Update shared libraries version info for 7.4.6+ (to 1:4:0) * Use 'inline code' format for commands in README.md * Use LLD and SCD instructions on mips64 * Workaround 'resource leak' false positives in AO_malloc, add_elements * Workaround 'uninitialized memory use' MemorySanitizer warning (test_atomic) Also, includes 7.2h changes == 
[7.4.4] 2016-05-24 == * Eliminate 'signed-to-unsigned value extension' compiler warning in malloc * Eliminate 'variable set but not used' Cppcheck warnings in test_stack * Fix GCC 5.x compatibility for AArch64 double-wide primitives * Fix makefile preventing AO_pause undefined in libatomic_ops_gpl * Fix missing casts to match printf format specifier in test_atomic * Fix missing output folder on making auto-generated test files (Automake) * Fix typo in configure.ac (in description of AO_ASM_X64_AVAILABLE) * Minor fix of code alignment in mips AO_compare_and_swap * Remove TODO file * Restore contribution info in ChangeLog for authors not listed in git log Also, includes 7.2g changes == [7.4.2] 2014-05-02 == * Fix a typo in doc/README.txt (remove redundant "an" article) * Update emails/links due to project site transition Also, includes 7.2f changes == [7.4.0] 2013-11-17 == * Add and/or/xor entries to list_atomic (tests) * Add char/short/int/AO_double_t and dd_acquire cases to list_atomic (tests) * Add compile-time assertion for size of 'standard' AO_double_t * Add double_store pthread-based implementation and tests * Add generalized CAS primitives of char/short/int size * Add generalized atomic and/or/xor operations for char/short/int types * Add generalized fetch_and_add_acquire/release (for ARMv6+) * Add generic implementation of double_load primitives * Add information about AO_ASSUME_VISTA to README_win32 * Add internal header containing only char/short/int/AO_t atomic loads * Add load/store primitives generalization based on CAS * Add lock-based implementation of char/short/int_fetch_compare_and_swap * Add makefile rule to test list_atomic.template syntax (tests) * Add missing 'const' in aligned-atomic XSIZE_load implementation * Add missing double_compare_and_swap to generalization * Add missing generalization of no-barrier CAS in template * Add negative double-CAS test cases to test_atomic_include (tests) * Add test_stack to Makefile.msft (tests) * Adjust 
fprintf arguments type matching specifier in test_stack (tests) * Adjust included filenames in atomic_ops_malloc and test_stack * Adjust quotes in echo command of Makefile.msft (Win32) * Always use 'mfence' for nop_full if target CPU supports SSE2 (gcc/x86) * Better document configure THREADDLLIBS variable * Cast away volatile on dereference in CAS-based generalization primitives * Change policy regarding version numbers ("micro" part instead of "alpha") * Convert README to Markdown format * Define AO_NO_PTHREADS in configure if no pthreads (Win32 and VxWorks) * Define AO_int_X operations for ARM and avr32 * Define double-wide ordered loads/stores for x86 * Define int_and/or/xor primitives in ao_t_is_int header * Define nop_full as compiler barrier for pre-ARMv6 single-core case * Do not duplicate BUILT_SOURCES entries in nobase_private_HEADERS (Makefile) * Do not include standard_ao_double_t.h where double-CAS is unimplemented * Do not report absence of meaningless nop, load and store in test_atomic * Do not use deprecated AO_T and AO_TS_T (tests) * Eliminate 'missing initializer' warning for AO_stack_t value initializer * Eliminate 64-bit compiler warnings in atomic_ops_malloc * Eliminate arithmetic shifts in double-CAS (gcc/arm, msftc/x86) * Eliminate warning for fetch_and_add argument in test_atomic_include (tests) * Enable Makefile.msft for Win64 * Enable build using toolchain without pthreads * Enable double_compare_and_swap for non-cpp code (msftc/x86.h) * Enable generalization of all variants of CAS via fetch_compare_and_swap * Enable test_stack for pthreads-w32 and Win32 with native threads * Fix generalized AO_char/short_compare_and_swap args (missing 'unsigned') * Fix makefile sed rule for list_atomic (tests) * Fix missing abort() usage in atomic_ops_malloc and tests on WinCE * Generalize compare_double_and_swap_double using double_compare_and_swap * Generalize double_load/store for x86_64 (GCC) * Generate ao_t_is_int, 'loadstore' headers from templates 
* Generate generalized AO_t load/store/fetch_and_add primitives from template * Generate ordered_loads/stores_only headers from templates * Group all X_acquire_release_volatile.h and X_[aligned_]atomic_load_store.h * Implement and/or/xor, AO_double_load for ARM * Implement atomic store using direct write by default on ARMv6+ * Implement char/short/int-wide primitives using GCC built-in atomic/sync * Implement char/short/int_fetch_and_add for msftc/x86[_64] (Win32) * Implement char/short_fetch_and_add, char/short_load for ARMv6+ (GCC) * Implement char/short_store primitives at aligned addresses for ARM * Implement compare_double_and_swap_double for SunCC/x86 * Implement double_load/store based on guaranteed x86 access atomicity * Implement double_store for ARMv7 using LDREXD/STREXD * Implement load/store via simple LDR/STR for ARMv6+ (msftc) * Implement nop_full/write using 'dmb' instruction if available (gcc/arm) * Improve debug printing in test_stack (tests) * Log messages to stdout instead of stderr (tests) * Make AO_ASSUME_VISTA also enables Win98 code in msftc/x86.h (Win32) * Minimize gcc/generic-arithm template by factoring out barriers * Move 'unsigned' keyword to XCTYPE in generalize-small template * Move default compiler options to CFLAGS in Makefile.msft (Win32) * Move definitions of ordered loads/stores to inner separate headers * Move gcc-generic AO_t-wide primitives to generic-small/arithm headers * Move generalized arithmetical primitives to 'generalize-arithm' template * Optimize AO_spin manually to minimize compiler influence on its duration * Parameterize list_atomic template with XSIZE (tests) * Perform only few list reversals in test_malloc if AO based on pthreads * Put autogen.sh to 'dist' package (Automake) * Remote duplicate definition of test_and_set_acquire in generalize.h * Remove X_aligned_atomic_load_store headers and template * Remove duplicate AO_spin and AO_pause definition in atomic_ops_stack * Remove gcc/x86_64.h eliminating code 
duplication of gcc/x86.h * Remove nested AO_USE_PTHREAD_DEFS macro check in atomic_ops.h (gcc/arm) * Remove redundant 'cc' clobber for LDREXD instruction (gcc/arm) * Remove store_full from msftc/arm.h in favor of generalized primitive * Remove sunc/x86_64.h eliminating code duplication of sunc/x86.h * Remove unsafe emulation-based implementation of double CAS (SunCC/x86_64) * Remove useless 'perror' call in run_parallel.h (tests) * Reorder AO_double_t union elements for AO_DOUBLE_T_INITIALIZER portability * Replace atomic_load_store.template with atomic_load and atomic_store ones * Replace some FIXME items with TODO in atomic_ops.c and sysdeps headers * Specify fetch_and_add/sub1 result as unused in test_atomic (tests) * Support AArch64 (64-bit ARM) target (GCC) * Support ARMv8 target (gcc/arm) * Test double_compare_and_swap in test_atomic (tests) * Use AO_ prefix for internal functions in arm_v6.h, hppa.h * Use __atomic GCC built-in to implement generic double-wide CAS * Use built-in __sync CAS for double-CAS if AO_USE_SYNC_CAS_BUILTIN for x86 * Workaround GCC 4.4.3 warning reported for 'val' of list_atomic.c (tests) Also, includes 7.2e changes == [7.3alpha2] 2012-05-11 == * Add '-no-undefined' to LDFLAGS in src/Makefile.am * Add AO_and, AO_xor atomic operations * Add AO_fetch_compare_and_swap primitives * Add and fill in AUTHORS, TODO files * Add autogen.sh file * Adjust AO_..._H macros in public headers * Code refactoring of gcc/arm.h by introducing AO_ARM_HAVE_x macros * Define AO macros for libatomic_ops version identification * Do not define NDEBUG if '--enable-assertions' passed to configure * Eliminate compiler warnings in various functions and macros * Generalize AO_compare_and_swap primitives via AO_fetch_compare_and_swap * Generalize acquire/release/full CAS primitives for MIPS * Implement fetch_and_add, test_and_set primitives for MIPS * Improve Makefile for MS VC++; pass '-W3' option to MS compiler * Include ao_t_is_int.h from atomic_ops.h after first 
generalization pass * Merge all Makefile.am files in src tree * Minor code refactoring of atomic_ops.c, generic_pthread.h * Minor configure build improvements (e.g., ensure proper autoconf version) * Place only major per-release changes description to ChangeLog (this file) * Recognize AO_PREFER_GENERALIZED macro to favor generalization over assembly * Remove all auto-generated files except for generalize-small.h from the repo * Remove duplicate doc/COPYING and empty NEWS files * Replace atomic_ops_malloc static mmap-related empty functions with macros * Replace pointer relational comparisons with non-pointer ones * Require autoconf 2.61 instead of v2.64 * Show extra compiler warnings (GCC only) * Turn off AO primitives inlining if AO_NO_INLINE defined * Use __builtin_expect in CAS failure loop condition checks (GCC only) Also, includes 7.2 changes == [7.2j] 2021-09-12 == * Fix a typo in arm_v6.h * Fix asm constraints of primitives in sunc/x86_64.h * Fix gcc/sunc x86 AO_compare_double_and_swap_double missing side effect * Fix library name in README details * Fix size of local variable passed to cas[x] (gcc/sparc) * Workaround assertion violation in AO_load/store on m68k == [7.2i] 2017-12-21 == * Fix 'missing select.h', 'undefined sigprocmask' compiler errors (Hexagon) * Fix AO_malloc for sizes near CHUNK_SIZE * Fix typo in AO_REAL_NEXT_PTR comment == [7.2h] 2017-05-17 == * Add 'clean' target to Makefile.msft * Enable Makefile.msft for Win64 * Exclude 'check' from nmake all (Makefile.msft) * Fix 'Cannot implement CAS_full on this architecture' build error (cris) * Fix 'doc' files installation folder * Fix (improve) AO_REQUIRE_CAS description in README * Fix AO_SIZE_MAX definition (Linux/musl-gcc) * Fix assertions style in test_atomic_include * Fix size value wrap around in AO_malloc_large * Fix test_atomic failure caused unaligned AO_double_t access (x86) * Fix type of general AO_TS_INITIALIZER * Fix typo in comments in gcc/arm.h * Fix typos in 'error' pragma 
messages * Workaround test_stack failure on AIX/ppc == [7.2g] 2016-05-23 == * Add disclaimer to README to favor C11/C++14 atomics over libatomic_ops use * Regenerate configure files using official libtool release (v2.4.2) * Remove inclusion of acquire_release_volatile.h on MIPS * Remove obsolete information from README about C++0x standard future * Update links due to project site transition == [7.2f] 2014-05-02 == * Fix a typo in doc/README.txt (remove redundant "an" article) * Regenerate configure files by new automake (v1.14.1), libtool (v2.4.2.418) == [7.2e] 2013-11-10 == * Fix (remove) invalid include of read_ordered.h for ARM * Fix AM_CONFIG_HEADER in configure for autoconf-2.69-1 * Fix AO_pause sleep delay for particular argument values (Win32) * Fix ARMv7 LDREXD/STREXD double-wide operand specification (GCC/Clang) * Fix LDREXD/STREXD use for pre-Clang3.3/arm * Fix README regarding _acquire_read barrier * Fix XSIZE_load/store definition order in generalize-small template * Fix asm constraint of CAS memory operand for gcc/alpha, clang-3.1/mips * Fix asm constraints of primitives in sunc/x86.h * Fix cmpxchg16b-based compare_double_and_swap_double for SunCC/x86_64 * Fix compare_double_and_swap_double and double_ptr_storage for gcc/x32 * Fix compare_double_and_swap_double for clang3.0/x86 in PIC mode * Fix compare_double_and_swap_double_full definition condition in emul_cas * Fix generalize-small template adding missed CAS-based fetch_and_add * Fix generalized fetch_and_add function * Fix missing compiler barrier in nop_full for uniprocessor ARM * Fix ordered_except_wr header inclusion for s390 * Fix return type of AO_int_X primitives defined in ao_t_is_int header * Fix return type of char/short/int_load_read() in read_ordered.h * Fix template-based headers regeneration order in src/Makefile * Fix typos in ao_t_is_int, atomic_ops.h, generalize.h, msftc/arm.h comments * Fix variable type to match printf format specifier in test_stack * Fix visibility and initial 
value of 'dummy' variable in atomic_ops_stack * Terminate tests with abort after error reported == [7.2d] 2012-08-09 == * Fix AO_compare_double_and_swap_double_full for gcc-4.2.1/x86 in PIC mode * Fix AO_compiler_barrier missing parentheses * Fix missing 'unsigned' for generalized AO_char/short_fetch_and_add result == [7.2] 2012-05-11 == * Add atomic_ops.pc.in and atomic_ops-uninstalled.pc.in to pkgconfig folder * Define and use AO_PTRDIFF_T in tests for casts between pointer and integer * Fix AO_compare_and_swap return type for s390 and PowerPC * Fix AO_compare_double_and_swap_double_full for gcc/x86 (PIC mode) * Fix AO_stack_push_release to workaround bug in clang-1.1/x86 compiler * Fix AO_test_and_setXX in tests/list_atomic.template * Fix AO_test_and_set_full (gcc/x86[_64].h) to work-around a bug in LLVM v2.7 * Fix AO_test_and_set_full on m68k * Fix __ARM_ARCH_5__ macro handling for Android NDK (ARMv7) * Fix configure for Cygwin, mingw-w64/32 * Fix configure to define __PIC__ macro explicitly if needed (GCC) * Fix double_ptr_storage definition for GCC pre-v4 (x86_64) * Fix for x32 by removing 'q' suffix in x86-64 instructions * Fix generalization for IA-64 (regarding AO_or, AO_..._read/write primitives) * Fix generalized AO__fetch_and_add() return type * Fix test_atomic_include for the case of missing CAS primitive * Fix test_malloc - allocate less memory in case of missing mmap * Implement the basic atomic primitives for the hexagon CPU == [7.2alpha6] 2011-06-14 == * Add missing AO_HAVE_ macros * Add support of avr32 CPU * Better support of various models of ARM * Disable AO_compare_double_and_swap_double_full for SunCC x86 as not working * Enable ARM Thumb-2 mode * Fix AO_test_and_set_full for SunCC (x86) * Fix bugs in tests * Fix clobbers in AO_compare_and_swap_full (x86.h) * Fix typos in identifiers and comments * Improve AO_sync for PowerPC * Improve make scripts (configure.ac) * Make get_mmaped() in atomic_ops_malloc.c more portable * Support Intel 
compiler * Support NaCl target * Suppress compiler warnings in various places * Test more predefined macros (ARM, PowerPC) * Use assembly code only for MS VC if available (x86_64) * Use built-in __sync_bool_compare_and_swap if available (x86_64) * Workaround bugs in LLVM GCC and SunCC regarding XCHG (x86, x86_64) == [7.2alpha4] 2009-12-02 == * Fix typos in comments, identifiers and documentation * Implement AO_compare_and_swap_full for SPARC * Refine ARM-specific code * Refine code and comments for MS VC * Regenerate make scripts * Share common code for all 32-bit CPUs (MS VC) * Support DigitalMars and Watcom compilers * Support MS VC for ARM (WinCE) * Support SH CPU * Support win32-pthreads * Support x86 and x86_64 for SunCC compiler == [7.2alpha2] 2009-05-27 == * Add MIPS support * Add better support for m68k * Add "const" to first parameter of load calls * Add parentheses around address argument for various macros * Add some platform-specific documentation to INSTALL * Add untested 64-bit support for PowerPC * Fix AO_compare_and_swap_double_acquire * Fix AO_int_fetch_and_add_full (x86_64) * Fix comments * Fix s390 include paths * Fix use of lwz instruction (PowerPC) * Refine clobbers (PowerPC) * Remove outdated info about Windows support in README * Replace K&R-style function definition with ANSI C one * add AO_compare_double_and_swap_double for ARMv6 * gcc/powerpc.h: Consider __NO_LWSYNC__ == [7.1] 2008-02-11 == * Add test_and_set, AO_double_compare_and_swap generalizations * Conditionally add compare_double_and_swap_double (x86) * Conditionally add compare_double_and_swap_double (x86) * Fix AO_compare_double_and_swap_double_full (x86) for PIC mode * Fix AO_load_acquire for PowerPC * Fix double-width CAS (x86) * Refine README (add more warnings about data dependencies) * Refine double_ptr_storage type definition * Support ARMv6+ in GCC * Support ArmCC compiler * Use _InterlockedExchangeAdd for MS VC (x86) == [7.0] 2007-06-28 == * Add 64-bit version of 
AO_load_acquire for PowerPC (by Luca Barbato) * Add support of x86 and x86_64 for MS VC * Do not assume that "mfence" is always present (x86.h) * Fix ARM AO_test_and_set_full * Include windows.h (MS VC) * Update README to reflect C++0x effort == [1.2] 2006-07-11 == * Add prototypes to suppress compiler warnings * Add simple VxWorks support * Fix InterlockedCompareExchange proto usage * Fix typos (ia64) * Include all_acquire_release_volatile.h and all_atomic_load_store.h (ia64) * Initial support for 64-bit targets * Use "=q" for AO_test_and_set_full (x86) * Use inline assembler to generate "mfence" and byte sized XCHG * Use new intrinsics available in MSVC 2003 and MSVC 2005 == [1.1] 2005-09-27 == * Add and use read_ordered.h * Change function naming from "byte" to "char" * Fix AO_test_and_set for ARM; define AO_CAN_EMUL_CAS == [1.0] 2005-03-21 == * Add atomic_ops primitives for different sized data * Add compare_double_and_swap_double and compare_and_swap_double * Add gcc/cris.h (originally comes from Hans-Peter Nilsson) * Add gcc/m68k.h (contributed by Tony Mantler) * Add gcc/powerpc.h (with help of Maged Michael, Doug Lea, Roger Hoover) * Add initial support for atomic_ops for VC++/Windows/X86 and HP/UX * Add minimal support for the Sun SPARC compiler * Add support for platforms that require out-of-line assembly code * Add support of int-wide operations on platforms with int-sized pointers * Added libatomic_ops_gpl library with support for lock-free stack and malloc * Change atomic_ops include file structure * Change most platforms to use byte-wide test-and-set locations * Define AO_CLEAR, __ldcw[_align] macros in gcc/hppa.h (by Carlos O'Donell) * Fix various bugs * Install under "atomic_ops" instead of "ao" * Remove compiler_barrier workaround for gcc 3.4+ * Renamed various types to end in _t * Replace AO_HAVE_NOP_FULL with AO_HAVE_nop_full (by Ranko Zivojnovic) * Use autoconf, automake 
libatomic_ops-7.6.12/Makefile.am000066400000000000000000000011521411761111000164570ustar00rootroot00000000000000SUBDIRS = src doc tests ACLOCAL_AMFLAGS = -I m4 pkgconfigdir = $(libdir)/pkgconfig pkgconfig_DATA = pkgconfig/atomic_ops.pc noinst_DATA = pkgconfig/atomic_ops-uninstalled.pc if ENABLE_DOCS dist_doc_DATA = COPYING README.md endif EXTRA_DIST = autogen.sh ## TODO: After migration to autoconf-1.13+, remove check-nolink definition ## from this Makefile.am and add AM_EXTRA_RECURSIVE_TARGETS([check-nolink]) ## back to configure.ac file. .PHONY: check-nolink check-nolink-local check-nolink: check-nolink-local $(MAKE) --directory tests $(AM_MAKEFLAGS) check-nolink-local check-nolink-local: all #distclean-local: libatomic_ops-7.6.12/README.md000066400000000000000000000104051411761111000157030ustar00rootroot00000000000000# The atomic_ops library (`libatomic_ops`) IN NEW CODE, PLEASE USE C11 OR C++14 STANDARD ATOMICS INSTEAD OF THIS PACKAGE. This is version 7.6.12 of libatomic_ops. ## Download You might find a more recent/stable version on the [Download](https://github.com/ivmai/libatomic_ops/wiki/Download) page, or [BDWGC site](http://www.hboehm.info/gc/). Also, the latest bug fixes and new features are available in the [development repository](https://github.com/ivmai/libatomic_ops). ## Overview This package provides semi-portable access to hardware-provided atomic memory update operations on a number of architectures. These might allow you to write code: * That does more interesting things in signal handlers. * Makes more effective use of multiprocessors by allowing you to write clever lock-free code. Note that such code is very difficult to get right, and will unavoidably be less portable than lock-based code. It is also not always faster than lock-based code. But it may occasionally be a large performance win. * To experiment with new and much better thread programming paradigms, etc. 
For details and licensing restrictions see the files in the "doc" subdirectory. ## Installation and Usage The configuration and build scripts for this package were generated by Automake/Autoconf. `./configure; make; sudo make install` in this directory should work. For a more customized build, see the output of `./configure --help`. To build it from the development repository, `./autogen.sh` should be executed first. Note that much of the content of this library is in the header files. However, two small libraries are built and installed: * `libatomic_ops.a` is a support library, which is not needed on some platforms. This is intended to be usable, under some mild restrictions, in free or proprietary code, as are all the header files. See doc/LICENSING.txt. * `libatomic_ops_gpl.a` contains some higher level facilities. This code is currently covered by the GPL. The contents currently correspond to the headers `atomic_ops_stack.h` and `atomic_ops_malloc.h`. ## Platform Specific Notes Win32/64: src/Makefile.msft contains a very simple Makefile for building and running tests and building the gpl library. The core `libatomic_ops` implementation is entirely in header files. HP-UX/PA-RISC: `aCC -Ae` won't work as a C compiler, since it doesn't support inline assembly code. Use cc. ## Feedback, Contribution, Questions and Notifications Please address bug reports and new feature ideas to [GitHub issues](https://github.com/ivmai/libatomic_ops/issues). Before the submission please check that it has not been done yet by someone else. If you want to contribute, submit a [pull request](https://github.com/ivmai/libatomic_ops/pulls) to GitHub. If you need help, use [Stack Overflow](https://stackoverflow.com/questions/tagged/atomic-ops). Older questions on the site can be found by [this query](https://stackoverflow.com/search?q=atomic_ops). 
Older technical discussions are also available in `bdwgc` mailing list archive - it can be downloaded as a [compressed file](https://github.com/ivmai/bdwgc/files/1038163/bdwgc-mailing-list-archive-2017_04.tar.gz) or browsed at [Narkive](http://bdwgc.opendylan.narkive.com) (please search for _atomic_ keyword). To get new release announcements, subscribe to [RSS feed](https://github.com/ivmai/libatomic_ops/releases.atom). (To receive the notifications by email, a 3rd-party free service like [IFTTT RSS Feed](https://ifttt.com/feed) can be setup.) To be notified on all issues, please [watch](https://github.com/ivmai/libatomic_ops/watchers) the project on GitHub. ## Copyright & Warranty * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 1999-2011 Hewlett-Packard Development Company, L.P. * Copyright (c) 2008-2021 Ivan Maidanski The file armcc/arm_v6.h is also * Copyright (c) 2007 by NEC LE-IT. All rights reserved. The file gcc/avr32.h is * Copyright (c) 2009 Bradley Smith The file gcc/mips.h is * Copyright (c) 2005, 2007 Thiemo Seufer The file gcc/sh.h is * Copyright (c) 2009 by Takashi YOSHII. All rights reserved. See [LICENSING.txt](doc/LICENSING.txt) for the details. libatomic_ops-7.6.12/appveyor.yml000066400000000000000000000011511411761111000170120ustar00rootroot00000000000000version: 7.6.x-{build} clone_depth: 50 environment: MS_SDK_VER: v7.1 matrix: - CPU: x86 BLD: debug CFLAGS_EXTRA: -DAO_ASSUME_VISTA -DAO_USE_PENTIUM4_INSTRS - CPU: x86 BLD: release - CPU: x64 BLD: debug CFLAGS_EXTRA: -DAO_CMPXCHG16B_AVAILABLE - CPU: x64 BLD: release install: - cmd: '"C:\Program Files\Microsoft SDKs\Windows\%MS_SDK_VER%\Bin\SetEnv.cmd" /%CPU% /%BLD%' build_script: - cmd: cd src && nmake -f Makefile.msft clean all CFLAGS_EXTRA="%CFLAGS_EXTRA%" && cd .. 
test_script: - cmd: cd src && nmake -f Makefile.msft check-noautogen CFLAGS_EXTRA="%CFLAGS_EXTRA%" libatomic_ops-7.6.12/autogen.sh000077500000000000000000000003271411761111000164270ustar00rootroot00000000000000#!/bin/sh set -e # This script creates (or regenerates) configure (as well as aclocal.m4, # config.h.in, Makefile.in, etc.) missing in the source repository. autoreconf -i echo echo "Ready to run './configure'." libatomic_ops-7.6.12/configure.ac000066400000000000000000000201601411761111000167110ustar00rootroot00000000000000# Copyright (c) 2005-2006 Hewlett-Packard Development Company, L.P. # Copyright (c) 2009-2019 Ivan Maidanski # # THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED # OR IMPLIED. ANY USE IS AT YOUR OWN RISK. # # Permission is hereby granted to use or copy this program # for any purpose, provided the above notices are retained on all copies. # Permission to modify the code and to distribute modified code is granted, # provided the above notices are retained, and a notice that the code was # modified is included with the above copyright notice. dnl Process this file with autoconf to produce configure. AC_INIT([libatomic_ops],[7.6.12],https://github.com/ivmai/libatomic_ops/issues) AC_PREREQ(2.61) AC_CANONICAL_TARGET([]) AC_CONFIG_SRCDIR(src/atomic_ops.c) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([foreign nostdinc]) AM_MAINTAINER_MODE AC_CONFIG_HEADERS([src/config.h]) dnl Checks for programs. AM_PROG_CC_C_O AM_PROG_AS LT_INIT([disable-shared]) dnl Checks for functions. AC_FUNC_MMAP # Determine PIC flag. need_asm=false PICFLAG= AC_MSG_CHECKING(for PIC compiler flag) if test "$GCC" = yes; then old_CC="$CC" if test -n "$CROSS_CC"; then CC="$CROSS_CC" fi case "$host" in *-*-cygwin* | *-*-mingw* | *-*-msys*) # Cygwin and Mingw[-w32/64] do not need -fPIC. 
AC_MSG_RESULT([not needed]) ;; *) AC_MSG_RESULT(-fPIC) PICFLAG=-fPIC AC_MSG_CHECKING(whether -fPIC compiler option causes __PIC__ definition) # Workaround: at least GCC 3.4.6 (Solaris) does not define this macro. old_CFLAGS="$CFLAGS" CFLAGS="$PICFLAG $CFLAGS" AC_TRY_COMPILE([],[ #ifndef __PIC__ # error #endif ], [ac_cv_pic_macro=yes], [ac_cv_pic_macro=no]) CFLAGS="$old_CFLAGS" AC_MSG_RESULT($ac_cv_pic_macro) AS_IF([test "$ac_cv_pic_macro" = yes], [], [PICFLAG="-D__PIC__=1 $PICFLAG"]) ;; esac # Output all warnings. AC_MSG_CHECKING([whether compiler supports -Wextra]) old_CFLAGS="$CFLAGS" CFLAGS="-Wextra $CFLAGS" AC_TRY_COMPILE([],[], [ac_cv_cc_wextra=yes], [ac_cv_cc_wextra=no]) CFLAGS="$old_CFLAGS" AC_MSG_RESULT($ac_cv_cc_wextra) AS_IF([test "$ac_cv_cc_wextra" = yes], [WEXTRA="-Wextra"], [WEXTRA="-W"]) AC_MSG_CHECKING([whether compiler supports -Wpedantic]) CFLAGS="-Wpedantic -Wno-long-long $CFLAGS" AC_TRY_COMPILE([],[ extern int quiet; ], [ac_cv_cc_pedantic=yes], [ac_cv_cc_pedantic=no]) CFLAGS="$old_CFLAGS" AC_MSG_RESULT($ac_cv_cc_pedantic) WPEDANTIC= AS_IF([test "$ac_cv_cc_pedantic" = yes], [WPEDANTIC="-Wpedantic -Wno-long-long"]) CFLAGS="-Wall $WEXTRA $WPEDANTIC $CFLAGS" AC_ARG_ENABLE(werror, [AC_HELP_STRING([--enable-werror], [Pass -Werror to the C compiler])]) if test "$enable_werror" = yes; then CFLAGS="-Werror $CFLAGS" fi CC="$old_CC" else case "$host" in *-*-hpux*) AC_MSG_RESULT([+Z]) PICFLAG="+Z" CFLAGS="+O2 -mt $CFLAGS" ;; *-*-solaris*) AC_MSG_RESULT(-Kpic) PICFLAG=-Kpic CFLAGS="-O $CFLAGS" need_asm=true ;; *-*-linux*) AC_MSG_RESULT(-fPIC) PICFLAG=-fPIC # Any Linux compiler had better be gcc compatible. 
;; *) AC_MSG_RESULT([none]) ;; esac fi AC_ARG_ENABLE(assertions, [AC_HELP_STRING([--enable-assertions], [Assertion checking])]) if test "$enable_assertions" != yes; then AC_DEFINE([NDEBUG], 1, [Define to disable assertion checking.]) fi AC_ARG_ENABLE(atomic-intrinsics, [AC_HELP_STRING([--disable-atomic-intrinsics], [Do not use C11 atomic intrinsics])]) if test "$enable_atomic_intrinsics" = no; then AC_DEFINE([AO_DISABLE_GCC_ATOMICS], 1, [Define to avoid C11 atomic intrinsics even if available.]) fi AC_ARG_ENABLE(gcov, AC_HELP_STRING([--enable-gcov], [Turn on code coverage analysis])) if test "$enable_gcov" = "yes"; then CFLAGS="$CFLAGS --coverage" # Turn off code optimization to get accurate line numbers. CFLAGS=`echo "$CFLAGS" | sed -e 's/-O\(1\|2\|3\|4\|s\|fast\)\?//g'` fi AC_ARG_ENABLE(docs, [AC_HELP_STRING([--disable-docs], [Do not build and install documentation])]) AM_CONDITIONAL(ENABLE_DOCS, test x$enable_docs != xno) AC_SUBST(PICFLAG) AC_SUBST(DEFS) dnl Extra user-defined C flags. AC_SUBST([CFLAGS_EXTRA]) AH_TEMPLATE([_PTHREADS], [Indicates the use of pthreads (NetBSD).]) AH_TEMPLATE([AO_USE_NANOSLEEP], [Use nanosleep() instead of select() (only if atomic operations \ are emulated)]) AH_TEMPLATE([AO_USE_NO_SIGNALS], [Do not block signals in compare_and_swap (only if atomic operations \ are emulated)]) AH_TEMPLATE([AO_USE_WIN32_PTHREADS], [Use Win32 Sleep() instead of select() (only if atomic operations \ are emulated)]) AH_TEMPLATE([AO_TRACE_MALLOC], [Trace AO_malloc/free calls (for debug only)]) dnl These macros are tested in public headers. 
AH_TEMPLATE([AO_GENERALIZE_ASM_BOOL_CAS], [Force compare_and_swap definition via fetch_compare_and_swap]) AH_TEMPLATE([AO_PREFER_GENERALIZED], [Prefer generalized definitions to direct assembly-based ones]) AH_TEMPLATE([AO_USE_PTHREAD_DEFS], [Emulate atomic operations via slow and async-signal-unsafe \ pthread locking]) AH_TEMPLATE([AO_ASM_X64_AVAILABLE], [Inline assembly available (only VC/x86_64)]) AH_TEMPLATE([AO_ASSUME_VISTA], [Assume Windows Server 2003, Vista or later target (only VC/x86)]) AH_TEMPLATE([AO_CMPXCHG16B_AVAILABLE], [Assume target is not old AMD Opteron chip (only x86_64)]) AH_TEMPLATE([AO_FORCE_USE_SWP], [Force test_and_set to use SWP instruction instead of LDREX/STREX \ (only arm v6+)]) AH_TEMPLATE([AO_NO_SPARC_V9], [Assume target is not sparc v9+ (only sparc)]) AH_TEMPLATE([AO_OLD_STYLE_INTERLOCKED_COMPARE_EXCHANGE], [Assume ancient MS VS Win32 headers (only VC/arm v6+, VC/x86)]) AH_TEMPLATE([AO_UNIPROCESSOR], [Assume single-core target (only arm v6+)]) AH_TEMPLATE([AO_USE_INTERLOCKED_INTRINSICS], [Assume Win32 _Interlocked primitives available as intrinsics \ (only VC/arm)]) AH_TEMPLATE([AO_USE_PENTIUM4_INSTRS], [Use Pentium 4 'mfence' instruction (only x86)]) AH_TEMPLATE([AO_USE_SYNC_CAS_BUILTIN], [Prefer GCC built-in CAS intrinsics in favor of inline assembly \ (only gcc/x86, gcc/x86_64)]) AH_TEMPLATE([AO_WEAK_DOUBLE_CAS_EMULATION], [Emulate double-width CAS via pthread locking in case of no hardware \ support (only gcc/x86_64, the emulation is unsafe)]) AH_TEMPLATE([AO_PREFER_BUILTIN_ATOMICS], [Prefer C11 atomic intrinsics over assembly-based implementation \ even in case of inefficient implementation (do not use assembly for \ any atomic_ops primitive if C11/GCC atomic intrinsics available)]) AC_DEFINE(_REENTRANT, 1, [Required define if using POSIX threads.]) # Libraries needed to support threads (if any). 
have_pthreads=false AC_CHECK_LIB(pthread, pthread_self, have_pthreads=true) if test x$have_pthreads = xtrue; then THREADDLLIBS=-lpthread case "$host" in *-*-netbsd*) # Indicates the use of pthreads. AC_DEFINE(_PTHREADS) ;; *-*-openbsd* | *-*-kfreebsd*-gnu | *-*-dgux*) THREADDLLIBS=-pthread ;; *-*-cygwin* | *-*-darwin*) # Cygwin does not have a real libpthread, so Libtool cannot link # against it. THREADDLLIBS= ;; *-*-mingw* | *-*-msys*) # Use Win32 threads for tests anyway. THREADDLLIBS= # Skip test_atomic_pthreads. have_pthreads=false ;; esac else AC_DEFINE([AO_NO_PTHREADS], 1, [No pthreads library available]) # Assume VxWorks or Win32. THREADDLLIBS= fi AC_SUBST(THREADDLLIBS) AM_CONDITIONAL(ENABLE_SHARED, test x$enable_shared = xyes) AM_CONDITIONAL(HAVE_PTHREAD_H, test x$have_pthreads = xtrue) AM_CONDITIONAL(NEED_ASM, test x$need_asm = xtrue) AC_CONFIG_FILES([ Makefile doc/Makefile src/Makefile tests/Makefile pkgconfig/atomic_ops.pc pkgconfig/atomic_ops-uninstalled.pc ]) AC_CONFIG_COMMANDS([default],[[]],[[ PICFLAG="${PICFLAG}" CC="${CC}" DEFS="${DEFS}" ]]) AC_OUTPUT libatomic_ops-7.6.12/doc/000077500000000000000000000000001411761111000151715ustar00rootroot00000000000000libatomic_ops-7.6.12/doc/LICENSING.txt000066400000000000000000000053601411761111000172510ustar00rootroot00000000000000Our intent is to make it easy to use libatomic_ops, in both free and proprietary software. Hence most code that we expect to be linked into a client application is covered by an MIT-style license. A few library routines are covered by the GNU General Public License. These are put into a separate library, libatomic_ops_gpl.a . The low-level part of the library is mostly covered by the following license: ---------------------------------------- Copyright (c) ... 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------- A few files in the sysdeps directory were inherited in part from the Boehm-Demers-Weiser conservative garbage collector, and are covered by its license, which is similar in spirit: -------------------------------- Copyright (c) ... THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. Permission is hereby granted to use or copy this program for any purpose, provided the above notices are retained on all copies. Permission to modify the code and to distribute modified code is granted, provided the above notices are retained, and a notice that the code was modified is included with the above copyright notice. ---------------------------------- A few files are covered by the GNU General Public License. (See file "COPYING".) This applies only to test code, sample applications, and the libatomic_ops_gpl portion of the library. 
Thus libatomic_ops_gpl should generally not be linked into proprietary code. (This distinction was motivated by patent considerations.) It is possible that the license of the GPL pieces may be changed for future versions to make them more consistent with the rest of the package. If you submit patches, and have strong preferences about licensing, please express them. libatomic_ops-7.6.12/doc/Makefile.am000066400000000000000000000002351411761111000172250ustar00rootroot00000000000000# installed documentation # if ENABLE_DOCS dist_doc_DATA = LICENSING.txt README_details.txt README_stack.txt \ README_malloc.txt README_win32.txt endif libatomic_ops-7.6.12/doc/README_details.txt000066400000000000000000000277561411761111000204150ustar00rootroot00000000000000Usage: 0) If possible, do this on a multiprocessor, especially if you are planning on modifying or enhancing the package. It will work on a uniprocessor, but the tests are much more likely to pass in the presence of serious problems. 1) Type ./configure --prefix=; make; make check in the directory containing unpacked source. The usual GNU build machinery is used, except that only static, but position-independent, libraries are normally built. On Windows, read README_win32.txt instead. 2) Applications should include atomic_ops.h. Nearly all operations are implemented by header files included from it. It is sometimes necessary, and always recommended to also link against libatomic_ops.a. To use the almost non-blocking stack or malloc implementations, see the corresponding README files, and also link against libatomic_ops_gpl.a before linking against libatomic_ops.a. OVERVIEW: Atomic_ops.h defines a large collection of operations, each one of which is a combination of an (optional) atomic memory operation, and a memory barrier. Also defines associated feature-test macros to determine whether a particular operation is available on the current target hardware (either directly or by synthesis). 
This is an attempt to replace various existing files with similar goals, since they usually do not handle differences in memory barrier styles with sufficient generality. If this is included after defining AO_REQUIRE_CAS, then the package makes an attempt to emulate [fetch_]compare_and_swap* (single-width) in a way that, at least on Linux, should still be async-signal-safe. As a result, most other atomic operations may then be defined using the compare-and-swap emulation. This emulation is slow, since it needs to disable signals. And it needs to block in case of contention. If you care about performance on a platform that can't directly provide compare-and-swap, there are probably better alternatives. But this allows easy ports to some such platforms (e.g. PA_RISC). The option is ignored if compare-and-swap can be implemented directly. If atomic_ops.h is included after defining AO_USE_PTHREAD_DEFS, then all atomic operations will be emulated with pthread locking. This is NOT async-signal-safe. And it is slow. It is intended primarily for debugging of the atomic_ops package itself. Note that the implementation reflects our understanding of real processor behavior. This occasionally diverges from the documented behavior. (E.g. the documented X86 behavior seems to be weak enough that it is impractical to use. Current real implementations appear to be much better behaved.) We of course are in no position to guarantee that future processors (even HPs) will continue to behave this way, though we hope they will. Corrections/additions for other platforms are greatly appreciated. OPERATIONS: Most operations handle values of type AO_t, which are unsigned integers whose size matches that of pointers on the given architecture. Additionally, on most supported architectures the operations are also implemented to handle smaller integers types; such operations are indicated by the appropriate size prefix: - char_... Operates on unsigned char values; - short_... 
Operates on unsigned short values; - int_... Operates on unsigned int values. The notable exception is AO_test_and_set operating only on AO_TS_t, which is whatever size the hardware supports with good performance. In some cases this is the length of a cache line, in some other cases it is a byte. In many cases AO_TS_t is equivalent to AO_t. The defined operations are all of the form AO_[](). The component specifies an atomic memory operation. It may be one of the following, where the corresponding argument and result types are also specified: void nop() No atomic operation. The barrier may still be useful. AO_t load(const volatile AO_t * addr) Atomic load of *addr. void store(volatile AO_t * addr, AO_t new_val) Atomically store new_val to *addr. AO_t fetch_and_add(volatile AO_t *addr, AO_t incr) Atomically add incr to *addr, and return the original value of *addr. AO_t fetch_and_add1(volatile AO_t *addr) Equivalent to AO_fetch_and_add(addr, 1). AO_t fetch_and_sub1(volatile AO_t *addr) Equivalent to AO_fetch_and_add(addr, (AO_t)(-1)). void and(volatile AO_t *addr, AO_t value) Atomically 'and' value into *addr. void or(volatile AO_t *addr, AO_t value) Atomically 'or' value into *addr. void xor(volatile AO_t *addr, AO_t value) Atomically 'xor' value into *addr. int compare_and_swap(volatile AO_t * addr, AO_t old_val, AO_t new_val) Atomically compare *addr to old_val, and replace *addr by new_val if the first comparison succeeds; returns nonzero if the comparison succeeded and *addr was updated; cannot fail spuriously. AO_t fetch_compare_and_swap(volatile AO_t * addr, AO_t old_val, AO_t new_val) Atomically compare *addr to old_val, and replace *addr by new_val if the first comparison succeeds; returns the original value of *addr; cannot fail spuriously. AO_TS_VAL_t test_and_set(volatile AO_TS_t * addr) Atomically read the binary value at *addr, and set it. AO_TS_VAL_t is an enumeration type which includes two values AO_TS_SET and AO_TS_CLEAR. 
An AO_TS_t location is capable of holding an AO_TS_VAL_t, but may be much larger, as dictated by hardware constraints. Test_and_set logically sets the value to AO_TS_SET. It may be reset to AO_TS_CLEAR with the AO_CLEAR(AO_TS_t *) macro. AO_TS_t locations should be initialized to AO_TS_INITIALIZER. The values of AO_TS_SET and AO_TS_CLEAR are hardware dependent. (On PA-RISC, AO_TS_SET is zero!) Test_and_set is a more limited version of compare_and_swap. Its only advantage is that it is more easily implementable on some hardware. It should thus be used if only binary test-and-set functionality is needed. If available, we also provide compare_and_swap operations that operate on wider values. Since standard data types for double width values may not be available, these explicitly take pairs of arguments for the new and/or old value. Unfortunately, there are two common variants, neither of which can easily and efficiently emulate the other. The first performs a comparison against the entire value being replaced, where the second replaces a double-width replacement, but performs a single-width comparison: int compare_double_and_swap_double(volatile AO_double_t * addr, AO_t old_val1, AO_t old_val2, AO_t new_val1, AO_t new_val2); int compare_and_swap_double(volatile AO_double_t * addr, AO_t old_val1, AO_t new_val1, AO_t new_val2); where AO_double_t is a structure containing AO_val1 and AO_val2 fields, both of type AO_t. For compare_and_swap_double, we compare against the val1 field. AO_double_t exists only if AO_HAVE_double_t is defined. 
If this type is available then the following operation is provided for convenience, fully equivalent to compare_double_and_swap_double: int double_compare_and_swap(volatile AO_double_t * addr, AO_double_t old_val, AO_double_t new_val) Please note that AO_double_t (and AO_stack_t) variables should be properly aligned (8-byte alignment on 32-bit targets, 16-byte alignment on 64-bit ones) otherwise the behavior of a double-wide atomic primitive might be undefined (or an assertion violation might occur) if such a misaligned variable is passed (as a reference) to the primitive. Global and static variables should already have proper alignment automatically but automatic variables (i.e. located on the stack) might be misaligned because the stack might be word-aligned (e.g. 4-byte stack alignment is the default one for x86). Luckily, stack-allocated AO variables operated atomically are used rarely in practice. ORDERING CONSTRAINTS: Each operation name also includes a suffix that specifies the associated ordering semantics. The ordering constraint limits reordering of this operation with respect to other atomic operations and ordinary memory references. The current implementation assumes that all memory references are to ordinary cacheable memory; the ordering guarantee is with respect to other threads or processes, not I/O devices. (Whether or not this distinction is important is platform-dependent.) Ordering suffixes are one of the following: : No memory barrier. A plain AO_nop() really does nothing. _release: Earlier operations must become visible to other threads before the atomic operation. _acquire: Later operations must become visible after this operation. _read: Subsequent reads must become visible after reads included in the atomic operation or preceding it. Rarely useful for clients? _write: Earlier writes become visible before writes during or after the atomic operation. Rarely useful for clients? 
_full: The associated operation is ordered with respect to both earlier and later memory ops. If the associated operation is nop, then this orders all earlier memory operations with respect to subsequent ones. AO_store_full or AO_nop_full are the normal ways to force a store to be ordered with respect to a later load. _release_write: Ordered with respect to earlier writes. This is normally implemented as either a _write or _release barrier. _acquire_read: Ordered with respect to later reads. This is normally implemented as either a _read or _acquire barrier. _dd_acquire_read: Ordered with respect to later reads that are data dependent on this one. This is needed on a pointer read, which is later dereferenced to read a second value, with the expectation that the second read is ordered after the first one. On most architectures, this is equivalent to no barrier. (This is very hard to define precisely. It should probably be avoided. A major problem is that optimizers tend to try to eliminate dependencies from the generated code, since dependencies force the hardware to execute the code serially.) We assume that if a store is data-dependent on a previous load, then the two are always implicitly ordered. It is possible to test whether AO_[] is available on the target platform by checking whether AO_HAVE_[] is defined as a macro. Note that we generally don't implement operations that are either meaningless (e.g. AO_nop_acquire, AO_nop_release) or which appear to have no clear use (e.g. AO_load_release, AO_store_acquire, AO_load_write, AO_store_read). On some platforms (e.g. PA-RISC) many operations will remain undefined unless AO_REQUIRE_CAS is defined before including the package. 
When typed in the package build directory, the following command will print operations that are unimplemented on the platform: make test_atomic; ./test_atomic The following command generates a file "list_atomic.i" containing the macro expansions of all implemented operations on the platform: make list_atomic.i Known issues include: We should be more precise in defining the semantics of the ordering constraints, and if and how we can guarantee sequential consistency. Dd_acquire_read is very hard or impossible to define in a way that cannot be invalidated by reasonably standard compiler transformations. Example: If you want to initialize an object, and then "publish" a pointer to it in a global location p, such that other threads reading the new value of p are guaranteed to see an initialized object, it suffices to use AO_release_write(p, ...) to write the pointer to the object, and to retrieve it in other threads with AO_acquire_read(p). Platform notes: All X86: We quietly assume 486 or better. Gcc on x86: Define AO_USE_PENTIUM4_INSTRS to use the Pentium 4 mfence instruction. Currently this is appears to be of marginal benefit. libatomic_ops-7.6.12/doc/README_malloc.txt000066400000000000000000000060161411761111000202210ustar00rootroot00000000000000The libatomic_ops_gpl includes a simple almost-lock-free malloc implementation. This is intended as a safe way to allocate memory from a signal handler, or to allocate memory in the context of a library that does not know what thread library it will be used with. In either case locking is impossible. Note that the operations are only guaranteed to be 1-lock-free, i.e. a single blocked thread will not prevent progress, but multiple blocked threads may. To safely use these operations in a signal handler, the handler should be non-reentrant, i.e. it should not be interruptable by another handler using these operations. 
Furthermore use outside of signal handlers in a multithreaded application should be protected by a lock, so that at most one invocation may be interrupted by a signal. The header will define the macro "AO_MALLOC_IS_LOCK_FREE" on platforms on which malloc is completely lock-free, and hence these restrictions do not apply. In the presence of threads, but absence of contention, the time performance of this package should be as good, or slightly better than, most system malloc implementations. Its space performance is theoretically optimal (to within a constant factor), but probably quite poor in practice. In particular, no attempt is made to coalesce free small memory blocks. Something like Doug Lea's malloc is likely to use significantly less memory for complex applications. Performance on platforms without an efficient compare-and-swap implementation will be poor. This package was not designed for processor-scalability in the face of high allocation rates. If all threads happen to allocate different-sized objects, you might get lucky. Otherwise expect contention and false-sharing problems. If this is an issue, something like Maged Michael's algorithm (PLDI 2004) would be technically a far better choice. If you are concerned only with scalability, and not signal-safety, you might also consider using Hoard instead. We have seen a factor of 3 to 4 slowdown from the standard glibc malloc implementation with contention, even when the performance without contention was faster. (To make the implementation more scalable, one would need to replicate at least the free list headers, so that concurrent access is possible without cache conflicts.) Unfortunately there is no portable async-signal-safe way to obtain large chunks of memory from the OS. Based on reading of the source code, mmap-based allocation appears safe under Linux, and probably BSD variants. It is probably unsafe for operating systems built on Mach, such as Apple's Darwin. 
Without use of mmap, the allocator is limited to a fixed size, statically preallocated heap (2MB by default), and will fail to allocate objects above a certain size (just under 64K by default). Use of mmap to circumvent these limitations requires an explicit call. The entire interface to the AO_malloc package currently consists of: #include /* includes atomic_ops.h */ void *AO_malloc(size_t sz); void AO_free(void *p); void AO_malloc_enable_mmap(void); libatomic_ops-7.6.12/doc/README_stack.txt000066400000000000000000000075671411761111000200730ustar00rootroot00000000000000Note that the AO_stack implementation is licensed under the GPL, unlike the lower level routines. The header file atomic_ops_stack.h defines a linked stack abstraction. Stacks may be accessed by multiple concurrent threads. The implementation is 1-lock-free, i.e. it will continue to make progress if at most one thread becomes inactive while operating on the data structure. (The implementation can be built to be N-lock-free for any given N. But that seems to rarely be useful, especially since larger N involve some slowdown.) This makes it safe to access these data structures from non-reentrant signal handlers, provided at most one non-signal-handler thread is accessing the data structure at once. This latter condition can be ensured by acquiring an ordinary lock around the non-handler accesses to the data structure. For details see: Hans-J. Boehm, "An Almost Non-Blocking Stack", PODC 2004, http://portal.acm.org/citation.cfm?doid=1011767.1011774 (This is not exactly the implementation described there, since the interface was cleaned up in the interim. But it should perform very similarly.) We use a fully lock-free implementation when the underlying hardware makes that less expensive, i.e. when we have a double-wide compare-and-swap operation available. (The fully lock-free implementation uses an AO_t- sized version count, and assumes it does not wrap during the time any given operation is active. 
This seems reasonably safe on 32-bit hardware, and very safe on 64-bit hardware.) If a fully lock-free implementation is used, the macro AO_STACK_IS_LOCK_FREE will be defined. The implementation is interesting only because it allows reuse of existing nodes. This is necessary, for example, to implement a memory allocator. Since we want to leave the precise stack node type up to the client, we insist only that each stack node contains a link field of type AO_t. When a new node is pushed on the stack, the push operation expects to be passed the pointer to this link field, which will then be overwritten by this link field. Similarly, the pop operation returns a pointer to the link field of the object that previously was on the top of the stack. The cleanest way to use these routines is probably to define the stack node type with an initial AO_t link field, so that the conversion between the link-field pointer and the stack element pointer is just a compile-time cast. But other possibilities exist. (This would be cleaner in C++ with templates.) A stack is represented by an AO_stack_t structure. (This is normally 2 or 3 times the size of a pointer.) It may be statically initialized by setting it to AO_STACK_INITIALIZER, or dynamically initialized to an empty stack with AO_stack_init. There are only three operations for accessing stacks: void AO_stack_init(AO_stack_t *list); void AO_stack_push_release(AO_stack_t *list, AO_t *new_element); AO_t * AO_stack_pop_acquire(volatile AO_stack_t *list); We require that the objects pushed as list elements remain addressable as long as any push or pop operation are in progress. (It is OK for an object to be "pop"ped off a stack and "deallocated" with a concurrent "pop" on the same stack still in progress, but only if "deallocation" leaves the object addressable. The second "pop" may still read the object, but the value it reads will not matter.) 
We require that the headers (AO_stack objects) remain allocated and valid as long as any operations on them are still in-flight. We also provide macros AO_REAL_HEAD_PTR that converts an AO_stack_t to a pointer to the link field in the next element, and AO_REAL_NEXT_PTR that converts a link field to a real, dereferencable, pointer to the link field in the next element. This is intended only for debugging, or to traverse the list after modification has ceased. There is otherwise no guarantee that walking a stack using this macro will produce any kind of consistent picture of the data structure. libatomic_ops-7.6.12/doc/README_win32.txt000066400000000000000000000026211411761111000177120ustar00rootroot00000000000000Most of the atomic_ops functionality is available under Win32 with the Microsoft tools, but the build process is more primitive than that on Linux/Unix platforms. To build: 1) Go to the src directory in the distribution. 2) Make sure the Microsoft command-line tools (e.g. nmake) are available. 3) Run "nmake -f Makefile.msft check". This should build libatomic_ops_gpl.lib and run some tests. 4) To compile applications, you will need to retain or copy the following pieces from the resulting src directory contents: "atomic_ops.h" - Header file defining low-level primitives. This includes files from the following folder. "atomic_ops"- Subdirectory containing implementation header files. "atomic_ops_stack.h" - Header file describing almost lock-free stack. "atomic_ops_malloc.h" - Header file describing almost lock-free malloc. "libatomic_ops_gpl.lib" - Library containing implementation of the above two (plus AO_pause() defined in atomic_ops.c). The atomic_ops.h implementation is entirely in the header files in Win32. If the client defines AO_ASSUME_VISTA (before include atomic_ops.h), it should make double_compare_and_swap_full available. 
Note that the library is covered by the GNU General Public License, while the top 2 of these pieces allow use in proprietary code. libatomic_ops-7.6.12/m4/000077500000000000000000000000001411761111000147445ustar00rootroot00000000000000libatomic_ops-7.6.12/m4/.gitignore000066400000000000000000000001131411761111000167270ustar00rootroot00000000000000# Place holder to keep this directory in the Git repository. * !.gitignore libatomic_ops-7.6.12/pkgconfig/000077500000000000000000000000001411761111000163735ustar00rootroot00000000000000libatomic_ops-7.6.12/pkgconfig/atomic_ops-uninstalled.pc.in000066400000000000000000000004671411761111000240100ustar00rootroot00000000000000prefix=@prefix@ exec_prefix=@exec_prefix@ top_builddir=@abs_top_builddir@ top_srcdir=@abs_top_srcdir@ Name: The atomic_ops library (uninstalled) Description: Atomic memory update operations Version: @PACKAGE_VERSION@ Libs: ${top_builddir}/src/libatomic_ops.la Cflags: -I${top_builddir}/src -I${top_srcdir}/src libatomic_ops-7.6.12/pkgconfig/atomic_ops.pc.in000066400000000000000000000004071411761111000214620ustar00rootroot00000000000000prefix=@prefix@ exec_prefix=@exec_prefix@ libdir=@libdir@ includedir=@includedir@ Name: The atomic_ops library Description: Atomic memory update operations portable implementation Version: @PACKAGE_VERSION@ Libs: -L${libdir} -latomic_ops Cflags: -I${includedir} libatomic_ops-7.6.12/src/000077500000000000000000000000001411761111000152135ustar00rootroot00000000000000libatomic_ops-7.6.12/src/Makefile.am000066400000000000000000000260341411761111000172540ustar00rootroot00000000000000 # Info (current:revision:age) for the Libtool versioning system. # These numbers should be updated at most once just before the release, # and, optionally, at most once during the development (after the release). 
LIBATOMIC_OPS_VER_INFO = 2:1:1 LIBATOMIC_OPS_GPL_VER_INFO = 2:2:1 AM_CFLAGS=@PICFLAG@ AM_CPPFLAGS = -I$(top_builddir)/src -I$(top_srcdir)/src CFLAGS += $(CFLAGS_EXTRA) include_HEADERS = atomic_ops.h atomic_ops_stack.h atomic_ops_malloc.h lib_LTLIBRARIES = libatomic_ops.la libatomic_ops_gpl.la if NEED_ASM libatomic_ops_la_SOURCES = atomic_ops.c atomic_ops_sysdeps.S else libatomic_ops_la_SOURCES = atomic_ops.c endif libatomic_ops_la_LDFLAGS = -version-info $(LIBATOMIC_OPS_VER_INFO) \ -no-undefined libatomic_ops_gpl_la_SOURCES = atomic_ops_stack.c atomic_ops_malloc.c libatomic_ops_gpl_la_LDFLAGS = -version-info $(LIBATOMIC_OPS_GPL_VER_INFO) \ -no-undefined libatomic_ops_gpl_la_LIBADD = libatomic_ops.la EXTRA_DIST = Makefile.msft atomic_ops/sysdeps/README \ atomic_ops/generalize-arithm.template \ atomic_ops/generalize-small.template \ atomic_ops/sysdeps/ao_t_is_int.template \ atomic_ops/sysdeps/gcc/generic-arithm.template \ atomic_ops/sysdeps/gcc/generic-small.template \ atomic_ops/sysdeps/loadstore/acquire_release_volatile.template \ atomic_ops/sysdeps/loadstore/atomic_load.template \ atomic_ops/sysdeps/loadstore/atomic_store.template \ atomic_ops/sysdeps/loadstore/ordered_loads_only.template \ atomic_ops/sysdeps/loadstore/ordered_stores_only.template \ atomic_ops/sysdeps/sunc/sparc.S BUILT_SOURCES = atomic_ops/generalize-arithm.h \ atomic_ops/generalize-small.h \ atomic_ops/sysdeps/ao_t_is_int.h \ atomic_ops/sysdeps/gcc/generic-arithm.h \ atomic_ops/sysdeps/gcc/generic-small.h \ atomic_ops/sysdeps/loadstore/acquire_release_volatile.h \ atomic_ops/sysdeps/loadstore/atomic_load.h \ atomic_ops/sysdeps/loadstore/atomic_store.h \ atomic_ops/sysdeps/loadstore/char_acquire_release_volatile.h \ atomic_ops/sysdeps/loadstore/char_atomic_load.h \ atomic_ops/sysdeps/loadstore/char_atomic_store.h \ atomic_ops/sysdeps/loadstore/int_acquire_release_volatile.h \ atomic_ops/sysdeps/loadstore/int_atomic_load.h \ atomic_ops/sysdeps/loadstore/int_atomic_store.h \ 
atomic_ops/sysdeps/loadstore/ordered_loads_only.h \ atomic_ops/sysdeps/loadstore/ordered_stores_only.h \ atomic_ops/sysdeps/loadstore/short_acquire_release_volatile.h \ atomic_ops/sysdeps/loadstore/short_atomic_load.h \ atomic_ops/sysdeps/loadstore/short_atomic_store.h #Private Headers privatedir=${includedir}/ nobase_private_HEADERS = atomic_ops/ao_version.h \ atomic_ops/generalize.h \ $(BUILT_SOURCES) \ \ atomic_ops/sysdeps/all_acquire_release_volatile.h \ atomic_ops/sysdeps/all_aligned_atomic_load_store.h \ atomic_ops/sysdeps/all_atomic_load_store.h \ atomic_ops/sysdeps/all_atomic_only_load.h \ atomic_ops/sysdeps/emul_cas.h \ atomic_ops/sysdeps/generic_pthread.h \ atomic_ops/sysdeps/ordered.h \ atomic_ops/sysdeps/ordered_except_wr.h \ atomic_ops/sysdeps/read_ordered.h \ atomic_ops/sysdeps/standard_ao_double_t.h \ atomic_ops/sysdeps/test_and_set_t_is_ao_t.h \ atomic_ops/sysdeps/test_and_set_t_is_char.h \ \ atomic_ops/sysdeps/armcc/arm_v6.h \ \ atomic_ops/sysdeps/gcc/aarch64.h \ atomic_ops/sysdeps/gcc/alpha.h \ atomic_ops/sysdeps/gcc/arm.h \ atomic_ops/sysdeps/gcc/avr32.h \ atomic_ops/sysdeps/gcc/cris.h \ atomic_ops/sysdeps/gcc/generic.h \ atomic_ops/sysdeps/gcc/hexagon.h \ atomic_ops/sysdeps/gcc/hppa.h \ atomic_ops/sysdeps/gcc/ia64.h \ atomic_ops/sysdeps/gcc/m68k.h \ atomic_ops/sysdeps/gcc/mips.h \ atomic_ops/sysdeps/gcc/powerpc.h \ atomic_ops/sysdeps/gcc/riscv.h \ atomic_ops/sysdeps/gcc/s390.h \ atomic_ops/sysdeps/gcc/sh.h \ atomic_ops/sysdeps/gcc/sparc.h \ atomic_ops/sysdeps/gcc/tile.h \ atomic_ops/sysdeps/gcc/x86.h \ \ atomic_ops/sysdeps/hpc/hppa.h \ atomic_ops/sysdeps/hpc/ia64.h \ \ atomic_ops/sysdeps/ibmc/powerpc.h \ \ atomic_ops/sysdeps/icc/ia64.h \ \ atomic_ops/sysdeps/loadstore/double_atomic_load_store.h \ \ atomic_ops/sysdeps/msftc/arm.h \ atomic_ops/sysdeps/msftc/common32_defs.h \ atomic_ops/sysdeps/msftc/x86.h \ atomic_ops/sysdeps/msftc/x86_64.h \ \ atomic_ops/sysdeps/sunc/sparc.h \ atomic_ops/sysdeps/sunc/x86.h atomic_ops/generalize-small.h: 
atomic_ops/generalize-small.template mkdir -p `dirname $@` sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? > $@ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? >> $@ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? >> $@ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? >> $@ sed -e s:XSIZE:double:g -e s:XCTYPE:AO_double_t:g $? >> $@ atomic_ops/generalize-arithm.h: atomic_ops/generalize-arithm.template mkdir -p `dirname $@` sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? > $@ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? >> $@ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? >> $@ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? >> $@ atomic_ops/sysdeps/ao_t_is_int.h: atomic_ops/sysdeps/ao_t_is_int.template mkdir -p `dirname $@` sed -e s:_XBAR::g $? > $@ sed -e s:XBAR:full:g $? >> $@ sed -e s:XBAR:acquire:g $? >> $@ sed -e s:XBAR:release:g $? >> $@ sed -e s:XBAR:write:g $? >> $@ sed -e s:XBAR:read:g $? >> $@ atomic_ops/sysdeps/gcc/generic-arithm.h: \ atomic_ops/sysdeps/gcc/generic-arithm.template mkdir -p `dirname $@` sed -e s:_XBAR::g -e s:XGCCBAR:RELAXED:g \ -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? > $@ sed -e s:_XBAR::g -e s:XGCCBAR:RELAXED:g \ -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? >> $@ sed -e s:_XBAR::g -e s:XGCCBAR:RELAXED:g \ -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? >> $@ sed -e s:_XBAR::g -e s:XGCCBAR:RELAXED:g \ -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? >> $@ sed -e s:XBAR:acquire:g -e s:XGCCBAR:ACQUIRE:g \ -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? >> $@ sed -e s:XBAR:acquire:g -e s:XGCCBAR:ACQUIRE:g \ -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? >> $@ sed -e s:XBAR:acquire:g -e s:XGCCBAR:ACQUIRE:g \ -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? >> $@ sed -e s:XBAR:acquire:g -e s:XGCCBAR:ACQUIRE:g \ -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? >> $@ sed -e s:XBAR:release:g -e s:XGCCBAR:RELEASE:g \ -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? 
>> $@ sed -e s:XBAR:release:g -e s:XGCCBAR:RELEASE:g \ -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? >> $@ sed -e s:XBAR:release:g -e s:XGCCBAR:RELEASE:g \ -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? >> $@ sed -e s:XBAR:release:g -e s:XGCCBAR:RELEASE:g \ -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? >> $@ sed -e s:XBAR:full:g -e s:XGCCBAR:SEQ_CST:g \ -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? >> $@ sed -e s:XBAR:full:g -e s:XGCCBAR:SEQ_CST:g \ -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? >> $@ sed -e s:XBAR:full:g -e s:XGCCBAR:SEQ_CST:g \ -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? >> $@ sed -e s:XBAR:full:g -e s:XGCCBAR:SEQ_CST:g \ -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? >> $@ atomic_ops/sysdeps/gcc/generic-small.h: \ atomic_ops/sysdeps/gcc/generic-small.template mkdir -p `dirname $@` sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? > $@ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? >> $@ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? >> $@ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? >> $@ atomic_ops/sysdeps/loadstore/ordered_loads_only.h: \ atomic_ops/sysdeps/loadstore/ordered_loads_only.template mkdir -p `dirname $@` sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? > $@ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? >> $@ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? >> $@ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? >> $@ sed -e s:XSIZE:double:g -e s:XCTYPE:AO_double_t:g $? >> $@ atomic_ops/sysdeps/loadstore/ordered_stores_only.h: \ atomic_ops/sysdeps/loadstore/ordered_stores_only.template mkdir -p `dirname $@` sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? > $@ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? >> $@ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? >> $@ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? >> $@ sed -e s:XSIZE:double:g -e s:XCTYPE:AO_double_t:g $? 
>> $@ atomic_ops/sysdeps/loadstore/acquire_release_volatile.h: \ atomic_ops/sysdeps/loadstore/acquire_release_volatile.template mkdir -p `dirname $@` sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? > $@ atomic_ops/sysdeps/loadstore/char_acquire_release_volatile.h: \ atomic_ops/sysdeps/loadstore/acquire_release_volatile.template mkdir -p `dirname $@` sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? > $@ atomic_ops/sysdeps/loadstore/int_acquire_release_volatile.h: \ atomic_ops/sysdeps/loadstore/acquire_release_volatile.template mkdir -p `dirname $@` sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? > $@ atomic_ops/sysdeps/loadstore/short_acquire_release_volatile.h: \ atomic_ops/sysdeps/loadstore/acquire_release_volatile.template mkdir -p `dirname $@` sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? > $@ atomic_ops/sysdeps/loadstore/atomic_load.h: \ atomic_ops/sysdeps/loadstore/atomic_load.template mkdir -p `dirname $@` sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? > $@ atomic_ops/sysdeps/loadstore/char_atomic_load.h: \ atomic_ops/sysdeps/loadstore/atomic_load.template mkdir -p `dirname $@` sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? > $@ atomic_ops/sysdeps/loadstore/int_atomic_load.h: \ atomic_ops/sysdeps/loadstore/atomic_load.template mkdir -p `dirname $@` sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? > $@ atomic_ops/sysdeps/loadstore/short_atomic_load.h: \ atomic_ops/sysdeps/loadstore/atomic_load.template mkdir -p `dirname $@` sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? > $@ atomic_ops/sysdeps/loadstore/atomic_store.h: \ atomic_ops/sysdeps/loadstore/atomic_store.template mkdir -p `dirname $@` sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? > $@ atomic_ops/sysdeps/loadstore/char_atomic_store.h: \ atomic_ops/sysdeps/loadstore/atomic_store.template mkdir -p `dirname $@` sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? 
> $@ atomic_ops/sysdeps/loadstore/int_atomic_store.h: \ atomic_ops/sysdeps/loadstore/atomic_store.template mkdir -p `dirname $@` sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? > $@ atomic_ops/sysdeps/loadstore/short_atomic_store.h: \ atomic_ops/sysdeps/loadstore/atomic_store.template mkdir -p `dirname $@` sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? > $@ libatomic_ops-7.6.12/src/Makefile.msft000066400000000000000000000040661411761111000176310ustar00rootroot00000000000000# # Copyright (c) 2003-2005 Hewlett-Packard Development Company, L.P. # # The really trivial win32/VC++ Makefile. Note that atomic_ops.c defines # only AO_pause (used by atomic_ops_stack). # And we rely on a pre-built test_atomic_include.h and generalize-small.h, # since we can't rely on sed. But we don't keep test_atomic_include.h in # the development repository any longer, so if you want to run all tests # for the sources obtained from the repository then # type "./autogen.sh && ./configure && make -j && make clean" in Cygwin first. # Otherwise type "nmake -f Makefile.msft check-noautogen" to run only the # tests not requiring the source auto-generation. # Win32 clients only need to include the header files. # To install, copy atomic_ops.h and the atomic_ops/... tree to your favorite # include directory. #!include CFLAGS_EXTRA= CFLAGS=-O2 -W3 $(CFLAGS_EXTRA) LIB_OBJS=atomic_ops.obj atomic_ops_malloc.obj atomic_ops_stack.obj all: libatomic_ops_gpl.lib atomic_ops.obj: cl $(CFLAGS) -c atomic_ops.c atomic_ops_stack.obj: cl $(CFLAGS) -c atomic_ops_stack.c atomic_ops_malloc.obj: cl $(CFLAGS) -c atomic_ops_malloc.c libatomic_ops_gpl.lib: $(LIB_OBJS) lib /out:libatomic_ops_gpl.lib $(LIB_OBJS) test_atomic: ..\tests\test_atomic.c ..\tests\test_atomic_include.h cl $(CFLAGS) -I. ..\tests\test_atomic.c /Fo.\test_atomic test_atomic_generalized: ..\tests\test_atomic.c ..\tests\test_atomic_include.h cl $(CFLAGS) -DAO_PREFER_GENERALIZED -I. 
..\tests\test_atomic.c \ /Fo.\test_atomic_generalized test_malloc: ..\tests\test_malloc.c libatomic_ops_gpl.lib cl $(CFLAGS) -I. ..\tests\test_malloc.c /Fo.\test_malloc \ libatomic_ops_gpl.lib test_stack: ..\tests\test_stack.c libatomic_ops_gpl.lib cl $(CFLAGS) -I. ..\tests\test_stack.c /Fo.\test_stack \ libatomic_ops_gpl.lib check: test_atomic test_atomic_generalized check-noautogen @echo "The following will print some 'Missing ...' messages" test_atomic test_atomic_generalized check-noautogen: test_malloc test_stack test_malloc test_stack clean: del *.exe *.obj libatomic_ops_gpl.lib libatomic_ops-7.6.12/src/atomic_ops.c000066400000000000000000000174741411761111000175310ustar00rootroot00000000000000/* * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* * Initialized data and out-of-line functions to support atomic_ops.h * go here. 
Currently this is needed only for pthread-based atomics * emulation, or for compare-and-swap emulation. * Pthreads emulation isn't useful on a native Windows platform, and * cas emulation is not needed. Thus we skip this on Windows. */ #if defined(HAVE_CONFIG_H) # include "config.h" #endif #if (defined(__hexagon__) || defined(__native_client__)) \ && !defined(AO_USE_NO_SIGNALS) && !defined(AO_USE_NANOSLEEP) /* Hexagon QuRT does not have sigprocmask (but Hexagon does not need */ /* emulation, so it is OK not to bother about signals blocking). */ /* Since NaCl is not recognized by configure yet, we do it here. */ # define AO_USE_NO_SIGNALS # define AO_USE_NANOSLEEP #endif #if defined(AO_USE_WIN32_PTHREADS) && !defined(AO_USE_NO_SIGNALS) # define AO_USE_NO_SIGNALS #endif #if (defined(__linux__) || defined(__GLIBC__) || defined(__GNU__)) \ && !defined(AO_USE_NO_SIGNALS) && !defined(_GNU_SOURCE) # define _GNU_SOURCE 1 #endif #undef AO_REQUIRE_CAS #include "atomic_ops.h" /* Without cas emulation! */ #if !defined(_MSC_VER) && !defined(__MINGW32__) && !defined(__BORLANDC__) \ || defined(AO_USE_NO_SIGNALS) #ifndef AO_NO_PTHREADS # include #endif #ifndef AO_USE_NO_SIGNALS # include #endif #ifdef AO_USE_NANOSLEEP /* This requires _POSIX_TIMERS feature. */ # include # include #elif defined(AO_USE_WIN32_PTHREADS) # include /* for Sleep() */ #elif defined(_HPUX_SOURCE) # include #else # include #endif #ifndef AO_HAVE_double_t # include "atomic_ops/sysdeps/standard_ao_double_t.h" #endif /* Lock for pthreads-based implementation. */ #ifndef AO_NO_PTHREADS pthread_mutex_t AO_pt_lock = PTHREAD_MUTEX_INITIALIZER; #endif /* * Out of line compare-and-swap emulation based on test and set. * * We use a small table of locks for different compare_and_swap locations. * Before we update perform a compare-and-swap, we grab the corresponding * lock. Different locations may hash to the same lock, but since we * never acquire more than one lock at a time, this can't deadlock. 
* We explicitly disable signals while we perform this operation. * * TODO: Probably also support emulation based on Lamport * locks, since we may not have test_and_set either. */ #define AO_HASH_SIZE 16 #define AO_HASH(x) (((unsigned long)(x) >> 12) & (AO_HASH_SIZE-1)) static AO_TS_t AO_locks[AO_HASH_SIZE] = { AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, }; void AO_pause(int); /* defined below */ static void lock_ool(volatile AO_TS_t *l) { int i = 0; while (AO_test_and_set_acquire(l) == AO_TS_SET) AO_pause(++i); } AO_INLINE void lock(volatile AO_TS_t *l) { if (AO_EXPECT_FALSE(AO_test_and_set_acquire(l) == AO_TS_SET)) lock_ool(l); } AO_INLINE void unlock(volatile AO_TS_t *l) { AO_CLEAR(l); } #ifndef AO_USE_NO_SIGNALS static sigset_t all_sigs; static volatile AO_t initialized = 0; static volatile AO_TS_t init_lock = AO_TS_INITIALIZER; AO_INLINE void block_all_signals(sigset_t *old_sigs_ptr) { if (AO_EXPECT_FALSE(!AO_load_acquire(&initialized))) { lock(&init_lock); if (!initialized) sigfillset(&all_sigs); unlock(&init_lock); AO_store_release(&initialized, 1); } sigprocmask(SIG_BLOCK, &all_sigs, old_sigs_ptr); /* Neither sigprocmask nor pthread_sigmask is 100% */ /* guaranteed to work here. Sigprocmask is not */ /* guaranteed be thread safe, and pthread_sigmask */ /* is not async-signal-safe. Under linuxthreads, */ /* sigprocmask may block some pthreads-internal */ /* signals. So long as we do that for short periods, */ /* we should be OK. 
*/ } #endif /* !AO_USE_NO_SIGNALS */ AO_t AO_fetch_compare_and_swap_emulation(volatile AO_t *addr, AO_t old_val, AO_t new_val) { AO_TS_t *my_lock = AO_locks + AO_HASH(addr); AO_t fetched_val; # ifndef AO_USE_NO_SIGNALS sigset_t old_sigs; block_all_signals(&old_sigs); # endif lock(my_lock); fetched_val = *addr; if (fetched_val == old_val) *addr = new_val; unlock(my_lock); # ifndef AO_USE_NO_SIGNALS sigprocmask(SIG_SETMASK, &old_sigs, NULL); # endif return fetched_val; } int AO_compare_double_and_swap_double_emulation(volatile AO_double_t *addr, AO_t old_val1, AO_t old_val2, AO_t new_val1, AO_t new_val2) { AO_TS_t *my_lock = AO_locks + AO_HASH(addr); int result; # ifndef AO_USE_NO_SIGNALS sigset_t old_sigs; block_all_signals(&old_sigs); # endif lock(my_lock); if (addr -> AO_val1 == old_val1 && addr -> AO_val2 == old_val2) { addr -> AO_val1 = new_val1; addr -> AO_val2 = new_val2; result = 1; } else result = 0; unlock(my_lock); # ifndef AO_USE_NO_SIGNALS sigprocmask(SIG_SETMASK, &old_sigs, NULL); # endif return result; } void AO_store_full_emulation(volatile AO_t *addr, AO_t val) { AO_TS_t *my_lock = AO_locks + AO_HASH(addr); lock(my_lock); *addr = val; unlock(my_lock); } #else /* Non-posix platform */ # include # define AO_USE_WIN32_PTHREADS /* define to use Sleep() */ extern int AO_non_posix_implementation_is_entirely_in_headers; #endif static AO_t spin_dummy = 1; /* Spin for 2**n units. */ static void AO_spin(int n) { AO_t j = AO_load(&spin_dummy); int i = 2 << n; while (i-- > 0) j += (j - 1) << 2; /* Given 'spin_dummy' is initialized to 1, j is 1 after the loop. */ AO_store(&spin_dummy, j); } void AO_pause(int n) { if (n < 12) AO_spin(n); else { # ifdef AO_USE_NANOSLEEP struct timespec ts; ts.tv_sec = 0; ts.tv_nsec = n > 28 ? 100000L * 1000 : 1L << (n - 2); nanosleep(&ts, 0); # elif defined(AO_USE_WIN32_PTHREADS) Sleep(n > 28 ? 100 /* millis */ : n < 22 ? 1 : (DWORD)1 << (n - 22)); # else struct timeval tv; /* Short async-signal-safe sleep. */ int usec = n > 28 ? 
100000 : 1 << (n - 12); /* Use an intermediate variable (of int type) to avoid */ /* "shift followed by widening conversion" warning. */ tv.tv_sec = 0; tv.tv_usec = usec; (void)select(0, 0, 0, 0, &tv); # endif } } libatomic_ops-7.6.12/src/atomic_ops.h000066400000000000000000000511731411761111000175300ustar00rootroot00000000000000/* * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * Copyright (c) 2008-2021 Ivan Maidanski * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef AO_ATOMIC_OPS_H #define AO_ATOMIC_OPS_H #include "atomic_ops/ao_version.h" /* Define version numbers here to allow */ /* test on build machines for cross-builds. */ #include #include /* We define various atomic operations on memory in a */ /* machine-specific way. Unfortunately, this is complicated */ /* by the fact that these may or may not be combined with */ /* various memory barriers. 
Thus the actual operations we */ /* define have the form AO__, for all */ /* plausible combinations of and . */ /* This of course results in a mild combinatorial explosion. */ /* To deal with it, we try to generate derived */ /* definitions for as many of the combinations as we can, as */ /* automatically as possible. */ /* */ /* Our assumption throughout is that the programmer will */ /* specify the least demanding operation and memory barrier */ /* that will guarantee correctness for the implementation. */ /* Our job is to find the least expensive way to implement it */ /* on the applicable hardware. In many cases that will */ /* involve, for example, a stronger memory barrier, or a */ /* combination of hardware primitives. */ /* */ /* Conventions: */ /* "plain" atomic operations are not guaranteed to include */ /* a barrier. The suffix in the name specifies the barrier */ /* type. Suffixes are: */ /* _release: Earlier operations may not be delayed past it. */ /* _acquire: Later operations may not move ahead of it. */ /* _read: Subsequent reads must follow this operation and */ /* preceding reads. */ /* _write: Earlier writes precede both this operation and */ /* later writes. */ /* _full: Ordered with respect to both earlier and later memory */ /* operations. */ /* _release_write: Ordered with respect to earlier writes. */ /* _acquire_read: Ordered with respect to later reads. */ /* */ /* Currently we try to define the following atomic memory */ /* operations, in combination with the above barriers: */ /* AO_nop */ /* AO_load */ /* AO_store */ /* AO_test_and_set (binary) */ /* AO_fetch_and_add */ /* AO_fetch_and_add1 */ /* AO_fetch_and_sub1 */ /* AO_and */ /* AO_or */ /* AO_xor */ /* AO_compare_and_swap */ /* AO_fetch_compare_and_swap */ /* */ /* Note that atomicity guarantees are valid only if both */ /* readers and writers use AO_ operations to access the */ /* shared value, while ordering constraints are intended to */ /* apply all memory operations. 
If a location can potentially */ /* be accessed simultaneously from multiple threads, and one of */ /* those accesses may be a write access, then all such */ /* accesses to that location should be through AO_ primitives. */ /* However if AO_ operations enforce sufficient ordering to */ /* ensure that a location x cannot be accessed concurrently, */ /* or can only be read concurrently, then x can be accessed */ /* via ordinary references and assignments. */ /* */ /* AO_compare_and_swap takes an address and an expected old */ /* value and a new value, and returns an int. Non-zero result */ /* indicates that it succeeded. */ /* AO_fetch_compare_and_swap takes an address and an expected */ /* old value and a new value, and returns the real old value. */ /* The operation succeeded if and only if the expected old */ /* value matches the old value returned. */ /* */ /* Test_and_set takes an address, atomically replaces it by */ /* AO_TS_SET, and returns the prior value. */ /* An AO_TS_t location can be reset with the */ /* AO_CLEAR macro, which normally uses AO_store_release. */ /* AO_fetch_and_add takes an address and an AO_t increment */ /* value. The AO_fetch_and_add1 and AO_fetch_and_sub1 variants */ /* are provided, since they allow faster implementations on */ /* some hardware. AO_and, AO_or, AO_xor do atomically and, or, */ /* xor (respectively) an AO_t value into a memory location, */ /* but do not provide access to the original. */ /* */ /* We expect this list to grow slowly over time. */ /* */ /* Note that AO_nop_full is a full memory barrier. */ /* */ /* Note that if some data is initialized with */ /* data.x = ...; data.y = ...; ... */ /* AO_store_release_write(&data_is_initialized, 1) */ /* then data is guaranteed to be initialized after the test */ /* if (AO_load_acquire_read(&data_is_initialized)) ... */ /* succeeds. Furthermore, this should generate near-optimal */ /* code on all common platforms. 
*/ /* */ /* All operations operate on unsigned AO_t, which */ /* is the natural word size, and usually unsigned long. */ /* It is possible to check whether a particular operation op */ /* is available on a particular platform by checking whether */ /* AO_HAVE_op is defined. We make heavy use of these macros */ /* internally. */ /* The rest of this file basically has three sections: */ /* */ /* Some utility and default definitions. */ /* */ /* The architecture dependent section: */ /* This defines atomic operations that have direct hardware */ /* support on a particular platform, mostly by including the */ /* appropriate compiler- and hardware-dependent file. */ /* */ /* The synthesis section: */ /* This tries to define other atomic operations in terms of */ /* those that are explicitly available on the platform. */ /* This section is hardware independent. */ /* We make no attempt to synthesize operations in ways that */ /* effectively introduce locks, except for the debugging/demo */ /* pthread-based implementation at the beginning. A more */ /* realistic implementation that falls back to locks could be */ /* added as a higher layer. But that would sacrifice */ /* usability from signal handlers. */ /* The synthesis section is implemented almost entirely in */ /* atomic_ops/generalize.h. */ /* Some common defaults. Overridden for some architectures. */ #define AO_t size_t /* The test_and_set primitive returns an AO_TS_VAL_t value. */ /* AO_TS_t is the type of an in-memory test-and-set location. */ #define AO_TS_INITIALIZER ((AO_TS_t)AO_TS_CLEAR) /* Convenient internal macro to test version of GCC. */ #if defined(__GNUC__) && defined(__GNUC_MINOR__) # define AO_GNUC_PREREQ(major, minor) \ ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((major) << 16) + (minor)) #else # define AO_GNUC_PREREQ(major, minor) 0 /* false */ #endif /* Convenient internal macro to test version of Clang. 
*/ #if defined(__clang__) && defined(__clang_major__) # define AO_CLANG_PREREQ(major, minor) \ ((__clang_major__ << 16) + __clang_minor__ >= ((major) << 16) + (minor)) #else # define AO_CLANG_PREREQ(major, minor) 0 /* false */ #endif /* Platform-dependent stuff: */ #if (defined(__GNUC__) || defined(_MSC_VER) || defined(__INTEL_COMPILER) \ || defined(__DMC__) || defined(__WATCOMC__)) && !defined(AO_NO_INLINE) # define AO_INLINE static __inline #elif defined(__sun) && !defined(AO_NO_INLINE) # define AO_INLINE static inline #else # define AO_INLINE static #endif #if AO_GNUC_PREREQ(3, 0) && !defined(LINT2) # define AO_EXPECT_FALSE(expr) __builtin_expect(expr, 0) /* Equivalent to (expr) but predict that usually (expr) == 0. */ #else # define AO_EXPECT_FALSE(expr) (expr) #endif /* !__GNUC__ */ #if defined(__has_feature) /* __has_feature() is supported. */ # if __has_feature(address_sanitizer) # define AO_ADDRESS_SANITIZER # endif # if __has_feature(memory_sanitizer) # define AO_MEMORY_SANITIZER # endif # if __has_feature(thread_sanitizer) # define AO_THREAD_SANITIZER # endif #else # ifdef __SANITIZE_ADDRESS__ /* GCC v4.8+ */ # define AO_ADDRESS_SANITIZER # endif #endif /* !__has_feature */ #ifndef AO_ATTR_NO_SANITIZE_MEMORY # ifndef AO_MEMORY_SANITIZER # define AO_ATTR_NO_SANITIZE_MEMORY /* empty */ # elif AO_CLANG_PREREQ(3, 8) # define AO_ATTR_NO_SANITIZE_MEMORY __attribute__((no_sanitize("memory"))) # else # define AO_ATTR_NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory)) # endif #endif /* !AO_ATTR_NO_SANITIZE_MEMORY */ #ifndef AO_ATTR_NO_SANITIZE_THREAD # ifndef AO_THREAD_SANITIZER # define AO_ATTR_NO_SANITIZE_THREAD /* empty */ # elif AO_CLANG_PREREQ(3, 8) # define AO_ATTR_NO_SANITIZE_THREAD __attribute__((no_sanitize("thread"))) # else # define AO_ATTR_NO_SANITIZE_THREAD __attribute__((no_sanitize_thread)) # endif #endif /* !AO_ATTR_NO_SANITIZE_THREAD */ #if (AO_GNUC_PREREQ(7, 5) || __STDC_VERSION__ >= 201112L) && !defined(LINT2) # define AO_ALIGNOF_SUPPORTED 1 
#endif #ifdef AO_ALIGNOF_SUPPORTED # define AO_ASSERT_ADDR_ALIGNED(addr) \ assert(((size_t)(addr) & (__alignof__(*(addr)) - 1)) == 0) #else # define AO_ASSERT_ADDR_ALIGNED(addr) \ assert(((size_t)(addr) & (sizeof(*(addr)) - 1)) == 0) #endif /* !AO_ALIGNOF_SUPPORTED */ #if defined(__GNUC__) && !defined(__INTEL_COMPILER) # define AO_compiler_barrier() __asm__ __volatile__("" : : : "memory") #elif defined(_MSC_VER) || defined(__DMC__) || defined(__BORLANDC__) \ || defined(__WATCOMC__) # if defined(_AMD64_) || defined(_M_X64) || _MSC_VER >= 1400 # if defined(_WIN32_WCE) /* # include */ # elif defined(_MSC_VER) # include # endif # pragma intrinsic(_ReadWriteBarrier) # define AO_compiler_barrier() _ReadWriteBarrier() /* We assume this does not generate a fence instruction. */ /* The documentation is a bit unclear. */ # else # define AO_compiler_barrier() __asm { } /* The preceding implementation may be preferable here too. */ /* But the documentation warns about VC++ 2003 and earlier. */ # endif #elif defined(__INTEL_COMPILER) # define AO_compiler_barrier() __memory_barrier() /* FIXME: Too strong? IA64-only? */ #elif defined(_HPUX_SOURCE) # if defined(__ia64) # include # define AO_compiler_barrier() _Asm_sched_fence() # else /* FIXME - We do not know how to do this. This is a guess. */ /* And probably a bad one. */ static volatile int AO_barrier_dummy; # define AO_compiler_barrier() (void)(AO_barrier_dummy = AO_barrier_dummy) # endif #else /* We conjecture that the following usually gives us the right */ /* semantics or an error. 
*/ # define AO_compiler_barrier() asm("") #endif #if defined(AO_USE_PTHREAD_DEFS) # include "atomic_ops/sysdeps/generic_pthread.h" #endif /* AO_USE_PTHREAD_DEFS */ #if (defined(__CC_ARM) || defined(__ARMCC__)) && !defined(__GNUC__) \ && !defined(AO_USE_PTHREAD_DEFS) # include "atomic_ops/sysdeps/armcc/arm_v6.h" # define AO_GENERALIZE_TWICE #endif #if defined(__GNUC__) && !defined(AO_USE_PTHREAD_DEFS) \ && !defined(__INTEL_COMPILER) # if defined(__i386__) /* We don't define AO_USE_SYNC_CAS_BUILTIN for x86 here because */ /* it might require specifying additional options (like -march) */ /* or additional link libraries (if -march is not specified). */ # include "atomic_ops/sysdeps/gcc/x86.h" # elif defined(__x86_64__) # if AO_GNUC_PREREQ(4, 2) && !defined(AO_USE_SYNC_CAS_BUILTIN) /* It is safe to use __sync CAS built-in on this architecture. */ # define AO_USE_SYNC_CAS_BUILTIN # endif # include "atomic_ops/sysdeps/gcc/x86.h" # elif defined(__ia64__) # include "atomic_ops/sysdeps/gcc/ia64.h" # define AO_GENERALIZE_TWICE # elif defined(__hppa__) # include "atomic_ops/sysdeps/gcc/hppa.h" # define AO_CAN_EMUL_CAS # elif defined(__alpha__) # include "atomic_ops/sysdeps/gcc/alpha.h" # define AO_GENERALIZE_TWICE # elif defined(__s390__) # include "atomic_ops/sysdeps/gcc/s390.h" # elif defined(__sparc__) # include "atomic_ops/sysdeps/gcc/sparc.h" # define AO_CAN_EMUL_CAS # elif defined(__m68k__) # include "atomic_ops/sysdeps/gcc/m68k.h" # elif defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \ || defined(__powerpc64__) || defined(__ppc64__) || defined(_ARCH_PPC) # include "atomic_ops/sysdeps/gcc/powerpc.h" # elif defined(__aarch64__) # include "atomic_ops/sysdeps/gcc/aarch64.h" # define AO_CAN_EMUL_CAS # elif defined(__arm__) # include "atomic_ops/sysdeps/gcc/arm.h" # define AO_CAN_EMUL_CAS # elif defined(__cris__) || defined(CRIS) # include "atomic_ops/sysdeps/gcc/cris.h" # define AO_CAN_EMUL_CAS # define AO_GENERALIZE_TWICE # elif defined(__mips__) # include 
"atomic_ops/sysdeps/gcc/mips.h" # elif defined(__sh__) || defined(SH4) # include "atomic_ops/sysdeps/gcc/sh.h" # define AO_CAN_EMUL_CAS # elif defined(__avr32__) # include "atomic_ops/sysdeps/gcc/avr32.h" # elif defined(__hexagon__) # include "atomic_ops/sysdeps/gcc/hexagon.h" # elif defined(__nios2__) # include "atomic_ops/sysdeps/gcc/generic.h" # define AO_CAN_EMUL_CAS # elif defined(__riscv) # include "atomic_ops/sysdeps/gcc/riscv.h" # elif defined(__tile__) # include "atomic_ops/sysdeps/gcc/tile.h" # else /* etc. */ # include "atomic_ops/sysdeps/gcc/generic.h" # endif #endif /* __GNUC__ && !AO_USE_PTHREAD_DEFS */ #if (defined(__IBMC__) || defined(__IBMCPP__)) && !defined(__GNUC__) \ && !defined(AO_USE_PTHREAD_DEFS) # if defined(__powerpc__) || defined(__powerpc) || defined(__ppc__) \ || defined(__PPC__) || defined(_M_PPC) || defined(_ARCH_PPC) \ || defined(_ARCH_PWR) # include "atomic_ops/sysdeps/ibmc/powerpc.h" # define AO_GENERALIZE_TWICE # endif #endif #if defined(__INTEL_COMPILER) && !defined(AO_USE_PTHREAD_DEFS) # if defined(__ia64__) # include "atomic_ops/sysdeps/icc/ia64.h" # define AO_GENERALIZE_TWICE # endif # if defined(__GNUC__) /* Intel Compiler in GCC compatible mode */ # if defined(__i386__) # include "atomic_ops/sysdeps/gcc/x86.h" # endif /* __i386__ */ # if defined(__x86_64__) # if (__INTEL_COMPILER > 1110) && !defined(AO_USE_SYNC_CAS_BUILTIN) # define AO_USE_SYNC_CAS_BUILTIN # endif # include "atomic_ops/sysdeps/gcc/x86.h" # endif /* __x86_64__ */ # endif #endif #if defined(_HPUX_SOURCE) && !defined(__GNUC__) && !defined(AO_USE_PTHREAD_DEFS) # if defined(__ia64) # include "atomic_ops/sysdeps/hpc/ia64.h" # define AO_GENERALIZE_TWICE # else # include "atomic_ops/sysdeps/hpc/hppa.h" # define AO_CAN_EMUL_CAS # endif #endif #if defined(_MSC_VER) || defined(__DMC__) || defined(__BORLANDC__) \ || (defined(__WATCOMC__) && defined(__NT__)) # if defined(_AMD64_) || defined(_M_X64) || defined(_M_ARM64) # include "atomic_ops/sysdeps/msftc/x86_64.h" # elif 
defined(_M_IX86) || defined(x86) # include "atomic_ops/sysdeps/msftc/x86.h" # elif defined(_M_ARM) || defined(ARM) || defined(_ARM_) # include "atomic_ops/sysdeps/msftc/arm.h" # define AO_GENERALIZE_TWICE # endif #endif #if defined(__sun) && !defined(__GNUC__) && !defined(AO_USE_PTHREAD_DEFS) /* Note: use -DAO_USE_PTHREAD_DEFS if Sun CC does not handle inline asm. */ # if defined(__i386) || defined(__x86_64) || defined(__amd64) # include "atomic_ops/sysdeps/sunc/x86.h" # endif #endif #if !defined(__GNUC__) && (defined(sparc) || defined(__sparc)) \ && !defined(AO_USE_PTHREAD_DEFS) # include "atomic_ops/sysdeps/sunc/sparc.h" # define AO_CAN_EMUL_CAS #endif #if (defined(AO_REQUIRE_CAS) && !defined(AO_HAVE_compare_and_swap) \ && !defined(AO_HAVE_fetch_compare_and_swap) \ && !defined(AO_HAVE_compare_and_swap_full) \ && !defined(AO_HAVE_fetch_compare_and_swap_full) \ && !defined(AO_HAVE_compare_and_swap_acquire) \ && !defined(AO_HAVE_fetch_compare_and_swap_acquire)) || defined(CPPCHECK) # if defined(AO_CAN_EMUL_CAS) # include "atomic_ops/sysdeps/emul_cas.h" # elif !defined(CPPCHECK) # error Cannot implement AO_compare_and_swap_full on this architecture. # endif #endif /* AO_REQUIRE_CAS && !AO_HAVE_compare_and_swap ... */ /* The most common way to clear a test-and-set location */ /* at the end of a critical section. */ #if defined(AO_AO_TS_T) && !defined(AO_HAVE_CLEAR) # define AO_CLEAR(addr) AO_store_release((AO_TS_t *)(addr), AO_TS_CLEAR) # define AO_HAVE_CLEAR #endif #if defined(AO_CHAR_TS_T) && !defined(AO_HAVE_CLEAR) # define AO_CLEAR(addr) AO_char_store_release((AO_TS_t *)(addr), AO_TS_CLEAR) # define AO_HAVE_CLEAR #endif /* The generalization section. */ #if !defined(AO_GENERALIZE_TWICE) && defined(AO_CAN_EMUL_CAS) \ && !defined(AO_HAVE_compare_and_swap_full) \ && !defined(AO_HAVE_fetch_compare_and_swap_full) # define AO_GENERALIZE_TWICE #endif /* Theoretically we should repeatedly include atomic_ops/generalize.h. 
*/ /* In fact, we observe that this converges after a small fixed number */ /* of iterations, usually one. */ #include "atomic_ops/generalize.h" #if !defined(AO_GENERALIZE_TWICE) \ && defined(AO_HAVE_compare_double_and_swap_double) \ && (!defined(AO_HAVE_double_load) || !defined(AO_HAVE_double_store)) # define AO_GENERALIZE_TWICE #endif #ifdef AO_T_IS_INT /* Included after the first generalization pass. */ # include "atomic_ops/sysdeps/ao_t_is_int.h" # ifndef AO_GENERALIZE_TWICE /* Always generalize again. */ # define AO_GENERALIZE_TWICE # endif #endif /* AO_T_IS_INT */ #ifdef AO_GENERALIZE_TWICE # include "atomic_ops/generalize.h" #endif /* For compatibility with version 0.4 and earlier */ #define AO_TS_T AO_TS_t #define AO_T AO_t #define AO_TS_VAL AO_TS_VAL_t #endif /* !AO_ATOMIC_OPS_H */ libatomic_ops-7.6.12/src/atomic_ops/000077500000000000000000000000001411761111000173505ustar00rootroot00000000000000libatomic_ops-7.6.12/src/atomic_ops/ao_version.h000066400000000000000000000035521411761111000216720ustar00rootroot00000000000000/* * Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P. * Copyright (c) 2011-2018 Ivan Maidanski * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef AO_ATOMIC_OPS_H # error This file should not be included directly. #endif /* The policy regarding version numbers: development code has odd */ /* "minor" number (and "micro" part is 0); when development is finished */ /* and a release is prepared, "minor" number is incremented (keeping */ /* "micro" number still zero), whenever a defect is fixed a new release */ /* is prepared incrementing "micro" part to odd value (the most stable */ /* release has the biggest "micro" number). */ /* The version here should match that in configure.ac and README. */ #define AO_VERSION_MAJOR 7 #define AO_VERSION_MINOR 6 #define AO_VERSION_MICRO 12 /* 7.6.12 */ libatomic_ops-7.6.12/src/atomic_ops/generalize-arithm.h000066400000000000000000003732471411761111000231500ustar00rootroot00000000000000/* * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* char_compare_and_swap (based on fetch_compare_and_swap) */ #if defined(AO_HAVE_char_fetch_compare_and_swap_full) \ && !defined(AO_HAVE_char_compare_and_swap_full) AO_INLINE int AO_char_compare_and_swap_full(volatile unsigned/**/char *addr, unsigned/**/char old_val, unsigned/**/char new_val) { return AO_char_fetch_compare_and_swap_full(addr, old_val, new_val) == old_val; } # define AO_HAVE_char_compare_and_swap_full #endif #if defined(AO_HAVE_char_fetch_compare_and_swap_acquire) \ && !defined(AO_HAVE_char_compare_and_swap_acquire) AO_INLINE int AO_char_compare_and_swap_acquire(volatile unsigned/**/char *addr, unsigned/**/char old_val, unsigned/**/char new_val) { return AO_char_fetch_compare_and_swap_acquire(addr, old_val, new_val) == old_val; } # define AO_HAVE_char_compare_and_swap_acquire #endif #if defined(AO_HAVE_char_fetch_compare_and_swap_release) \ && !defined(AO_HAVE_char_compare_and_swap_release) AO_INLINE int AO_char_compare_and_swap_release(volatile unsigned/**/char *addr, unsigned/**/char old_val, unsigned/**/char new_val) { return AO_char_fetch_compare_and_swap_release(addr, old_val, new_val) == old_val; } # define AO_HAVE_char_compare_and_swap_release #endif #if defined(AO_HAVE_char_fetch_compare_and_swap_write) \ && !defined(AO_HAVE_char_compare_and_swap_write) AO_INLINE int AO_char_compare_and_swap_write(volatile unsigned/**/char *addr, unsigned/**/char old_val, unsigned/**/char new_val) { return AO_char_fetch_compare_and_swap_write(addr, old_val, new_val) == old_val; } # define AO_HAVE_char_compare_and_swap_write #endif #if defined(AO_HAVE_char_fetch_compare_and_swap_read) \ && !defined(AO_HAVE_char_compare_and_swap_read) AO_INLINE int AO_char_compare_and_swap_read(volatile 
unsigned/**/char *addr, unsigned/**/char old_val, unsigned/**/char new_val) { return AO_char_fetch_compare_and_swap_read(addr, old_val, new_val) == old_val; } # define AO_HAVE_char_compare_and_swap_read #endif #if defined(AO_HAVE_char_fetch_compare_and_swap) \ && !defined(AO_HAVE_char_compare_and_swap) AO_INLINE int AO_char_compare_and_swap(volatile unsigned/**/char *addr, unsigned/**/char old_val, unsigned/**/char new_val) { return AO_char_fetch_compare_and_swap(addr, old_val, new_val) == old_val; } # define AO_HAVE_char_compare_and_swap #endif #if defined(AO_HAVE_char_fetch_compare_and_swap_release_write) \ && !defined(AO_HAVE_char_compare_and_swap_release_write) AO_INLINE int AO_char_compare_and_swap_release_write(volatile unsigned/**/char *addr, unsigned/**/char old_val, unsigned/**/char new_val) { return AO_char_fetch_compare_and_swap_release_write(addr, old_val, new_val) == old_val; } # define AO_HAVE_char_compare_and_swap_release_write #endif #if defined(AO_HAVE_char_fetch_compare_and_swap_acquire_read) \ && !defined(AO_HAVE_char_compare_and_swap_acquire_read) AO_INLINE int AO_char_compare_and_swap_acquire_read(volatile unsigned/**/char *addr, unsigned/**/char old_val, unsigned/**/char new_val) { return AO_char_fetch_compare_and_swap_acquire_read(addr, old_val, new_val) == old_val; } # define AO_HAVE_char_compare_and_swap_acquire_read #endif #if defined(AO_HAVE_char_fetch_compare_and_swap_dd_acquire_read) \ && !defined(AO_HAVE_char_compare_and_swap_dd_acquire_read) AO_INLINE int AO_char_compare_and_swap_dd_acquire_read(volatile unsigned/**/char *addr, unsigned/**/char old_val, unsigned/**/char new_val) { return AO_char_fetch_compare_and_swap_dd_acquire_read(addr, old_val, new_val) == old_val; } # define AO_HAVE_char_compare_and_swap_dd_acquire_read #endif /* char_fetch_and_add */ /* We first try to implement fetch_and_add variants in terms of the */ /* corresponding compare_and_swap variants to minimize adding barriers. 
*/ #if defined(AO_HAVE_char_compare_and_swap_full) \ && !defined(AO_HAVE_char_fetch_and_add_full) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE unsigned/**/char AO_char_fetch_and_add_full(volatile unsigned/**/char *addr, unsigned/**/char incr) { unsigned/**/char old; do { old = *(unsigned/**/char *)addr; } while (AO_EXPECT_FALSE(!AO_char_compare_and_swap_full(addr, old, old + incr))); return old; } # define AO_HAVE_char_fetch_and_add_full #endif #if defined(AO_HAVE_char_compare_and_swap_acquire) \ && !defined(AO_HAVE_char_fetch_and_add_acquire) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE unsigned/**/char AO_char_fetch_and_add_acquire(volatile unsigned/**/char *addr, unsigned/**/char incr) { unsigned/**/char old; do { old = *(unsigned/**/char *)addr; } while (AO_EXPECT_FALSE(!AO_char_compare_and_swap_acquire(addr, old, old + incr))); return old; } # define AO_HAVE_char_fetch_and_add_acquire #endif #if defined(AO_HAVE_char_compare_and_swap_release) \ && !defined(AO_HAVE_char_fetch_and_add_release) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE unsigned/**/char AO_char_fetch_and_add_release(volatile unsigned/**/char *addr, unsigned/**/char incr) { unsigned/**/char old; do { old = *(unsigned/**/char *)addr; } while (AO_EXPECT_FALSE(!AO_char_compare_and_swap_release(addr, old, old + incr))); return old; } # define AO_HAVE_char_fetch_and_add_release #endif #if defined(AO_HAVE_char_compare_and_swap) \ && !defined(AO_HAVE_char_fetch_and_add) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE unsigned/**/char AO_char_fetch_and_add(volatile unsigned/**/char *addr, unsigned/**/char incr) { unsigned/**/char old; do { old = *(unsigned/**/char *)addr; } while (AO_EXPECT_FALSE(!AO_char_compare_and_swap(addr, old, old + incr))); return old; } # define AO_HAVE_char_fetch_and_add #endif #if defined(AO_HAVE_char_fetch_and_add_full) # if !defined(AO_HAVE_char_fetch_and_add_release) # define AO_char_fetch_and_add_release(addr, val) \ AO_char_fetch_and_add_full(addr, val) # define AO_HAVE_char_fetch_and_add_release # endif # 
if !defined(AO_HAVE_char_fetch_and_add_acquire) # define AO_char_fetch_and_add_acquire(addr, val) \ AO_char_fetch_and_add_full(addr, val) # define AO_HAVE_char_fetch_and_add_acquire # endif # if !defined(AO_HAVE_char_fetch_and_add_write) # define AO_char_fetch_and_add_write(addr, val) \ AO_char_fetch_and_add_full(addr, val) # define AO_HAVE_char_fetch_and_add_write # endif # if !defined(AO_HAVE_char_fetch_and_add_read) # define AO_char_fetch_and_add_read(addr, val) \ AO_char_fetch_and_add_full(addr, val) # define AO_HAVE_char_fetch_and_add_read # endif #endif /* AO_HAVE_char_fetch_and_add_full */ #if defined(AO_HAVE_char_fetch_and_add) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_char_fetch_and_add_acquire) AO_INLINE unsigned/**/char AO_char_fetch_and_add_acquire(volatile unsigned/**/char *addr, unsigned/**/char incr) { unsigned/**/char result = AO_char_fetch_and_add(addr, incr); AO_nop_full(); return result; } # define AO_HAVE_char_fetch_and_add_acquire #endif #if defined(AO_HAVE_char_fetch_and_add) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_char_fetch_and_add_release) # define AO_char_fetch_and_add_release(addr, incr) \ (AO_nop_full(), AO_char_fetch_and_add(addr, incr)) # define AO_HAVE_char_fetch_and_add_release #endif #if !defined(AO_HAVE_char_fetch_and_add) \ && defined(AO_HAVE_char_fetch_and_add_release) # define AO_char_fetch_and_add(addr, val) \ AO_char_fetch_and_add_release(addr, val) # define AO_HAVE_char_fetch_and_add #endif #if !defined(AO_HAVE_char_fetch_and_add) \ && defined(AO_HAVE_char_fetch_and_add_acquire) # define AO_char_fetch_and_add(addr, val) \ AO_char_fetch_and_add_acquire(addr, val) # define AO_HAVE_char_fetch_and_add #endif #if !defined(AO_HAVE_char_fetch_and_add) \ && defined(AO_HAVE_char_fetch_and_add_write) # define AO_char_fetch_and_add(addr, val) \ AO_char_fetch_and_add_write(addr, val) # define AO_HAVE_char_fetch_and_add #endif #if !defined(AO_HAVE_char_fetch_and_add) \ && defined(AO_HAVE_char_fetch_and_add_read) # 
define AO_char_fetch_and_add(addr, val) \ AO_char_fetch_and_add_read(addr, val) # define AO_HAVE_char_fetch_and_add #endif #if defined(AO_HAVE_char_fetch_and_add_acquire) \ && defined(AO_HAVE_nop_full) && !defined(AO_HAVE_char_fetch_and_add_full) # define AO_char_fetch_and_add_full(addr, val) \ (AO_nop_full(), AO_char_fetch_and_add_acquire(addr, val)) # define AO_HAVE_char_fetch_and_add_full #endif #if !defined(AO_HAVE_char_fetch_and_add_release_write) \ && defined(AO_HAVE_char_fetch_and_add_write) # define AO_char_fetch_and_add_release_write(addr, val) \ AO_char_fetch_and_add_write(addr, val) # define AO_HAVE_char_fetch_and_add_release_write #endif #if !defined(AO_HAVE_char_fetch_and_add_release_write) \ && defined(AO_HAVE_char_fetch_and_add_release) # define AO_char_fetch_and_add_release_write(addr, val) \ AO_char_fetch_and_add_release(addr, val) # define AO_HAVE_char_fetch_and_add_release_write #endif #if !defined(AO_HAVE_char_fetch_and_add_acquire_read) \ && defined(AO_HAVE_char_fetch_and_add_read) # define AO_char_fetch_and_add_acquire_read(addr, val) \ AO_char_fetch_and_add_read(addr, val) # define AO_HAVE_char_fetch_and_add_acquire_read #endif #if !defined(AO_HAVE_char_fetch_and_add_acquire_read) \ && defined(AO_HAVE_char_fetch_and_add_acquire) # define AO_char_fetch_and_add_acquire_read(addr, val) \ AO_char_fetch_and_add_acquire(addr, val) # define AO_HAVE_char_fetch_and_add_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_char_fetch_and_add_acquire_read) # define AO_char_fetch_and_add_dd_acquire_read(addr, val) \ AO_char_fetch_and_add_acquire_read(addr, val) # define AO_HAVE_char_fetch_and_add_dd_acquire_read # endif #else # if defined(AO_HAVE_char_fetch_and_add) # define AO_char_fetch_and_add_dd_acquire_read(addr, val) \ AO_char_fetch_and_add(addr, val) # define AO_HAVE_char_fetch_and_add_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* char_fetch_and_add1 */ #if defined(AO_HAVE_char_fetch_and_add_full) \ && 
!defined(AO_HAVE_char_fetch_and_add1_full) # define AO_char_fetch_and_add1_full(addr) \ AO_char_fetch_and_add_full(addr, 1) # define AO_HAVE_char_fetch_and_add1_full #endif #if defined(AO_HAVE_char_fetch_and_add_release) \ && !defined(AO_HAVE_char_fetch_and_add1_release) # define AO_char_fetch_and_add1_release(addr) \ AO_char_fetch_and_add_release(addr, 1) # define AO_HAVE_char_fetch_and_add1_release #endif #if defined(AO_HAVE_char_fetch_and_add_acquire) \ && !defined(AO_HAVE_char_fetch_and_add1_acquire) # define AO_char_fetch_and_add1_acquire(addr) \ AO_char_fetch_and_add_acquire(addr, 1) # define AO_HAVE_char_fetch_and_add1_acquire #endif #if defined(AO_HAVE_char_fetch_and_add_write) \ && !defined(AO_HAVE_char_fetch_and_add1_write) # define AO_char_fetch_and_add1_write(addr) \ AO_char_fetch_and_add_write(addr, 1) # define AO_HAVE_char_fetch_and_add1_write #endif #if defined(AO_HAVE_char_fetch_and_add_read) \ && !defined(AO_HAVE_char_fetch_and_add1_read) # define AO_char_fetch_and_add1_read(addr) \ AO_char_fetch_and_add_read(addr, 1) # define AO_HAVE_char_fetch_and_add1_read #endif #if defined(AO_HAVE_char_fetch_and_add_release_write) \ && !defined(AO_HAVE_char_fetch_and_add1_release_write) # define AO_char_fetch_and_add1_release_write(addr) \ AO_char_fetch_and_add_release_write(addr, 1) # define AO_HAVE_char_fetch_and_add1_release_write #endif #if defined(AO_HAVE_char_fetch_and_add_acquire_read) \ && !defined(AO_HAVE_char_fetch_and_add1_acquire_read) # define AO_char_fetch_and_add1_acquire_read(addr) \ AO_char_fetch_and_add_acquire_read(addr, 1) # define AO_HAVE_char_fetch_and_add1_acquire_read #endif #if defined(AO_HAVE_char_fetch_and_add) \ && !defined(AO_HAVE_char_fetch_and_add1) # define AO_char_fetch_and_add1(addr) AO_char_fetch_and_add(addr, 1) # define AO_HAVE_char_fetch_and_add1 #endif #if defined(AO_HAVE_char_fetch_and_add1_full) # if !defined(AO_HAVE_char_fetch_and_add1_release) # define AO_char_fetch_and_add1_release(addr) \ 
AO_char_fetch_and_add1_full(addr) # define AO_HAVE_char_fetch_and_add1_release # endif # if !defined(AO_HAVE_char_fetch_and_add1_acquire) # define AO_char_fetch_and_add1_acquire(addr) \ AO_char_fetch_and_add1_full(addr) # define AO_HAVE_char_fetch_and_add1_acquire # endif # if !defined(AO_HAVE_char_fetch_and_add1_write) # define AO_char_fetch_and_add1_write(addr) \ AO_char_fetch_and_add1_full(addr) # define AO_HAVE_char_fetch_and_add1_write # endif # if !defined(AO_HAVE_char_fetch_and_add1_read) # define AO_char_fetch_and_add1_read(addr) \ AO_char_fetch_and_add1_full(addr) # define AO_HAVE_char_fetch_and_add1_read # endif #endif /* AO_HAVE_char_fetch_and_add1_full */ #if !defined(AO_HAVE_char_fetch_and_add1) \ && defined(AO_HAVE_char_fetch_and_add1_release) # define AO_char_fetch_and_add1(addr) AO_char_fetch_and_add1_release(addr) # define AO_HAVE_char_fetch_and_add1 #endif #if !defined(AO_HAVE_char_fetch_and_add1) \ && defined(AO_HAVE_char_fetch_and_add1_acquire) # define AO_char_fetch_and_add1(addr) AO_char_fetch_and_add1_acquire(addr) # define AO_HAVE_char_fetch_and_add1 #endif #if !defined(AO_HAVE_char_fetch_and_add1) \ && defined(AO_HAVE_char_fetch_and_add1_write) # define AO_char_fetch_and_add1(addr) AO_char_fetch_and_add1_write(addr) # define AO_HAVE_char_fetch_and_add1 #endif #if !defined(AO_HAVE_char_fetch_and_add1) \ && defined(AO_HAVE_char_fetch_and_add1_read) # define AO_char_fetch_and_add1(addr) AO_char_fetch_and_add1_read(addr) # define AO_HAVE_char_fetch_and_add1 #endif #if defined(AO_HAVE_char_fetch_and_add1_acquire) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_char_fetch_and_add1_full) # define AO_char_fetch_and_add1_full(addr) \ (AO_nop_full(), AO_char_fetch_and_add1_acquire(addr)) # define AO_HAVE_char_fetch_and_add1_full #endif #if !defined(AO_HAVE_char_fetch_and_add1_release_write) \ && defined(AO_HAVE_char_fetch_and_add1_write) # define AO_char_fetch_and_add1_release_write(addr) \ AO_char_fetch_and_add1_write(addr) # define 
AO_HAVE_char_fetch_and_add1_release_write #endif #if !defined(AO_HAVE_char_fetch_and_add1_release_write) \ && defined(AO_HAVE_char_fetch_and_add1_release) # define AO_char_fetch_and_add1_release_write(addr) \ AO_char_fetch_and_add1_release(addr) # define AO_HAVE_char_fetch_and_add1_release_write #endif #if !defined(AO_HAVE_char_fetch_and_add1_acquire_read) \ && defined(AO_HAVE_char_fetch_and_add1_read) # define AO_char_fetch_and_add1_acquire_read(addr) \ AO_char_fetch_and_add1_read(addr) # define AO_HAVE_char_fetch_and_add1_acquire_read #endif #if !defined(AO_HAVE_char_fetch_and_add1_acquire_read) \ && defined(AO_HAVE_char_fetch_and_add1_acquire) # define AO_char_fetch_and_add1_acquire_read(addr) \ AO_char_fetch_and_add1_acquire(addr) # define AO_HAVE_char_fetch_and_add1_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_char_fetch_and_add1_acquire_read) # define AO_char_fetch_and_add1_dd_acquire_read(addr) \ AO_char_fetch_and_add1_acquire_read(addr) # define AO_HAVE_char_fetch_and_add1_dd_acquire_read # endif #else # if defined(AO_HAVE_char_fetch_and_add1) # define AO_char_fetch_and_add1_dd_acquire_read(addr) \ AO_char_fetch_and_add1(addr) # define AO_HAVE_char_fetch_and_add1_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* char_fetch_and_sub1 */ #if defined(AO_HAVE_char_fetch_and_add_full) \ && !defined(AO_HAVE_char_fetch_and_sub1_full) # define AO_char_fetch_and_sub1_full(addr) \ AO_char_fetch_and_add_full(addr, (unsigned/**/char)(-1)) # define AO_HAVE_char_fetch_and_sub1_full #endif #if defined(AO_HAVE_char_fetch_and_add_release) \ && !defined(AO_HAVE_char_fetch_and_sub1_release) # define AO_char_fetch_and_sub1_release(addr) \ AO_char_fetch_and_add_release(addr, (unsigned/**/char)(-1)) # define AO_HAVE_char_fetch_and_sub1_release #endif #if defined(AO_HAVE_char_fetch_and_add_acquire) \ && !defined(AO_HAVE_char_fetch_and_sub1_acquire) # define AO_char_fetch_and_sub1_acquire(addr) \ AO_char_fetch_and_add_acquire(addr, 
(unsigned/**/char)(-1)) # define AO_HAVE_char_fetch_and_sub1_acquire #endif #if defined(AO_HAVE_char_fetch_and_add_write) \ && !defined(AO_HAVE_char_fetch_and_sub1_write) # define AO_char_fetch_and_sub1_write(addr) \ AO_char_fetch_and_add_write(addr, (unsigned/**/char)(-1)) # define AO_HAVE_char_fetch_and_sub1_write #endif #if defined(AO_HAVE_char_fetch_and_add_read) \ && !defined(AO_HAVE_char_fetch_and_sub1_read) # define AO_char_fetch_and_sub1_read(addr) \ AO_char_fetch_and_add_read(addr, (unsigned/**/char)(-1)) # define AO_HAVE_char_fetch_and_sub1_read #endif #if defined(AO_HAVE_char_fetch_and_add_release_write) \ && !defined(AO_HAVE_char_fetch_and_sub1_release_write) # define AO_char_fetch_and_sub1_release_write(addr) \ AO_char_fetch_and_add_release_write(addr, (unsigned/**/char)(-1)) # define AO_HAVE_char_fetch_and_sub1_release_write #endif #if defined(AO_HAVE_char_fetch_and_add_acquire_read) \ && !defined(AO_HAVE_char_fetch_and_sub1_acquire_read) # define AO_char_fetch_and_sub1_acquire_read(addr) \ AO_char_fetch_and_add_acquire_read(addr, (unsigned/**/char)(-1)) # define AO_HAVE_char_fetch_and_sub1_acquire_read #endif #if defined(AO_HAVE_char_fetch_and_add) \ && !defined(AO_HAVE_char_fetch_and_sub1) # define AO_char_fetch_and_sub1(addr) \ AO_char_fetch_and_add(addr, (unsigned/**/char)(-1)) # define AO_HAVE_char_fetch_and_sub1 #endif #if defined(AO_HAVE_char_fetch_and_sub1_full) # if !defined(AO_HAVE_char_fetch_and_sub1_release) # define AO_char_fetch_and_sub1_release(addr) \ AO_char_fetch_and_sub1_full(addr) # define AO_HAVE_char_fetch_and_sub1_release # endif # if !defined(AO_HAVE_char_fetch_and_sub1_acquire) # define AO_char_fetch_and_sub1_acquire(addr) \ AO_char_fetch_and_sub1_full(addr) # define AO_HAVE_char_fetch_and_sub1_acquire # endif # if !defined(AO_HAVE_char_fetch_and_sub1_write) # define AO_char_fetch_and_sub1_write(addr) \ AO_char_fetch_and_sub1_full(addr) # define AO_HAVE_char_fetch_and_sub1_write # endif # if 
!defined(AO_HAVE_char_fetch_and_sub1_read)
#   define AO_char_fetch_and_sub1_read(addr) \
                                AO_char_fetch_and_sub1_full(addr)
#   define AO_HAVE_char_fetch_and_sub1_read
# endif
#endif /* AO_HAVE_char_fetch_and_sub1_full */

/* Derive the unordered fetch_and_sub1 from whichever ordered variant */
/* exists; the textual order of the blocks below fixes the preference */
/* (release, then acquire, then write, then read).                    */
#if !defined(AO_HAVE_char_fetch_and_sub1) \
    && defined(AO_HAVE_char_fetch_and_sub1_release)
# define AO_char_fetch_and_sub1(addr) AO_char_fetch_and_sub1_release(addr)
# define AO_HAVE_char_fetch_and_sub1
#endif
#if !defined(AO_HAVE_char_fetch_and_sub1) \
    && defined(AO_HAVE_char_fetch_and_sub1_acquire)
# define AO_char_fetch_and_sub1(addr) AO_char_fetch_and_sub1_acquire(addr)
# define AO_HAVE_char_fetch_and_sub1
#endif
#if !defined(AO_HAVE_char_fetch_and_sub1) \
    && defined(AO_HAVE_char_fetch_and_sub1_write)
# define AO_char_fetch_and_sub1(addr) AO_char_fetch_and_sub1_write(addr)
# define AO_HAVE_char_fetch_and_sub1
#endif
#if !defined(AO_HAVE_char_fetch_and_sub1) \
    && defined(AO_HAVE_char_fetch_and_sub1_read)
# define AO_char_fetch_and_sub1(addr) AO_char_fetch_and_sub1_read(addr)
# define AO_HAVE_char_fetch_and_sub1
#endif

/* Synthesize the full-barrier variant by issuing AO_nop_full()       */
/* before the acquire variant.                                        */
#if defined(AO_HAVE_char_fetch_and_sub1_acquire) \
    && defined(AO_HAVE_nop_full) \
    && !defined(AO_HAVE_char_fetch_and_sub1_full)
# define AO_char_fetch_and_sub1_full(addr) \
                        (AO_nop_full(), AO_char_fetch_and_sub1_acquire(addr))
# define AO_HAVE_char_fetch_and_sub1_full
#endif

#if !defined(AO_HAVE_char_fetch_and_sub1_release_write) \
    && defined(AO_HAVE_char_fetch_and_sub1_write)
# define AO_char_fetch_and_sub1_release_write(addr) \
                                AO_char_fetch_and_sub1_write(addr)
# define AO_HAVE_char_fetch_and_sub1_release_write
#endif
#if !defined(AO_HAVE_char_fetch_and_sub1_release_write) \
    && defined(AO_HAVE_char_fetch_and_sub1_release)
# define AO_char_fetch_and_sub1_release_write(addr) \
                                AO_char_fetch_and_sub1_release(addr)
# define AO_HAVE_char_fetch_and_sub1_release_write
#endif
#if !defined(AO_HAVE_char_fetch_and_sub1_acquire_read) \
    && defined(AO_HAVE_char_fetch_and_sub1_read)
# define AO_char_fetch_and_sub1_acquire_read(addr) \
                                AO_char_fetch_and_sub1_read(addr)
# define AO_HAVE_char_fetch_and_sub1_acquire_read
#endif
#if !defined(AO_HAVE_char_fetch_and_sub1_acquire_read) \
    && defined(AO_HAVE_char_fetch_and_sub1_acquire)
# define AO_char_fetch_and_sub1_acquire_read(addr) \
                                AO_char_fetch_and_sub1_acquire(addr)
# define AO_HAVE_char_fetch_and_sub1_acquire_read
#endif

/* When data dependence does not imply read ordering                  */
/* (AO_NO_DD_ORDERING), dd_acquire_read needs a real acquire_read;    */
/* otherwise the unordered variant is used.                           */
#ifdef AO_NO_DD_ORDERING
# if defined(AO_HAVE_char_fetch_and_sub1_acquire_read)
#   define AO_char_fetch_and_sub1_dd_acquire_read(addr) \
                                AO_char_fetch_and_sub1_acquire_read(addr)
#   define AO_HAVE_char_fetch_and_sub1_dd_acquire_read
# endif
#else
# if defined(AO_HAVE_char_fetch_and_sub1)
#   define AO_char_fetch_and_sub1_dd_acquire_read(addr) \
                                AO_char_fetch_and_sub1(addr)
#   define AO_HAVE_char_fetch_and_sub1_dd_acquire_read
# endif
#endif /* !AO_NO_DD_ORDERING */

/* char_and */
/* Emulate atomic bitwise-and via a compare-and-swap retry loop.      */
#if defined(AO_HAVE_char_compare_and_swap_full) \
    && !defined(AO_HAVE_char_and_full)
AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE void
AO_char_and_full(volatile unsigned/**/char *addr, unsigned/**/char value)
{
  unsigned/**/char old;

  do
    {
      old = *(unsigned/**/char *)addr;
    }
  while (AO_EXPECT_FALSE(!AO_char_compare_and_swap_full(addr, old,
                                                        old & value)));
}
# define AO_HAVE_char_and_full
#endif

/* A full-barrier operation also satisfies every weaker ordering.     */
#if defined(AO_HAVE_char_and_full)
# if !defined(AO_HAVE_char_and_release)
#   define AO_char_and_release(addr, val) AO_char_and_full(addr, val)
#   define AO_HAVE_char_and_release
# endif
# if !defined(AO_HAVE_char_and_acquire)
#   define AO_char_and_acquire(addr, val) AO_char_and_full(addr, val)
#   define AO_HAVE_char_and_acquire
# endif
# if !defined(AO_HAVE_char_and_write)
#   define AO_char_and_write(addr, val) AO_char_and_full(addr, val)
#   define AO_HAVE_char_and_write
# endif
# if !defined(AO_HAVE_char_and_read)
#   define AO_char_and_read(addr, val) AO_char_and_full(addr, val)
#   define AO_HAVE_char_and_read
# endif
#endif /* AO_HAVE_char_and_full */

#if !defined(AO_HAVE_char_and) && defined(AO_HAVE_char_and_release)
# define AO_char_and(addr, val) AO_char_and_release(addr, val)
# define AO_HAVE_char_and
#endif
#if !defined(AO_HAVE_char_and) && defined(AO_HAVE_char_and_acquire)
# define AO_char_and(addr, val) AO_char_and_acquire(addr, val)
# define AO_HAVE_char_and
#endif
#if !defined(AO_HAVE_char_and) && defined(AO_HAVE_char_and_write)
# define AO_char_and(addr, val) AO_char_and_write(addr, val)
# define AO_HAVE_char_and
#endif
#if !defined(AO_HAVE_char_and) && defined(AO_HAVE_char_and_read)
# define AO_char_and(addr, val) AO_char_and_read(addr, val)
# define AO_HAVE_char_and
#endif

#if defined(AO_HAVE_char_and_acquire) && defined(AO_HAVE_nop_full) \
    && !defined(AO_HAVE_char_and_full)
# define AO_char_and_full(addr, val) \
                        (AO_nop_full(), AO_char_and_acquire(addr, val))
# define AO_HAVE_char_and_full
#endif

#if !defined(AO_HAVE_char_and_release_write) \
    && defined(AO_HAVE_char_and_write)
# define AO_char_and_release_write(addr, val) AO_char_and_write(addr, val)
# define AO_HAVE_char_and_release_write
#endif
#if !defined(AO_HAVE_char_and_release_write) \
    && defined(AO_HAVE_char_and_release)
# define AO_char_and_release_write(addr, val) AO_char_and_release(addr, val)
# define AO_HAVE_char_and_release_write
#endif
#if !defined(AO_HAVE_char_and_acquire_read) \
    && defined(AO_HAVE_char_and_read)
# define AO_char_and_acquire_read(addr, val) AO_char_and_read(addr, val)
# define AO_HAVE_char_and_acquire_read
#endif
#if !defined(AO_HAVE_char_and_acquire_read) \
    && defined(AO_HAVE_char_and_acquire)
# define AO_char_and_acquire_read(addr, val) AO_char_and_acquire(addr, val)
# define AO_HAVE_char_and_acquire_read
#endif

/* char_or */
/* Emulate atomic bitwise-or via a compare-and-swap retry loop.       */
#if defined(AO_HAVE_char_compare_and_swap_full) \
    && !defined(AO_HAVE_char_or_full)
AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE void
AO_char_or_full(volatile unsigned/**/char *addr, unsigned/**/char value)
{
  unsigned/**/char old;

  do
    {
      old = *(unsigned/**/char *)addr;
    }
  while (AO_EXPECT_FALSE(!AO_char_compare_and_swap_full(addr, old,
                                                        old | value)));
}
# define AO_HAVE_char_or_full
#endif

#if defined(AO_HAVE_char_or_full)
# if !defined(AO_HAVE_char_or_release)
#   define AO_char_or_release(addr, val) AO_char_or_full(addr, val)
#   define AO_HAVE_char_or_release
# endif
# if !defined(AO_HAVE_char_or_acquire)
#   define AO_char_or_acquire(addr, val) AO_char_or_full(addr, val)
#   define AO_HAVE_char_or_acquire
# endif
# if !defined(AO_HAVE_char_or_write)
#   define AO_char_or_write(addr, val) AO_char_or_full(addr, val)
#   define AO_HAVE_char_or_write
# endif
# if !defined(AO_HAVE_char_or_read)
#   define AO_char_or_read(addr, val) AO_char_or_full(addr, val)
#   define AO_HAVE_char_or_read
# endif
#endif /* AO_HAVE_char_or_full */

#if !defined(AO_HAVE_char_or) && defined(AO_HAVE_char_or_release)
# define AO_char_or(addr, val) AO_char_or_release(addr, val)
# define AO_HAVE_char_or
#endif
#if !defined(AO_HAVE_char_or) && defined(AO_HAVE_char_or_acquire)
# define AO_char_or(addr, val) AO_char_or_acquire(addr, val)
# define AO_HAVE_char_or
#endif
#if !defined(AO_HAVE_char_or) && defined(AO_HAVE_char_or_write)
# define AO_char_or(addr, val) AO_char_or_write(addr, val)
# define AO_HAVE_char_or
#endif
#if !defined(AO_HAVE_char_or) && defined(AO_HAVE_char_or_read)
# define AO_char_or(addr, val) AO_char_or_read(addr, val)
# define AO_HAVE_char_or
#endif

#if defined(AO_HAVE_char_or_acquire) && defined(AO_HAVE_nop_full) \
    && !defined(AO_HAVE_char_or_full)
# define AO_char_or_full(addr, val) \
                        (AO_nop_full(), AO_char_or_acquire(addr, val))
# define AO_HAVE_char_or_full
#endif

#if !defined(AO_HAVE_char_or_release_write) \
    && defined(AO_HAVE_char_or_write)
# define AO_char_or_release_write(addr, val) AO_char_or_write(addr, val)
# define AO_HAVE_char_or_release_write
#endif
#if !defined(AO_HAVE_char_or_release_write) \
    && defined(AO_HAVE_char_or_release)
# define AO_char_or_release_write(addr, val) AO_char_or_release(addr, val)
# define AO_HAVE_char_or_release_write
#endif
#if !defined(AO_HAVE_char_or_acquire_read) && defined(AO_HAVE_char_or_read)
# define AO_char_or_acquire_read(addr, val) AO_char_or_read(addr, val)
# define AO_HAVE_char_or_acquire_read
#endif
#if !defined(AO_HAVE_char_or_acquire_read) \
    && defined(AO_HAVE_char_or_acquire)
# define AO_char_or_acquire_read(addr, val) AO_char_or_acquire(addr, val)
# define AO_HAVE_char_or_acquire_read
#endif

/* char_xor */
/* Emulate atomic bitwise-xor via a compare-and-swap retry loop.      */
#if defined(AO_HAVE_char_compare_and_swap_full) \
    && !defined(AO_HAVE_char_xor_full)
AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE void
AO_char_xor_full(volatile unsigned/**/char *addr, unsigned/**/char value)
{
  unsigned/**/char old;

  do
    {
      old = *(unsigned/**/char *)addr;
    }
  while (AO_EXPECT_FALSE(!AO_char_compare_and_swap_full(addr, old,
                                                        old ^ value)));
}
# define AO_HAVE_char_xor_full
#endif

#if defined(AO_HAVE_char_xor_full)
# if !defined(AO_HAVE_char_xor_release)
#   define AO_char_xor_release(addr, val) AO_char_xor_full(addr, val)
#   define AO_HAVE_char_xor_release
# endif
# if !defined(AO_HAVE_char_xor_acquire)
#   define AO_char_xor_acquire(addr, val) AO_char_xor_full(addr, val)
#   define AO_HAVE_char_xor_acquire
# endif
# if !defined(AO_HAVE_char_xor_write)
#   define AO_char_xor_write(addr, val) AO_char_xor_full(addr, val)
#   define AO_HAVE_char_xor_write
# endif
# if !defined(AO_HAVE_char_xor_read)
#   define AO_char_xor_read(addr, val) AO_char_xor_full(addr, val)
#   define AO_HAVE_char_xor_read
# endif
#endif /* AO_HAVE_char_xor_full */

#if !defined(AO_HAVE_char_xor) && defined(AO_HAVE_char_xor_release)
# define AO_char_xor(addr, val) AO_char_xor_release(addr, val)
# define AO_HAVE_char_xor
#endif
#if !defined(AO_HAVE_char_xor) && defined(AO_HAVE_char_xor_acquire)
# define AO_char_xor(addr, val) AO_char_xor_acquire(addr, val)
# define AO_HAVE_char_xor
#endif
#if !defined(AO_HAVE_char_xor) && defined(AO_HAVE_char_xor_write)
# define AO_char_xor(addr, val) AO_char_xor_write(addr, val)
# define AO_HAVE_char_xor
#endif
#if !defined(AO_HAVE_char_xor) && defined(AO_HAVE_char_xor_read)
# define AO_char_xor(addr, val) AO_char_xor_read(addr, val)
# define AO_HAVE_char_xor
#endif

#if defined(AO_HAVE_char_xor_acquire) && defined(AO_HAVE_nop_full) \
    && !defined(AO_HAVE_char_xor_full)
# define AO_char_xor_full(addr, val) \
                        (AO_nop_full(), AO_char_xor_acquire(addr, val))
# define AO_HAVE_char_xor_full
#endif

#if !defined(AO_HAVE_char_xor_release_write) \
    && defined(AO_HAVE_char_xor_write)
# define AO_char_xor_release_write(addr, val) AO_char_xor_write(addr, val)
# define AO_HAVE_char_xor_release_write
#endif
#if !defined(AO_HAVE_char_xor_release_write) \
    && defined(AO_HAVE_char_xor_release)
# define AO_char_xor_release_write(addr, val) AO_char_xor_release(addr, val)
# define AO_HAVE_char_xor_release_write
#endif
#if !defined(AO_HAVE_char_xor_acquire_read) \
    && defined(AO_HAVE_char_xor_read)
# define AO_char_xor_acquire_read(addr, val) AO_char_xor_read(addr, val)
# define AO_HAVE_char_xor_acquire_read
#endif
#if !defined(AO_HAVE_char_xor_acquire_read) \
    && defined(AO_HAVE_char_xor_acquire)
# define AO_char_xor_acquire_read(addr, val) AO_char_xor_acquire(addr, val)
# define AO_HAVE_char_xor_acquire_read
#endif

/* char_and/or/xor_dd_acquire_read are meaningless. */

/*
 * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/* short_compare_and_swap (based on fetch_compare_and_swap) */
/* Each boolean compare_and_swap variant below is derived from the    */
/* fetch_compare_and_swap variant with the same ordering suffix: it   */
/* succeeds iff the fetched previous value equals old_val.            */
#if defined(AO_HAVE_short_fetch_compare_and_swap_full) \
    && !defined(AO_HAVE_short_compare_and_swap_full)
AO_INLINE int
AO_short_compare_and_swap_full(volatile unsigned/**/short *addr,
                               unsigned/**/short old_val,
                               unsigned/**/short new_val)
{
  return AO_short_fetch_compare_and_swap_full(addr, old_val, new_val)
           == old_val;
}
# define AO_HAVE_short_compare_and_swap_full
#endif

#if defined(AO_HAVE_short_fetch_compare_and_swap_acquire) \
    && !defined(AO_HAVE_short_compare_and_swap_acquire)
AO_INLINE int
AO_short_compare_and_swap_acquire(volatile unsigned/**/short *addr,
                                  unsigned/**/short old_val,
                                  unsigned/**/short new_val)
{
  return AO_short_fetch_compare_and_swap_acquire(addr, old_val, new_val)
           == old_val;
}
# define AO_HAVE_short_compare_and_swap_acquire
#endif

#if defined(AO_HAVE_short_fetch_compare_and_swap_release) \
    && !defined(AO_HAVE_short_compare_and_swap_release)
AO_INLINE int
AO_short_compare_and_swap_release(volatile unsigned/**/short *addr,
                                  unsigned/**/short old_val,
                                  unsigned/**/short new_val)
{
  return AO_short_fetch_compare_and_swap_release(addr, old_val, new_val)
           == old_val;
}
# define AO_HAVE_short_compare_and_swap_release
#endif

#if defined(AO_HAVE_short_fetch_compare_and_swap_write) \
    && !defined(AO_HAVE_short_compare_and_swap_write)
AO_INLINE int
AO_short_compare_and_swap_write(volatile unsigned/**/short *addr,
                                unsigned/**/short old_val,
                                unsigned/**/short new_val)
{
  return AO_short_fetch_compare_and_swap_write(addr, old_val, new_val)
           == old_val;
}
# define AO_HAVE_short_compare_and_swap_write
#endif

#if defined(AO_HAVE_short_fetch_compare_and_swap_read) \
    && !defined(AO_HAVE_short_compare_and_swap_read)
AO_INLINE int
AO_short_compare_and_swap_read(volatile unsigned/**/short *addr,
                               unsigned/**/short old_val,
                               unsigned/**/short new_val)
{
  return AO_short_fetch_compare_and_swap_read(addr, old_val, new_val)
           == old_val;
}
# define AO_HAVE_short_compare_and_swap_read
#endif

#if defined(AO_HAVE_short_fetch_compare_and_swap) \
    && !defined(AO_HAVE_short_compare_and_swap)
AO_INLINE int
AO_short_compare_and_swap(volatile unsigned/**/short *addr,
                          unsigned/**/short old_val,
                          unsigned/**/short new_val)
{
  return AO_short_fetch_compare_and_swap(addr, old_val, new_val)
           == old_val;
}
# define AO_HAVE_short_compare_and_swap
#endif

#if defined(AO_HAVE_short_fetch_compare_and_swap_release_write) \
    && !defined(AO_HAVE_short_compare_and_swap_release_write)
AO_INLINE int
AO_short_compare_and_swap_release_write(volatile unsigned/**/short *addr,
                                        unsigned/**/short old_val,
                                        unsigned/**/short new_val)
{
  return AO_short_fetch_compare_and_swap_release_write(addr, old_val,
                                                       new_val) == old_val;
}
# define AO_HAVE_short_compare_and_swap_release_write
#endif

#if defined(AO_HAVE_short_fetch_compare_and_swap_acquire_read) \
    && !defined(AO_HAVE_short_compare_and_swap_acquire_read)
AO_INLINE int
AO_short_compare_and_swap_acquire_read(volatile unsigned/**/short *addr,
                                       unsigned/**/short old_val,
                                       unsigned/**/short new_val)
{
  return AO_short_fetch_compare_and_swap_acquire_read(addr, old_val,
                                                      new_val) == old_val;
}
# define AO_HAVE_short_compare_and_swap_acquire_read
#endif

#if defined(AO_HAVE_short_fetch_compare_and_swap_dd_acquire_read) \
    && !defined(AO_HAVE_short_compare_and_swap_dd_acquire_read)
AO_INLINE int
AO_short_compare_and_swap_dd_acquire_read(volatile unsigned/**/short *addr,
                                          unsigned/**/short old_val,
                                          unsigned/**/short new_val)
{
  return AO_short_fetch_compare_and_swap_dd_acquire_read(addr, old_val,
                                                         new_val) == old_val;
}
# define AO_HAVE_short_compare_and_swap_dd_acquire_read
#endif

/* short_fetch_and_add */
/* We first try to implement fetch_and_add variants in terms of the */
/* corresponding
compare_and_swap variants to minimize adding barriers. */
#if defined(AO_HAVE_short_compare_and_swap_full) \
    && !defined(AO_HAVE_short_fetch_and_add_full)
AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE unsigned/**/short
AO_short_fetch_and_add_full(volatile unsigned/**/short *addr,
                            unsigned/**/short incr)
{
  unsigned/**/short old;

  do
    {
      old = *(unsigned/**/short *)addr;
    }
  while (AO_EXPECT_FALSE(!AO_short_compare_and_swap_full(addr, old,
                                                         old + incr)));
  return old;
}
# define AO_HAVE_short_fetch_and_add_full
#endif

#if defined(AO_HAVE_short_compare_and_swap_acquire) \
    && !defined(AO_HAVE_short_fetch_and_add_acquire)
AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE unsigned/**/short
AO_short_fetch_and_add_acquire(volatile unsigned/**/short *addr,
                               unsigned/**/short incr)
{
  unsigned/**/short old;

  do
    {
      old = *(unsigned/**/short *)addr;
    }
  while (AO_EXPECT_FALSE(!AO_short_compare_and_swap_acquire(addr, old,
                                                            old + incr)));
  return old;
}
# define AO_HAVE_short_fetch_and_add_acquire
#endif

#if defined(AO_HAVE_short_compare_and_swap_release) \
    && !defined(AO_HAVE_short_fetch_and_add_release)
AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE unsigned/**/short
AO_short_fetch_and_add_release(volatile unsigned/**/short *addr,
                               unsigned/**/short incr)
{
  unsigned/**/short old;

  do
    {
      old = *(unsigned/**/short *)addr;
    }
  while (AO_EXPECT_FALSE(!AO_short_compare_and_swap_release(addr, old,
                                                            old + incr)));
  return old;
}
# define AO_HAVE_short_fetch_and_add_release
#endif

#if defined(AO_HAVE_short_compare_and_swap) \
    && !defined(AO_HAVE_short_fetch_and_add)
AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE unsigned/**/short
AO_short_fetch_and_add(volatile unsigned/**/short *addr,
                       unsigned/**/short incr)
{
  unsigned/**/short old;

  do
    {
      old = *(unsigned/**/short *)addr;
    }
  while (AO_EXPECT_FALSE(!AO_short_compare_and_swap(addr, old,
                                                    old + incr)));
  return old;
}
# define AO_HAVE_short_fetch_and_add
#endif

/* A full-barrier fetch_and_add also satisfies every weaker ordering. */
#if defined(AO_HAVE_short_fetch_and_add_full)
# if !defined(AO_HAVE_short_fetch_and_add_release)
#   define AO_short_fetch_and_add_release(addr, val) \
                                AO_short_fetch_and_add_full(addr, val)
#   define AO_HAVE_short_fetch_and_add_release
# endif
# if !defined(AO_HAVE_short_fetch_and_add_acquire)
#   define AO_short_fetch_and_add_acquire(addr, val) \
                                AO_short_fetch_and_add_full(addr, val)
#   define AO_HAVE_short_fetch_and_add_acquire
# endif
# if !defined(AO_HAVE_short_fetch_and_add_write)
#   define AO_short_fetch_and_add_write(addr, val) \
                                AO_short_fetch_and_add_full(addr, val)
#   define AO_HAVE_short_fetch_and_add_write
# endif
# if !defined(AO_HAVE_short_fetch_and_add_read)
#   define AO_short_fetch_and_add_read(addr, val) \
                                AO_short_fetch_and_add_full(addr, val)
#   define AO_HAVE_short_fetch_and_add_read
# endif
#endif /* AO_HAVE_short_fetch_and_add_full */

/* Acquire/release variants from the unordered one plus AO_nop_full() */
/* (the nop is placed after the operation for acquire, before it for  */
/* release).                                                          */
#if defined(AO_HAVE_short_fetch_and_add) && defined(AO_HAVE_nop_full) \
    && !defined(AO_HAVE_short_fetch_and_add_acquire)
AO_INLINE unsigned/**/short
AO_short_fetch_and_add_acquire(volatile unsigned/**/short *addr,
                               unsigned/**/short incr)
{
  unsigned/**/short result = AO_short_fetch_and_add(addr, incr);
  AO_nop_full();
  return result;
}
# define AO_HAVE_short_fetch_and_add_acquire
#endif
#if defined(AO_HAVE_short_fetch_and_add) && defined(AO_HAVE_nop_full) \
    && !defined(AO_HAVE_short_fetch_and_add_release)
# define AO_short_fetch_and_add_release(addr, incr) \
                (AO_nop_full(), AO_short_fetch_and_add(addr, incr))
# define AO_HAVE_short_fetch_and_add_release
#endif

#if !defined(AO_HAVE_short_fetch_and_add) \
    && defined(AO_HAVE_short_fetch_and_add_release)
# define AO_short_fetch_and_add(addr, val) \
                                AO_short_fetch_and_add_release(addr, val)
# define AO_HAVE_short_fetch_and_add
#endif
#if !defined(AO_HAVE_short_fetch_and_add) \
    && defined(AO_HAVE_short_fetch_and_add_acquire)
# define AO_short_fetch_and_add(addr, val) \
                                AO_short_fetch_and_add_acquire(addr, val)
# define AO_HAVE_short_fetch_and_add
#endif
#if !defined(AO_HAVE_short_fetch_and_add) \
    && defined(AO_HAVE_short_fetch_and_add_write)
# define AO_short_fetch_and_add(addr, val) \
                                AO_short_fetch_and_add_write(addr, val)
# define AO_HAVE_short_fetch_and_add
#endif
#if !defined(AO_HAVE_short_fetch_and_add) \
    && defined(AO_HAVE_short_fetch_and_add_read)
# define AO_short_fetch_and_add(addr, val) \
                                AO_short_fetch_and_add_read(addr, val)
# define AO_HAVE_short_fetch_and_add
#endif

#if defined(AO_HAVE_short_fetch_and_add_acquire) \
    && defined(AO_HAVE_nop_full) && !defined(AO_HAVE_short_fetch_and_add_full)
# define AO_short_fetch_and_add_full(addr, val) \
                (AO_nop_full(), AO_short_fetch_and_add_acquire(addr, val))
# define AO_HAVE_short_fetch_and_add_full
#endif

#if !defined(AO_HAVE_short_fetch_and_add_release_write) \
    && defined(AO_HAVE_short_fetch_and_add_write)
# define AO_short_fetch_and_add_release_write(addr, val) \
                                AO_short_fetch_and_add_write(addr, val)
# define AO_HAVE_short_fetch_and_add_release_write
#endif
#if !defined(AO_HAVE_short_fetch_and_add_release_write) \
    && defined(AO_HAVE_short_fetch_and_add_release)
# define AO_short_fetch_and_add_release_write(addr, val) \
                                AO_short_fetch_and_add_release(addr, val)
# define AO_HAVE_short_fetch_and_add_release_write
#endif

#if !defined(AO_HAVE_short_fetch_and_add_acquire_read) \
    && defined(AO_HAVE_short_fetch_and_add_read)
# define AO_short_fetch_and_add_acquire_read(addr, val) \
                                AO_short_fetch_and_add_read(addr, val)
# define AO_HAVE_short_fetch_and_add_acquire_read
#endif
#if !defined(AO_HAVE_short_fetch_and_add_acquire_read) \
    && defined(AO_HAVE_short_fetch_and_add_acquire)
# define AO_short_fetch_and_add_acquire_read(addr, val) \
                                AO_short_fetch_and_add_acquire(addr, val)
# define AO_HAVE_short_fetch_and_add_acquire_read
#endif

#ifdef AO_NO_DD_ORDERING
# if defined(AO_HAVE_short_fetch_and_add_acquire_read)
#   define AO_short_fetch_and_add_dd_acquire_read(addr, val) \
                                AO_short_fetch_and_add_acquire_read(addr, val)
#   define AO_HAVE_short_fetch_and_add_dd_acquire_read
# endif
#else
# if defined(AO_HAVE_short_fetch_and_add)
#   define AO_short_fetch_and_add_dd_acquire_read(addr, val) \
                                AO_short_fetch_and_add(addr, val)
#   define AO_HAVE_short_fetch_and_add_dd_acquire_read
# endif
#endif /* !AO_NO_DD_ORDERING */

/* short_fetch_and_add1 */
/* Each add1 variant is fetch_and_add of the constant 1 with the same */
/* ordering suffix.                                                   */
#if defined(AO_HAVE_short_fetch_and_add_full) \
    && !defined(AO_HAVE_short_fetch_and_add1_full)
# define AO_short_fetch_and_add1_full(addr) \
                                AO_short_fetch_and_add_full(addr, 1)
# define AO_HAVE_short_fetch_and_add1_full
#endif
#if defined(AO_HAVE_short_fetch_and_add_release) \
    && !defined(AO_HAVE_short_fetch_and_add1_release)
# define AO_short_fetch_and_add1_release(addr) \
                                AO_short_fetch_and_add_release(addr, 1)
# define AO_HAVE_short_fetch_and_add1_release
#endif
#if defined(AO_HAVE_short_fetch_and_add_acquire) \
    && !defined(AO_HAVE_short_fetch_and_add1_acquire)
# define AO_short_fetch_and_add1_acquire(addr) \
                                AO_short_fetch_and_add_acquire(addr, 1)
# define AO_HAVE_short_fetch_and_add1_acquire
#endif
#if defined(AO_HAVE_short_fetch_and_add_write) \
    && !defined(AO_HAVE_short_fetch_and_add1_write)
# define AO_short_fetch_and_add1_write(addr) \
                                AO_short_fetch_and_add_write(addr, 1)
# define AO_HAVE_short_fetch_and_add1_write
#endif
#if defined(AO_HAVE_short_fetch_and_add_read) \
    && !defined(AO_HAVE_short_fetch_and_add1_read)
# define AO_short_fetch_and_add1_read(addr) \
                                AO_short_fetch_and_add_read(addr, 1)
# define AO_HAVE_short_fetch_and_add1_read
#endif
#if defined(AO_HAVE_short_fetch_and_add_release_write) \
    && !defined(AO_HAVE_short_fetch_and_add1_release_write)
# define AO_short_fetch_and_add1_release_write(addr) \
                                AO_short_fetch_and_add_release_write(addr, 1)
# define AO_HAVE_short_fetch_and_add1_release_write
#endif
#if defined(AO_HAVE_short_fetch_and_add_acquire_read) \
    && !defined(AO_HAVE_short_fetch_and_add1_acquire_read)
# define AO_short_fetch_and_add1_acquire_read(addr) \
                                AO_short_fetch_and_add_acquire_read(addr, 1)
# define AO_HAVE_short_fetch_and_add1_acquire_read
#endif
#if defined(AO_HAVE_short_fetch_and_add) \
    && !defined(AO_HAVE_short_fetch_and_add1)
# define AO_short_fetch_and_add1(addr) AO_short_fetch_and_add(addr, 1)
# define AO_HAVE_short_fetch_and_add1
#endif

#if defined(AO_HAVE_short_fetch_and_add1_full)
# if !defined(AO_HAVE_short_fetch_and_add1_release)
#   define AO_short_fetch_and_add1_release(addr) \
                                AO_short_fetch_and_add1_full(addr)
#   define AO_HAVE_short_fetch_and_add1_release
# endif
# if !defined(AO_HAVE_short_fetch_and_add1_acquire)
#   define AO_short_fetch_and_add1_acquire(addr) \
                                AO_short_fetch_and_add1_full(addr)
#   define AO_HAVE_short_fetch_and_add1_acquire
# endif
# if !defined(AO_HAVE_short_fetch_and_add1_write)
#   define AO_short_fetch_and_add1_write(addr) \
                                AO_short_fetch_and_add1_full(addr)
#   define AO_HAVE_short_fetch_and_add1_write
# endif
# if !defined(AO_HAVE_short_fetch_and_add1_read)
#   define AO_short_fetch_and_add1_read(addr) \
                                AO_short_fetch_and_add1_full(addr)
#   define AO_HAVE_short_fetch_and_add1_read
# endif
#endif /* AO_HAVE_short_fetch_and_add1_full */

#if !defined(AO_HAVE_short_fetch_and_add1) \
    && defined(AO_HAVE_short_fetch_and_add1_release)
# define AO_short_fetch_and_add1(addr) AO_short_fetch_and_add1_release(addr)
# define AO_HAVE_short_fetch_and_add1
#endif
#if !defined(AO_HAVE_short_fetch_and_add1) \
    && defined(AO_HAVE_short_fetch_and_add1_acquire)
# define AO_short_fetch_and_add1(addr) AO_short_fetch_and_add1_acquire(addr)
# define AO_HAVE_short_fetch_and_add1
#endif
#if !defined(AO_HAVE_short_fetch_and_add1) \
    && defined(AO_HAVE_short_fetch_and_add1_write)
# define AO_short_fetch_and_add1(addr) AO_short_fetch_and_add1_write(addr)
# define AO_HAVE_short_fetch_and_add1
#endif
#if !defined(AO_HAVE_short_fetch_and_add1) \
    && defined(AO_HAVE_short_fetch_and_add1_read)
# define AO_short_fetch_and_add1(addr) AO_short_fetch_and_add1_read(addr)
# define AO_HAVE_short_fetch_and_add1
#endif

#if defined(AO_HAVE_short_fetch_and_add1_acquire) \
    && defined(AO_HAVE_nop_full) \
    && !defined(AO_HAVE_short_fetch_and_add1_full)
# define AO_short_fetch_and_add1_full(addr) \
                        (AO_nop_full(), AO_short_fetch_and_add1_acquire(addr))
# define AO_HAVE_short_fetch_and_add1_full
#endif

#if !defined(AO_HAVE_short_fetch_and_add1_release_write) \
    && defined(AO_HAVE_short_fetch_and_add1_write)
# define AO_short_fetch_and_add1_release_write(addr) \
                                AO_short_fetch_and_add1_write(addr)
# define AO_HAVE_short_fetch_and_add1_release_write
#endif
#if !defined(AO_HAVE_short_fetch_and_add1_release_write) \
    && defined(AO_HAVE_short_fetch_and_add1_release)
# define AO_short_fetch_and_add1_release_write(addr) \
                                AO_short_fetch_and_add1_release(addr)
# define AO_HAVE_short_fetch_and_add1_release_write
#endif
#if !defined(AO_HAVE_short_fetch_and_add1_acquire_read) \
    && defined(AO_HAVE_short_fetch_and_add1_read)
# define AO_short_fetch_and_add1_acquire_read(addr) \
                                AO_short_fetch_and_add1_read(addr)
# define AO_HAVE_short_fetch_and_add1_acquire_read
#endif
#if !defined(AO_HAVE_short_fetch_and_add1_acquire_read) \
    && defined(AO_HAVE_short_fetch_and_add1_acquire)
# define AO_short_fetch_and_add1_acquire_read(addr) \
                                AO_short_fetch_and_add1_acquire(addr)
# define AO_HAVE_short_fetch_and_add1_acquire_read
#endif

#ifdef AO_NO_DD_ORDERING
# if defined(AO_HAVE_short_fetch_and_add1_acquire_read)
#   define AO_short_fetch_and_add1_dd_acquire_read(addr) \
                                AO_short_fetch_and_add1_acquire_read(addr)
#   define AO_HAVE_short_fetch_and_add1_dd_acquire_read
# endif
#else
# if defined(AO_HAVE_short_fetch_and_add1)
#   define AO_short_fetch_and_add1_dd_acquire_read(addr) \
                                AO_short_fetch_and_add1(addr)
#   define AO_HAVE_short_fetch_and_add1_dd_acquire_read
# endif
#endif /* !AO_NO_DD_ORDERING */

/* short_fetch_and_sub1 */
/* Each sub1 variant is fetch_and_add of (unsigned short)(-1), i.e.   */
/* the value wraps modulo the type width.                             */
#if defined(AO_HAVE_short_fetch_and_add_full) \
    && !defined(AO_HAVE_short_fetch_and_sub1_full)
# define AO_short_fetch_and_sub1_full(addr) \
                AO_short_fetch_and_add_full(addr, (unsigned/**/short)(-1))
# define AO_HAVE_short_fetch_and_sub1_full
#endif
#if defined(AO_HAVE_short_fetch_and_add_release) \
    && !defined(AO_HAVE_short_fetch_and_sub1_release)
# define AO_short_fetch_and_sub1_release(addr) \
                AO_short_fetch_and_add_release(addr, (unsigned/**/short)(-1))
# define AO_HAVE_short_fetch_and_sub1_release
#endif
#if defined(AO_HAVE_short_fetch_and_add_acquire) \
    && !defined(AO_HAVE_short_fetch_and_sub1_acquire)
# define AO_short_fetch_and_sub1_acquire(addr) \
                AO_short_fetch_and_add_acquire(addr, (unsigned/**/short)(-1))
# define AO_HAVE_short_fetch_and_sub1_acquire
#endif
#if defined(AO_HAVE_short_fetch_and_add_write) \
    && !defined(AO_HAVE_short_fetch_and_sub1_write)
# define AO_short_fetch_and_sub1_write(addr) \
                AO_short_fetch_and_add_write(addr, (unsigned/**/short)(-1))
# define AO_HAVE_short_fetch_and_sub1_write
#endif
#if defined(AO_HAVE_short_fetch_and_add_read) \
    && !defined(AO_HAVE_short_fetch_and_sub1_read)
# define AO_short_fetch_and_sub1_read(addr) \
                AO_short_fetch_and_add_read(addr, (unsigned/**/short)(-1))
# define AO_HAVE_short_fetch_and_sub1_read
#endif
#if defined(AO_HAVE_short_fetch_and_add_release_write) \
    && !defined(AO_HAVE_short_fetch_and_sub1_release_write)
# define AO_short_fetch_and_sub1_release_write(addr) \
        AO_short_fetch_and_add_release_write(addr, (unsigned/**/short)(-1))
# define AO_HAVE_short_fetch_and_sub1_release_write
#endif
#if defined(AO_HAVE_short_fetch_and_add_acquire_read) \
    && !defined(AO_HAVE_short_fetch_and_sub1_acquire_read)
# define AO_short_fetch_and_sub1_acquire_read(addr) \
        AO_short_fetch_and_add_acquire_read(addr, (unsigned/**/short)(-1))
# define AO_HAVE_short_fetch_and_sub1_acquire_read
#endif
#if defined(AO_HAVE_short_fetch_and_add) \
    && !defined(AO_HAVE_short_fetch_and_sub1)
# define AO_short_fetch_and_sub1(addr) \
                AO_short_fetch_and_add(addr, (unsigned/**/short)(-1))
# define AO_HAVE_short_fetch_and_sub1
#endif

/* A full-barrier sub1 also satisfies every weaker ordering.          */
#if defined(AO_HAVE_short_fetch_and_sub1_full)
# if !defined(AO_HAVE_short_fetch_and_sub1_release)
#   define AO_short_fetch_and_sub1_release(addr) \
                                AO_short_fetch_and_sub1_full(addr)
#   define AO_HAVE_short_fetch_and_sub1_release
# endif
# if !defined(AO_HAVE_short_fetch_and_sub1_acquire)
#   define AO_short_fetch_and_sub1_acquire(addr) \
                                AO_short_fetch_and_sub1_full(addr)
#   define AO_HAVE_short_fetch_and_sub1_acquire
# endif
# if !defined(AO_HAVE_short_fetch_and_sub1_write)
#   define AO_short_fetch_and_sub1_write(addr) \
                                AO_short_fetch_and_sub1_full(addr)
#   define AO_HAVE_short_fetch_and_sub1_write
# endif
# if !defined(AO_HAVE_short_fetch_and_sub1_read)
#   define AO_short_fetch_and_sub1_read(addr) \
                                AO_short_fetch_and_sub1_full(addr)
#   define AO_HAVE_short_fetch_and_sub1_read
# endif
#endif /* AO_HAVE_short_fetch_and_sub1_full */

#if !defined(AO_HAVE_short_fetch_and_sub1) \
    && defined(AO_HAVE_short_fetch_and_sub1_release)
# define AO_short_fetch_and_sub1(addr) AO_short_fetch_and_sub1_release(addr)
# define AO_HAVE_short_fetch_and_sub1
#endif
#if !defined(AO_HAVE_short_fetch_and_sub1) \
    && defined(AO_HAVE_short_fetch_and_sub1_acquire)
# define AO_short_fetch_and_sub1(addr) AO_short_fetch_and_sub1_acquire(addr)
# define AO_HAVE_short_fetch_and_sub1
#endif
#if !defined(AO_HAVE_short_fetch_and_sub1) \
    && defined(AO_HAVE_short_fetch_and_sub1_write)
# define AO_short_fetch_and_sub1(addr) AO_short_fetch_and_sub1_write(addr)
# define AO_HAVE_short_fetch_and_sub1
#endif
#if !defined(AO_HAVE_short_fetch_and_sub1) \
    && defined(AO_HAVE_short_fetch_and_sub1_read)
# define AO_short_fetch_and_sub1(addr) AO_short_fetch_and_sub1_read(addr)
# define AO_HAVE_short_fetch_and_sub1
#endif

#if defined(AO_HAVE_short_fetch_and_sub1_acquire) \
    && defined(AO_HAVE_nop_full) \
    && !defined(AO_HAVE_short_fetch_and_sub1_full)
# define AO_short_fetch_and_sub1_full(addr) \
                        (AO_nop_full(), AO_short_fetch_and_sub1_acquire(addr))
# define AO_HAVE_short_fetch_and_sub1_full
#endif

#if !defined(AO_HAVE_short_fetch_and_sub1_release_write) \
    && defined(AO_HAVE_short_fetch_and_sub1_write)
# define AO_short_fetch_and_sub1_release_write(addr) \
                                AO_short_fetch_and_sub1_write(addr)
# define AO_HAVE_short_fetch_and_sub1_release_write
#endif
#if !defined(AO_HAVE_short_fetch_and_sub1_release_write) \
    && defined(AO_HAVE_short_fetch_and_sub1_release)
# define AO_short_fetch_and_sub1_release_write(addr) \
                                AO_short_fetch_and_sub1_release(addr)
# define AO_HAVE_short_fetch_and_sub1_release_write
#endif
#if !defined(AO_HAVE_short_fetch_and_sub1_acquire_read) \
    && defined(AO_HAVE_short_fetch_and_sub1_read)
# define AO_short_fetch_and_sub1_acquire_read(addr) \
                                AO_short_fetch_and_sub1_read(addr)
# define AO_HAVE_short_fetch_and_sub1_acquire_read
#endif
#if !defined(AO_HAVE_short_fetch_and_sub1_acquire_read) \
    && defined(AO_HAVE_short_fetch_and_sub1_acquire)
# define AO_short_fetch_and_sub1_acquire_read(addr) \
                                AO_short_fetch_and_sub1_acquire(addr)
# define AO_HAVE_short_fetch_and_sub1_acquire_read
#endif

#ifdef AO_NO_DD_ORDERING
# if defined(AO_HAVE_short_fetch_and_sub1_acquire_read)
#   define AO_short_fetch_and_sub1_dd_acquire_read(addr) \
                                AO_short_fetch_and_sub1_acquire_read(addr)
#   define AO_HAVE_short_fetch_and_sub1_dd_acquire_read
# endif
#else
# if defined(AO_HAVE_short_fetch_and_sub1)
#   define AO_short_fetch_and_sub1_dd_acquire_read(addr) \
                                AO_short_fetch_and_sub1(addr)
#   define AO_HAVE_short_fetch_and_sub1_dd_acquire_read
# endif
#endif /* !AO_NO_DD_ORDERING */

/* short_and */
/* Emulate atomic bitwise-and via a compare-and-swap retry loop.      */
#if defined(AO_HAVE_short_compare_and_swap_full) \
    && !defined(AO_HAVE_short_and_full)
AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE void
AO_short_and_full(volatile unsigned/**/short *addr, unsigned/**/short value)
{
  unsigned/**/short old;

  do
    {
      old = *(unsigned/**/short *)addr;
    }
  while (AO_EXPECT_FALSE(!AO_short_compare_and_swap_full(addr, old,
                                                         old & value)));
}
# define AO_HAVE_short_and_full
#endif

#if defined(AO_HAVE_short_and_full)
# if !defined(AO_HAVE_short_and_release)
#   define AO_short_and_release(addr, val) AO_short_and_full(addr, val)
#   define AO_HAVE_short_and_release
# endif
# if !defined(AO_HAVE_short_and_acquire)
#   define AO_short_and_acquire(addr, val) AO_short_and_full(addr, val)
#   define AO_HAVE_short_and_acquire
# endif
# if !defined(AO_HAVE_short_and_write)
#   define AO_short_and_write(addr, val) AO_short_and_full(addr, val)
#   define AO_HAVE_short_and_write
# endif
# if !defined(AO_HAVE_short_and_read)
#   define AO_short_and_read(addr, val) AO_short_and_full(addr, val)
#   define AO_HAVE_short_and_read
# endif
#endif /* AO_HAVE_short_and_full */

#if !defined(AO_HAVE_short_and) && defined(AO_HAVE_short_and_release)
# define AO_short_and(addr, val) AO_short_and_release(addr, val)
# define AO_HAVE_short_and
#endif
#if !defined(AO_HAVE_short_and) && defined(AO_HAVE_short_and_acquire)
# define AO_short_and(addr, val) AO_short_and_acquire(addr, val)
# define AO_HAVE_short_and
#endif
#if !defined(AO_HAVE_short_and) && defined(AO_HAVE_short_and_write)
# define AO_short_and(addr, val) AO_short_and_write(addr, val)
# define AO_HAVE_short_and
#endif
#if !defined(AO_HAVE_short_and) && defined(AO_HAVE_short_and_read)
# define AO_short_and(addr, val) AO_short_and_read(addr, val)
# define AO_HAVE_short_and
#endif

#if defined(AO_HAVE_short_and_acquire) && defined(AO_HAVE_nop_full) \
    && !defined(AO_HAVE_short_and_full)
# define AO_short_and_full(addr, val) \
                        (AO_nop_full(), AO_short_and_acquire(addr, val))
# define AO_HAVE_short_and_full
#endif

#if !defined(AO_HAVE_short_and_release_write) \
    && defined(AO_HAVE_short_and_write)
# define AO_short_and_release_write(addr, val) AO_short_and_write(addr, val)
# define AO_HAVE_short_and_release_write
#endif
#if !defined(AO_HAVE_short_and_release_write) \
    && defined(AO_HAVE_short_and_release)
# define AO_short_and_release_write(addr, val) AO_short_and_release(addr, val)
# define AO_HAVE_short_and_release_write
#endif
#if !defined(AO_HAVE_short_and_acquire_read) \
    && defined(AO_HAVE_short_and_read)
# define AO_short_and_acquire_read(addr, val) AO_short_and_read(addr, val)
# define AO_HAVE_short_and_acquire_read
#endif
#if !defined(AO_HAVE_short_and_acquire_read) \
    && defined(AO_HAVE_short_and_acquire)
# define AO_short_and_acquire_read(addr, val) AO_short_and_acquire(addr, val)
# define AO_HAVE_short_and_acquire_read
#endif

/* short_or */
/* Emulate atomic bitwise-or via a compare-and-swap retry loop.       */
#if defined(AO_HAVE_short_compare_and_swap_full) \
    && !defined(AO_HAVE_short_or_full)
AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE void
AO_short_or_full(volatile unsigned/**/short *addr, unsigned/**/short value)
{
  unsigned/**/short old;

  do
    {
      old = *(unsigned/**/short *)addr;
    }
  while (AO_EXPECT_FALSE(!AO_short_compare_and_swap_full(addr, old,
                                                         old | value)));
}
# define AO_HAVE_short_or_full
#endif

#if defined(AO_HAVE_short_or_full)
# if !defined(AO_HAVE_short_or_release)
#   define AO_short_or_release(addr, val) AO_short_or_full(addr, val)
#   define AO_HAVE_short_or_release
# endif
# if !defined(AO_HAVE_short_or_acquire)
#   define AO_short_or_acquire(addr, val) AO_short_or_full(addr, val)
#   define AO_HAVE_short_or_acquire
# endif
# if !defined(AO_HAVE_short_or_write)
#   define AO_short_or_write(addr, val) AO_short_or_full(addr, val)
#   define AO_HAVE_short_or_write
# endif
# if !defined(AO_HAVE_short_or_read)
#   define AO_short_or_read(addr, val) AO_short_or_full(addr, val)
#   define AO_HAVE_short_or_read
# endif
#endif /* AO_HAVE_short_or_full */

#if !defined(AO_HAVE_short_or) && defined(AO_HAVE_short_or_release)
# define AO_short_or(addr, val) AO_short_or_release(addr, val)
# define AO_HAVE_short_or
#endif
#if !defined(AO_HAVE_short_or) && defined(AO_HAVE_short_or_acquire)
# define AO_short_or(addr, val) AO_short_or_acquire(addr, val)
# define AO_HAVE_short_or
#endif
#if !defined(AO_HAVE_short_or) && defined(AO_HAVE_short_or_write)
# define AO_short_or(addr, val) AO_short_or_write(addr, val)
# define AO_HAVE_short_or
#endif
#if !defined(AO_HAVE_short_or) && defined(AO_HAVE_short_or_read)
# define AO_short_or(addr, val) AO_short_or_read(addr, val)
# define AO_HAVE_short_or
#endif

#if defined(AO_HAVE_short_or_acquire) && defined(AO_HAVE_nop_full) \
    && !defined(AO_HAVE_short_or_full)
# define AO_short_or_full(addr, val) \
                        (AO_nop_full(), AO_short_or_acquire(addr, val))
# define AO_HAVE_short_or_full
#endif

#if !defined(AO_HAVE_short_or_release_write) \
    && defined(AO_HAVE_short_or_write)
# define AO_short_or_release_write(addr, val) AO_short_or_write(addr, val)
# define AO_HAVE_short_or_release_write
#endif
#if !defined(AO_HAVE_short_or_release_write) \
    && defined(AO_HAVE_short_or_release)
# define AO_short_or_release_write(addr, val) AO_short_or_release(addr, val)
# define AO_HAVE_short_or_release_write
#endif
#if !defined(AO_HAVE_short_or_acquire_read) && defined(AO_HAVE_short_or_read)
# define AO_short_or_acquire_read(addr, val) AO_short_or_read(addr, val)
# define AO_HAVE_short_or_acquire_read
#endif
#if !defined(AO_HAVE_short_or_acquire_read) \
    && defined(AO_HAVE_short_or_acquire)
# define AO_short_or_acquire_read(addr, val) AO_short_or_acquire(addr, val)
# define AO_HAVE_short_or_acquire_read
#endif

/* short_xor */
/* Emulate atomic bitwise-xor via a compare-and-swap retry loop.      */
#if defined(AO_HAVE_short_compare_and_swap_full) \
    && !defined(AO_HAVE_short_xor_full)
AO_ATTR_NO_SANITIZE_THREAD
AO_INLINE void
AO_short_xor_full(volatile unsigned/**/short *addr, unsigned/**/short value)
{
  unsigned/**/short old;

  do
    {
      old = *(unsigned/**/short *)addr;
    }
  while (AO_EXPECT_FALSE(!AO_short_compare_and_swap_full(addr, old,
                                                         old ^ value)));
}
# define AO_HAVE_short_xor_full
#endif

#if defined(AO_HAVE_short_xor_full)
# if !defined(AO_HAVE_short_xor_release)
#   define AO_short_xor_release(addr, val) AO_short_xor_full(addr, val)
#   define AO_HAVE_short_xor_release
# endif
# if !defined(AO_HAVE_short_xor_acquire)
#   define AO_short_xor_acquire(addr, val) AO_short_xor_full(addr, val)
#   define AO_HAVE_short_xor_acquire
# endif
# if !defined(AO_HAVE_short_xor_write)
#   define AO_short_xor_write(addr, val) AO_short_xor_full(addr, val)
#   define AO_HAVE_short_xor_write
# endif
# if !defined(AO_HAVE_short_xor_read)
#   define AO_short_xor_read(addr, val) AO_short_xor_full(addr, val)
#   define AO_HAVE_short_xor_read
# endif
#endif /* AO_HAVE_short_xor_full */

#if !defined(AO_HAVE_short_xor) && defined(AO_HAVE_short_xor_release)
# define AO_short_xor(addr, val) AO_short_xor_release(addr, val)
# define AO_HAVE_short_xor
#endif
#if !defined(AO_HAVE_short_xor) && defined(AO_HAVE_short_xor_acquire)
# define AO_short_xor(addr, val) AO_short_xor_acquire(addr, val)
# define AO_HAVE_short_xor
#endif
#if !defined(AO_HAVE_short_xor) && defined(AO_HAVE_short_xor_write)
# define AO_short_xor(addr, val) AO_short_xor_write(addr, val)
# define AO_HAVE_short_xor
#endif
#if !defined(AO_HAVE_short_xor) && defined(AO_HAVE_short_xor_read)
# define AO_short_xor(addr, val) AO_short_xor_read(addr, val)
# define AO_HAVE_short_xor
#endif

#if defined(AO_HAVE_short_xor_acquire) && defined(AO_HAVE_nop_full) \
    && !defined(AO_HAVE_short_xor_full)
# define AO_short_xor_full(addr, val) \
                        (AO_nop_full(), AO_short_xor_acquire(addr, val))
# define AO_HAVE_short_xor_full
#endif

#if !defined(AO_HAVE_short_xor_release_write) \
    && defined(AO_HAVE_short_xor_write)
# define AO_short_xor_release_write(addr, val) AO_short_xor_write(addr, val)
# define AO_HAVE_short_xor_release_write
#endif
#if !defined(AO_HAVE_short_xor_release_write) \
    && defined(AO_HAVE_short_xor_release)
# define AO_short_xor_release_write(addr, val) AO_short_xor_release(addr, val)
# define AO_HAVE_short_xor_release_write
#endif
#if !defined(AO_HAVE_short_xor_acquire_read) \
    && defined(AO_HAVE_short_xor_read)
# define AO_short_xor_acquire_read(addr, val) AO_short_xor_read(addr, val)
# define AO_HAVE_short_xor_acquire_read
#endif
#if !defined(AO_HAVE_short_xor_acquire_read) \
    && defined(AO_HAVE_short_xor_acquire)
# define AO_short_xor_acquire_read(addr, val) AO_short_xor_acquire(addr, val)
# define AO_HAVE_short_xor_acquire_read
#endif

/* short_and/or/xor_dd_acquire_read are meaningless. */

/*
 * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ /* int_compare_and_swap (based on fetch_compare_and_swap) */ #if defined(AO_HAVE_int_fetch_compare_and_swap_full) \ && !defined(AO_HAVE_int_compare_and_swap_full) AO_INLINE int AO_int_compare_and_swap_full(volatile unsigned *addr, unsigned old_val, unsigned new_val) { return AO_int_fetch_compare_and_swap_full(addr, old_val, new_val) == old_val; } # define AO_HAVE_int_compare_and_swap_full #endif #if defined(AO_HAVE_int_fetch_compare_and_swap_acquire) \ && !defined(AO_HAVE_int_compare_and_swap_acquire) AO_INLINE int AO_int_compare_and_swap_acquire(volatile unsigned *addr, unsigned old_val, unsigned new_val) { return AO_int_fetch_compare_and_swap_acquire(addr, old_val, new_val) == old_val; } # define AO_HAVE_int_compare_and_swap_acquire #endif #if defined(AO_HAVE_int_fetch_compare_and_swap_release) \ && !defined(AO_HAVE_int_compare_and_swap_release) AO_INLINE int AO_int_compare_and_swap_release(volatile unsigned *addr, unsigned old_val, unsigned new_val) { return AO_int_fetch_compare_and_swap_release(addr, old_val, new_val) == old_val; } # define AO_HAVE_int_compare_and_swap_release #endif #if defined(AO_HAVE_int_fetch_compare_and_swap_write) \ && !defined(AO_HAVE_int_compare_and_swap_write) AO_INLINE int AO_int_compare_and_swap_write(volatile unsigned *addr, unsigned old_val, unsigned new_val) { return AO_int_fetch_compare_and_swap_write(addr, old_val, new_val) == old_val; } # define AO_HAVE_int_compare_and_swap_write #endif #if defined(AO_HAVE_int_fetch_compare_and_swap_read) \ && !defined(AO_HAVE_int_compare_and_swap_read) AO_INLINE int AO_int_compare_and_swap_read(volatile unsigned *addr, unsigned old_val, unsigned new_val) { return AO_int_fetch_compare_and_swap_read(addr, old_val, new_val) == old_val; } # define AO_HAVE_int_compare_and_swap_read #endif #if defined(AO_HAVE_int_fetch_compare_and_swap) \ && !defined(AO_HAVE_int_compare_and_swap) AO_INLINE int AO_int_compare_and_swap(volatile unsigned *addr, unsigned old_val, unsigned new_val) { return 
AO_int_fetch_compare_and_swap(addr, old_val, new_val) == old_val; } # define AO_HAVE_int_compare_and_swap #endif #if defined(AO_HAVE_int_fetch_compare_and_swap_release_write) \ && !defined(AO_HAVE_int_compare_and_swap_release_write) AO_INLINE int AO_int_compare_and_swap_release_write(volatile unsigned *addr, unsigned old_val, unsigned new_val) { return AO_int_fetch_compare_and_swap_release_write(addr, old_val, new_val) == old_val; } # define AO_HAVE_int_compare_and_swap_release_write #endif #if defined(AO_HAVE_int_fetch_compare_and_swap_acquire_read) \ && !defined(AO_HAVE_int_compare_and_swap_acquire_read) AO_INLINE int AO_int_compare_and_swap_acquire_read(volatile unsigned *addr, unsigned old_val, unsigned new_val) { return AO_int_fetch_compare_and_swap_acquire_read(addr, old_val, new_val) == old_val; } # define AO_HAVE_int_compare_and_swap_acquire_read #endif #if defined(AO_HAVE_int_fetch_compare_and_swap_dd_acquire_read) \ && !defined(AO_HAVE_int_compare_and_swap_dd_acquire_read) AO_INLINE int AO_int_compare_and_swap_dd_acquire_read(volatile unsigned *addr, unsigned old_val, unsigned new_val) { return AO_int_fetch_compare_and_swap_dd_acquire_read(addr, old_val, new_val) == old_val; } # define AO_HAVE_int_compare_and_swap_dd_acquire_read #endif /* int_fetch_and_add */ /* We first try to implement fetch_and_add variants in terms of the */ /* corresponding compare_and_swap variants to minimize adding barriers. 
*/ #if defined(AO_HAVE_int_compare_and_swap_full) \ && !defined(AO_HAVE_int_fetch_and_add_full) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE unsigned AO_int_fetch_and_add_full(volatile unsigned *addr, unsigned incr) { unsigned old; do { old = *(unsigned *)addr; } while (AO_EXPECT_FALSE(!AO_int_compare_and_swap_full(addr, old, old + incr))); return old; } # define AO_HAVE_int_fetch_and_add_full #endif #if defined(AO_HAVE_int_compare_and_swap_acquire) \ && !defined(AO_HAVE_int_fetch_and_add_acquire) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE unsigned AO_int_fetch_and_add_acquire(volatile unsigned *addr, unsigned incr) { unsigned old; do { old = *(unsigned *)addr; } while (AO_EXPECT_FALSE(!AO_int_compare_and_swap_acquire(addr, old, old + incr))); return old; } # define AO_HAVE_int_fetch_and_add_acquire #endif #if defined(AO_HAVE_int_compare_and_swap_release) \ && !defined(AO_HAVE_int_fetch_and_add_release) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE unsigned AO_int_fetch_and_add_release(volatile unsigned *addr, unsigned incr) { unsigned old; do { old = *(unsigned *)addr; } while (AO_EXPECT_FALSE(!AO_int_compare_and_swap_release(addr, old, old + incr))); return old; } # define AO_HAVE_int_fetch_and_add_release #endif #if defined(AO_HAVE_int_compare_and_swap) \ && !defined(AO_HAVE_int_fetch_and_add) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE unsigned AO_int_fetch_and_add(volatile unsigned *addr, unsigned incr) { unsigned old; do { old = *(unsigned *)addr; } while (AO_EXPECT_FALSE(!AO_int_compare_and_swap(addr, old, old + incr))); return old; } # define AO_HAVE_int_fetch_and_add #endif #if defined(AO_HAVE_int_fetch_and_add_full) # if !defined(AO_HAVE_int_fetch_and_add_release) # define AO_int_fetch_and_add_release(addr, val) \ AO_int_fetch_and_add_full(addr, val) # define AO_HAVE_int_fetch_and_add_release # endif # if !defined(AO_HAVE_int_fetch_and_add_acquire) # define AO_int_fetch_and_add_acquire(addr, val) \ AO_int_fetch_and_add_full(addr, val) # define AO_HAVE_int_fetch_and_add_acquire # endif 
# if !defined(AO_HAVE_int_fetch_and_add_write) # define AO_int_fetch_and_add_write(addr, val) \ AO_int_fetch_and_add_full(addr, val) # define AO_HAVE_int_fetch_and_add_write # endif # if !defined(AO_HAVE_int_fetch_and_add_read) # define AO_int_fetch_and_add_read(addr, val) \ AO_int_fetch_and_add_full(addr, val) # define AO_HAVE_int_fetch_and_add_read # endif #endif /* AO_HAVE_int_fetch_and_add_full */ #if defined(AO_HAVE_int_fetch_and_add) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_int_fetch_and_add_acquire) AO_INLINE unsigned AO_int_fetch_and_add_acquire(volatile unsigned *addr, unsigned incr) { unsigned result = AO_int_fetch_and_add(addr, incr); AO_nop_full(); return result; } # define AO_HAVE_int_fetch_and_add_acquire #endif #if defined(AO_HAVE_int_fetch_and_add) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_int_fetch_and_add_release) # define AO_int_fetch_and_add_release(addr, incr) \ (AO_nop_full(), AO_int_fetch_and_add(addr, incr)) # define AO_HAVE_int_fetch_and_add_release #endif #if !defined(AO_HAVE_int_fetch_and_add) \ && defined(AO_HAVE_int_fetch_and_add_release) # define AO_int_fetch_and_add(addr, val) \ AO_int_fetch_and_add_release(addr, val) # define AO_HAVE_int_fetch_and_add #endif #if !defined(AO_HAVE_int_fetch_and_add) \ && defined(AO_HAVE_int_fetch_and_add_acquire) # define AO_int_fetch_and_add(addr, val) \ AO_int_fetch_and_add_acquire(addr, val) # define AO_HAVE_int_fetch_and_add #endif #if !defined(AO_HAVE_int_fetch_and_add) \ && defined(AO_HAVE_int_fetch_and_add_write) # define AO_int_fetch_and_add(addr, val) \ AO_int_fetch_and_add_write(addr, val) # define AO_HAVE_int_fetch_and_add #endif #if !defined(AO_HAVE_int_fetch_and_add) \ && defined(AO_HAVE_int_fetch_and_add_read) # define AO_int_fetch_and_add(addr, val) \ AO_int_fetch_and_add_read(addr, val) # define AO_HAVE_int_fetch_and_add #endif #if defined(AO_HAVE_int_fetch_and_add_acquire) \ && defined(AO_HAVE_nop_full) && !defined(AO_HAVE_int_fetch_and_add_full) # define 
AO_int_fetch_and_add_full(addr, val) \ (AO_nop_full(), AO_int_fetch_and_add_acquire(addr, val)) # define AO_HAVE_int_fetch_and_add_full #endif #if !defined(AO_HAVE_int_fetch_and_add_release_write) \ && defined(AO_HAVE_int_fetch_and_add_write) # define AO_int_fetch_and_add_release_write(addr, val) \ AO_int_fetch_and_add_write(addr, val) # define AO_HAVE_int_fetch_and_add_release_write #endif #if !defined(AO_HAVE_int_fetch_and_add_release_write) \ && defined(AO_HAVE_int_fetch_and_add_release) # define AO_int_fetch_and_add_release_write(addr, val) \ AO_int_fetch_and_add_release(addr, val) # define AO_HAVE_int_fetch_and_add_release_write #endif #if !defined(AO_HAVE_int_fetch_and_add_acquire_read) \ && defined(AO_HAVE_int_fetch_and_add_read) # define AO_int_fetch_and_add_acquire_read(addr, val) \ AO_int_fetch_and_add_read(addr, val) # define AO_HAVE_int_fetch_and_add_acquire_read #endif #if !defined(AO_HAVE_int_fetch_and_add_acquire_read) \ && defined(AO_HAVE_int_fetch_and_add_acquire) # define AO_int_fetch_and_add_acquire_read(addr, val) \ AO_int_fetch_and_add_acquire(addr, val) # define AO_HAVE_int_fetch_and_add_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_int_fetch_and_add_acquire_read) # define AO_int_fetch_and_add_dd_acquire_read(addr, val) \ AO_int_fetch_and_add_acquire_read(addr, val) # define AO_HAVE_int_fetch_and_add_dd_acquire_read # endif #else # if defined(AO_HAVE_int_fetch_and_add) # define AO_int_fetch_and_add_dd_acquire_read(addr, val) \ AO_int_fetch_and_add(addr, val) # define AO_HAVE_int_fetch_and_add_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* int_fetch_and_add1 */ #if defined(AO_HAVE_int_fetch_and_add_full) \ && !defined(AO_HAVE_int_fetch_and_add1_full) # define AO_int_fetch_and_add1_full(addr) \ AO_int_fetch_and_add_full(addr, 1) # define AO_HAVE_int_fetch_and_add1_full #endif #if defined(AO_HAVE_int_fetch_and_add_release) \ && !defined(AO_HAVE_int_fetch_and_add1_release) # define 
AO_int_fetch_and_add1_release(addr) \ AO_int_fetch_and_add_release(addr, 1) # define AO_HAVE_int_fetch_and_add1_release #endif #if defined(AO_HAVE_int_fetch_and_add_acquire) \ && !defined(AO_HAVE_int_fetch_and_add1_acquire) # define AO_int_fetch_and_add1_acquire(addr) \ AO_int_fetch_and_add_acquire(addr, 1) # define AO_HAVE_int_fetch_and_add1_acquire #endif #if defined(AO_HAVE_int_fetch_and_add_write) \ && !defined(AO_HAVE_int_fetch_and_add1_write) # define AO_int_fetch_and_add1_write(addr) \ AO_int_fetch_and_add_write(addr, 1) # define AO_HAVE_int_fetch_and_add1_write #endif #if defined(AO_HAVE_int_fetch_and_add_read) \ && !defined(AO_HAVE_int_fetch_and_add1_read) # define AO_int_fetch_and_add1_read(addr) \ AO_int_fetch_and_add_read(addr, 1) # define AO_HAVE_int_fetch_and_add1_read #endif #if defined(AO_HAVE_int_fetch_and_add_release_write) \ && !defined(AO_HAVE_int_fetch_and_add1_release_write) # define AO_int_fetch_and_add1_release_write(addr) \ AO_int_fetch_and_add_release_write(addr, 1) # define AO_HAVE_int_fetch_and_add1_release_write #endif #if defined(AO_HAVE_int_fetch_and_add_acquire_read) \ && !defined(AO_HAVE_int_fetch_and_add1_acquire_read) # define AO_int_fetch_and_add1_acquire_read(addr) \ AO_int_fetch_and_add_acquire_read(addr, 1) # define AO_HAVE_int_fetch_and_add1_acquire_read #endif #if defined(AO_HAVE_int_fetch_and_add) \ && !defined(AO_HAVE_int_fetch_and_add1) # define AO_int_fetch_and_add1(addr) AO_int_fetch_and_add(addr, 1) # define AO_HAVE_int_fetch_and_add1 #endif #if defined(AO_HAVE_int_fetch_and_add1_full) # if !defined(AO_HAVE_int_fetch_and_add1_release) # define AO_int_fetch_and_add1_release(addr) \ AO_int_fetch_and_add1_full(addr) # define AO_HAVE_int_fetch_and_add1_release # endif # if !defined(AO_HAVE_int_fetch_and_add1_acquire) # define AO_int_fetch_and_add1_acquire(addr) \ AO_int_fetch_and_add1_full(addr) # define AO_HAVE_int_fetch_and_add1_acquire # endif # if !defined(AO_HAVE_int_fetch_and_add1_write) # define 
AO_int_fetch_and_add1_write(addr) \ AO_int_fetch_and_add1_full(addr) # define AO_HAVE_int_fetch_and_add1_write # endif # if !defined(AO_HAVE_int_fetch_and_add1_read) # define AO_int_fetch_and_add1_read(addr) \ AO_int_fetch_and_add1_full(addr) # define AO_HAVE_int_fetch_and_add1_read # endif #endif /* AO_HAVE_int_fetch_and_add1_full */ #if !defined(AO_HAVE_int_fetch_and_add1) \ && defined(AO_HAVE_int_fetch_and_add1_release) # define AO_int_fetch_and_add1(addr) AO_int_fetch_and_add1_release(addr) # define AO_HAVE_int_fetch_and_add1 #endif #if !defined(AO_HAVE_int_fetch_and_add1) \ && defined(AO_HAVE_int_fetch_and_add1_acquire) # define AO_int_fetch_and_add1(addr) AO_int_fetch_and_add1_acquire(addr) # define AO_HAVE_int_fetch_and_add1 #endif #if !defined(AO_HAVE_int_fetch_and_add1) \ && defined(AO_HAVE_int_fetch_and_add1_write) # define AO_int_fetch_and_add1(addr) AO_int_fetch_and_add1_write(addr) # define AO_HAVE_int_fetch_and_add1 #endif #if !defined(AO_HAVE_int_fetch_and_add1) \ && defined(AO_HAVE_int_fetch_and_add1_read) # define AO_int_fetch_and_add1(addr) AO_int_fetch_and_add1_read(addr) # define AO_HAVE_int_fetch_and_add1 #endif #if defined(AO_HAVE_int_fetch_and_add1_acquire) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_int_fetch_and_add1_full) # define AO_int_fetch_and_add1_full(addr) \ (AO_nop_full(), AO_int_fetch_and_add1_acquire(addr)) # define AO_HAVE_int_fetch_and_add1_full #endif #if !defined(AO_HAVE_int_fetch_and_add1_release_write) \ && defined(AO_HAVE_int_fetch_and_add1_write) # define AO_int_fetch_and_add1_release_write(addr) \ AO_int_fetch_and_add1_write(addr) # define AO_HAVE_int_fetch_and_add1_release_write #endif #if !defined(AO_HAVE_int_fetch_and_add1_release_write) \ && defined(AO_HAVE_int_fetch_and_add1_release) # define AO_int_fetch_and_add1_release_write(addr) \ AO_int_fetch_and_add1_release(addr) # define AO_HAVE_int_fetch_and_add1_release_write #endif #if !defined(AO_HAVE_int_fetch_and_add1_acquire_read) \ && 
defined(AO_HAVE_int_fetch_and_add1_read) # define AO_int_fetch_and_add1_acquire_read(addr) \ AO_int_fetch_and_add1_read(addr) # define AO_HAVE_int_fetch_and_add1_acquire_read #endif #if !defined(AO_HAVE_int_fetch_and_add1_acquire_read) \ && defined(AO_HAVE_int_fetch_and_add1_acquire) # define AO_int_fetch_and_add1_acquire_read(addr) \ AO_int_fetch_and_add1_acquire(addr) # define AO_HAVE_int_fetch_and_add1_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_int_fetch_and_add1_acquire_read) # define AO_int_fetch_and_add1_dd_acquire_read(addr) \ AO_int_fetch_and_add1_acquire_read(addr) # define AO_HAVE_int_fetch_and_add1_dd_acquire_read # endif #else # if defined(AO_HAVE_int_fetch_and_add1) # define AO_int_fetch_and_add1_dd_acquire_read(addr) \ AO_int_fetch_and_add1(addr) # define AO_HAVE_int_fetch_and_add1_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* int_fetch_and_sub1 */ #if defined(AO_HAVE_int_fetch_and_add_full) \ && !defined(AO_HAVE_int_fetch_and_sub1_full) # define AO_int_fetch_and_sub1_full(addr) \ AO_int_fetch_and_add_full(addr, (unsigned)(-1)) # define AO_HAVE_int_fetch_and_sub1_full #endif #if defined(AO_HAVE_int_fetch_and_add_release) \ && !defined(AO_HAVE_int_fetch_and_sub1_release) # define AO_int_fetch_and_sub1_release(addr) \ AO_int_fetch_and_add_release(addr, (unsigned)(-1)) # define AO_HAVE_int_fetch_and_sub1_release #endif #if defined(AO_HAVE_int_fetch_and_add_acquire) \ && !defined(AO_HAVE_int_fetch_and_sub1_acquire) # define AO_int_fetch_and_sub1_acquire(addr) \ AO_int_fetch_and_add_acquire(addr, (unsigned)(-1)) # define AO_HAVE_int_fetch_and_sub1_acquire #endif #if defined(AO_HAVE_int_fetch_and_add_write) \ && !defined(AO_HAVE_int_fetch_and_sub1_write) # define AO_int_fetch_and_sub1_write(addr) \ AO_int_fetch_and_add_write(addr, (unsigned)(-1)) # define AO_HAVE_int_fetch_and_sub1_write #endif #if defined(AO_HAVE_int_fetch_and_add_read) \ && !defined(AO_HAVE_int_fetch_and_sub1_read) # define 
AO_int_fetch_and_sub1_read(addr) \ AO_int_fetch_and_add_read(addr, (unsigned)(-1)) # define AO_HAVE_int_fetch_and_sub1_read #endif #if defined(AO_HAVE_int_fetch_and_add_release_write) \ && !defined(AO_HAVE_int_fetch_and_sub1_release_write) # define AO_int_fetch_and_sub1_release_write(addr) \ AO_int_fetch_and_add_release_write(addr, (unsigned)(-1)) # define AO_HAVE_int_fetch_and_sub1_release_write #endif #if defined(AO_HAVE_int_fetch_and_add_acquire_read) \ && !defined(AO_HAVE_int_fetch_and_sub1_acquire_read) # define AO_int_fetch_and_sub1_acquire_read(addr) \ AO_int_fetch_and_add_acquire_read(addr, (unsigned)(-1)) # define AO_HAVE_int_fetch_and_sub1_acquire_read #endif #if defined(AO_HAVE_int_fetch_and_add) \ && !defined(AO_HAVE_int_fetch_and_sub1) # define AO_int_fetch_and_sub1(addr) \ AO_int_fetch_and_add(addr, (unsigned)(-1)) # define AO_HAVE_int_fetch_and_sub1 #endif #if defined(AO_HAVE_int_fetch_and_sub1_full) # if !defined(AO_HAVE_int_fetch_and_sub1_release) # define AO_int_fetch_and_sub1_release(addr) \ AO_int_fetch_and_sub1_full(addr) # define AO_HAVE_int_fetch_and_sub1_release # endif # if !defined(AO_HAVE_int_fetch_and_sub1_acquire) # define AO_int_fetch_and_sub1_acquire(addr) \ AO_int_fetch_and_sub1_full(addr) # define AO_HAVE_int_fetch_and_sub1_acquire # endif # if !defined(AO_HAVE_int_fetch_and_sub1_write) # define AO_int_fetch_and_sub1_write(addr) \ AO_int_fetch_and_sub1_full(addr) # define AO_HAVE_int_fetch_and_sub1_write # endif # if !defined(AO_HAVE_int_fetch_and_sub1_read) # define AO_int_fetch_and_sub1_read(addr) \ AO_int_fetch_and_sub1_full(addr) # define AO_HAVE_int_fetch_and_sub1_read # endif #endif /* AO_HAVE_int_fetch_and_sub1_full */ #if !defined(AO_HAVE_int_fetch_and_sub1) \ && defined(AO_HAVE_int_fetch_and_sub1_release) # define AO_int_fetch_and_sub1(addr) AO_int_fetch_and_sub1_release(addr) # define AO_HAVE_int_fetch_and_sub1 #endif #if !defined(AO_HAVE_int_fetch_and_sub1) \ && defined(AO_HAVE_int_fetch_and_sub1_acquire) # define 
AO_int_fetch_and_sub1(addr) AO_int_fetch_and_sub1_acquire(addr) # define AO_HAVE_int_fetch_and_sub1 #endif #if !defined(AO_HAVE_int_fetch_and_sub1) \ && defined(AO_HAVE_int_fetch_and_sub1_write) # define AO_int_fetch_and_sub1(addr) AO_int_fetch_and_sub1_write(addr) # define AO_HAVE_int_fetch_and_sub1 #endif #if !defined(AO_HAVE_int_fetch_and_sub1) \ && defined(AO_HAVE_int_fetch_and_sub1_read) # define AO_int_fetch_and_sub1(addr) AO_int_fetch_and_sub1_read(addr) # define AO_HAVE_int_fetch_and_sub1 #endif #if defined(AO_HAVE_int_fetch_and_sub1_acquire) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_int_fetch_and_sub1_full) # define AO_int_fetch_and_sub1_full(addr) \ (AO_nop_full(), AO_int_fetch_and_sub1_acquire(addr)) # define AO_HAVE_int_fetch_and_sub1_full #endif #if !defined(AO_HAVE_int_fetch_and_sub1_release_write) \ && defined(AO_HAVE_int_fetch_and_sub1_write) # define AO_int_fetch_and_sub1_release_write(addr) \ AO_int_fetch_and_sub1_write(addr) # define AO_HAVE_int_fetch_and_sub1_release_write #endif #if !defined(AO_HAVE_int_fetch_and_sub1_release_write) \ && defined(AO_HAVE_int_fetch_and_sub1_release) # define AO_int_fetch_and_sub1_release_write(addr) \ AO_int_fetch_and_sub1_release(addr) # define AO_HAVE_int_fetch_and_sub1_release_write #endif #if !defined(AO_HAVE_int_fetch_and_sub1_acquire_read) \ && defined(AO_HAVE_int_fetch_and_sub1_read) # define AO_int_fetch_and_sub1_acquire_read(addr) \ AO_int_fetch_and_sub1_read(addr) # define AO_HAVE_int_fetch_and_sub1_acquire_read #endif #if !defined(AO_HAVE_int_fetch_and_sub1_acquire_read) \ && defined(AO_HAVE_int_fetch_and_sub1_acquire) # define AO_int_fetch_and_sub1_acquire_read(addr) \ AO_int_fetch_and_sub1_acquire(addr) # define AO_HAVE_int_fetch_and_sub1_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_int_fetch_and_sub1_acquire_read) # define AO_int_fetch_and_sub1_dd_acquire_read(addr) \ AO_int_fetch_and_sub1_acquire_read(addr) # define AO_HAVE_int_fetch_and_sub1_dd_acquire_read # endif 
#else # if defined(AO_HAVE_int_fetch_and_sub1) # define AO_int_fetch_and_sub1_dd_acquire_read(addr) \ AO_int_fetch_and_sub1(addr) # define AO_HAVE_int_fetch_and_sub1_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* int_and */ #if defined(AO_HAVE_int_compare_and_swap_full) \ && !defined(AO_HAVE_int_and_full) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_int_and_full(volatile unsigned *addr, unsigned value) { unsigned old; do { old = *(unsigned *)addr; } while (AO_EXPECT_FALSE(!AO_int_compare_and_swap_full(addr, old, old & value))); } # define AO_HAVE_int_and_full #endif #if defined(AO_HAVE_int_and_full) # if !defined(AO_HAVE_int_and_release) # define AO_int_and_release(addr, val) AO_int_and_full(addr, val) # define AO_HAVE_int_and_release # endif # if !defined(AO_HAVE_int_and_acquire) # define AO_int_and_acquire(addr, val) AO_int_and_full(addr, val) # define AO_HAVE_int_and_acquire # endif # if !defined(AO_HAVE_int_and_write) # define AO_int_and_write(addr, val) AO_int_and_full(addr, val) # define AO_HAVE_int_and_write # endif # if !defined(AO_HAVE_int_and_read) # define AO_int_and_read(addr, val) AO_int_and_full(addr, val) # define AO_HAVE_int_and_read # endif #endif /* AO_HAVE_int_and_full */ #if !defined(AO_HAVE_int_and) && defined(AO_HAVE_int_and_release) # define AO_int_and(addr, val) AO_int_and_release(addr, val) # define AO_HAVE_int_and #endif #if !defined(AO_HAVE_int_and) && defined(AO_HAVE_int_and_acquire) # define AO_int_and(addr, val) AO_int_and_acquire(addr, val) # define AO_HAVE_int_and #endif #if !defined(AO_HAVE_int_and) && defined(AO_HAVE_int_and_write) # define AO_int_and(addr, val) AO_int_and_write(addr, val) # define AO_HAVE_int_and #endif #if !defined(AO_HAVE_int_and) && defined(AO_HAVE_int_and_read) # define AO_int_and(addr, val) AO_int_and_read(addr, val) # define AO_HAVE_int_and #endif #if defined(AO_HAVE_int_and_acquire) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_int_and_full) # define AO_int_and_full(addr, val) \ 
(AO_nop_full(), AO_int_and_acquire(addr, val)) # define AO_HAVE_int_and_full #endif #if !defined(AO_HAVE_int_and_release_write) \ && defined(AO_HAVE_int_and_write) # define AO_int_and_release_write(addr, val) AO_int_and_write(addr, val) # define AO_HAVE_int_and_release_write #endif #if !defined(AO_HAVE_int_and_release_write) \ && defined(AO_HAVE_int_and_release) # define AO_int_and_release_write(addr, val) AO_int_and_release(addr, val) # define AO_HAVE_int_and_release_write #endif #if !defined(AO_HAVE_int_and_acquire_read) \ && defined(AO_HAVE_int_and_read) # define AO_int_and_acquire_read(addr, val) AO_int_and_read(addr, val) # define AO_HAVE_int_and_acquire_read #endif #if !defined(AO_HAVE_int_and_acquire_read) \ && defined(AO_HAVE_int_and_acquire) # define AO_int_and_acquire_read(addr, val) AO_int_and_acquire(addr, val) # define AO_HAVE_int_and_acquire_read #endif /* int_or */ #if defined(AO_HAVE_int_compare_and_swap_full) \ && !defined(AO_HAVE_int_or_full) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_int_or_full(volatile unsigned *addr, unsigned value) { unsigned old; do { old = *(unsigned *)addr; } while (AO_EXPECT_FALSE(!AO_int_compare_and_swap_full(addr, old, old | value))); } # define AO_HAVE_int_or_full #endif #if defined(AO_HAVE_int_or_full) # if !defined(AO_HAVE_int_or_release) # define AO_int_or_release(addr, val) AO_int_or_full(addr, val) # define AO_HAVE_int_or_release # endif # if !defined(AO_HAVE_int_or_acquire) # define AO_int_or_acquire(addr, val) AO_int_or_full(addr, val) # define AO_HAVE_int_or_acquire # endif # if !defined(AO_HAVE_int_or_write) # define AO_int_or_write(addr, val) AO_int_or_full(addr, val) # define AO_HAVE_int_or_write # endif # if !defined(AO_HAVE_int_or_read) # define AO_int_or_read(addr, val) AO_int_or_full(addr, val) # define AO_HAVE_int_or_read # endif #endif /* AO_HAVE_int_or_full */ #if !defined(AO_HAVE_int_or) && defined(AO_HAVE_int_or_release) # define AO_int_or(addr, val) AO_int_or_release(addr, val) # define 
AO_HAVE_int_or #endif #if !defined(AO_HAVE_int_or) && defined(AO_HAVE_int_or_acquire) # define AO_int_or(addr, val) AO_int_or_acquire(addr, val) # define AO_HAVE_int_or #endif #if !defined(AO_HAVE_int_or) && defined(AO_HAVE_int_or_write) # define AO_int_or(addr, val) AO_int_or_write(addr, val) # define AO_HAVE_int_or #endif #if !defined(AO_HAVE_int_or) && defined(AO_HAVE_int_or_read) # define AO_int_or(addr, val) AO_int_or_read(addr, val) # define AO_HAVE_int_or #endif #if defined(AO_HAVE_int_or_acquire) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_int_or_full) # define AO_int_or_full(addr, val) \ (AO_nop_full(), AO_int_or_acquire(addr, val)) # define AO_HAVE_int_or_full #endif #if !defined(AO_HAVE_int_or_release_write) \ && defined(AO_HAVE_int_or_write) # define AO_int_or_release_write(addr, val) AO_int_or_write(addr, val) # define AO_HAVE_int_or_release_write #endif #if !defined(AO_HAVE_int_or_release_write) \ && defined(AO_HAVE_int_or_release) # define AO_int_or_release_write(addr, val) AO_int_or_release(addr, val) # define AO_HAVE_int_or_release_write #endif #if !defined(AO_HAVE_int_or_acquire_read) && defined(AO_HAVE_int_or_read) # define AO_int_or_acquire_read(addr, val) AO_int_or_read(addr, val) # define AO_HAVE_int_or_acquire_read #endif #if !defined(AO_HAVE_int_or_acquire_read) \ && defined(AO_HAVE_int_or_acquire) # define AO_int_or_acquire_read(addr, val) AO_int_or_acquire(addr, val) # define AO_HAVE_int_or_acquire_read #endif /* int_xor */ #if defined(AO_HAVE_int_compare_and_swap_full) \ && !defined(AO_HAVE_int_xor_full) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_int_xor_full(volatile unsigned *addr, unsigned value) { unsigned old; do { old = *(unsigned *)addr; } while (AO_EXPECT_FALSE(!AO_int_compare_and_swap_full(addr, old, old ^ value))); } # define AO_HAVE_int_xor_full #endif #if defined(AO_HAVE_int_xor_full) # if !defined(AO_HAVE_int_xor_release) # define AO_int_xor_release(addr, val) AO_int_xor_full(addr, val) # define 
AO_HAVE_int_xor_release # endif # if !defined(AO_HAVE_int_xor_acquire) # define AO_int_xor_acquire(addr, val) AO_int_xor_full(addr, val) # define AO_HAVE_int_xor_acquire # endif # if !defined(AO_HAVE_int_xor_write) # define AO_int_xor_write(addr, val) AO_int_xor_full(addr, val) # define AO_HAVE_int_xor_write # endif # if !defined(AO_HAVE_int_xor_read) # define AO_int_xor_read(addr, val) AO_int_xor_full(addr, val) # define AO_HAVE_int_xor_read # endif #endif /* AO_HAVE_int_xor_full */ #if !defined(AO_HAVE_int_xor) && defined(AO_HAVE_int_xor_release) # define AO_int_xor(addr, val) AO_int_xor_release(addr, val) # define AO_HAVE_int_xor #endif #if !defined(AO_HAVE_int_xor) && defined(AO_HAVE_int_xor_acquire) # define AO_int_xor(addr, val) AO_int_xor_acquire(addr, val) # define AO_HAVE_int_xor #endif #if !defined(AO_HAVE_int_xor) && defined(AO_HAVE_int_xor_write) # define AO_int_xor(addr, val) AO_int_xor_write(addr, val) # define AO_HAVE_int_xor #endif #if !defined(AO_HAVE_int_xor) && defined(AO_HAVE_int_xor_read) # define AO_int_xor(addr, val) AO_int_xor_read(addr, val) # define AO_HAVE_int_xor #endif #if defined(AO_HAVE_int_xor_acquire) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_int_xor_full) # define AO_int_xor_full(addr, val) \ (AO_nop_full(), AO_int_xor_acquire(addr, val)) # define AO_HAVE_int_xor_full #endif #if !defined(AO_HAVE_int_xor_release_write) \ && defined(AO_HAVE_int_xor_write) # define AO_int_xor_release_write(addr, val) AO_int_xor_write(addr, val) # define AO_HAVE_int_xor_release_write #endif #if !defined(AO_HAVE_int_xor_release_write) \ && defined(AO_HAVE_int_xor_release) # define AO_int_xor_release_write(addr, val) AO_int_xor_release(addr, val) # define AO_HAVE_int_xor_release_write #endif #if !defined(AO_HAVE_int_xor_acquire_read) \ && defined(AO_HAVE_int_xor_read) # define AO_int_xor_acquire_read(addr, val) AO_int_xor_read(addr, val) # define AO_HAVE_int_xor_acquire_read #endif #if !defined(AO_HAVE_int_xor_acquire_read) \ && 
defined(AO_HAVE_int_xor_acquire) # define AO_int_xor_acquire_read(addr, val) AO_int_xor_acquire(addr, val) # define AO_HAVE_int_xor_acquire_read #endif /* int_and/or/xor_dd_acquire_read are meaningless. */ /* * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ /* compare_and_swap (based on fetch_compare_and_swap) */ #if defined(AO_HAVE_fetch_compare_and_swap_full) \ && !defined(AO_HAVE_compare_and_swap_full) AO_INLINE int AO_compare_and_swap_full(volatile AO_t *addr, AO_t old_val, AO_t new_val) { return AO_fetch_compare_and_swap_full(addr, old_val, new_val) == old_val; } # define AO_HAVE_compare_and_swap_full #endif #if defined(AO_HAVE_fetch_compare_and_swap_acquire) \ && !defined(AO_HAVE_compare_and_swap_acquire) AO_INLINE int AO_compare_and_swap_acquire(volatile AO_t *addr, AO_t old_val, AO_t new_val) { return AO_fetch_compare_and_swap_acquire(addr, old_val, new_val) == old_val; } # define AO_HAVE_compare_and_swap_acquire #endif #if defined(AO_HAVE_fetch_compare_and_swap_release) \ && !defined(AO_HAVE_compare_and_swap_release) AO_INLINE int AO_compare_and_swap_release(volatile AO_t *addr, AO_t old_val, AO_t new_val) { return AO_fetch_compare_and_swap_release(addr, old_val, new_val) == old_val; } # define AO_HAVE_compare_and_swap_release #endif #if defined(AO_HAVE_fetch_compare_and_swap_write) \ && !defined(AO_HAVE_compare_and_swap_write) AO_INLINE int AO_compare_and_swap_write(volatile AO_t *addr, AO_t old_val, AO_t new_val) { return AO_fetch_compare_and_swap_write(addr, old_val, new_val) == old_val; } # define AO_HAVE_compare_and_swap_write #endif #if defined(AO_HAVE_fetch_compare_and_swap_read) \ && !defined(AO_HAVE_compare_and_swap_read) AO_INLINE int AO_compare_and_swap_read(volatile AO_t *addr, AO_t old_val, AO_t new_val) { return AO_fetch_compare_and_swap_read(addr, old_val, new_val) == old_val; } # define AO_HAVE_compare_and_swap_read #endif #if defined(AO_HAVE_fetch_compare_and_swap) \ && !defined(AO_HAVE_compare_and_swap) AO_INLINE int AO_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val) { return AO_fetch_compare_and_swap(addr, old_val, new_val) == old_val; } # define AO_HAVE_compare_and_swap #endif #if defined(AO_HAVE_fetch_compare_and_swap_release_write) \ && 
!defined(AO_HAVE_compare_and_swap_release_write) AO_INLINE int AO_compare_and_swap_release_write(volatile AO_t *addr, AO_t old_val, AO_t new_val) { return AO_fetch_compare_and_swap_release_write(addr, old_val, new_val) == old_val; } # define AO_HAVE_compare_and_swap_release_write #endif #if defined(AO_HAVE_fetch_compare_and_swap_acquire_read) \ && !defined(AO_HAVE_compare_and_swap_acquire_read) AO_INLINE int AO_compare_and_swap_acquire_read(volatile AO_t *addr, AO_t old_val, AO_t new_val) { return AO_fetch_compare_and_swap_acquire_read(addr, old_val, new_val) == old_val; } # define AO_HAVE_compare_and_swap_acquire_read #endif #if defined(AO_HAVE_fetch_compare_and_swap_dd_acquire_read) \ && !defined(AO_HAVE_compare_and_swap_dd_acquire_read) AO_INLINE int AO_compare_and_swap_dd_acquire_read(volatile AO_t *addr, AO_t old_val, AO_t new_val) { return AO_fetch_compare_and_swap_dd_acquire_read(addr, old_val, new_val) == old_val; } # define AO_HAVE_compare_and_swap_dd_acquire_read #endif /* fetch_and_add */ /* We first try to implement fetch_and_add variants in terms of the */ /* corresponding compare_and_swap variants to minimize adding barriers. 
*/ #if defined(AO_HAVE_compare_and_swap_full) \ && !defined(AO_HAVE_fetch_and_add_full) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE AO_t AO_fetch_and_add_full(volatile AO_t *addr, AO_t incr) { AO_t old; do { old = *(AO_t *)addr; } while (AO_EXPECT_FALSE(!AO_compare_and_swap_full(addr, old, old + incr))); return old; } # define AO_HAVE_fetch_and_add_full #endif #if defined(AO_HAVE_compare_and_swap_acquire) \ && !defined(AO_HAVE_fetch_and_add_acquire) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE AO_t AO_fetch_and_add_acquire(volatile AO_t *addr, AO_t incr) { AO_t old; do { old = *(AO_t *)addr; } while (AO_EXPECT_FALSE(!AO_compare_and_swap_acquire(addr, old, old + incr))); return old; } # define AO_HAVE_fetch_and_add_acquire #endif #if defined(AO_HAVE_compare_and_swap_release) \ && !defined(AO_HAVE_fetch_and_add_release) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE AO_t AO_fetch_and_add_release(volatile AO_t *addr, AO_t incr) { AO_t old; do { old = *(AO_t *)addr; } while (AO_EXPECT_FALSE(!AO_compare_and_swap_release(addr, old, old + incr))); return old; } # define AO_HAVE_fetch_and_add_release #endif #if defined(AO_HAVE_compare_and_swap) \ && !defined(AO_HAVE_fetch_and_add) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE AO_t AO_fetch_and_add(volatile AO_t *addr, AO_t incr) { AO_t old; do { old = *(AO_t *)addr; } while (AO_EXPECT_FALSE(!AO_compare_and_swap(addr, old, old + incr))); return old; } # define AO_HAVE_fetch_and_add #endif #if defined(AO_HAVE_fetch_and_add_full) # if !defined(AO_HAVE_fetch_and_add_release) # define AO_fetch_and_add_release(addr, val) \ AO_fetch_and_add_full(addr, val) # define AO_HAVE_fetch_and_add_release # endif # if !defined(AO_HAVE_fetch_and_add_acquire) # define AO_fetch_and_add_acquire(addr, val) \ AO_fetch_and_add_full(addr, val) # define AO_HAVE_fetch_and_add_acquire # endif # if !defined(AO_HAVE_fetch_and_add_write) # define AO_fetch_and_add_write(addr, val) \ AO_fetch_and_add_full(addr, val) # define AO_HAVE_fetch_and_add_write # endif # if 
!defined(AO_HAVE_fetch_and_add_read) # define AO_fetch_and_add_read(addr, val) \ AO_fetch_and_add_full(addr, val) # define AO_HAVE_fetch_and_add_read # endif #endif /* AO_HAVE_fetch_and_add_full */ #if defined(AO_HAVE_fetch_and_add) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_fetch_and_add_acquire) AO_INLINE AO_t AO_fetch_and_add_acquire(volatile AO_t *addr, AO_t incr) { AO_t result = AO_fetch_and_add(addr, incr); AO_nop_full(); return result; } # define AO_HAVE_fetch_and_add_acquire #endif #if defined(AO_HAVE_fetch_and_add) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_fetch_and_add_release) # define AO_fetch_and_add_release(addr, incr) \ (AO_nop_full(), AO_fetch_and_add(addr, incr)) # define AO_HAVE_fetch_and_add_release #endif #if !defined(AO_HAVE_fetch_and_add) \ && defined(AO_HAVE_fetch_and_add_release) # define AO_fetch_and_add(addr, val) \ AO_fetch_and_add_release(addr, val) # define AO_HAVE_fetch_and_add #endif #if !defined(AO_HAVE_fetch_and_add) \ && defined(AO_HAVE_fetch_and_add_acquire) # define AO_fetch_and_add(addr, val) \ AO_fetch_and_add_acquire(addr, val) # define AO_HAVE_fetch_and_add #endif #if !defined(AO_HAVE_fetch_and_add) \ && defined(AO_HAVE_fetch_and_add_write) # define AO_fetch_and_add(addr, val) \ AO_fetch_and_add_write(addr, val) # define AO_HAVE_fetch_and_add #endif #if !defined(AO_HAVE_fetch_and_add) \ && defined(AO_HAVE_fetch_and_add_read) # define AO_fetch_and_add(addr, val) \ AO_fetch_and_add_read(addr, val) # define AO_HAVE_fetch_and_add #endif #if defined(AO_HAVE_fetch_and_add_acquire) \ && defined(AO_HAVE_nop_full) && !defined(AO_HAVE_fetch_and_add_full) # define AO_fetch_and_add_full(addr, val) \ (AO_nop_full(), AO_fetch_and_add_acquire(addr, val)) # define AO_HAVE_fetch_and_add_full #endif #if !defined(AO_HAVE_fetch_and_add_release_write) \ && defined(AO_HAVE_fetch_and_add_write) # define AO_fetch_and_add_release_write(addr, val) \ AO_fetch_and_add_write(addr, val) # define AO_HAVE_fetch_and_add_release_write #endif 
#if !defined(AO_HAVE_fetch_and_add_release_write) \ && defined(AO_HAVE_fetch_and_add_release) # define AO_fetch_and_add_release_write(addr, val) \ AO_fetch_and_add_release(addr, val) # define AO_HAVE_fetch_and_add_release_write #endif #if !defined(AO_HAVE_fetch_and_add_acquire_read) \ && defined(AO_HAVE_fetch_and_add_read) # define AO_fetch_and_add_acquire_read(addr, val) \ AO_fetch_and_add_read(addr, val) # define AO_HAVE_fetch_and_add_acquire_read #endif #if !defined(AO_HAVE_fetch_and_add_acquire_read) \ && defined(AO_HAVE_fetch_and_add_acquire) # define AO_fetch_and_add_acquire_read(addr, val) \ AO_fetch_and_add_acquire(addr, val) # define AO_HAVE_fetch_and_add_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_fetch_and_add_acquire_read) # define AO_fetch_and_add_dd_acquire_read(addr, val) \ AO_fetch_and_add_acquire_read(addr, val) # define AO_HAVE_fetch_and_add_dd_acquire_read # endif #else # if defined(AO_HAVE_fetch_and_add) # define AO_fetch_and_add_dd_acquire_read(addr, val) \ AO_fetch_and_add(addr, val) # define AO_HAVE_fetch_and_add_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* fetch_and_add1 */ #if defined(AO_HAVE_fetch_and_add_full) \ && !defined(AO_HAVE_fetch_and_add1_full) # define AO_fetch_and_add1_full(addr) \ AO_fetch_and_add_full(addr, 1) # define AO_HAVE_fetch_and_add1_full #endif #if defined(AO_HAVE_fetch_and_add_release) \ && !defined(AO_HAVE_fetch_and_add1_release) # define AO_fetch_and_add1_release(addr) \ AO_fetch_and_add_release(addr, 1) # define AO_HAVE_fetch_and_add1_release #endif #if defined(AO_HAVE_fetch_and_add_acquire) \ && !defined(AO_HAVE_fetch_and_add1_acquire) # define AO_fetch_and_add1_acquire(addr) \ AO_fetch_and_add_acquire(addr, 1) # define AO_HAVE_fetch_and_add1_acquire #endif #if defined(AO_HAVE_fetch_and_add_write) \ && !defined(AO_HAVE_fetch_and_add1_write) # define AO_fetch_and_add1_write(addr) \ AO_fetch_and_add_write(addr, 1) # define AO_HAVE_fetch_and_add1_write #endif #if 
defined(AO_HAVE_fetch_and_add_read) \ && !defined(AO_HAVE_fetch_and_add1_read) # define AO_fetch_and_add1_read(addr) \ AO_fetch_and_add_read(addr, 1) # define AO_HAVE_fetch_and_add1_read #endif #if defined(AO_HAVE_fetch_and_add_release_write) \ && !defined(AO_HAVE_fetch_and_add1_release_write) # define AO_fetch_and_add1_release_write(addr) \ AO_fetch_and_add_release_write(addr, 1) # define AO_HAVE_fetch_and_add1_release_write #endif #if defined(AO_HAVE_fetch_and_add_acquire_read) \ && !defined(AO_HAVE_fetch_and_add1_acquire_read) # define AO_fetch_and_add1_acquire_read(addr) \ AO_fetch_and_add_acquire_read(addr, 1) # define AO_HAVE_fetch_and_add1_acquire_read #endif #if defined(AO_HAVE_fetch_and_add) \ && !defined(AO_HAVE_fetch_and_add1) # define AO_fetch_and_add1(addr) AO_fetch_and_add(addr, 1) # define AO_HAVE_fetch_and_add1 #endif #if defined(AO_HAVE_fetch_and_add1_full) # if !defined(AO_HAVE_fetch_and_add1_release) # define AO_fetch_and_add1_release(addr) \ AO_fetch_and_add1_full(addr) # define AO_HAVE_fetch_and_add1_release # endif # if !defined(AO_HAVE_fetch_and_add1_acquire) # define AO_fetch_and_add1_acquire(addr) \ AO_fetch_and_add1_full(addr) # define AO_HAVE_fetch_and_add1_acquire # endif # if !defined(AO_HAVE_fetch_and_add1_write) # define AO_fetch_and_add1_write(addr) \ AO_fetch_and_add1_full(addr) # define AO_HAVE_fetch_and_add1_write # endif # if !defined(AO_HAVE_fetch_and_add1_read) # define AO_fetch_and_add1_read(addr) \ AO_fetch_and_add1_full(addr) # define AO_HAVE_fetch_and_add1_read # endif #endif /* AO_HAVE_fetch_and_add1_full */ #if !defined(AO_HAVE_fetch_and_add1) \ && defined(AO_HAVE_fetch_and_add1_release) # define AO_fetch_and_add1(addr) AO_fetch_and_add1_release(addr) # define AO_HAVE_fetch_and_add1 #endif #if !defined(AO_HAVE_fetch_and_add1) \ && defined(AO_HAVE_fetch_and_add1_acquire) # define AO_fetch_and_add1(addr) AO_fetch_and_add1_acquire(addr) # define AO_HAVE_fetch_and_add1 #endif #if !defined(AO_HAVE_fetch_and_add1) \ && 
defined(AO_HAVE_fetch_and_add1_write) # define AO_fetch_and_add1(addr) AO_fetch_and_add1_write(addr) # define AO_HAVE_fetch_and_add1 #endif #if !defined(AO_HAVE_fetch_and_add1) \ && defined(AO_HAVE_fetch_and_add1_read) # define AO_fetch_and_add1(addr) AO_fetch_and_add1_read(addr) # define AO_HAVE_fetch_and_add1 #endif #if defined(AO_HAVE_fetch_and_add1_acquire) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_fetch_and_add1_full) # define AO_fetch_and_add1_full(addr) \ (AO_nop_full(), AO_fetch_and_add1_acquire(addr)) # define AO_HAVE_fetch_and_add1_full #endif #if !defined(AO_HAVE_fetch_and_add1_release_write) \ && defined(AO_HAVE_fetch_and_add1_write) # define AO_fetch_and_add1_release_write(addr) \ AO_fetch_and_add1_write(addr) # define AO_HAVE_fetch_and_add1_release_write #endif #if !defined(AO_HAVE_fetch_and_add1_release_write) \ && defined(AO_HAVE_fetch_and_add1_release) # define AO_fetch_and_add1_release_write(addr) \ AO_fetch_and_add1_release(addr) # define AO_HAVE_fetch_and_add1_release_write #endif #if !defined(AO_HAVE_fetch_and_add1_acquire_read) \ && defined(AO_HAVE_fetch_and_add1_read) # define AO_fetch_and_add1_acquire_read(addr) \ AO_fetch_and_add1_read(addr) # define AO_HAVE_fetch_and_add1_acquire_read #endif #if !defined(AO_HAVE_fetch_and_add1_acquire_read) \ && defined(AO_HAVE_fetch_and_add1_acquire) # define AO_fetch_and_add1_acquire_read(addr) \ AO_fetch_and_add1_acquire(addr) # define AO_HAVE_fetch_and_add1_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_fetch_and_add1_acquire_read) # define AO_fetch_and_add1_dd_acquire_read(addr) \ AO_fetch_and_add1_acquire_read(addr) # define AO_HAVE_fetch_and_add1_dd_acquire_read # endif #else # if defined(AO_HAVE_fetch_and_add1) # define AO_fetch_and_add1_dd_acquire_read(addr) \ AO_fetch_and_add1(addr) # define AO_HAVE_fetch_and_add1_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* fetch_and_sub1 */ #if defined(AO_HAVE_fetch_and_add_full) \ && 
!defined(AO_HAVE_fetch_and_sub1_full) # define AO_fetch_and_sub1_full(addr) \ AO_fetch_and_add_full(addr, (AO_t)(-1)) # define AO_HAVE_fetch_and_sub1_full #endif #if defined(AO_HAVE_fetch_and_add_release) \ && !defined(AO_HAVE_fetch_and_sub1_release) # define AO_fetch_and_sub1_release(addr) \ AO_fetch_and_add_release(addr, (AO_t)(-1)) # define AO_HAVE_fetch_and_sub1_release #endif #if defined(AO_HAVE_fetch_and_add_acquire) \ && !defined(AO_HAVE_fetch_and_sub1_acquire) # define AO_fetch_and_sub1_acquire(addr) \ AO_fetch_and_add_acquire(addr, (AO_t)(-1)) # define AO_HAVE_fetch_and_sub1_acquire #endif #if defined(AO_HAVE_fetch_and_add_write) \ && !defined(AO_HAVE_fetch_and_sub1_write) # define AO_fetch_and_sub1_write(addr) \ AO_fetch_and_add_write(addr, (AO_t)(-1)) # define AO_HAVE_fetch_and_sub1_write #endif #if defined(AO_HAVE_fetch_and_add_read) \ && !defined(AO_HAVE_fetch_and_sub1_read) # define AO_fetch_and_sub1_read(addr) \ AO_fetch_and_add_read(addr, (AO_t)(-1)) # define AO_HAVE_fetch_and_sub1_read #endif #if defined(AO_HAVE_fetch_and_add_release_write) \ && !defined(AO_HAVE_fetch_and_sub1_release_write) # define AO_fetch_and_sub1_release_write(addr) \ AO_fetch_and_add_release_write(addr, (AO_t)(-1)) # define AO_HAVE_fetch_and_sub1_release_write #endif #if defined(AO_HAVE_fetch_and_add_acquire_read) \ && !defined(AO_HAVE_fetch_and_sub1_acquire_read) # define AO_fetch_and_sub1_acquire_read(addr) \ AO_fetch_and_add_acquire_read(addr, (AO_t)(-1)) # define AO_HAVE_fetch_and_sub1_acquire_read #endif #if defined(AO_HAVE_fetch_and_add) \ && !defined(AO_HAVE_fetch_and_sub1) # define AO_fetch_and_sub1(addr) \ AO_fetch_and_add(addr, (AO_t)(-1)) # define AO_HAVE_fetch_and_sub1 #endif #if defined(AO_HAVE_fetch_and_sub1_full) # if !defined(AO_HAVE_fetch_and_sub1_release) # define AO_fetch_and_sub1_release(addr) \ AO_fetch_and_sub1_full(addr) # define AO_HAVE_fetch_and_sub1_release # endif # if !defined(AO_HAVE_fetch_and_sub1_acquire) # define AO_fetch_and_sub1_acquire(addr) 
\ AO_fetch_and_sub1_full(addr) # define AO_HAVE_fetch_and_sub1_acquire # endif # if !defined(AO_HAVE_fetch_and_sub1_write) # define AO_fetch_and_sub1_write(addr) \ AO_fetch_and_sub1_full(addr) # define AO_HAVE_fetch_and_sub1_write # endif # if !defined(AO_HAVE_fetch_and_sub1_read) # define AO_fetch_and_sub1_read(addr) \ AO_fetch_and_sub1_full(addr) # define AO_HAVE_fetch_and_sub1_read # endif #endif /* AO_HAVE_fetch_and_sub1_full */ #if !defined(AO_HAVE_fetch_and_sub1) \ && defined(AO_HAVE_fetch_and_sub1_release) # define AO_fetch_and_sub1(addr) AO_fetch_and_sub1_release(addr) # define AO_HAVE_fetch_and_sub1 #endif #if !defined(AO_HAVE_fetch_and_sub1) \ && defined(AO_HAVE_fetch_and_sub1_acquire) # define AO_fetch_and_sub1(addr) AO_fetch_and_sub1_acquire(addr) # define AO_HAVE_fetch_and_sub1 #endif #if !defined(AO_HAVE_fetch_and_sub1) \ && defined(AO_HAVE_fetch_and_sub1_write) # define AO_fetch_and_sub1(addr) AO_fetch_and_sub1_write(addr) # define AO_HAVE_fetch_and_sub1 #endif #if !defined(AO_HAVE_fetch_and_sub1) \ && defined(AO_HAVE_fetch_and_sub1_read) # define AO_fetch_and_sub1(addr) AO_fetch_and_sub1_read(addr) # define AO_HAVE_fetch_and_sub1 #endif #if defined(AO_HAVE_fetch_and_sub1_acquire) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_fetch_and_sub1_full) # define AO_fetch_and_sub1_full(addr) \ (AO_nop_full(), AO_fetch_and_sub1_acquire(addr)) # define AO_HAVE_fetch_and_sub1_full #endif #if !defined(AO_HAVE_fetch_and_sub1_release_write) \ && defined(AO_HAVE_fetch_and_sub1_write) # define AO_fetch_and_sub1_release_write(addr) \ AO_fetch_and_sub1_write(addr) # define AO_HAVE_fetch_and_sub1_release_write #endif #if !defined(AO_HAVE_fetch_and_sub1_release_write) \ && defined(AO_HAVE_fetch_and_sub1_release) # define AO_fetch_and_sub1_release_write(addr) \ AO_fetch_and_sub1_release(addr) # define AO_HAVE_fetch_and_sub1_release_write #endif #if !defined(AO_HAVE_fetch_and_sub1_acquire_read) \ && defined(AO_HAVE_fetch_and_sub1_read) # define 
AO_fetch_and_sub1_acquire_read(addr) \ AO_fetch_and_sub1_read(addr) # define AO_HAVE_fetch_and_sub1_acquire_read #endif #if !defined(AO_HAVE_fetch_and_sub1_acquire_read) \ && defined(AO_HAVE_fetch_and_sub1_acquire) # define AO_fetch_and_sub1_acquire_read(addr) \ AO_fetch_and_sub1_acquire(addr) # define AO_HAVE_fetch_and_sub1_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_fetch_and_sub1_acquire_read) # define AO_fetch_and_sub1_dd_acquire_read(addr) \ AO_fetch_and_sub1_acquire_read(addr) # define AO_HAVE_fetch_and_sub1_dd_acquire_read # endif #else # if defined(AO_HAVE_fetch_and_sub1) # define AO_fetch_and_sub1_dd_acquire_read(addr) \ AO_fetch_and_sub1(addr) # define AO_HAVE_fetch_and_sub1_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* and */ #if defined(AO_HAVE_compare_and_swap_full) \ && !defined(AO_HAVE_and_full) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_and_full(volatile AO_t *addr, AO_t value) { AO_t old; do { old = *(AO_t *)addr; } while (AO_EXPECT_FALSE(!AO_compare_and_swap_full(addr, old, old & value))); } # define AO_HAVE_and_full #endif #if defined(AO_HAVE_and_full) # if !defined(AO_HAVE_and_release) # define AO_and_release(addr, val) AO_and_full(addr, val) # define AO_HAVE_and_release # endif # if !defined(AO_HAVE_and_acquire) # define AO_and_acquire(addr, val) AO_and_full(addr, val) # define AO_HAVE_and_acquire # endif # if !defined(AO_HAVE_and_write) # define AO_and_write(addr, val) AO_and_full(addr, val) # define AO_HAVE_and_write # endif # if !defined(AO_HAVE_and_read) # define AO_and_read(addr, val) AO_and_full(addr, val) # define AO_HAVE_and_read # endif #endif /* AO_HAVE_and_full */ #if !defined(AO_HAVE_and) && defined(AO_HAVE_and_release) # define AO_and(addr, val) AO_and_release(addr, val) # define AO_HAVE_and #endif #if !defined(AO_HAVE_and) && defined(AO_HAVE_and_acquire) # define AO_and(addr, val) AO_and_acquire(addr, val) # define AO_HAVE_and #endif #if !defined(AO_HAVE_and) && defined(AO_HAVE_and_write) # 
define AO_and(addr, val) AO_and_write(addr, val) # define AO_HAVE_and #endif #if !defined(AO_HAVE_and) && defined(AO_HAVE_and_read) # define AO_and(addr, val) AO_and_read(addr, val) # define AO_HAVE_and #endif #if defined(AO_HAVE_and_acquire) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_and_full) # define AO_and_full(addr, val) \ (AO_nop_full(), AO_and_acquire(addr, val)) # define AO_HAVE_and_full #endif #if !defined(AO_HAVE_and_release_write) \ && defined(AO_HAVE_and_write) # define AO_and_release_write(addr, val) AO_and_write(addr, val) # define AO_HAVE_and_release_write #endif #if !defined(AO_HAVE_and_release_write) \ && defined(AO_HAVE_and_release) # define AO_and_release_write(addr, val) AO_and_release(addr, val) # define AO_HAVE_and_release_write #endif #if !defined(AO_HAVE_and_acquire_read) \ && defined(AO_HAVE_and_read) # define AO_and_acquire_read(addr, val) AO_and_read(addr, val) # define AO_HAVE_and_acquire_read #endif #if !defined(AO_HAVE_and_acquire_read) \ && defined(AO_HAVE_and_acquire) # define AO_and_acquire_read(addr, val) AO_and_acquire(addr, val) # define AO_HAVE_and_acquire_read #endif /* or */ #if defined(AO_HAVE_compare_and_swap_full) \ && !defined(AO_HAVE_or_full) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_or_full(volatile AO_t *addr, AO_t value) { AO_t old; do { old = *(AO_t *)addr; } while (AO_EXPECT_FALSE(!AO_compare_and_swap_full(addr, old, old | value))); } # define AO_HAVE_or_full #endif #if defined(AO_HAVE_or_full) # if !defined(AO_HAVE_or_release) # define AO_or_release(addr, val) AO_or_full(addr, val) # define AO_HAVE_or_release # endif # if !defined(AO_HAVE_or_acquire) # define AO_or_acquire(addr, val) AO_or_full(addr, val) # define AO_HAVE_or_acquire # endif # if !defined(AO_HAVE_or_write) # define AO_or_write(addr, val) AO_or_full(addr, val) # define AO_HAVE_or_write # endif # if !defined(AO_HAVE_or_read) # define AO_or_read(addr, val) AO_or_full(addr, val) # define AO_HAVE_or_read # endif #endif /* AO_HAVE_or_full */ #if 
!defined(AO_HAVE_or) && defined(AO_HAVE_or_release) # define AO_or(addr, val) AO_or_release(addr, val) # define AO_HAVE_or #endif #if !defined(AO_HAVE_or) && defined(AO_HAVE_or_acquire) # define AO_or(addr, val) AO_or_acquire(addr, val) # define AO_HAVE_or #endif #if !defined(AO_HAVE_or) && defined(AO_HAVE_or_write) # define AO_or(addr, val) AO_or_write(addr, val) # define AO_HAVE_or #endif #if !defined(AO_HAVE_or) && defined(AO_HAVE_or_read) # define AO_or(addr, val) AO_or_read(addr, val) # define AO_HAVE_or #endif #if defined(AO_HAVE_or_acquire) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_or_full) # define AO_or_full(addr, val) \ (AO_nop_full(), AO_or_acquire(addr, val)) # define AO_HAVE_or_full #endif #if !defined(AO_HAVE_or_release_write) \ && defined(AO_HAVE_or_write) # define AO_or_release_write(addr, val) AO_or_write(addr, val) # define AO_HAVE_or_release_write #endif #if !defined(AO_HAVE_or_release_write) \ && defined(AO_HAVE_or_release) # define AO_or_release_write(addr, val) AO_or_release(addr, val) # define AO_HAVE_or_release_write #endif #if !defined(AO_HAVE_or_acquire_read) && defined(AO_HAVE_or_read) # define AO_or_acquire_read(addr, val) AO_or_read(addr, val) # define AO_HAVE_or_acquire_read #endif #if !defined(AO_HAVE_or_acquire_read) \ && defined(AO_HAVE_or_acquire) # define AO_or_acquire_read(addr, val) AO_or_acquire(addr, val) # define AO_HAVE_or_acquire_read #endif /* xor */ #if defined(AO_HAVE_compare_and_swap_full) \ && !defined(AO_HAVE_xor_full) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_xor_full(volatile AO_t *addr, AO_t value) { AO_t old; do { old = *(AO_t *)addr; } while (AO_EXPECT_FALSE(!AO_compare_and_swap_full(addr, old, old ^ value))); } # define AO_HAVE_xor_full #endif #if defined(AO_HAVE_xor_full) # if !defined(AO_HAVE_xor_release) # define AO_xor_release(addr, val) AO_xor_full(addr, val) # define AO_HAVE_xor_release # endif # if !defined(AO_HAVE_xor_acquire) # define AO_xor_acquire(addr, val) AO_xor_full(addr, val) # define 
AO_HAVE_xor_acquire # endif # if !defined(AO_HAVE_xor_write) # define AO_xor_write(addr, val) AO_xor_full(addr, val) # define AO_HAVE_xor_write # endif # if !defined(AO_HAVE_xor_read) # define AO_xor_read(addr, val) AO_xor_full(addr, val) # define AO_HAVE_xor_read # endif #endif /* AO_HAVE_xor_full */ #if !defined(AO_HAVE_xor) && defined(AO_HAVE_xor_release) # define AO_xor(addr, val) AO_xor_release(addr, val) # define AO_HAVE_xor #endif #if !defined(AO_HAVE_xor) && defined(AO_HAVE_xor_acquire) # define AO_xor(addr, val) AO_xor_acquire(addr, val) # define AO_HAVE_xor #endif #if !defined(AO_HAVE_xor) && defined(AO_HAVE_xor_write) # define AO_xor(addr, val) AO_xor_write(addr, val) # define AO_HAVE_xor #endif #if !defined(AO_HAVE_xor) && defined(AO_HAVE_xor_read) # define AO_xor(addr, val) AO_xor_read(addr, val) # define AO_HAVE_xor #endif #if defined(AO_HAVE_xor_acquire) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_xor_full) # define AO_xor_full(addr, val) \ (AO_nop_full(), AO_xor_acquire(addr, val)) # define AO_HAVE_xor_full #endif #if !defined(AO_HAVE_xor_release_write) \ && defined(AO_HAVE_xor_write) # define AO_xor_release_write(addr, val) AO_xor_write(addr, val) # define AO_HAVE_xor_release_write #endif #if !defined(AO_HAVE_xor_release_write) \ && defined(AO_HAVE_xor_release) # define AO_xor_release_write(addr, val) AO_xor_release(addr, val) # define AO_HAVE_xor_release_write #endif #if !defined(AO_HAVE_xor_acquire_read) \ && defined(AO_HAVE_xor_read) # define AO_xor_acquire_read(addr, val) AO_xor_read(addr, val) # define AO_HAVE_xor_acquire_read #endif #if !defined(AO_HAVE_xor_acquire_read) \ && defined(AO_HAVE_xor_acquire) # define AO_xor_acquire_read(addr, val) AO_xor_acquire(addr, val) # define AO_HAVE_xor_acquire_read #endif /* and/or/xor_dd_acquire_read are meaningless. 
*/ libatomic_ops-7.6.12/src/atomic_ops/generalize-arithm.template000066400000000000000000001005161411761111000245170ustar00rootroot00000000000000/* * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ /* XSIZE_compare_and_swap (based on fetch_compare_and_swap) */ #if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_full) \ && !defined(AO_HAVE_XSIZE_compare_and_swap_full) AO_INLINE int AO_XSIZE_compare_and_swap_full(volatile XCTYPE *addr, XCTYPE old_val, XCTYPE new_val) { return AO_XSIZE_fetch_compare_and_swap_full(addr, old_val, new_val) == old_val; } # define AO_HAVE_XSIZE_compare_and_swap_full #endif #if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire) \ && !defined(AO_HAVE_XSIZE_compare_and_swap_acquire) AO_INLINE int AO_XSIZE_compare_and_swap_acquire(volatile XCTYPE *addr, XCTYPE old_val, XCTYPE new_val) { return AO_XSIZE_fetch_compare_and_swap_acquire(addr, old_val, new_val) == old_val; } # define AO_HAVE_XSIZE_compare_and_swap_acquire #endif #if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_release) \ && !defined(AO_HAVE_XSIZE_compare_and_swap_release) AO_INLINE int AO_XSIZE_compare_and_swap_release(volatile XCTYPE *addr, XCTYPE old_val, XCTYPE new_val) { return AO_XSIZE_fetch_compare_and_swap_release(addr, old_val, new_val) == old_val; } # define AO_HAVE_XSIZE_compare_and_swap_release #endif #if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_write) \ && !defined(AO_HAVE_XSIZE_compare_and_swap_write) AO_INLINE int AO_XSIZE_compare_and_swap_write(volatile XCTYPE *addr, XCTYPE old_val, XCTYPE new_val) { return AO_XSIZE_fetch_compare_and_swap_write(addr, old_val, new_val) == old_val; } # define AO_HAVE_XSIZE_compare_and_swap_write #endif #if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_read) \ && !defined(AO_HAVE_XSIZE_compare_and_swap_read) AO_INLINE int AO_XSIZE_compare_and_swap_read(volatile XCTYPE *addr, XCTYPE old_val, XCTYPE new_val) { return AO_XSIZE_fetch_compare_and_swap_read(addr, old_val, new_val) == old_val; } # define AO_HAVE_XSIZE_compare_and_swap_read #endif #if defined(AO_HAVE_XSIZE_fetch_compare_and_swap) \ && !defined(AO_HAVE_XSIZE_compare_and_swap) AO_INLINE int AO_XSIZE_compare_and_swap(volatile XCTYPE *addr, XCTYPE old_val, XCTYPE 
new_val) { return AO_XSIZE_fetch_compare_and_swap(addr, old_val, new_val) == old_val; } # define AO_HAVE_XSIZE_compare_and_swap #endif #if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_release_write) \ && !defined(AO_HAVE_XSIZE_compare_and_swap_release_write) AO_INLINE int AO_XSIZE_compare_and_swap_release_write(volatile XCTYPE *addr, XCTYPE old_val, XCTYPE new_val) { return AO_XSIZE_fetch_compare_and_swap_release_write(addr, old_val, new_val) == old_val; } # define AO_HAVE_XSIZE_compare_and_swap_release_write #endif #if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire_read) \ && !defined(AO_HAVE_XSIZE_compare_and_swap_acquire_read) AO_INLINE int AO_XSIZE_compare_and_swap_acquire_read(volatile XCTYPE *addr, XCTYPE old_val, XCTYPE new_val) { return AO_XSIZE_fetch_compare_and_swap_acquire_read(addr, old_val, new_val) == old_val; } # define AO_HAVE_XSIZE_compare_and_swap_acquire_read #endif #if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_dd_acquire_read) \ && !defined(AO_HAVE_XSIZE_compare_and_swap_dd_acquire_read) AO_INLINE int AO_XSIZE_compare_and_swap_dd_acquire_read(volatile XCTYPE *addr, XCTYPE old_val, XCTYPE new_val) { return AO_XSIZE_fetch_compare_and_swap_dd_acquire_read(addr, old_val, new_val) == old_val; } # define AO_HAVE_XSIZE_compare_and_swap_dd_acquire_read #endif /* XSIZE_fetch_and_add */ /* We first try to implement fetch_and_add variants in terms of the */ /* corresponding compare_and_swap variants to minimize adding barriers. 
*/ #if defined(AO_HAVE_XSIZE_compare_and_swap_full) \ && !defined(AO_HAVE_XSIZE_fetch_and_add_full) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE XCTYPE AO_XSIZE_fetch_and_add_full(volatile XCTYPE *addr, XCTYPE incr) { XCTYPE old; do { old = *(XCTYPE *)addr; } while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_full(addr, old, old + incr))); return old; } # define AO_HAVE_XSIZE_fetch_and_add_full #endif #if defined(AO_HAVE_XSIZE_compare_and_swap_acquire) \ && !defined(AO_HAVE_XSIZE_fetch_and_add_acquire) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE XCTYPE AO_XSIZE_fetch_and_add_acquire(volatile XCTYPE *addr, XCTYPE incr) { XCTYPE old; do { old = *(XCTYPE *)addr; } while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_acquire(addr, old, old + incr))); return old; } # define AO_HAVE_XSIZE_fetch_and_add_acquire #endif #if defined(AO_HAVE_XSIZE_compare_and_swap_release) \ && !defined(AO_HAVE_XSIZE_fetch_and_add_release) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE XCTYPE AO_XSIZE_fetch_and_add_release(volatile XCTYPE *addr, XCTYPE incr) { XCTYPE old; do { old = *(XCTYPE *)addr; } while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_release(addr, old, old + incr))); return old; } # define AO_HAVE_XSIZE_fetch_and_add_release #endif #if defined(AO_HAVE_XSIZE_compare_and_swap) \ && !defined(AO_HAVE_XSIZE_fetch_and_add) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE XCTYPE AO_XSIZE_fetch_and_add(volatile XCTYPE *addr, XCTYPE incr) { XCTYPE old; do { old = *(XCTYPE *)addr; } while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap(addr, old, old + incr))); return old; } # define AO_HAVE_XSIZE_fetch_and_add #endif #if defined(AO_HAVE_XSIZE_fetch_and_add_full) # if !defined(AO_HAVE_XSIZE_fetch_and_add_release) # define AO_XSIZE_fetch_and_add_release(addr, val) \ AO_XSIZE_fetch_and_add_full(addr, val) # define AO_HAVE_XSIZE_fetch_and_add_release # endif # if !defined(AO_HAVE_XSIZE_fetch_and_add_acquire) # define AO_XSIZE_fetch_and_add_acquire(addr, val) \ AO_XSIZE_fetch_and_add_full(addr, val) # define 
AO_HAVE_XSIZE_fetch_and_add_acquire # endif # if !defined(AO_HAVE_XSIZE_fetch_and_add_write) # define AO_XSIZE_fetch_and_add_write(addr, val) \ AO_XSIZE_fetch_and_add_full(addr, val) # define AO_HAVE_XSIZE_fetch_and_add_write # endif # if !defined(AO_HAVE_XSIZE_fetch_and_add_read) # define AO_XSIZE_fetch_and_add_read(addr, val) \ AO_XSIZE_fetch_and_add_full(addr, val) # define AO_HAVE_XSIZE_fetch_and_add_read # endif #endif /* AO_HAVE_XSIZE_fetch_and_add_full */ #if defined(AO_HAVE_XSIZE_fetch_and_add) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_XSIZE_fetch_and_add_acquire) AO_INLINE XCTYPE AO_XSIZE_fetch_and_add_acquire(volatile XCTYPE *addr, XCTYPE incr) { XCTYPE result = AO_XSIZE_fetch_and_add(addr, incr); AO_nop_full(); return result; } # define AO_HAVE_XSIZE_fetch_and_add_acquire #endif #if defined(AO_HAVE_XSIZE_fetch_and_add) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_XSIZE_fetch_and_add_release) # define AO_XSIZE_fetch_and_add_release(addr, incr) \ (AO_nop_full(), AO_XSIZE_fetch_and_add(addr, incr)) # define AO_HAVE_XSIZE_fetch_and_add_release #endif #if !defined(AO_HAVE_XSIZE_fetch_and_add) \ && defined(AO_HAVE_XSIZE_fetch_and_add_release) # define AO_XSIZE_fetch_and_add(addr, val) \ AO_XSIZE_fetch_and_add_release(addr, val) # define AO_HAVE_XSIZE_fetch_and_add #endif #if !defined(AO_HAVE_XSIZE_fetch_and_add) \ && defined(AO_HAVE_XSIZE_fetch_and_add_acquire) # define AO_XSIZE_fetch_and_add(addr, val) \ AO_XSIZE_fetch_and_add_acquire(addr, val) # define AO_HAVE_XSIZE_fetch_and_add #endif #if !defined(AO_HAVE_XSIZE_fetch_and_add) \ && defined(AO_HAVE_XSIZE_fetch_and_add_write) # define AO_XSIZE_fetch_and_add(addr, val) \ AO_XSIZE_fetch_and_add_write(addr, val) # define AO_HAVE_XSIZE_fetch_and_add #endif #if !defined(AO_HAVE_XSIZE_fetch_and_add) \ && defined(AO_HAVE_XSIZE_fetch_and_add_read) # define AO_XSIZE_fetch_and_add(addr, val) \ AO_XSIZE_fetch_and_add_read(addr, val) # define AO_HAVE_XSIZE_fetch_and_add #endif #if 
defined(AO_HAVE_XSIZE_fetch_and_add_acquire) \ && defined(AO_HAVE_nop_full) && !defined(AO_HAVE_XSIZE_fetch_and_add_full) # define AO_XSIZE_fetch_and_add_full(addr, val) \ (AO_nop_full(), AO_XSIZE_fetch_and_add_acquire(addr, val)) # define AO_HAVE_XSIZE_fetch_and_add_full #endif #if !defined(AO_HAVE_XSIZE_fetch_and_add_release_write) \ && defined(AO_HAVE_XSIZE_fetch_and_add_write) # define AO_XSIZE_fetch_and_add_release_write(addr, val) \ AO_XSIZE_fetch_and_add_write(addr, val) # define AO_HAVE_XSIZE_fetch_and_add_release_write #endif #if !defined(AO_HAVE_XSIZE_fetch_and_add_release_write) \ && defined(AO_HAVE_XSIZE_fetch_and_add_release) # define AO_XSIZE_fetch_and_add_release_write(addr, val) \ AO_XSIZE_fetch_and_add_release(addr, val) # define AO_HAVE_XSIZE_fetch_and_add_release_write #endif #if !defined(AO_HAVE_XSIZE_fetch_and_add_acquire_read) \ && defined(AO_HAVE_XSIZE_fetch_and_add_read) # define AO_XSIZE_fetch_and_add_acquire_read(addr, val) \ AO_XSIZE_fetch_and_add_read(addr, val) # define AO_HAVE_XSIZE_fetch_and_add_acquire_read #endif #if !defined(AO_HAVE_XSIZE_fetch_and_add_acquire_read) \ && defined(AO_HAVE_XSIZE_fetch_and_add_acquire) # define AO_XSIZE_fetch_and_add_acquire_read(addr, val) \ AO_XSIZE_fetch_and_add_acquire(addr, val) # define AO_HAVE_XSIZE_fetch_and_add_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_XSIZE_fetch_and_add_acquire_read) # define AO_XSIZE_fetch_and_add_dd_acquire_read(addr, val) \ AO_XSIZE_fetch_and_add_acquire_read(addr, val) # define AO_HAVE_XSIZE_fetch_and_add_dd_acquire_read # endif #else # if defined(AO_HAVE_XSIZE_fetch_and_add) # define AO_XSIZE_fetch_and_add_dd_acquire_read(addr, val) \ AO_XSIZE_fetch_and_add(addr, val) # define AO_HAVE_XSIZE_fetch_and_add_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* XSIZE_fetch_and_add1 */ #if defined(AO_HAVE_XSIZE_fetch_and_add_full) \ && !defined(AO_HAVE_XSIZE_fetch_and_add1_full) # define AO_XSIZE_fetch_and_add1_full(addr) \ 
AO_XSIZE_fetch_and_add_full(addr, 1) # define AO_HAVE_XSIZE_fetch_and_add1_full #endif #if defined(AO_HAVE_XSIZE_fetch_and_add_release) \ && !defined(AO_HAVE_XSIZE_fetch_and_add1_release) # define AO_XSIZE_fetch_and_add1_release(addr) \ AO_XSIZE_fetch_and_add_release(addr, 1) # define AO_HAVE_XSIZE_fetch_and_add1_release #endif #if defined(AO_HAVE_XSIZE_fetch_and_add_acquire) \ && !defined(AO_HAVE_XSIZE_fetch_and_add1_acquire) # define AO_XSIZE_fetch_and_add1_acquire(addr) \ AO_XSIZE_fetch_and_add_acquire(addr, 1) # define AO_HAVE_XSIZE_fetch_and_add1_acquire #endif #if defined(AO_HAVE_XSIZE_fetch_and_add_write) \ && !defined(AO_HAVE_XSIZE_fetch_and_add1_write) # define AO_XSIZE_fetch_and_add1_write(addr) \ AO_XSIZE_fetch_and_add_write(addr, 1) # define AO_HAVE_XSIZE_fetch_and_add1_write #endif #if defined(AO_HAVE_XSIZE_fetch_and_add_read) \ && !defined(AO_HAVE_XSIZE_fetch_and_add1_read) # define AO_XSIZE_fetch_and_add1_read(addr) \ AO_XSIZE_fetch_and_add_read(addr, 1) # define AO_HAVE_XSIZE_fetch_and_add1_read #endif #if defined(AO_HAVE_XSIZE_fetch_and_add_release_write) \ && !defined(AO_HAVE_XSIZE_fetch_and_add1_release_write) # define AO_XSIZE_fetch_and_add1_release_write(addr) \ AO_XSIZE_fetch_and_add_release_write(addr, 1) # define AO_HAVE_XSIZE_fetch_and_add1_release_write #endif #if defined(AO_HAVE_XSIZE_fetch_and_add_acquire_read) \ && !defined(AO_HAVE_XSIZE_fetch_and_add1_acquire_read) # define AO_XSIZE_fetch_and_add1_acquire_read(addr) \ AO_XSIZE_fetch_and_add_acquire_read(addr, 1) # define AO_HAVE_XSIZE_fetch_and_add1_acquire_read #endif #if defined(AO_HAVE_XSIZE_fetch_and_add) \ && !defined(AO_HAVE_XSIZE_fetch_and_add1) # define AO_XSIZE_fetch_and_add1(addr) AO_XSIZE_fetch_and_add(addr, 1) # define AO_HAVE_XSIZE_fetch_and_add1 #endif #if defined(AO_HAVE_XSIZE_fetch_and_add1_full) # if !defined(AO_HAVE_XSIZE_fetch_and_add1_release) # define AO_XSIZE_fetch_and_add1_release(addr) \ AO_XSIZE_fetch_and_add1_full(addr) # define 
AO_HAVE_XSIZE_fetch_and_add1_release # endif # if !defined(AO_HAVE_XSIZE_fetch_and_add1_acquire) # define AO_XSIZE_fetch_and_add1_acquire(addr) \ AO_XSIZE_fetch_and_add1_full(addr) # define AO_HAVE_XSIZE_fetch_and_add1_acquire # endif # if !defined(AO_HAVE_XSIZE_fetch_and_add1_write) # define AO_XSIZE_fetch_and_add1_write(addr) \ AO_XSIZE_fetch_and_add1_full(addr) # define AO_HAVE_XSIZE_fetch_and_add1_write # endif # if !defined(AO_HAVE_XSIZE_fetch_and_add1_read) # define AO_XSIZE_fetch_and_add1_read(addr) \ AO_XSIZE_fetch_and_add1_full(addr) # define AO_HAVE_XSIZE_fetch_and_add1_read # endif #endif /* AO_HAVE_XSIZE_fetch_and_add1_full */ #if !defined(AO_HAVE_XSIZE_fetch_and_add1) \ && defined(AO_HAVE_XSIZE_fetch_and_add1_release) # define AO_XSIZE_fetch_and_add1(addr) AO_XSIZE_fetch_and_add1_release(addr) # define AO_HAVE_XSIZE_fetch_and_add1 #endif #if !defined(AO_HAVE_XSIZE_fetch_and_add1) \ && defined(AO_HAVE_XSIZE_fetch_and_add1_acquire) # define AO_XSIZE_fetch_and_add1(addr) AO_XSIZE_fetch_and_add1_acquire(addr) # define AO_HAVE_XSIZE_fetch_and_add1 #endif #if !defined(AO_HAVE_XSIZE_fetch_and_add1) \ && defined(AO_HAVE_XSIZE_fetch_and_add1_write) # define AO_XSIZE_fetch_and_add1(addr) AO_XSIZE_fetch_and_add1_write(addr) # define AO_HAVE_XSIZE_fetch_and_add1 #endif #if !defined(AO_HAVE_XSIZE_fetch_and_add1) \ && defined(AO_HAVE_XSIZE_fetch_and_add1_read) # define AO_XSIZE_fetch_and_add1(addr) AO_XSIZE_fetch_and_add1_read(addr) # define AO_HAVE_XSIZE_fetch_and_add1 #endif #if defined(AO_HAVE_XSIZE_fetch_and_add1_acquire) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_XSIZE_fetch_and_add1_full) # define AO_XSIZE_fetch_and_add1_full(addr) \ (AO_nop_full(), AO_XSIZE_fetch_and_add1_acquire(addr)) # define AO_HAVE_XSIZE_fetch_and_add1_full #endif #if !defined(AO_HAVE_XSIZE_fetch_and_add1_release_write) \ && defined(AO_HAVE_XSIZE_fetch_and_add1_write) # define AO_XSIZE_fetch_and_add1_release_write(addr) \ AO_XSIZE_fetch_and_add1_write(addr) # define 
AO_HAVE_XSIZE_fetch_and_add1_release_write #endif #if !defined(AO_HAVE_XSIZE_fetch_and_add1_release_write) \ && defined(AO_HAVE_XSIZE_fetch_and_add1_release) # define AO_XSIZE_fetch_and_add1_release_write(addr) \ AO_XSIZE_fetch_and_add1_release(addr) # define AO_HAVE_XSIZE_fetch_and_add1_release_write #endif #if !defined(AO_HAVE_XSIZE_fetch_and_add1_acquire_read) \ && defined(AO_HAVE_XSIZE_fetch_and_add1_read) # define AO_XSIZE_fetch_and_add1_acquire_read(addr) \ AO_XSIZE_fetch_and_add1_read(addr) # define AO_HAVE_XSIZE_fetch_and_add1_acquire_read #endif #if !defined(AO_HAVE_XSIZE_fetch_and_add1_acquire_read) \ && defined(AO_HAVE_XSIZE_fetch_and_add1_acquire) # define AO_XSIZE_fetch_and_add1_acquire_read(addr) \ AO_XSIZE_fetch_and_add1_acquire(addr) # define AO_HAVE_XSIZE_fetch_and_add1_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_XSIZE_fetch_and_add1_acquire_read) # define AO_XSIZE_fetch_and_add1_dd_acquire_read(addr) \ AO_XSIZE_fetch_and_add1_acquire_read(addr) # define AO_HAVE_XSIZE_fetch_and_add1_dd_acquire_read # endif #else # if defined(AO_HAVE_XSIZE_fetch_and_add1) # define AO_XSIZE_fetch_and_add1_dd_acquire_read(addr) \ AO_XSIZE_fetch_and_add1(addr) # define AO_HAVE_XSIZE_fetch_and_add1_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* XSIZE_fetch_and_sub1 */ #if defined(AO_HAVE_XSIZE_fetch_and_add_full) \ && !defined(AO_HAVE_XSIZE_fetch_and_sub1_full) # define AO_XSIZE_fetch_and_sub1_full(addr) \ AO_XSIZE_fetch_and_add_full(addr, (XCTYPE)(-1)) # define AO_HAVE_XSIZE_fetch_and_sub1_full #endif #if defined(AO_HAVE_XSIZE_fetch_and_add_release) \ && !defined(AO_HAVE_XSIZE_fetch_and_sub1_release) # define AO_XSIZE_fetch_and_sub1_release(addr) \ AO_XSIZE_fetch_and_add_release(addr, (XCTYPE)(-1)) # define AO_HAVE_XSIZE_fetch_and_sub1_release #endif #if defined(AO_HAVE_XSIZE_fetch_and_add_acquire) \ && !defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire) # define AO_XSIZE_fetch_and_sub1_acquire(addr) \ AO_XSIZE_fetch_and_add_acquire(addr, 
(XCTYPE)(-1)) # define AO_HAVE_XSIZE_fetch_and_sub1_acquire #endif #if defined(AO_HAVE_XSIZE_fetch_and_add_write) \ && !defined(AO_HAVE_XSIZE_fetch_and_sub1_write) # define AO_XSIZE_fetch_and_sub1_write(addr) \ AO_XSIZE_fetch_and_add_write(addr, (XCTYPE)(-1)) # define AO_HAVE_XSIZE_fetch_and_sub1_write #endif #if defined(AO_HAVE_XSIZE_fetch_and_add_read) \ && !defined(AO_HAVE_XSIZE_fetch_and_sub1_read) # define AO_XSIZE_fetch_and_sub1_read(addr) \ AO_XSIZE_fetch_and_add_read(addr, (XCTYPE)(-1)) # define AO_HAVE_XSIZE_fetch_and_sub1_read #endif #if defined(AO_HAVE_XSIZE_fetch_and_add_release_write) \ && !defined(AO_HAVE_XSIZE_fetch_and_sub1_release_write) # define AO_XSIZE_fetch_and_sub1_release_write(addr) \ AO_XSIZE_fetch_and_add_release_write(addr, (XCTYPE)(-1)) # define AO_HAVE_XSIZE_fetch_and_sub1_release_write #endif #if defined(AO_HAVE_XSIZE_fetch_and_add_acquire_read) \ && !defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire_read) # define AO_XSIZE_fetch_and_sub1_acquire_read(addr) \ AO_XSIZE_fetch_and_add_acquire_read(addr, (XCTYPE)(-1)) # define AO_HAVE_XSIZE_fetch_and_sub1_acquire_read #endif #if defined(AO_HAVE_XSIZE_fetch_and_add) \ && !defined(AO_HAVE_XSIZE_fetch_and_sub1) # define AO_XSIZE_fetch_and_sub1(addr) \ AO_XSIZE_fetch_and_add(addr, (XCTYPE)(-1)) # define AO_HAVE_XSIZE_fetch_and_sub1 #endif #if defined(AO_HAVE_XSIZE_fetch_and_sub1_full) # if !defined(AO_HAVE_XSIZE_fetch_and_sub1_release) # define AO_XSIZE_fetch_and_sub1_release(addr) \ AO_XSIZE_fetch_and_sub1_full(addr) # define AO_HAVE_XSIZE_fetch_and_sub1_release # endif # if !defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire) # define AO_XSIZE_fetch_and_sub1_acquire(addr) \ AO_XSIZE_fetch_and_sub1_full(addr) # define AO_HAVE_XSIZE_fetch_and_sub1_acquire # endif # if !defined(AO_HAVE_XSIZE_fetch_and_sub1_write) # define AO_XSIZE_fetch_and_sub1_write(addr) \ AO_XSIZE_fetch_and_sub1_full(addr) # define AO_HAVE_XSIZE_fetch_and_sub1_write # endif # if !defined(AO_HAVE_XSIZE_fetch_and_sub1_read) # define 
AO_XSIZE_fetch_and_sub1_read(addr) \ AO_XSIZE_fetch_and_sub1_full(addr) # define AO_HAVE_XSIZE_fetch_and_sub1_read # endif #endif /* AO_HAVE_XSIZE_fetch_and_sub1_full */ #if !defined(AO_HAVE_XSIZE_fetch_and_sub1) \ && defined(AO_HAVE_XSIZE_fetch_and_sub1_release) # define AO_XSIZE_fetch_and_sub1(addr) AO_XSIZE_fetch_and_sub1_release(addr) # define AO_HAVE_XSIZE_fetch_and_sub1 #endif #if !defined(AO_HAVE_XSIZE_fetch_and_sub1) \ && defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire) # define AO_XSIZE_fetch_and_sub1(addr) AO_XSIZE_fetch_and_sub1_acquire(addr) # define AO_HAVE_XSIZE_fetch_and_sub1 #endif #if !defined(AO_HAVE_XSIZE_fetch_and_sub1) \ && defined(AO_HAVE_XSIZE_fetch_and_sub1_write) # define AO_XSIZE_fetch_and_sub1(addr) AO_XSIZE_fetch_and_sub1_write(addr) # define AO_HAVE_XSIZE_fetch_and_sub1 #endif #if !defined(AO_HAVE_XSIZE_fetch_and_sub1) \ && defined(AO_HAVE_XSIZE_fetch_and_sub1_read) # define AO_XSIZE_fetch_and_sub1(addr) AO_XSIZE_fetch_and_sub1_read(addr) # define AO_HAVE_XSIZE_fetch_and_sub1 #endif #if defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_XSIZE_fetch_and_sub1_full) # define AO_XSIZE_fetch_and_sub1_full(addr) \ (AO_nop_full(), AO_XSIZE_fetch_and_sub1_acquire(addr)) # define AO_HAVE_XSIZE_fetch_and_sub1_full #endif #if !defined(AO_HAVE_XSIZE_fetch_and_sub1_release_write) \ && defined(AO_HAVE_XSIZE_fetch_and_sub1_write) # define AO_XSIZE_fetch_and_sub1_release_write(addr) \ AO_XSIZE_fetch_and_sub1_write(addr) # define AO_HAVE_XSIZE_fetch_and_sub1_release_write #endif #if !defined(AO_HAVE_XSIZE_fetch_and_sub1_release_write) \ && defined(AO_HAVE_XSIZE_fetch_and_sub1_release) # define AO_XSIZE_fetch_and_sub1_release_write(addr) \ AO_XSIZE_fetch_and_sub1_release(addr) # define AO_HAVE_XSIZE_fetch_and_sub1_release_write #endif #if !defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire_read) \ && defined(AO_HAVE_XSIZE_fetch_and_sub1_read) # define AO_XSIZE_fetch_and_sub1_acquire_read(addr) \ 
AO_XSIZE_fetch_and_sub1_read(addr) # define AO_HAVE_XSIZE_fetch_and_sub1_acquire_read #endif #if !defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire_read) \ && defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire) # define AO_XSIZE_fetch_and_sub1_acquire_read(addr) \ AO_XSIZE_fetch_and_sub1_acquire(addr) # define AO_HAVE_XSIZE_fetch_and_sub1_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_XSIZE_fetch_and_sub1_acquire_read) # define AO_XSIZE_fetch_and_sub1_dd_acquire_read(addr) \ AO_XSIZE_fetch_and_sub1_acquire_read(addr) # define AO_HAVE_XSIZE_fetch_and_sub1_dd_acquire_read # endif #else # if defined(AO_HAVE_XSIZE_fetch_and_sub1) # define AO_XSIZE_fetch_and_sub1_dd_acquire_read(addr) \ AO_XSIZE_fetch_and_sub1(addr) # define AO_HAVE_XSIZE_fetch_and_sub1_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* XSIZE_and */ #if defined(AO_HAVE_XSIZE_compare_and_swap_full) \ && !defined(AO_HAVE_XSIZE_and_full) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_XSIZE_and_full(volatile XCTYPE *addr, XCTYPE value) { XCTYPE old; do { old = *(XCTYPE *)addr; } while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_full(addr, old, old & value))); } # define AO_HAVE_XSIZE_and_full #endif #if defined(AO_HAVE_XSIZE_and_full) # if !defined(AO_HAVE_XSIZE_and_release) # define AO_XSIZE_and_release(addr, val) AO_XSIZE_and_full(addr, val) # define AO_HAVE_XSIZE_and_release # endif # if !defined(AO_HAVE_XSIZE_and_acquire) # define AO_XSIZE_and_acquire(addr, val) AO_XSIZE_and_full(addr, val) # define AO_HAVE_XSIZE_and_acquire # endif # if !defined(AO_HAVE_XSIZE_and_write) # define AO_XSIZE_and_write(addr, val) AO_XSIZE_and_full(addr, val) # define AO_HAVE_XSIZE_and_write # endif # if !defined(AO_HAVE_XSIZE_and_read) # define AO_XSIZE_and_read(addr, val) AO_XSIZE_and_full(addr, val) # define AO_HAVE_XSIZE_and_read # endif #endif /* AO_HAVE_XSIZE_and_full */ #if !defined(AO_HAVE_XSIZE_and) && defined(AO_HAVE_XSIZE_and_release) # define AO_XSIZE_and(addr, val) AO_XSIZE_and_release(addr, val) # 
define AO_HAVE_XSIZE_and #endif #if !defined(AO_HAVE_XSIZE_and) && defined(AO_HAVE_XSIZE_and_acquire) # define AO_XSIZE_and(addr, val) AO_XSIZE_and_acquire(addr, val) # define AO_HAVE_XSIZE_and #endif #if !defined(AO_HAVE_XSIZE_and) && defined(AO_HAVE_XSIZE_and_write) # define AO_XSIZE_and(addr, val) AO_XSIZE_and_write(addr, val) # define AO_HAVE_XSIZE_and #endif #if !defined(AO_HAVE_XSIZE_and) && defined(AO_HAVE_XSIZE_and_read) # define AO_XSIZE_and(addr, val) AO_XSIZE_and_read(addr, val) # define AO_HAVE_XSIZE_and #endif #if defined(AO_HAVE_XSIZE_and_acquire) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_XSIZE_and_full) # define AO_XSIZE_and_full(addr, val) \ (AO_nop_full(), AO_XSIZE_and_acquire(addr, val)) # define AO_HAVE_XSIZE_and_full #endif #if !defined(AO_HAVE_XSIZE_and_release_write) \ && defined(AO_HAVE_XSIZE_and_write) # define AO_XSIZE_and_release_write(addr, val) AO_XSIZE_and_write(addr, val) # define AO_HAVE_XSIZE_and_release_write #endif #if !defined(AO_HAVE_XSIZE_and_release_write) \ && defined(AO_HAVE_XSIZE_and_release) # define AO_XSIZE_and_release_write(addr, val) AO_XSIZE_and_release(addr, val) # define AO_HAVE_XSIZE_and_release_write #endif #if !defined(AO_HAVE_XSIZE_and_acquire_read) \ && defined(AO_HAVE_XSIZE_and_read) # define AO_XSIZE_and_acquire_read(addr, val) AO_XSIZE_and_read(addr, val) # define AO_HAVE_XSIZE_and_acquire_read #endif #if !defined(AO_HAVE_XSIZE_and_acquire_read) \ && defined(AO_HAVE_XSIZE_and_acquire) # define AO_XSIZE_and_acquire_read(addr, val) AO_XSIZE_and_acquire(addr, val) # define AO_HAVE_XSIZE_and_acquire_read #endif /* XSIZE_or */ #if defined(AO_HAVE_XSIZE_compare_and_swap_full) \ && !defined(AO_HAVE_XSIZE_or_full) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_XSIZE_or_full(volatile XCTYPE *addr, XCTYPE value) { XCTYPE old; do { old = *(XCTYPE *)addr; } while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_full(addr, old, old | value))); } # define AO_HAVE_XSIZE_or_full #endif #if defined(AO_HAVE_XSIZE_or_full) # 
if !defined(AO_HAVE_XSIZE_or_release) # define AO_XSIZE_or_release(addr, val) AO_XSIZE_or_full(addr, val) # define AO_HAVE_XSIZE_or_release # endif # if !defined(AO_HAVE_XSIZE_or_acquire) # define AO_XSIZE_or_acquire(addr, val) AO_XSIZE_or_full(addr, val) # define AO_HAVE_XSIZE_or_acquire # endif # if !defined(AO_HAVE_XSIZE_or_write) # define AO_XSIZE_or_write(addr, val) AO_XSIZE_or_full(addr, val) # define AO_HAVE_XSIZE_or_write # endif # if !defined(AO_HAVE_XSIZE_or_read) # define AO_XSIZE_or_read(addr, val) AO_XSIZE_or_full(addr, val) # define AO_HAVE_XSIZE_or_read # endif #endif /* AO_HAVE_XSIZE_or_full */ #if !defined(AO_HAVE_XSIZE_or) && defined(AO_HAVE_XSIZE_or_release) # define AO_XSIZE_or(addr, val) AO_XSIZE_or_release(addr, val) # define AO_HAVE_XSIZE_or #endif #if !defined(AO_HAVE_XSIZE_or) && defined(AO_HAVE_XSIZE_or_acquire) # define AO_XSIZE_or(addr, val) AO_XSIZE_or_acquire(addr, val) # define AO_HAVE_XSIZE_or #endif #if !defined(AO_HAVE_XSIZE_or) && defined(AO_HAVE_XSIZE_or_write) # define AO_XSIZE_or(addr, val) AO_XSIZE_or_write(addr, val) # define AO_HAVE_XSIZE_or #endif #if !defined(AO_HAVE_XSIZE_or) && defined(AO_HAVE_XSIZE_or_read) # define AO_XSIZE_or(addr, val) AO_XSIZE_or_read(addr, val) # define AO_HAVE_XSIZE_or #endif #if defined(AO_HAVE_XSIZE_or_acquire) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_XSIZE_or_full) # define AO_XSIZE_or_full(addr, val) \ (AO_nop_full(), AO_XSIZE_or_acquire(addr, val)) # define AO_HAVE_XSIZE_or_full #endif #if !defined(AO_HAVE_XSIZE_or_release_write) \ && defined(AO_HAVE_XSIZE_or_write) # define AO_XSIZE_or_release_write(addr, val) AO_XSIZE_or_write(addr, val) # define AO_HAVE_XSIZE_or_release_write #endif #if !defined(AO_HAVE_XSIZE_or_release_write) \ && defined(AO_HAVE_XSIZE_or_release) # define AO_XSIZE_or_release_write(addr, val) AO_XSIZE_or_release(addr, val) # define AO_HAVE_XSIZE_or_release_write #endif #if !defined(AO_HAVE_XSIZE_or_acquire_read) && defined(AO_HAVE_XSIZE_or_read) # define 
AO_XSIZE_or_acquire_read(addr, val) AO_XSIZE_or_read(addr, val) # define AO_HAVE_XSIZE_or_acquire_read #endif #if !defined(AO_HAVE_XSIZE_or_acquire_read) \ && defined(AO_HAVE_XSIZE_or_acquire) # define AO_XSIZE_or_acquire_read(addr, val) AO_XSIZE_or_acquire(addr, val) # define AO_HAVE_XSIZE_or_acquire_read #endif /* XSIZE_xor */ #if defined(AO_HAVE_XSIZE_compare_and_swap_full) \ && !defined(AO_HAVE_XSIZE_xor_full) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_XSIZE_xor_full(volatile XCTYPE *addr, XCTYPE value) { XCTYPE old; do { old = *(XCTYPE *)addr; } while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_full(addr, old, old ^ value))); } # define AO_HAVE_XSIZE_xor_full #endif #if defined(AO_HAVE_XSIZE_xor_full) # if !defined(AO_HAVE_XSIZE_xor_release) # define AO_XSIZE_xor_release(addr, val) AO_XSIZE_xor_full(addr, val) # define AO_HAVE_XSIZE_xor_release # endif # if !defined(AO_HAVE_XSIZE_xor_acquire) # define AO_XSIZE_xor_acquire(addr, val) AO_XSIZE_xor_full(addr, val) # define AO_HAVE_XSIZE_xor_acquire # endif # if !defined(AO_HAVE_XSIZE_xor_write) # define AO_XSIZE_xor_write(addr, val) AO_XSIZE_xor_full(addr, val) # define AO_HAVE_XSIZE_xor_write # endif # if !defined(AO_HAVE_XSIZE_xor_read) # define AO_XSIZE_xor_read(addr, val) AO_XSIZE_xor_full(addr, val) # define AO_HAVE_XSIZE_xor_read # endif #endif /* AO_HAVE_XSIZE_xor_full */ #if !defined(AO_HAVE_XSIZE_xor) && defined(AO_HAVE_XSIZE_xor_release) # define AO_XSIZE_xor(addr, val) AO_XSIZE_xor_release(addr, val) # define AO_HAVE_XSIZE_xor #endif #if !defined(AO_HAVE_XSIZE_xor) && defined(AO_HAVE_XSIZE_xor_acquire) # define AO_XSIZE_xor(addr, val) AO_XSIZE_xor_acquire(addr, val) # define AO_HAVE_XSIZE_xor #endif #if !defined(AO_HAVE_XSIZE_xor) && defined(AO_HAVE_XSIZE_xor_write) # define AO_XSIZE_xor(addr, val) AO_XSIZE_xor_write(addr, val) # define AO_HAVE_XSIZE_xor #endif #if !defined(AO_HAVE_XSIZE_xor) && defined(AO_HAVE_XSIZE_xor_read) # define AO_XSIZE_xor(addr, val) AO_XSIZE_xor_read(addr, val) # define 
AO_HAVE_XSIZE_xor #endif #if defined(AO_HAVE_XSIZE_xor_acquire) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_XSIZE_xor_full) # define AO_XSIZE_xor_full(addr, val) \ (AO_nop_full(), AO_XSIZE_xor_acquire(addr, val)) # define AO_HAVE_XSIZE_xor_full #endif #if !defined(AO_HAVE_XSIZE_xor_release_write) \ && defined(AO_HAVE_XSIZE_xor_write) # define AO_XSIZE_xor_release_write(addr, val) AO_XSIZE_xor_write(addr, val) # define AO_HAVE_XSIZE_xor_release_write #endif #if !defined(AO_HAVE_XSIZE_xor_release_write) \ && defined(AO_HAVE_XSIZE_xor_release) # define AO_XSIZE_xor_release_write(addr, val) AO_XSIZE_xor_release(addr, val) # define AO_HAVE_XSIZE_xor_release_write #endif #if !defined(AO_HAVE_XSIZE_xor_acquire_read) \ && defined(AO_HAVE_XSIZE_xor_read) # define AO_XSIZE_xor_acquire_read(addr, val) AO_XSIZE_xor_read(addr, val) # define AO_HAVE_XSIZE_xor_acquire_read #endif #if !defined(AO_HAVE_XSIZE_xor_acquire_read) \ && defined(AO_HAVE_XSIZE_xor_acquire) # define AO_XSIZE_xor_acquire_read(addr, val) AO_XSIZE_xor_acquire(addr, val) # define AO_HAVE_XSIZE_xor_acquire_read #endif /* XSIZE_and/or/xor_dd_acquire_read are meaningless. */ libatomic_ops-7.6.12/src/atomic_ops/generalize-small.h000066400000000000000000003125551411761111000227670ustar00rootroot00000000000000/* * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* char_fetch_compare_and_swap */ #if defined(AO_HAVE_char_fetch_compare_and_swap) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_char_fetch_compare_and_swap_acquire) AO_INLINE unsigned/**/char AO_char_fetch_compare_and_swap_acquire(volatile unsigned/**/char *addr, unsigned/**/char old_val, unsigned/**/char new_val) { unsigned/**/char result = AO_char_fetch_compare_and_swap(addr, old_val, new_val); AO_nop_full(); return result; } # define AO_HAVE_char_fetch_compare_and_swap_acquire #endif #if defined(AO_HAVE_char_fetch_compare_and_swap) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_char_fetch_compare_and_swap_release) # define AO_char_fetch_compare_and_swap_release(addr, old_val, new_val) \ (AO_nop_full(), \ AO_char_fetch_compare_and_swap(addr, old_val, new_val)) # define AO_HAVE_char_fetch_compare_and_swap_release #endif #if defined(AO_HAVE_char_fetch_compare_and_swap_full) # if !defined(AO_HAVE_char_fetch_compare_and_swap_release) # define AO_char_fetch_compare_and_swap_release(addr, old_val, new_val) \ AO_char_fetch_compare_and_swap_full(addr, old_val, new_val) # define AO_HAVE_char_fetch_compare_and_swap_release # endif # if !defined(AO_HAVE_char_fetch_compare_and_swap_acquire) # define AO_char_fetch_compare_and_swap_acquire(addr, old_val, new_val) \ AO_char_fetch_compare_and_swap_full(addr, old_val, new_val) # define AO_HAVE_char_fetch_compare_and_swap_acquire # endif # if !defined(AO_HAVE_char_fetch_compare_and_swap_write) # define AO_char_fetch_compare_and_swap_write(addr, 
old_val, new_val) \ AO_char_fetch_compare_and_swap_full(addr, old_val, new_val) # define AO_HAVE_char_fetch_compare_and_swap_write # endif # if !defined(AO_HAVE_char_fetch_compare_and_swap_read) # define AO_char_fetch_compare_and_swap_read(addr, old_val, new_val) \ AO_char_fetch_compare_and_swap_full(addr, old_val, new_val) # define AO_HAVE_char_fetch_compare_and_swap_read # endif #endif /* AO_HAVE_char_fetch_compare_and_swap_full */ #if !defined(AO_HAVE_char_fetch_compare_and_swap) \ && defined(AO_HAVE_char_fetch_compare_and_swap_release) # define AO_char_fetch_compare_and_swap(addr, old_val, new_val) \ AO_char_fetch_compare_and_swap_release(addr, old_val, new_val) # define AO_HAVE_char_fetch_compare_and_swap #endif #if !defined(AO_HAVE_char_fetch_compare_and_swap) \ && defined(AO_HAVE_char_fetch_compare_and_swap_acquire) # define AO_char_fetch_compare_and_swap(addr, old_val, new_val) \ AO_char_fetch_compare_and_swap_acquire(addr, old_val, new_val) # define AO_HAVE_char_fetch_compare_and_swap #endif #if !defined(AO_HAVE_char_fetch_compare_and_swap) \ && defined(AO_HAVE_char_fetch_compare_and_swap_write) # define AO_char_fetch_compare_and_swap(addr, old_val, new_val) \ AO_char_fetch_compare_and_swap_write(addr, old_val, new_val) # define AO_HAVE_char_fetch_compare_and_swap #endif #if !defined(AO_HAVE_char_fetch_compare_and_swap) \ && defined(AO_HAVE_char_fetch_compare_and_swap_read) # define AO_char_fetch_compare_and_swap(addr, old_val, new_val) \ AO_char_fetch_compare_and_swap_read(addr, old_val, new_val) # define AO_HAVE_char_fetch_compare_and_swap #endif #if defined(AO_HAVE_char_fetch_compare_and_swap_acquire) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_char_fetch_compare_and_swap_full) # define AO_char_fetch_compare_and_swap_full(addr, old_val, new_val) \ (AO_nop_full(), \ AO_char_fetch_compare_and_swap_acquire(addr, old_val, new_val)) # define AO_HAVE_char_fetch_compare_and_swap_full #endif #if 
!defined(AO_HAVE_char_fetch_compare_and_swap_release_write) \ && defined(AO_HAVE_char_fetch_compare_and_swap_write) # define AO_char_fetch_compare_and_swap_release_write(addr,old_val,new_val) \ AO_char_fetch_compare_and_swap_write(addr, old_val, new_val) # define AO_HAVE_char_fetch_compare_and_swap_release_write #endif #if !defined(AO_HAVE_char_fetch_compare_and_swap_release_write) \ && defined(AO_HAVE_char_fetch_compare_and_swap_release) # define AO_char_fetch_compare_and_swap_release_write(addr,old_val,new_val) \ AO_char_fetch_compare_and_swap_release(addr, old_val, new_val) # define AO_HAVE_char_fetch_compare_and_swap_release_write #endif #if !defined(AO_HAVE_char_fetch_compare_and_swap_acquire_read) \ && defined(AO_HAVE_char_fetch_compare_and_swap_read) # define AO_char_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \ AO_char_fetch_compare_and_swap_read(addr, old_val, new_val) # define AO_HAVE_char_fetch_compare_and_swap_acquire_read #endif #if !defined(AO_HAVE_char_fetch_compare_and_swap_acquire_read) \ && defined(AO_HAVE_char_fetch_compare_and_swap_acquire) # define AO_char_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \ AO_char_fetch_compare_and_swap_acquire(addr, old_val, new_val) # define AO_HAVE_char_fetch_compare_and_swap_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_char_fetch_compare_and_swap_acquire_read) # define AO_char_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \ AO_char_fetch_compare_and_swap_acquire_read(addr, old_val, new_val) # define AO_HAVE_char_fetch_compare_and_swap_dd_acquire_read # endif #else # if defined(AO_HAVE_char_fetch_compare_and_swap) # define AO_char_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \ AO_char_fetch_compare_and_swap(addr, old_val, new_val) # define AO_HAVE_char_fetch_compare_and_swap_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* char_compare_and_swap */ #if defined(AO_HAVE_char_compare_and_swap) && defined(AO_HAVE_nop_full) \ && 
!defined(AO_HAVE_char_compare_and_swap_acquire) AO_INLINE int AO_char_compare_and_swap_acquire(volatile unsigned/**/char *addr, unsigned/**/char old, unsigned/**/char new_val) { int result = AO_char_compare_and_swap(addr, old, new_val); AO_nop_full(); return result; } # define AO_HAVE_char_compare_and_swap_acquire #endif #if defined(AO_HAVE_char_compare_and_swap) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_char_compare_and_swap_release) # define AO_char_compare_and_swap_release(addr, old, new_val) \ (AO_nop_full(), AO_char_compare_and_swap(addr, old, new_val)) # define AO_HAVE_char_compare_and_swap_release #endif #if defined(AO_HAVE_char_compare_and_swap_full) # if !defined(AO_HAVE_char_compare_and_swap_release) # define AO_char_compare_and_swap_release(addr, old, new_val) \ AO_char_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_char_compare_and_swap_release # endif # if !defined(AO_HAVE_char_compare_and_swap_acquire) # define AO_char_compare_and_swap_acquire(addr, old, new_val) \ AO_char_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_char_compare_and_swap_acquire # endif # if !defined(AO_HAVE_char_compare_and_swap_write) # define AO_char_compare_and_swap_write(addr, old, new_val) \ AO_char_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_char_compare_and_swap_write # endif # if !defined(AO_HAVE_char_compare_and_swap_read) # define AO_char_compare_and_swap_read(addr, old, new_val) \ AO_char_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_char_compare_and_swap_read # endif #endif /* AO_HAVE_char_compare_and_swap_full */ #if !defined(AO_HAVE_char_compare_and_swap) \ && defined(AO_HAVE_char_compare_and_swap_release) # define AO_char_compare_and_swap(addr, old, new_val) \ AO_char_compare_and_swap_release(addr, old, new_val) # define AO_HAVE_char_compare_and_swap #endif #if !defined(AO_HAVE_char_compare_and_swap) \ && defined(AO_HAVE_char_compare_and_swap_acquire) # define AO_char_compare_and_swap(addr, old, new_val) \ 
AO_char_compare_and_swap_acquire(addr, old, new_val) # define AO_HAVE_char_compare_and_swap #endif #if !defined(AO_HAVE_char_compare_and_swap) \ && defined(AO_HAVE_char_compare_and_swap_write) # define AO_char_compare_and_swap(addr, old, new_val) \ AO_char_compare_and_swap_write(addr, old, new_val) # define AO_HAVE_char_compare_and_swap #endif #if !defined(AO_HAVE_char_compare_and_swap) \ && defined(AO_HAVE_char_compare_and_swap_read) # define AO_char_compare_and_swap(addr, old, new_val) \ AO_char_compare_and_swap_read(addr, old, new_val) # define AO_HAVE_char_compare_and_swap #endif #if defined(AO_HAVE_char_compare_and_swap_acquire) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_char_compare_and_swap_full) # define AO_char_compare_and_swap_full(addr, old, new_val) \ (AO_nop_full(), \ AO_char_compare_and_swap_acquire(addr, old, new_val)) # define AO_HAVE_char_compare_and_swap_full #endif #if !defined(AO_HAVE_char_compare_and_swap_release_write) \ && defined(AO_HAVE_char_compare_and_swap_write) # define AO_char_compare_and_swap_release_write(addr, old, new_val) \ AO_char_compare_and_swap_write(addr, old, new_val) # define AO_HAVE_char_compare_and_swap_release_write #endif #if !defined(AO_HAVE_char_compare_and_swap_release_write) \ && defined(AO_HAVE_char_compare_and_swap_release) # define AO_char_compare_and_swap_release_write(addr, old, new_val) \ AO_char_compare_and_swap_release(addr, old, new_val) # define AO_HAVE_char_compare_and_swap_release_write #endif #if !defined(AO_HAVE_char_compare_and_swap_acquire_read) \ && defined(AO_HAVE_char_compare_and_swap_read) # define AO_char_compare_and_swap_acquire_read(addr, old, new_val) \ AO_char_compare_and_swap_read(addr, old, new_val) # define AO_HAVE_char_compare_and_swap_acquire_read #endif #if !defined(AO_HAVE_char_compare_and_swap_acquire_read) \ && defined(AO_HAVE_char_compare_and_swap_acquire) # define AO_char_compare_and_swap_acquire_read(addr, old, new_val) \ AO_char_compare_and_swap_acquire(addr, old, 
new_val) # define AO_HAVE_char_compare_and_swap_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_char_compare_and_swap_acquire_read) # define AO_char_compare_and_swap_dd_acquire_read(addr, old, new_val) \ AO_char_compare_and_swap_acquire_read(addr, old, new_val) # define AO_HAVE_char_compare_and_swap_dd_acquire_read # endif #else # if defined(AO_HAVE_char_compare_and_swap) # define AO_char_compare_and_swap_dd_acquire_read(addr, old, new_val) \ AO_char_compare_and_swap(addr, old, new_val) # define AO_HAVE_char_compare_and_swap_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* char_load */ #if defined(AO_HAVE_char_load_full) && !defined(AO_HAVE_char_load_acquire) # define AO_char_load_acquire(addr) AO_char_load_full(addr) # define AO_HAVE_char_load_acquire #endif #if defined(AO_HAVE_char_load_acquire) && !defined(AO_HAVE_char_load) # define AO_char_load(addr) AO_char_load_acquire(addr) # define AO_HAVE_char_load #endif #if defined(AO_HAVE_char_load_full) && !defined(AO_HAVE_char_load_read) # define AO_char_load_read(addr) AO_char_load_full(addr) # define AO_HAVE_char_load_read #endif #if !defined(AO_HAVE_char_load_acquire_read) \ && defined(AO_HAVE_char_load_acquire) # define AO_char_load_acquire_read(addr) AO_char_load_acquire(addr) # define AO_HAVE_char_load_acquire_read #endif #if defined(AO_HAVE_char_load) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_char_load_acquire) AO_INLINE unsigned/**/char AO_char_load_acquire(const volatile unsigned/**/char *addr) { unsigned/**/char result = AO_char_load(addr); /* Acquire barrier would be useless, since the load could be delayed */ /* beyond it. 
*/ AO_nop_full(); return result; } # define AO_HAVE_char_load_acquire #endif #if defined(AO_HAVE_char_load) && defined(AO_HAVE_nop_read) \ && !defined(AO_HAVE_char_load_read) AO_INLINE unsigned/**/char AO_char_load_read(const volatile unsigned/**/char *addr) { unsigned/**/char result = AO_char_load(addr); AO_nop_read(); return result; } # define AO_HAVE_char_load_read #endif #if defined(AO_HAVE_char_load_acquire) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_char_load_full) # define AO_char_load_full(addr) (AO_nop_full(), AO_char_load_acquire(addr)) # define AO_HAVE_char_load_full #endif #if defined(AO_HAVE_char_compare_and_swap_read) \ && !defined(AO_HAVE_char_load_read) # define AO_char_CAS_BASED_LOAD_READ AO_ATTR_NO_SANITIZE_THREAD AO_INLINE unsigned/**/char AO_char_load_read(const volatile unsigned/**/char *addr) { unsigned/**/char result; do { result = *(const unsigned/**/char *)addr; } while (AO_EXPECT_FALSE(!AO_char_compare_and_swap_read( (volatile unsigned/**/char *)addr, result, result))); return result; } # define AO_HAVE_char_load_read #endif #if !defined(AO_HAVE_char_load_acquire_read) \ && defined(AO_HAVE_char_load_read) # define AO_char_load_acquire_read(addr) AO_char_load_read(addr) # define AO_HAVE_char_load_acquire_read #endif #if defined(AO_HAVE_char_load_acquire_read) && !defined(AO_HAVE_char_load) \ && (!defined(AO_char_CAS_BASED_LOAD_READ) \ || !defined(AO_HAVE_char_compare_and_swap)) # define AO_char_load(addr) AO_char_load_acquire_read(addr) # define AO_HAVE_char_load #endif #if defined(AO_HAVE_char_compare_and_swap_full) \ && !defined(AO_HAVE_char_load_full) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE unsigned/**/char AO_char_load_full(const volatile unsigned/**/char *addr) { unsigned/**/char result; do { result = *(const unsigned/**/char *)addr; } while (AO_EXPECT_FALSE(!AO_char_compare_and_swap_full( (volatile unsigned/**/char *)addr, result, result))); return result; } # define AO_HAVE_char_load_full #endif #if 
defined(AO_HAVE_char_compare_and_swap_acquire) \ && !defined(AO_HAVE_char_load_acquire) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE unsigned/**/char AO_char_load_acquire(const volatile unsigned/**/char *addr) { unsigned/**/char result; do { result = *(const unsigned/**/char *)addr; } while (AO_EXPECT_FALSE(!AO_char_compare_and_swap_acquire( (volatile unsigned/**/char *)addr, result, result))); return result; } # define AO_HAVE_char_load_acquire #endif #if defined(AO_HAVE_char_compare_and_swap) && !defined(AO_HAVE_char_load) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE unsigned/**/char AO_char_load(const volatile unsigned/**/char *addr) { unsigned/**/char result; do { result = *(const unsigned/**/char *)addr; } while (AO_EXPECT_FALSE(!AO_char_compare_and_swap( (volatile unsigned/**/char *)addr, result, result))); return result; } # define AO_HAVE_char_load #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_char_load_acquire_read) # define AO_char_load_dd_acquire_read(addr) \ AO_char_load_acquire_read(addr) # define AO_HAVE_char_load_dd_acquire_read # endif #else # if defined(AO_HAVE_char_load) # define AO_char_load_dd_acquire_read(addr) AO_char_load(addr) # define AO_HAVE_char_load_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* char_store */ #if defined(AO_HAVE_char_store_full) && !defined(AO_HAVE_char_store_release) # define AO_char_store_release(addr, val) AO_char_store_full(addr, val) # define AO_HAVE_char_store_release #endif #if defined(AO_HAVE_char_store_release) && !defined(AO_HAVE_char_store) # define AO_char_store(addr, val) AO_char_store_release(addr, val) # define AO_HAVE_char_store #endif #if defined(AO_HAVE_char_store_full) && !defined(AO_HAVE_char_store_write) # define AO_char_store_write(addr, val) AO_char_store_full(addr, val) # define AO_HAVE_char_store_write #endif #if defined(AO_HAVE_char_store_release) \ && !defined(AO_HAVE_char_store_release_write) # define AO_char_store_release_write(addr, val) \ AO_char_store_release(addr, val) # define 
AO_HAVE_char_store_release_write #endif #if defined(AO_HAVE_char_store_write) && !defined(AO_HAVE_char_store) # define AO_char_store(addr, val) AO_char_store_write(addr, val) # define AO_HAVE_char_store #endif #if defined(AO_HAVE_char_store) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_char_store_release) # define AO_char_store_release(addr, val) \ (AO_nop_full(), AO_char_store(addr, val)) # define AO_HAVE_char_store_release #endif #if defined(AO_HAVE_char_store) && defined(AO_HAVE_nop_write) \ && !defined(AO_HAVE_char_store_write) # define AO_char_store_write(addr, val) \ (AO_nop_write(), AO_char_store(addr, val)) # define AO_HAVE_char_store_write #endif #if defined(AO_HAVE_char_compare_and_swap_write) \ && !defined(AO_HAVE_char_store_write) AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_char_store_write(volatile unsigned/**/char *addr, unsigned/**/char new_val) { unsigned/**/char old_val; do { old_val = *(unsigned/**/char *)addr; } while (AO_EXPECT_FALSE(!AO_char_compare_and_swap_write(addr, old_val, new_val))); } # define AO_HAVE_char_store_write #endif #if defined(AO_HAVE_char_store_write) \ && !defined(AO_HAVE_char_store_release_write) # define AO_char_store_release_write(addr, val) \ AO_char_store_write(addr, val) # define AO_HAVE_char_store_release_write #endif #if defined(AO_HAVE_char_store_release) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_char_store_full) # define AO_char_store_full(addr, val) \ (AO_char_store_release(addr, val), \ AO_nop_full()) # define AO_HAVE_char_store_full #endif #if defined(AO_HAVE_char_compare_and_swap) && !defined(AO_HAVE_char_store) AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_char_store(volatile unsigned/**/char *addr, unsigned/**/char new_val) { unsigned/**/char old_val; do { old_val = *(unsigned/**/char *)addr; } while (AO_EXPECT_FALSE(!AO_char_compare_and_swap(addr, old_val, new_val))); } # define AO_HAVE_char_store #endif #if 
defined(AO_HAVE_char_compare_and_swap_release) \ && !defined(AO_HAVE_char_store_release) AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_char_store_release(volatile unsigned/**/char *addr, unsigned/**/char new_val) { unsigned/**/char old_val; do { old_val = *(unsigned/**/char *)addr; } while (AO_EXPECT_FALSE(!AO_char_compare_and_swap_release(addr, old_val, new_val))); } # define AO_HAVE_char_store_release #endif #if defined(AO_HAVE_char_compare_and_swap_full) \ && !defined(AO_HAVE_char_store_full) AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_char_store_full(volatile unsigned/**/char *addr, unsigned/**/char new_val) { unsigned/**/char old_val; do { old_val = *(unsigned/**/char *)addr; } while (AO_EXPECT_FALSE(!AO_char_compare_and_swap_full(addr, old_val, new_val))); } # define AO_HAVE_char_store_full #endif /* * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ /* short_fetch_compare_and_swap */ #if defined(AO_HAVE_short_fetch_compare_and_swap) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_short_fetch_compare_and_swap_acquire) AO_INLINE unsigned/**/short AO_short_fetch_compare_and_swap_acquire(volatile unsigned/**/short *addr, unsigned/**/short old_val, unsigned/**/short new_val) { unsigned/**/short result = AO_short_fetch_compare_and_swap(addr, old_val, new_val); AO_nop_full(); return result; } # define AO_HAVE_short_fetch_compare_and_swap_acquire #endif #if defined(AO_HAVE_short_fetch_compare_and_swap) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_short_fetch_compare_and_swap_release) # define AO_short_fetch_compare_and_swap_release(addr, old_val, new_val) \ (AO_nop_full(), \ AO_short_fetch_compare_and_swap(addr, old_val, new_val)) # define AO_HAVE_short_fetch_compare_and_swap_release #endif #if defined(AO_HAVE_short_fetch_compare_and_swap_full) # if !defined(AO_HAVE_short_fetch_compare_and_swap_release) # define AO_short_fetch_compare_and_swap_release(addr, old_val, new_val) \ AO_short_fetch_compare_and_swap_full(addr, old_val, new_val) # define AO_HAVE_short_fetch_compare_and_swap_release # endif # if !defined(AO_HAVE_short_fetch_compare_and_swap_acquire) # define AO_short_fetch_compare_and_swap_acquire(addr, old_val, new_val) \ AO_short_fetch_compare_and_swap_full(addr, old_val, new_val) # define AO_HAVE_short_fetch_compare_and_swap_acquire # endif # if !defined(AO_HAVE_short_fetch_compare_and_swap_write) # define AO_short_fetch_compare_and_swap_write(addr, old_val, new_val) \ AO_short_fetch_compare_and_swap_full(addr, old_val, new_val) # define AO_HAVE_short_fetch_compare_and_swap_write # endif # if !defined(AO_HAVE_short_fetch_compare_and_swap_read) # define AO_short_fetch_compare_and_swap_read(addr, old_val, new_val) \ AO_short_fetch_compare_and_swap_full(addr, old_val, new_val) # define AO_HAVE_short_fetch_compare_and_swap_read # endif #endif /* AO_HAVE_short_fetch_compare_and_swap_full */ #if 
!defined(AO_HAVE_short_fetch_compare_and_swap) \ && defined(AO_HAVE_short_fetch_compare_and_swap_release) # define AO_short_fetch_compare_and_swap(addr, old_val, new_val) \ AO_short_fetch_compare_and_swap_release(addr, old_val, new_val) # define AO_HAVE_short_fetch_compare_and_swap #endif #if !defined(AO_HAVE_short_fetch_compare_and_swap) \ && defined(AO_HAVE_short_fetch_compare_and_swap_acquire) # define AO_short_fetch_compare_and_swap(addr, old_val, new_val) \ AO_short_fetch_compare_and_swap_acquire(addr, old_val, new_val) # define AO_HAVE_short_fetch_compare_and_swap #endif #if !defined(AO_HAVE_short_fetch_compare_and_swap) \ && defined(AO_HAVE_short_fetch_compare_and_swap_write) # define AO_short_fetch_compare_and_swap(addr, old_val, new_val) \ AO_short_fetch_compare_and_swap_write(addr, old_val, new_val) # define AO_HAVE_short_fetch_compare_and_swap #endif #if !defined(AO_HAVE_short_fetch_compare_and_swap) \ && defined(AO_HAVE_short_fetch_compare_and_swap_read) # define AO_short_fetch_compare_and_swap(addr, old_val, new_val) \ AO_short_fetch_compare_and_swap_read(addr, old_val, new_val) # define AO_HAVE_short_fetch_compare_and_swap #endif #if defined(AO_HAVE_short_fetch_compare_and_swap_acquire) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_short_fetch_compare_and_swap_full) # define AO_short_fetch_compare_and_swap_full(addr, old_val, new_val) \ (AO_nop_full(), \ AO_short_fetch_compare_and_swap_acquire(addr, old_val, new_val)) # define AO_HAVE_short_fetch_compare_and_swap_full #endif #if !defined(AO_HAVE_short_fetch_compare_and_swap_release_write) \ && defined(AO_HAVE_short_fetch_compare_and_swap_write) # define AO_short_fetch_compare_and_swap_release_write(addr,old_val,new_val) \ AO_short_fetch_compare_and_swap_write(addr, old_val, new_val) # define AO_HAVE_short_fetch_compare_and_swap_release_write #endif #if !defined(AO_HAVE_short_fetch_compare_and_swap_release_write) \ && defined(AO_HAVE_short_fetch_compare_and_swap_release) # define 
AO_short_fetch_compare_and_swap_release_write(addr,old_val,new_val) \ AO_short_fetch_compare_and_swap_release(addr, old_val, new_val) # define AO_HAVE_short_fetch_compare_and_swap_release_write #endif #if !defined(AO_HAVE_short_fetch_compare_and_swap_acquire_read) \ && defined(AO_HAVE_short_fetch_compare_and_swap_read) # define AO_short_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \ AO_short_fetch_compare_and_swap_read(addr, old_val, new_val) # define AO_HAVE_short_fetch_compare_and_swap_acquire_read #endif #if !defined(AO_HAVE_short_fetch_compare_and_swap_acquire_read) \ && defined(AO_HAVE_short_fetch_compare_and_swap_acquire) # define AO_short_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \ AO_short_fetch_compare_and_swap_acquire(addr, old_val, new_val) # define AO_HAVE_short_fetch_compare_and_swap_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_short_fetch_compare_and_swap_acquire_read) # define AO_short_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \ AO_short_fetch_compare_and_swap_acquire_read(addr, old_val, new_val) # define AO_HAVE_short_fetch_compare_and_swap_dd_acquire_read # endif #else # if defined(AO_HAVE_short_fetch_compare_and_swap) # define AO_short_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \ AO_short_fetch_compare_and_swap(addr, old_val, new_val) # define AO_HAVE_short_fetch_compare_and_swap_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* short_compare_and_swap */ #if defined(AO_HAVE_short_compare_and_swap) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_short_compare_and_swap_acquire) AO_INLINE int AO_short_compare_and_swap_acquire(volatile unsigned/**/short *addr, unsigned/**/short old, unsigned/**/short new_val) { int result = AO_short_compare_and_swap(addr, old, new_val); AO_nop_full(); return result; } # define AO_HAVE_short_compare_and_swap_acquire #endif #if defined(AO_HAVE_short_compare_and_swap) && defined(AO_HAVE_nop_full) \ && 
!defined(AO_HAVE_short_compare_and_swap_release) # define AO_short_compare_and_swap_release(addr, old, new_val) \ (AO_nop_full(), AO_short_compare_and_swap(addr, old, new_val)) # define AO_HAVE_short_compare_and_swap_release #endif #if defined(AO_HAVE_short_compare_and_swap_full) # if !defined(AO_HAVE_short_compare_and_swap_release) # define AO_short_compare_and_swap_release(addr, old, new_val) \ AO_short_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_short_compare_and_swap_release # endif # if !defined(AO_HAVE_short_compare_and_swap_acquire) # define AO_short_compare_and_swap_acquire(addr, old, new_val) \ AO_short_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_short_compare_and_swap_acquire # endif # if !defined(AO_HAVE_short_compare_and_swap_write) # define AO_short_compare_and_swap_write(addr, old, new_val) \ AO_short_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_short_compare_and_swap_write # endif # if !defined(AO_HAVE_short_compare_and_swap_read) # define AO_short_compare_and_swap_read(addr, old, new_val) \ AO_short_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_short_compare_and_swap_read # endif #endif /* AO_HAVE_short_compare_and_swap_full */ #if !defined(AO_HAVE_short_compare_and_swap) \ && defined(AO_HAVE_short_compare_and_swap_release) # define AO_short_compare_and_swap(addr, old, new_val) \ AO_short_compare_and_swap_release(addr, old, new_val) # define AO_HAVE_short_compare_and_swap #endif #if !defined(AO_HAVE_short_compare_and_swap) \ && defined(AO_HAVE_short_compare_and_swap_acquire) # define AO_short_compare_and_swap(addr, old, new_val) \ AO_short_compare_and_swap_acquire(addr, old, new_val) # define AO_HAVE_short_compare_and_swap #endif #if !defined(AO_HAVE_short_compare_and_swap) \ && defined(AO_HAVE_short_compare_and_swap_write) # define AO_short_compare_and_swap(addr, old, new_val) \ AO_short_compare_and_swap_write(addr, old, new_val) # define AO_HAVE_short_compare_and_swap #endif #if 
!defined(AO_HAVE_short_compare_and_swap) \ && defined(AO_HAVE_short_compare_and_swap_read) # define AO_short_compare_and_swap(addr, old, new_val) \ AO_short_compare_and_swap_read(addr, old, new_val) # define AO_HAVE_short_compare_and_swap #endif #if defined(AO_HAVE_short_compare_and_swap_acquire) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_short_compare_and_swap_full) # define AO_short_compare_and_swap_full(addr, old, new_val) \ (AO_nop_full(), \ AO_short_compare_and_swap_acquire(addr, old, new_val)) # define AO_HAVE_short_compare_and_swap_full #endif #if !defined(AO_HAVE_short_compare_and_swap_release_write) \ && defined(AO_HAVE_short_compare_and_swap_write) # define AO_short_compare_and_swap_release_write(addr, old, new_val) \ AO_short_compare_and_swap_write(addr, old, new_val) # define AO_HAVE_short_compare_and_swap_release_write #endif #if !defined(AO_HAVE_short_compare_and_swap_release_write) \ && defined(AO_HAVE_short_compare_and_swap_release) # define AO_short_compare_and_swap_release_write(addr, old, new_val) \ AO_short_compare_and_swap_release(addr, old, new_val) # define AO_HAVE_short_compare_and_swap_release_write #endif #if !defined(AO_HAVE_short_compare_and_swap_acquire_read) \ && defined(AO_HAVE_short_compare_and_swap_read) # define AO_short_compare_and_swap_acquire_read(addr, old, new_val) \ AO_short_compare_and_swap_read(addr, old, new_val) # define AO_HAVE_short_compare_and_swap_acquire_read #endif #if !defined(AO_HAVE_short_compare_and_swap_acquire_read) \ && defined(AO_HAVE_short_compare_and_swap_acquire) # define AO_short_compare_and_swap_acquire_read(addr, old, new_val) \ AO_short_compare_and_swap_acquire(addr, old, new_val) # define AO_HAVE_short_compare_and_swap_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_short_compare_and_swap_acquire_read) # define AO_short_compare_and_swap_dd_acquire_read(addr, old, new_val) \ AO_short_compare_and_swap_acquire_read(addr, old, new_val) # define 
AO_HAVE_short_compare_and_swap_dd_acquire_read # endif #else # if defined(AO_HAVE_short_compare_and_swap) # define AO_short_compare_and_swap_dd_acquire_read(addr, old, new_val) \ AO_short_compare_and_swap(addr, old, new_val) # define AO_HAVE_short_compare_and_swap_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* short_load */ #if defined(AO_HAVE_short_load_full) && !defined(AO_HAVE_short_load_acquire) # define AO_short_load_acquire(addr) AO_short_load_full(addr) # define AO_HAVE_short_load_acquire #endif #if defined(AO_HAVE_short_load_acquire) && !defined(AO_HAVE_short_load) # define AO_short_load(addr) AO_short_load_acquire(addr) # define AO_HAVE_short_load #endif #if defined(AO_HAVE_short_load_full) && !defined(AO_HAVE_short_load_read) # define AO_short_load_read(addr) AO_short_load_full(addr) # define AO_HAVE_short_load_read #endif #if !defined(AO_HAVE_short_load_acquire_read) \ && defined(AO_HAVE_short_load_acquire) # define AO_short_load_acquire_read(addr) AO_short_load_acquire(addr) # define AO_HAVE_short_load_acquire_read #endif #if defined(AO_HAVE_short_load) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_short_load_acquire) AO_INLINE unsigned/**/short AO_short_load_acquire(const volatile unsigned/**/short *addr) { unsigned/**/short result = AO_short_load(addr); /* Acquire barrier would be useless, since the load could be delayed */ /* beyond it. 
*/ AO_nop_full(); return result; } # define AO_HAVE_short_load_acquire #endif #if defined(AO_HAVE_short_load) && defined(AO_HAVE_nop_read) \ && !defined(AO_HAVE_short_load_read) AO_INLINE unsigned/**/short AO_short_load_read(const volatile unsigned/**/short *addr) { unsigned/**/short result = AO_short_load(addr); AO_nop_read(); return result; } # define AO_HAVE_short_load_read #endif #if defined(AO_HAVE_short_load_acquire) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_short_load_full) # define AO_short_load_full(addr) (AO_nop_full(), AO_short_load_acquire(addr)) # define AO_HAVE_short_load_full #endif #if defined(AO_HAVE_short_compare_and_swap_read) \ && !defined(AO_HAVE_short_load_read) # define AO_short_CAS_BASED_LOAD_READ AO_ATTR_NO_SANITIZE_THREAD AO_INLINE unsigned/**/short AO_short_load_read(const volatile unsigned/**/short *addr) { unsigned/**/short result; do { result = *(const unsigned/**/short *)addr; } while (AO_EXPECT_FALSE(!AO_short_compare_and_swap_read( (volatile unsigned/**/short *)addr, result, result))); return result; } # define AO_HAVE_short_load_read #endif #if !defined(AO_HAVE_short_load_acquire_read) \ && defined(AO_HAVE_short_load_read) # define AO_short_load_acquire_read(addr) AO_short_load_read(addr) # define AO_HAVE_short_load_acquire_read #endif #if defined(AO_HAVE_short_load_acquire_read) && !defined(AO_HAVE_short_load) \ && (!defined(AO_short_CAS_BASED_LOAD_READ) \ || !defined(AO_HAVE_short_compare_and_swap)) # define AO_short_load(addr) AO_short_load_acquire_read(addr) # define AO_HAVE_short_load #endif #if defined(AO_HAVE_short_compare_and_swap_full) \ && !defined(AO_HAVE_short_load_full) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE unsigned/**/short AO_short_load_full(const volatile unsigned/**/short *addr) { unsigned/**/short result; do { result = *(const unsigned/**/short *)addr; } while (AO_EXPECT_FALSE(!AO_short_compare_and_swap_full( (volatile unsigned/**/short *)addr, result, result))); return result; } # define 
AO_HAVE_short_load_full #endif #if defined(AO_HAVE_short_compare_and_swap_acquire) \ && !defined(AO_HAVE_short_load_acquire) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE unsigned/**/short AO_short_load_acquire(const volatile unsigned/**/short *addr) { unsigned/**/short result; do { result = *(const unsigned/**/short *)addr; } while (AO_EXPECT_FALSE(!AO_short_compare_and_swap_acquire( (volatile unsigned/**/short *)addr, result, result))); return result; } # define AO_HAVE_short_load_acquire #endif #if defined(AO_HAVE_short_compare_and_swap) && !defined(AO_HAVE_short_load) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE unsigned/**/short AO_short_load(const volatile unsigned/**/short *addr) { unsigned/**/short result; do { result = *(const unsigned/**/short *)addr; } while (AO_EXPECT_FALSE(!AO_short_compare_and_swap( (volatile unsigned/**/short *)addr, result, result))); return result; } # define AO_HAVE_short_load #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_short_load_acquire_read) # define AO_short_load_dd_acquire_read(addr) \ AO_short_load_acquire_read(addr) # define AO_HAVE_short_load_dd_acquire_read # endif #else # if defined(AO_HAVE_short_load) # define AO_short_load_dd_acquire_read(addr) AO_short_load(addr) # define AO_HAVE_short_load_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* short_store */ #if defined(AO_HAVE_short_store_full) && !defined(AO_HAVE_short_store_release) # define AO_short_store_release(addr, val) AO_short_store_full(addr, val) # define AO_HAVE_short_store_release #endif #if defined(AO_HAVE_short_store_release) && !defined(AO_HAVE_short_store) # define AO_short_store(addr, val) AO_short_store_release(addr, val) # define AO_HAVE_short_store #endif #if defined(AO_HAVE_short_store_full) && !defined(AO_HAVE_short_store_write) # define AO_short_store_write(addr, val) AO_short_store_full(addr, val) # define AO_HAVE_short_store_write #endif #if defined(AO_HAVE_short_store_release) \ && !defined(AO_HAVE_short_store_release_write) # define 
AO_short_store_release_write(addr, val) \ AO_short_store_release(addr, val) # define AO_HAVE_short_store_release_write #endif #if defined(AO_HAVE_short_store_write) && !defined(AO_HAVE_short_store) # define AO_short_store(addr, val) AO_short_store_write(addr, val) # define AO_HAVE_short_store #endif #if defined(AO_HAVE_short_store) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_short_store_release) # define AO_short_store_release(addr, val) \ (AO_nop_full(), AO_short_store(addr, val)) # define AO_HAVE_short_store_release #endif #if defined(AO_HAVE_short_store) && defined(AO_HAVE_nop_write) \ && !defined(AO_HAVE_short_store_write) # define AO_short_store_write(addr, val) \ (AO_nop_write(), AO_short_store(addr, val)) # define AO_HAVE_short_store_write #endif #if defined(AO_HAVE_short_compare_and_swap_write) \ && !defined(AO_HAVE_short_store_write) AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_short_store_write(volatile unsigned/**/short *addr, unsigned/**/short new_val) { unsigned/**/short old_val; do { old_val = *(unsigned/**/short *)addr; } while (AO_EXPECT_FALSE(!AO_short_compare_and_swap_write(addr, old_val, new_val))); } # define AO_HAVE_short_store_write #endif #if defined(AO_HAVE_short_store_write) \ && !defined(AO_HAVE_short_store_release_write) # define AO_short_store_release_write(addr, val) \ AO_short_store_write(addr, val) # define AO_HAVE_short_store_release_write #endif #if defined(AO_HAVE_short_store_release) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_short_store_full) # define AO_short_store_full(addr, val) \ (AO_short_store_release(addr, val), \ AO_nop_full()) # define AO_HAVE_short_store_full #endif #if defined(AO_HAVE_short_compare_and_swap) && !defined(AO_HAVE_short_store) AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_short_store(volatile unsigned/**/short *addr, unsigned/**/short new_val) { unsigned/**/short old_val; do { old_val = *(unsigned/**/short *)addr; } while 
(AO_EXPECT_FALSE(!AO_short_compare_and_swap(addr, old_val, new_val))); } # define AO_HAVE_short_store #endif #if defined(AO_HAVE_short_compare_and_swap_release) \ && !defined(AO_HAVE_short_store_release) AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_short_store_release(volatile unsigned/**/short *addr, unsigned/**/short new_val) { unsigned/**/short old_val; do { old_val = *(unsigned/**/short *)addr; } while (AO_EXPECT_FALSE(!AO_short_compare_and_swap_release(addr, old_val, new_val))); } # define AO_HAVE_short_store_release #endif #if defined(AO_HAVE_short_compare_and_swap_full) \ && !defined(AO_HAVE_short_store_full) AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_short_store_full(volatile unsigned/**/short *addr, unsigned/**/short new_val) { unsigned/**/short old_val; do { old_val = *(unsigned/**/short *)addr; } while (AO_EXPECT_FALSE(!AO_short_compare_and_swap_full(addr, old_val, new_val))); } # define AO_HAVE_short_store_full #endif /* * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* int_fetch_compare_and_swap */ #if defined(AO_HAVE_int_fetch_compare_and_swap) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_int_fetch_compare_and_swap_acquire) AO_INLINE unsigned AO_int_fetch_compare_and_swap_acquire(volatile unsigned *addr, unsigned old_val, unsigned new_val) { unsigned result = AO_int_fetch_compare_and_swap(addr, old_val, new_val); AO_nop_full(); return result; } # define AO_HAVE_int_fetch_compare_and_swap_acquire #endif #if defined(AO_HAVE_int_fetch_compare_and_swap) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_int_fetch_compare_and_swap_release) # define AO_int_fetch_compare_and_swap_release(addr, old_val, new_val) \ (AO_nop_full(), \ AO_int_fetch_compare_and_swap(addr, old_val, new_val)) # define AO_HAVE_int_fetch_compare_and_swap_release #endif #if defined(AO_HAVE_int_fetch_compare_and_swap_full) # if !defined(AO_HAVE_int_fetch_compare_and_swap_release) # define AO_int_fetch_compare_and_swap_release(addr, old_val, new_val) \ AO_int_fetch_compare_and_swap_full(addr, old_val, new_val) # define AO_HAVE_int_fetch_compare_and_swap_release # endif # if !defined(AO_HAVE_int_fetch_compare_and_swap_acquire) # define AO_int_fetch_compare_and_swap_acquire(addr, old_val, new_val) \ AO_int_fetch_compare_and_swap_full(addr, old_val, new_val) # define AO_HAVE_int_fetch_compare_and_swap_acquire # endif # if !defined(AO_HAVE_int_fetch_compare_and_swap_write) # define AO_int_fetch_compare_and_swap_write(addr, old_val, new_val) \ AO_int_fetch_compare_and_swap_full(addr, old_val, new_val) # define AO_HAVE_int_fetch_compare_and_swap_write # endif # if !defined(AO_HAVE_int_fetch_compare_and_swap_read) # define AO_int_fetch_compare_and_swap_read(addr, old_val, new_val) \ 
AO_int_fetch_compare_and_swap_full(addr, old_val, new_val) # define AO_HAVE_int_fetch_compare_and_swap_read # endif #endif /* AO_HAVE_int_fetch_compare_and_swap_full */ #if !defined(AO_HAVE_int_fetch_compare_and_swap) \ && defined(AO_HAVE_int_fetch_compare_and_swap_release) # define AO_int_fetch_compare_and_swap(addr, old_val, new_val) \ AO_int_fetch_compare_and_swap_release(addr, old_val, new_val) # define AO_HAVE_int_fetch_compare_and_swap #endif #if !defined(AO_HAVE_int_fetch_compare_and_swap) \ && defined(AO_HAVE_int_fetch_compare_and_swap_acquire) # define AO_int_fetch_compare_and_swap(addr, old_val, new_val) \ AO_int_fetch_compare_and_swap_acquire(addr, old_val, new_val) # define AO_HAVE_int_fetch_compare_and_swap #endif #if !defined(AO_HAVE_int_fetch_compare_and_swap) \ && defined(AO_HAVE_int_fetch_compare_and_swap_write) # define AO_int_fetch_compare_and_swap(addr, old_val, new_val) \ AO_int_fetch_compare_and_swap_write(addr, old_val, new_val) # define AO_HAVE_int_fetch_compare_and_swap #endif #if !defined(AO_HAVE_int_fetch_compare_and_swap) \ && defined(AO_HAVE_int_fetch_compare_and_swap_read) # define AO_int_fetch_compare_and_swap(addr, old_val, new_val) \ AO_int_fetch_compare_and_swap_read(addr, old_val, new_val) # define AO_HAVE_int_fetch_compare_and_swap #endif #if defined(AO_HAVE_int_fetch_compare_and_swap_acquire) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_int_fetch_compare_and_swap_full) # define AO_int_fetch_compare_and_swap_full(addr, old_val, new_val) \ (AO_nop_full(), \ AO_int_fetch_compare_and_swap_acquire(addr, old_val, new_val)) # define AO_HAVE_int_fetch_compare_and_swap_full #endif #if !defined(AO_HAVE_int_fetch_compare_and_swap_release_write) \ && defined(AO_HAVE_int_fetch_compare_and_swap_write) # define AO_int_fetch_compare_and_swap_release_write(addr,old_val,new_val) \ AO_int_fetch_compare_and_swap_write(addr, old_val, new_val) # define AO_HAVE_int_fetch_compare_and_swap_release_write #endif #if 
!defined(AO_HAVE_int_fetch_compare_and_swap_release_write) \ && defined(AO_HAVE_int_fetch_compare_and_swap_release) # define AO_int_fetch_compare_and_swap_release_write(addr,old_val,new_val) \ AO_int_fetch_compare_and_swap_release(addr, old_val, new_val) # define AO_HAVE_int_fetch_compare_and_swap_release_write #endif #if !defined(AO_HAVE_int_fetch_compare_and_swap_acquire_read) \ && defined(AO_HAVE_int_fetch_compare_and_swap_read) # define AO_int_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \ AO_int_fetch_compare_and_swap_read(addr, old_val, new_val) # define AO_HAVE_int_fetch_compare_and_swap_acquire_read #endif #if !defined(AO_HAVE_int_fetch_compare_and_swap_acquire_read) \ && defined(AO_HAVE_int_fetch_compare_and_swap_acquire) # define AO_int_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \ AO_int_fetch_compare_and_swap_acquire(addr, old_val, new_val) # define AO_HAVE_int_fetch_compare_and_swap_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_int_fetch_compare_and_swap_acquire_read) # define AO_int_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \ AO_int_fetch_compare_and_swap_acquire_read(addr, old_val, new_val) # define AO_HAVE_int_fetch_compare_and_swap_dd_acquire_read # endif #else # if defined(AO_HAVE_int_fetch_compare_and_swap) # define AO_int_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \ AO_int_fetch_compare_and_swap(addr, old_val, new_val) # define AO_HAVE_int_fetch_compare_and_swap_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* int_compare_and_swap */ #if defined(AO_HAVE_int_compare_and_swap) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_int_compare_and_swap_acquire) AO_INLINE int AO_int_compare_and_swap_acquire(volatile unsigned *addr, unsigned old, unsigned new_val) { int result = AO_int_compare_and_swap(addr, old, new_val); AO_nop_full(); return result; } # define AO_HAVE_int_compare_and_swap_acquire #endif #if defined(AO_HAVE_int_compare_and_swap) && 
defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_int_compare_and_swap_release) # define AO_int_compare_and_swap_release(addr, old, new_val) \ (AO_nop_full(), AO_int_compare_and_swap(addr, old, new_val)) # define AO_HAVE_int_compare_and_swap_release #endif #if defined(AO_HAVE_int_compare_and_swap_full) # if !defined(AO_HAVE_int_compare_and_swap_release) # define AO_int_compare_and_swap_release(addr, old, new_val) \ AO_int_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_int_compare_and_swap_release # endif # if !defined(AO_HAVE_int_compare_and_swap_acquire) # define AO_int_compare_and_swap_acquire(addr, old, new_val) \ AO_int_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_int_compare_and_swap_acquire # endif # if !defined(AO_HAVE_int_compare_and_swap_write) # define AO_int_compare_and_swap_write(addr, old, new_val) \ AO_int_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_int_compare_and_swap_write # endif # if !defined(AO_HAVE_int_compare_and_swap_read) # define AO_int_compare_and_swap_read(addr, old, new_val) \ AO_int_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_int_compare_and_swap_read # endif #endif /* AO_HAVE_int_compare_and_swap_full */ #if !defined(AO_HAVE_int_compare_and_swap) \ && defined(AO_HAVE_int_compare_and_swap_release) # define AO_int_compare_and_swap(addr, old, new_val) \ AO_int_compare_and_swap_release(addr, old, new_val) # define AO_HAVE_int_compare_and_swap #endif #if !defined(AO_HAVE_int_compare_and_swap) \ && defined(AO_HAVE_int_compare_and_swap_acquire) # define AO_int_compare_and_swap(addr, old, new_val) \ AO_int_compare_and_swap_acquire(addr, old, new_val) # define AO_HAVE_int_compare_and_swap #endif #if !defined(AO_HAVE_int_compare_and_swap) \ && defined(AO_HAVE_int_compare_and_swap_write) # define AO_int_compare_and_swap(addr, old, new_val) \ AO_int_compare_and_swap_write(addr, old, new_val) # define AO_HAVE_int_compare_and_swap #endif #if !defined(AO_HAVE_int_compare_and_swap) \ && 
defined(AO_HAVE_int_compare_and_swap_read) # define AO_int_compare_and_swap(addr, old, new_val) \ AO_int_compare_and_swap_read(addr, old, new_val) # define AO_HAVE_int_compare_and_swap #endif #if defined(AO_HAVE_int_compare_and_swap_acquire) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_int_compare_and_swap_full) # define AO_int_compare_and_swap_full(addr, old, new_val) \ (AO_nop_full(), \ AO_int_compare_and_swap_acquire(addr, old, new_val)) # define AO_HAVE_int_compare_and_swap_full #endif #if !defined(AO_HAVE_int_compare_and_swap_release_write) \ && defined(AO_HAVE_int_compare_and_swap_write) # define AO_int_compare_and_swap_release_write(addr, old, new_val) \ AO_int_compare_and_swap_write(addr, old, new_val) # define AO_HAVE_int_compare_and_swap_release_write #endif #if !defined(AO_HAVE_int_compare_and_swap_release_write) \ && defined(AO_HAVE_int_compare_and_swap_release) # define AO_int_compare_and_swap_release_write(addr, old, new_val) \ AO_int_compare_and_swap_release(addr, old, new_val) # define AO_HAVE_int_compare_and_swap_release_write #endif #if !defined(AO_HAVE_int_compare_and_swap_acquire_read) \ && defined(AO_HAVE_int_compare_and_swap_read) # define AO_int_compare_and_swap_acquire_read(addr, old, new_val) \ AO_int_compare_and_swap_read(addr, old, new_val) # define AO_HAVE_int_compare_and_swap_acquire_read #endif #if !defined(AO_HAVE_int_compare_and_swap_acquire_read) \ && defined(AO_HAVE_int_compare_and_swap_acquire) # define AO_int_compare_and_swap_acquire_read(addr, old, new_val) \ AO_int_compare_and_swap_acquire(addr, old, new_val) # define AO_HAVE_int_compare_and_swap_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_int_compare_and_swap_acquire_read) # define AO_int_compare_and_swap_dd_acquire_read(addr, old, new_val) \ AO_int_compare_and_swap_acquire_read(addr, old, new_val) # define AO_HAVE_int_compare_and_swap_dd_acquire_read # endif #else # if defined(AO_HAVE_int_compare_and_swap) # define 
AO_int_compare_and_swap_dd_acquire_read(addr, old, new_val) \ AO_int_compare_and_swap(addr, old, new_val) # define AO_HAVE_int_compare_and_swap_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* int_load */ #if defined(AO_HAVE_int_load_full) && !defined(AO_HAVE_int_load_acquire) # define AO_int_load_acquire(addr) AO_int_load_full(addr) # define AO_HAVE_int_load_acquire #endif #if defined(AO_HAVE_int_load_acquire) && !defined(AO_HAVE_int_load) # define AO_int_load(addr) AO_int_load_acquire(addr) # define AO_HAVE_int_load #endif #if defined(AO_HAVE_int_load_full) && !defined(AO_HAVE_int_load_read) # define AO_int_load_read(addr) AO_int_load_full(addr) # define AO_HAVE_int_load_read #endif #if !defined(AO_HAVE_int_load_acquire_read) \ && defined(AO_HAVE_int_load_acquire) # define AO_int_load_acquire_read(addr) AO_int_load_acquire(addr) # define AO_HAVE_int_load_acquire_read #endif #if defined(AO_HAVE_int_load) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_int_load_acquire) AO_INLINE unsigned AO_int_load_acquire(const volatile unsigned *addr) { unsigned result = AO_int_load(addr); /* Acquire barrier would be useless, since the load could be delayed */ /* beyond it. 
*/ AO_nop_full(); return result; } # define AO_HAVE_int_load_acquire #endif #if defined(AO_HAVE_int_load) && defined(AO_HAVE_nop_read) \ && !defined(AO_HAVE_int_load_read) AO_INLINE unsigned AO_int_load_read(const volatile unsigned *addr) { unsigned result = AO_int_load(addr); AO_nop_read(); return result; } # define AO_HAVE_int_load_read #endif #if defined(AO_HAVE_int_load_acquire) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_int_load_full) # define AO_int_load_full(addr) (AO_nop_full(), AO_int_load_acquire(addr)) # define AO_HAVE_int_load_full #endif #if defined(AO_HAVE_int_compare_and_swap_read) \ && !defined(AO_HAVE_int_load_read) # define AO_int_CAS_BASED_LOAD_READ AO_ATTR_NO_SANITIZE_THREAD AO_INLINE unsigned AO_int_load_read(const volatile unsigned *addr) { unsigned result; do { result = *(const unsigned *)addr; } while (AO_EXPECT_FALSE(!AO_int_compare_and_swap_read( (volatile unsigned *)addr, result, result))); return result; } # define AO_HAVE_int_load_read #endif #if !defined(AO_HAVE_int_load_acquire_read) \ && defined(AO_HAVE_int_load_read) # define AO_int_load_acquire_read(addr) AO_int_load_read(addr) # define AO_HAVE_int_load_acquire_read #endif #if defined(AO_HAVE_int_load_acquire_read) && !defined(AO_HAVE_int_load) \ && (!defined(AO_int_CAS_BASED_LOAD_READ) \ || !defined(AO_HAVE_int_compare_and_swap)) # define AO_int_load(addr) AO_int_load_acquire_read(addr) # define AO_HAVE_int_load #endif #if defined(AO_HAVE_int_compare_and_swap_full) \ && !defined(AO_HAVE_int_load_full) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE unsigned AO_int_load_full(const volatile unsigned *addr) { unsigned result; do { result = *(const unsigned *)addr; } while (AO_EXPECT_FALSE(!AO_int_compare_and_swap_full( (volatile unsigned *)addr, result, result))); return result; } # define AO_HAVE_int_load_full #endif #if defined(AO_HAVE_int_compare_and_swap_acquire) \ && !defined(AO_HAVE_int_load_acquire) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE unsigned AO_int_load_acquire(const volatile 
unsigned *addr) { unsigned result; do { result = *(const unsigned *)addr; } while (AO_EXPECT_FALSE(!AO_int_compare_and_swap_acquire( (volatile unsigned *)addr, result, result))); return result; } # define AO_HAVE_int_load_acquire #endif #if defined(AO_HAVE_int_compare_and_swap) && !defined(AO_HAVE_int_load) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE unsigned AO_int_load(const volatile unsigned *addr) { unsigned result; do { result = *(const unsigned *)addr; } while (AO_EXPECT_FALSE(!AO_int_compare_and_swap( (volatile unsigned *)addr, result, result))); return result; } # define AO_HAVE_int_load #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_int_load_acquire_read) # define AO_int_load_dd_acquire_read(addr) \ AO_int_load_acquire_read(addr) # define AO_HAVE_int_load_dd_acquire_read # endif #else # if defined(AO_HAVE_int_load) # define AO_int_load_dd_acquire_read(addr) AO_int_load(addr) # define AO_HAVE_int_load_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* int_store */ #if defined(AO_HAVE_int_store_full) && !defined(AO_HAVE_int_store_release) # define AO_int_store_release(addr, val) AO_int_store_full(addr, val) # define AO_HAVE_int_store_release #endif #if defined(AO_HAVE_int_store_release) && !defined(AO_HAVE_int_store) # define AO_int_store(addr, val) AO_int_store_release(addr, val) # define AO_HAVE_int_store #endif #if defined(AO_HAVE_int_store_full) && !defined(AO_HAVE_int_store_write) # define AO_int_store_write(addr, val) AO_int_store_full(addr, val) # define AO_HAVE_int_store_write #endif #if defined(AO_HAVE_int_store_release) \ && !defined(AO_HAVE_int_store_release_write) # define AO_int_store_release_write(addr, val) \ AO_int_store_release(addr, val) # define AO_HAVE_int_store_release_write #endif #if defined(AO_HAVE_int_store_write) && !defined(AO_HAVE_int_store) # define AO_int_store(addr, val) AO_int_store_write(addr, val) # define AO_HAVE_int_store #endif #if defined(AO_HAVE_int_store) && defined(AO_HAVE_nop_full) \ && 
!defined(AO_HAVE_int_store_release) # define AO_int_store_release(addr, val) \ (AO_nop_full(), AO_int_store(addr, val)) # define AO_HAVE_int_store_release #endif #if defined(AO_HAVE_int_store) && defined(AO_HAVE_nop_write) \ && !defined(AO_HAVE_int_store_write) # define AO_int_store_write(addr, val) \ (AO_nop_write(), AO_int_store(addr, val)) # define AO_HAVE_int_store_write #endif #if defined(AO_HAVE_int_compare_and_swap_write) \ && !defined(AO_HAVE_int_store_write) AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_int_store_write(volatile unsigned *addr, unsigned new_val) { unsigned old_val; do { old_val = *(unsigned *)addr; } while (AO_EXPECT_FALSE(!AO_int_compare_and_swap_write(addr, old_val, new_val))); } # define AO_HAVE_int_store_write #endif #if defined(AO_HAVE_int_store_write) \ && !defined(AO_HAVE_int_store_release_write) # define AO_int_store_release_write(addr, val) \ AO_int_store_write(addr, val) # define AO_HAVE_int_store_release_write #endif #if defined(AO_HAVE_int_store_release) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_int_store_full) # define AO_int_store_full(addr, val) \ (AO_int_store_release(addr, val), \ AO_nop_full()) # define AO_HAVE_int_store_full #endif #if defined(AO_HAVE_int_compare_and_swap) && !defined(AO_HAVE_int_store) AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_int_store(volatile unsigned *addr, unsigned new_val) { unsigned old_val; do { old_val = *(unsigned *)addr; } while (AO_EXPECT_FALSE(!AO_int_compare_and_swap(addr, old_val, new_val))); } # define AO_HAVE_int_store #endif #if defined(AO_HAVE_int_compare_and_swap_release) \ && !defined(AO_HAVE_int_store_release) AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_int_store_release(volatile unsigned *addr, unsigned new_val) { unsigned old_val; do { old_val = *(unsigned *)addr; } while (AO_EXPECT_FALSE(!AO_int_compare_and_swap_release(addr, old_val, new_val))); } # define AO_HAVE_int_store_release 
#endif #if defined(AO_HAVE_int_compare_and_swap_full) \ && !defined(AO_HAVE_int_store_full) AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_int_store_full(volatile unsigned *addr, unsigned new_val) { unsigned old_val; do { old_val = *(unsigned *)addr; } while (AO_EXPECT_FALSE(!AO_int_compare_and_swap_full(addr, old_val, new_val))); } # define AO_HAVE_int_store_full #endif /* * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ /* fetch_compare_and_swap */ #if defined(AO_HAVE_fetch_compare_and_swap) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_fetch_compare_and_swap_acquire) AO_INLINE AO_t AO_fetch_compare_and_swap_acquire(volatile AO_t *addr, AO_t old_val, AO_t new_val) { AO_t result = AO_fetch_compare_and_swap(addr, old_val, new_val); AO_nop_full(); return result; } # define AO_HAVE_fetch_compare_and_swap_acquire #endif #if defined(AO_HAVE_fetch_compare_and_swap) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_fetch_compare_and_swap_release) # define AO_fetch_compare_and_swap_release(addr, old_val, new_val) \ (AO_nop_full(), \ AO_fetch_compare_and_swap(addr, old_val, new_val)) # define AO_HAVE_fetch_compare_and_swap_release #endif #if defined(AO_HAVE_fetch_compare_and_swap_full) # if !defined(AO_HAVE_fetch_compare_and_swap_release) # define AO_fetch_compare_and_swap_release(addr, old_val, new_val) \ AO_fetch_compare_and_swap_full(addr, old_val, new_val) # define AO_HAVE_fetch_compare_and_swap_release # endif # if !defined(AO_HAVE_fetch_compare_and_swap_acquire) # define AO_fetch_compare_and_swap_acquire(addr, old_val, new_val) \ AO_fetch_compare_and_swap_full(addr, old_val, new_val) # define AO_HAVE_fetch_compare_and_swap_acquire # endif # if !defined(AO_HAVE_fetch_compare_and_swap_write) # define AO_fetch_compare_and_swap_write(addr, old_val, new_val) \ AO_fetch_compare_and_swap_full(addr, old_val, new_val) # define AO_HAVE_fetch_compare_and_swap_write # endif # if !defined(AO_HAVE_fetch_compare_and_swap_read) # define AO_fetch_compare_and_swap_read(addr, old_val, new_val) \ AO_fetch_compare_and_swap_full(addr, old_val, new_val) # define AO_HAVE_fetch_compare_and_swap_read # endif #endif /* AO_HAVE_fetch_compare_and_swap_full */ #if !defined(AO_HAVE_fetch_compare_and_swap) \ && defined(AO_HAVE_fetch_compare_and_swap_release) # define AO_fetch_compare_and_swap(addr, old_val, new_val) \ AO_fetch_compare_and_swap_release(addr, old_val, new_val) # define 
AO_HAVE_fetch_compare_and_swap #endif #if !defined(AO_HAVE_fetch_compare_and_swap) \ && defined(AO_HAVE_fetch_compare_and_swap_acquire) # define AO_fetch_compare_and_swap(addr, old_val, new_val) \ AO_fetch_compare_and_swap_acquire(addr, old_val, new_val) # define AO_HAVE_fetch_compare_and_swap #endif #if !defined(AO_HAVE_fetch_compare_and_swap) \ && defined(AO_HAVE_fetch_compare_and_swap_write) # define AO_fetch_compare_and_swap(addr, old_val, new_val) \ AO_fetch_compare_and_swap_write(addr, old_val, new_val) # define AO_HAVE_fetch_compare_and_swap #endif #if !defined(AO_HAVE_fetch_compare_and_swap) \ && defined(AO_HAVE_fetch_compare_and_swap_read) # define AO_fetch_compare_and_swap(addr, old_val, new_val) \ AO_fetch_compare_and_swap_read(addr, old_val, new_val) # define AO_HAVE_fetch_compare_and_swap #endif #if defined(AO_HAVE_fetch_compare_and_swap_acquire) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_fetch_compare_and_swap_full) # define AO_fetch_compare_and_swap_full(addr, old_val, new_val) \ (AO_nop_full(), \ AO_fetch_compare_and_swap_acquire(addr, old_val, new_val)) # define AO_HAVE_fetch_compare_and_swap_full #endif #if !defined(AO_HAVE_fetch_compare_and_swap_release_write) \ && defined(AO_HAVE_fetch_compare_and_swap_write) # define AO_fetch_compare_and_swap_release_write(addr,old_val,new_val) \ AO_fetch_compare_and_swap_write(addr, old_val, new_val) # define AO_HAVE_fetch_compare_and_swap_release_write #endif #if !defined(AO_HAVE_fetch_compare_and_swap_release_write) \ && defined(AO_HAVE_fetch_compare_and_swap_release) # define AO_fetch_compare_and_swap_release_write(addr,old_val,new_val) \ AO_fetch_compare_and_swap_release(addr, old_val, new_val) # define AO_HAVE_fetch_compare_and_swap_release_write #endif #if !defined(AO_HAVE_fetch_compare_and_swap_acquire_read) \ && defined(AO_HAVE_fetch_compare_and_swap_read) # define AO_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \ AO_fetch_compare_and_swap_read(addr, old_val, new_val) # define 
AO_HAVE_fetch_compare_and_swap_acquire_read #endif #if !defined(AO_HAVE_fetch_compare_and_swap_acquire_read) \ && defined(AO_HAVE_fetch_compare_and_swap_acquire) # define AO_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \ AO_fetch_compare_and_swap_acquire(addr, old_val, new_val) # define AO_HAVE_fetch_compare_and_swap_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_fetch_compare_and_swap_acquire_read) # define AO_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \ AO_fetch_compare_and_swap_acquire_read(addr, old_val, new_val) # define AO_HAVE_fetch_compare_and_swap_dd_acquire_read # endif #else # if defined(AO_HAVE_fetch_compare_and_swap) # define AO_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \ AO_fetch_compare_and_swap(addr, old_val, new_val) # define AO_HAVE_fetch_compare_and_swap_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* compare_and_swap */ #if defined(AO_HAVE_compare_and_swap) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_compare_and_swap_acquire) AO_INLINE int AO_compare_and_swap_acquire(volatile AO_t *addr, AO_t old, AO_t new_val) { int result = AO_compare_and_swap(addr, old, new_val); AO_nop_full(); return result; } # define AO_HAVE_compare_and_swap_acquire #endif #if defined(AO_HAVE_compare_and_swap) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_compare_and_swap_release) # define AO_compare_and_swap_release(addr, old, new_val) \ (AO_nop_full(), AO_compare_and_swap(addr, old, new_val)) # define AO_HAVE_compare_and_swap_release #endif #if defined(AO_HAVE_compare_and_swap_full) # if !defined(AO_HAVE_compare_and_swap_release) # define AO_compare_and_swap_release(addr, old, new_val) \ AO_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_compare_and_swap_release # endif # if !defined(AO_HAVE_compare_and_swap_acquire) # define AO_compare_and_swap_acquire(addr, old, new_val) \ AO_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_compare_and_swap_acquire # endif # 
if !defined(AO_HAVE_compare_and_swap_write) # define AO_compare_and_swap_write(addr, old, new_val) \ AO_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_compare_and_swap_write # endif # if !defined(AO_HAVE_compare_and_swap_read) # define AO_compare_and_swap_read(addr, old, new_val) \ AO_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_compare_and_swap_read # endif #endif /* AO_HAVE_compare_and_swap_full */ #if !defined(AO_HAVE_compare_and_swap) \ && defined(AO_HAVE_compare_and_swap_release) # define AO_compare_and_swap(addr, old, new_val) \ AO_compare_and_swap_release(addr, old, new_val) # define AO_HAVE_compare_and_swap #endif #if !defined(AO_HAVE_compare_and_swap) \ && defined(AO_HAVE_compare_and_swap_acquire) # define AO_compare_and_swap(addr, old, new_val) \ AO_compare_and_swap_acquire(addr, old, new_val) # define AO_HAVE_compare_and_swap #endif #if !defined(AO_HAVE_compare_and_swap) \ && defined(AO_HAVE_compare_and_swap_write) # define AO_compare_and_swap(addr, old, new_val) \ AO_compare_and_swap_write(addr, old, new_val) # define AO_HAVE_compare_and_swap #endif #if !defined(AO_HAVE_compare_and_swap) \ && defined(AO_HAVE_compare_and_swap_read) # define AO_compare_and_swap(addr, old, new_val) \ AO_compare_and_swap_read(addr, old, new_val) # define AO_HAVE_compare_and_swap #endif #if defined(AO_HAVE_compare_and_swap_acquire) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_compare_and_swap_full) # define AO_compare_and_swap_full(addr, old, new_val) \ (AO_nop_full(), \ AO_compare_and_swap_acquire(addr, old, new_val)) # define AO_HAVE_compare_and_swap_full #endif #if !defined(AO_HAVE_compare_and_swap_release_write) \ && defined(AO_HAVE_compare_and_swap_write) # define AO_compare_and_swap_release_write(addr, old, new_val) \ AO_compare_and_swap_write(addr, old, new_val) # define AO_HAVE_compare_and_swap_release_write #endif #if !defined(AO_HAVE_compare_and_swap_release_write) \ && defined(AO_HAVE_compare_and_swap_release) # define 
AO_compare_and_swap_release_write(addr, old, new_val) \ AO_compare_and_swap_release(addr, old, new_val) # define AO_HAVE_compare_and_swap_release_write #endif #if !defined(AO_HAVE_compare_and_swap_acquire_read) \ && defined(AO_HAVE_compare_and_swap_read) # define AO_compare_and_swap_acquire_read(addr, old, new_val) \ AO_compare_and_swap_read(addr, old, new_val) # define AO_HAVE_compare_and_swap_acquire_read #endif #if !defined(AO_HAVE_compare_and_swap_acquire_read) \ && defined(AO_HAVE_compare_and_swap_acquire) # define AO_compare_and_swap_acquire_read(addr, old, new_val) \ AO_compare_and_swap_acquire(addr, old, new_val) # define AO_HAVE_compare_and_swap_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_compare_and_swap_acquire_read) # define AO_compare_and_swap_dd_acquire_read(addr, old, new_val) \ AO_compare_and_swap_acquire_read(addr, old, new_val) # define AO_HAVE_compare_and_swap_dd_acquire_read # endif #else # if defined(AO_HAVE_compare_and_swap) # define AO_compare_and_swap_dd_acquire_read(addr, old, new_val) \ AO_compare_and_swap(addr, old, new_val) # define AO_HAVE_compare_and_swap_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* load */ #if defined(AO_HAVE_load_full) && !defined(AO_HAVE_load_acquire) # define AO_load_acquire(addr) AO_load_full(addr) # define AO_HAVE_load_acquire #endif #if defined(AO_HAVE_load_acquire) && !defined(AO_HAVE_load) # define AO_load(addr) AO_load_acquire(addr) # define AO_HAVE_load #endif #if defined(AO_HAVE_load_full) && !defined(AO_HAVE_load_read) # define AO_load_read(addr) AO_load_full(addr) # define AO_HAVE_load_read #endif #if !defined(AO_HAVE_load_acquire_read) \ && defined(AO_HAVE_load_acquire) # define AO_load_acquire_read(addr) AO_load_acquire(addr) # define AO_HAVE_load_acquire_read #endif #if defined(AO_HAVE_load) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_load_acquire) AO_INLINE AO_t AO_load_acquire(const volatile AO_t *addr) { AO_t result = AO_load(addr); /* Acquire barrier would 
be useless, since the load could be delayed */ /* beyond it. */ AO_nop_full(); return result; } # define AO_HAVE_load_acquire #endif #if defined(AO_HAVE_load) && defined(AO_HAVE_nop_read) \ && !defined(AO_HAVE_load_read) AO_INLINE AO_t AO_load_read(const volatile AO_t *addr) { AO_t result = AO_load(addr); AO_nop_read(); return result; } # define AO_HAVE_load_read #endif #if defined(AO_HAVE_load_acquire) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_load_full) # define AO_load_full(addr) (AO_nop_full(), AO_load_acquire(addr)) # define AO_HAVE_load_full #endif #if defined(AO_HAVE_compare_and_swap_read) \ && !defined(AO_HAVE_load_read) # define AO_CAS_BASED_LOAD_READ AO_ATTR_NO_SANITIZE_THREAD AO_INLINE AO_t AO_load_read(const volatile AO_t *addr) { AO_t result; do { result = *(const AO_t *)addr; } while (AO_EXPECT_FALSE(!AO_compare_and_swap_read( (volatile AO_t *)addr, result, result))); return result; } # define AO_HAVE_load_read #endif #if !defined(AO_HAVE_load_acquire_read) \ && defined(AO_HAVE_load_read) # define AO_load_acquire_read(addr) AO_load_read(addr) # define AO_HAVE_load_acquire_read #endif #if defined(AO_HAVE_load_acquire_read) && !defined(AO_HAVE_load) \ && (!defined(AO_CAS_BASED_LOAD_READ) \ || !defined(AO_HAVE_compare_and_swap)) # define AO_load(addr) AO_load_acquire_read(addr) # define AO_HAVE_load #endif #if defined(AO_HAVE_compare_and_swap_full) \ && !defined(AO_HAVE_load_full) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE AO_t AO_load_full(const volatile AO_t *addr) { AO_t result; do { result = *(const AO_t *)addr; } while (AO_EXPECT_FALSE(!AO_compare_and_swap_full( (volatile AO_t *)addr, result, result))); return result; } # define AO_HAVE_load_full #endif #if defined(AO_HAVE_compare_and_swap_acquire) \ && !defined(AO_HAVE_load_acquire) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE AO_t AO_load_acquire(const volatile AO_t *addr) { AO_t result; do { result = *(const AO_t *)addr; } while (AO_EXPECT_FALSE(!AO_compare_and_swap_acquire( (volatile AO_t *)addr, 
result, result))); return result; } # define AO_HAVE_load_acquire #endif #if defined(AO_HAVE_compare_and_swap) && !defined(AO_HAVE_load) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE AO_t AO_load(const volatile AO_t *addr) { AO_t result; do { result = *(const AO_t *)addr; } while (AO_EXPECT_FALSE(!AO_compare_and_swap( (volatile AO_t *)addr, result, result))); return result; } # define AO_HAVE_load #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_load_acquire_read) # define AO_load_dd_acquire_read(addr) \ AO_load_acquire_read(addr) # define AO_HAVE_load_dd_acquire_read # endif #else # if defined(AO_HAVE_load) # define AO_load_dd_acquire_read(addr) AO_load(addr) # define AO_HAVE_load_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* store */ #if defined(AO_HAVE_store_full) && !defined(AO_HAVE_store_release) # define AO_store_release(addr, val) AO_store_full(addr, val) # define AO_HAVE_store_release #endif #if defined(AO_HAVE_store_release) && !defined(AO_HAVE_store) # define AO_store(addr, val) AO_store_release(addr, val) # define AO_HAVE_store #endif #if defined(AO_HAVE_store_full) && !defined(AO_HAVE_store_write) # define AO_store_write(addr, val) AO_store_full(addr, val) # define AO_HAVE_store_write #endif #if defined(AO_HAVE_store_release) \ && !defined(AO_HAVE_store_release_write) # define AO_store_release_write(addr, val) \ AO_store_release(addr, val) # define AO_HAVE_store_release_write #endif #if defined(AO_HAVE_store_write) && !defined(AO_HAVE_store) # define AO_store(addr, val) AO_store_write(addr, val) # define AO_HAVE_store #endif #if defined(AO_HAVE_store) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_store_release) # define AO_store_release(addr, val) \ (AO_nop_full(), AO_store(addr, val)) # define AO_HAVE_store_release #endif #if defined(AO_HAVE_store) && defined(AO_HAVE_nop_write) \ && !defined(AO_HAVE_store_write) # define AO_store_write(addr, val) \ (AO_nop_write(), AO_store(addr, val)) # define AO_HAVE_store_write #endif #if 
defined(AO_HAVE_compare_and_swap_write) \ && !defined(AO_HAVE_store_write) AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_store_write(volatile AO_t *addr, AO_t new_val) { AO_t old_val; do { old_val = *(AO_t *)addr; } while (AO_EXPECT_FALSE(!AO_compare_and_swap_write(addr, old_val, new_val))); } # define AO_HAVE_store_write #endif #if defined(AO_HAVE_store_write) \ && !defined(AO_HAVE_store_release_write) # define AO_store_release_write(addr, val) \ AO_store_write(addr, val) # define AO_HAVE_store_release_write #endif #if defined(AO_HAVE_store_release) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_store_full) # define AO_store_full(addr, val) \ (AO_store_release(addr, val), \ AO_nop_full()) # define AO_HAVE_store_full #endif #if defined(AO_HAVE_compare_and_swap) && !defined(AO_HAVE_store) AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_store(volatile AO_t *addr, AO_t new_val) { AO_t old_val; do { old_val = *(AO_t *)addr; } while (AO_EXPECT_FALSE(!AO_compare_and_swap(addr, old_val, new_val))); } # define AO_HAVE_store #endif #if defined(AO_HAVE_compare_and_swap_release) \ && !defined(AO_HAVE_store_release) AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_store_release(volatile AO_t *addr, AO_t new_val) { AO_t old_val; do { old_val = *(AO_t *)addr; } while (AO_EXPECT_FALSE(!AO_compare_and_swap_release(addr, old_val, new_val))); } # define AO_HAVE_store_release #endif #if defined(AO_HAVE_compare_and_swap_full) \ && !defined(AO_HAVE_store_full) AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_store_full(volatile AO_t *addr, AO_t new_val) { AO_t old_val; do { old_val = *(AO_t *)addr; } while (AO_EXPECT_FALSE(!AO_compare_and_swap_full(addr, old_val, new_val))); } # define AO_HAVE_store_full #endif /* * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ /* double_fetch_compare_and_swap */ #if defined(AO_HAVE_double_fetch_compare_and_swap) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_double_fetch_compare_and_swap_acquire) AO_INLINE AO_double_t AO_double_fetch_compare_and_swap_acquire(volatile AO_double_t *addr, AO_double_t old_val, AO_double_t new_val) { AO_double_t result = AO_double_fetch_compare_and_swap(addr, old_val, new_val); AO_nop_full(); return result; } # define AO_HAVE_double_fetch_compare_and_swap_acquire #endif #if defined(AO_HAVE_double_fetch_compare_and_swap) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_double_fetch_compare_and_swap_release) # define AO_double_fetch_compare_and_swap_release(addr, old_val, new_val) \ (AO_nop_full(), \ AO_double_fetch_compare_and_swap(addr, old_val, new_val)) # define AO_HAVE_double_fetch_compare_and_swap_release #endif #if defined(AO_HAVE_double_fetch_compare_and_swap_full) # if !defined(AO_HAVE_double_fetch_compare_and_swap_release) # define AO_double_fetch_compare_and_swap_release(addr, old_val, new_val) \ AO_double_fetch_compare_and_swap_full(addr, old_val, new_val) # define AO_HAVE_double_fetch_compare_and_swap_release # endif # if !defined(AO_HAVE_double_fetch_compare_and_swap_acquire) # define AO_double_fetch_compare_and_swap_acquire(addr, old_val, new_val) \ AO_double_fetch_compare_and_swap_full(addr, old_val, new_val) # define AO_HAVE_double_fetch_compare_and_swap_acquire # endif # if !defined(AO_HAVE_double_fetch_compare_and_swap_write) # define AO_double_fetch_compare_and_swap_write(addr, old_val, new_val) \ AO_double_fetch_compare_and_swap_full(addr, old_val, new_val) # define AO_HAVE_double_fetch_compare_and_swap_write # endif # if !defined(AO_HAVE_double_fetch_compare_and_swap_read) # define AO_double_fetch_compare_and_swap_read(addr, old_val, new_val) \ AO_double_fetch_compare_and_swap_full(addr, old_val, new_val) # define AO_HAVE_double_fetch_compare_and_swap_read # endif #endif /* AO_HAVE_double_fetch_compare_and_swap_full */ #if 
!defined(AO_HAVE_double_fetch_compare_and_swap) \ && defined(AO_HAVE_double_fetch_compare_and_swap_release) # define AO_double_fetch_compare_and_swap(addr, old_val, new_val) \ AO_double_fetch_compare_and_swap_release(addr, old_val, new_val) # define AO_HAVE_double_fetch_compare_and_swap #endif #if !defined(AO_HAVE_double_fetch_compare_and_swap) \ && defined(AO_HAVE_double_fetch_compare_and_swap_acquire) # define AO_double_fetch_compare_and_swap(addr, old_val, new_val) \ AO_double_fetch_compare_and_swap_acquire(addr, old_val, new_val) # define AO_HAVE_double_fetch_compare_and_swap #endif #if !defined(AO_HAVE_double_fetch_compare_and_swap) \ && defined(AO_HAVE_double_fetch_compare_and_swap_write) # define AO_double_fetch_compare_and_swap(addr, old_val, new_val) \ AO_double_fetch_compare_and_swap_write(addr, old_val, new_val) # define AO_HAVE_double_fetch_compare_and_swap #endif #if !defined(AO_HAVE_double_fetch_compare_and_swap) \ && defined(AO_HAVE_double_fetch_compare_and_swap_read) # define AO_double_fetch_compare_and_swap(addr, old_val, new_val) \ AO_double_fetch_compare_and_swap_read(addr, old_val, new_val) # define AO_HAVE_double_fetch_compare_and_swap #endif #if defined(AO_HAVE_double_fetch_compare_and_swap_acquire) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_double_fetch_compare_and_swap_full) # define AO_double_fetch_compare_and_swap_full(addr, old_val, new_val) \ (AO_nop_full(), \ AO_double_fetch_compare_and_swap_acquire(addr, old_val, new_val)) # define AO_HAVE_double_fetch_compare_and_swap_full #endif #if !defined(AO_HAVE_double_fetch_compare_and_swap_release_write) \ && defined(AO_HAVE_double_fetch_compare_and_swap_write) # define AO_double_fetch_compare_and_swap_release_write(addr,old_val,new_val) \ AO_double_fetch_compare_and_swap_write(addr, old_val, new_val) # define AO_HAVE_double_fetch_compare_and_swap_release_write #endif #if !defined(AO_HAVE_double_fetch_compare_and_swap_release_write) \ && 
defined(AO_HAVE_double_fetch_compare_and_swap_release) # define AO_double_fetch_compare_and_swap_release_write(addr,old_val,new_val) \ AO_double_fetch_compare_and_swap_release(addr, old_val, new_val) # define AO_HAVE_double_fetch_compare_and_swap_release_write #endif #if !defined(AO_HAVE_double_fetch_compare_and_swap_acquire_read) \ && defined(AO_HAVE_double_fetch_compare_and_swap_read) # define AO_double_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \ AO_double_fetch_compare_and_swap_read(addr, old_val, new_val) # define AO_HAVE_double_fetch_compare_and_swap_acquire_read #endif #if !defined(AO_HAVE_double_fetch_compare_and_swap_acquire_read) \ && defined(AO_HAVE_double_fetch_compare_and_swap_acquire) # define AO_double_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \ AO_double_fetch_compare_and_swap_acquire(addr, old_val, new_val) # define AO_HAVE_double_fetch_compare_and_swap_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_double_fetch_compare_and_swap_acquire_read) # define AO_double_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \ AO_double_fetch_compare_and_swap_acquire_read(addr, old_val, new_val) # define AO_HAVE_double_fetch_compare_and_swap_dd_acquire_read # endif #else # if defined(AO_HAVE_double_fetch_compare_and_swap) # define AO_double_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \ AO_double_fetch_compare_and_swap(addr, old_val, new_val) # define AO_HAVE_double_fetch_compare_and_swap_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* double_compare_and_swap */ #if defined(AO_HAVE_double_compare_and_swap) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_double_compare_and_swap_acquire) AO_INLINE int AO_double_compare_and_swap_acquire(volatile AO_double_t *addr, AO_double_t old, AO_double_t new_val) { int result = AO_double_compare_and_swap(addr, old, new_val); AO_nop_full(); return result; } # define AO_HAVE_double_compare_and_swap_acquire #endif #if 
defined(AO_HAVE_double_compare_and_swap) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_double_compare_and_swap_release) # define AO_double_compare_and_swap_release(addr, old, new_val) \ (AO_nop_full(), AO_double_compare_and_swap(addr, old, new_val)) # define AO_HAVE_double_compare_and_swap_release #endif #if defined(AO_HAVE_double_compare_and_swap_full) # if !defined(AO_HAVE_double_compare_and_swap_release) # define AO_double_compare_and_swap_release(addr, old, new_val) \ AO_double_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_double_compare_and_swap_release # endif # if !defined(AO_HAVE_double_compare_and_swap_acquire) # define AO_double_compare_and_swap_acquire(addr, old, new_val) \ AO_double_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_double_compare_and_swap_acquire # endif # if !defined(AO_HAVE_double_compare_and_swap_write) # define AO_double_compare_and_swap_write(addr, old, new_val) \ AO_double_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_double_compare_and_swap_write # endif # if !defined(AO_HAVE_double_compare_and_swap_read) # define AO_double_compare_and_swap_read(addr, old, new_val) \ AO_double_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_double_compare_and_swap_read # endif #endif /* AO_HAVE_double_compare_and_swap_full */ #if !defined(AO_HAVE_double_compare_and_swap) \ && defined(AO_HAVE_double_compare_and_swap_release) # define AO_double_compare_and_swap(addr, old, new_val) \ AO_double_compare_and_swap_release(addr, old, new_val) # define AO_HAVE_double_compare_and_swap #endif #if !defined(AO_HAVE_double_compare_and_swap) \ && defined(AO_HAVE_double_compare_and_swap_acquire) # define AO_double_compare_and_swap(addr, old, new_val) \ AO_double_compare_and_swap_acquire(addr, old, new_val) # define AO_HAVE_double_compare_and_swap #endif #if !defined(AO_HAVE_double_compare_and_swap) \ && defined(AO_HAVE_double_compare_and_swap_write) # define AO_double_compare_and_swap(addr, old, new_val) \ 
AO_double_compare_and_swap_write(addr, old, new_val) # define AO_HAVE_double_compare_and_swap #endif #if !defined(AO_HAVE_double_compare_and_swap) \ && defined(AO_HAVE_double_compare_and_swap_read) # define AO_double_compare_and_swap(addr, old, new_val) \ AO_double_compare_and_swap_read(addr, old, new_val) # define AO_HAVE_double_compare_and_swap #endif #if defined(AO_HAVE_double_compare_and_swap_acquire) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_double_compare_and_swap_full) # define AO_double_compare_and_swap_full(addr, old, new_val) \ (AO_nop_full(), \ AO_double_compare_and_swap_acquire(addr, old, new_val)) # define AO_HAVE_double_compare_and_swap_full #endif #if !defined(AO_HAVE_double_compare_and_swap_release_write) \ && defined(AO_HAVE_double_compare_and_swap_write) # define AO_double_compare_and_swap_release_write(addr, old, new_val) \ AO_double_compare_and_swap_write(addr, old, new_val) # define AO_HAVE_double_compare_and_swap_release_write #endif #if !defined(AO_HAVE_double_compare_and_swap_release_write) \ && defined(AO_HAVE_double_compare_and_swap_release) # define AO_double_compare_and_swap_release_write(addr, old, new_val) \ AO_double_compare_and_swap_release(addr, old, new_val) # define AO_HAVE_double_compare_and_swap_release_write #endif #if !defined(AO_HAVE_double_compare_and_swap_acquire_read) \ && defined(AO_HAVE_double_compare_and_swap_read) # define AO_double_compare_and_swap_acquire_read(addr, old, new_val) \ AO_double_compare_and_swap_read(addr, old, new_val) # define AO_HAVE_double_compare_and_swap_acquire_read #endif #if !defined(AO_HAVE_double_compare_and_swap_acquire_read) \ && defined(AO_HAVE_double_compare_and_swap_acquire) # define AO_double_compare_and_swap_acquire_read(addr, old, new_val) \ AO_double_compare_and_swap_acquire(addr, old, new_val) # define AO_HAVE_double_compare_and_swap_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_double_compare_and_swap_acquire_read) # define 
AO_double_compare_and_swap_dd_acquire_read(addr, old, new_val) \ AO_double_compare_and_swap_acquire_read(addr, old, new_val) # define AO_HAVE_double_compare_and_swap_dd_acquire_read # endif #else # if defined(AO_HAVE_double_compare_and_swap) # define AO_double_compare_and_swap_dd_acquire_read(addr, old, new_val) \ AO_double_compare_and_swap(addr, old, new_val) # define AO_HAVE_double_compare_and_swap_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* double_load */ #if defined(AO_HAVE_double_load_full) && !defined(AO_HAVE_double_load_acquire) # define AO_double_load_acquire(addr) AO_double_load_full(addr) # define AO_HAVE_double_load_acquire #endif #if defined(AO_HAVE_double_load_acquire) && !defined(AO_HAVE_double_load) # define AO_double_load(addr) AO_double_load_acquire(addr) # define AO_HAVE_double_load #endif #if defined(AO_HAVE_double_load_full) && !defined(AO_HAVE_double_load_read) # define AO_double_load_read(addr) AO_double_load_full(addr) # define AO_HAVE_double_load_read #endif #if !defined(AO_HAVE_double_load_acquire_read) \ && defined(AO_HAVE_double_load_acquire) # define AO_double_load_acquire_read(addr) AO_double_load_acquire(addr) # define AO_HAVE_double_load_acquire_read #endif #if defined(AO_HAVE_double_load) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_double_load_acquire) AO_INLINE AO_double_t AO_double_load_acquire(const volatile AO_double_t *addr) { AO_double_t result = AO_double_load(addr); /* Acquire barrier would be useless, since the load could be delayed */ /* beyond it. 
*/ AO_nop_full(); return result; } # define AO_HAVE_double_load_acquire #endif #if defined(AO_HAVE_double_load) && defined(AO_HAVE_nop_read) \ && !defined(AO_HAVE_double_load_read) AO_INLINE AO_double_t AO_double_load_read(const volatile AO_double_t *addr) { AO_double_t result = AO_double_load(addr); AO_nop_read(); return result; } # define AO_HAVE_double_load_read #endif #if defined(AO_HAVE_double_load_acquire) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_double_load_full) # define AO_double_load_full(addr) (AO_nop_full(), AO_double_load_acquire(addr)) # define AO_HAVE_double_load_full #endif #if defined(AO_HAVE_double_compare_and_swap_read) \ && !defined(AO_HAVE_double_load_read) # define AO_double_CAS_BASED_LOAD_READ AO_ATTR_NO_SANITIZE_THREAD AO_INLINE AO_double_t AO_double_load_read(const volatile AO_double_t *addr) { AO_double_t result; do { result = *(const AO_double_t *)addr; } while (AO_EXPECT_FALSE(!AO_double_compare_and_swap_read( (volatile AO_double_t *)addr, result, result))); return result; } # define AO_HAVE_double_load_read #endif #if !defined(AO_HAVE_double_load_acquire_read) \ && defined(AO_HAVE_double_load_read) # define AO_double_load_acquire_read(addr) AO_double_load_read(addr) # define AO_HAVE_double_load_acquire_read #endif #if defined(AO_HAVE_double_load_acquire_read) && !defined(AO_HAVE_double_load) \ && (!defined(AO_double_CAS_BASED_LOAD_READ) \ || !defined(AO_HAVE_double_compare_and_swap)) # define AO_double_load(addr) AO_double_load_acquire_read(addr) # define AO_HAVE_double_load #endif #if defined(AO_HAVE_double_compare_and_swap_full) \ && !defined(AO_HAVE_double_load_full) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE AO_double_t AO_double_load_full(const volatile AO_double_t *addr) { AO_double_t result; do { result = *(const AO_double_t *)addr; } while (AO_EXPECT_FALSE(!AO_double_compare_and_swap_full( (volatile AO_double_t *)addr, result, result))); return result; } # define AO_HAVE_double_load_full #endif #if 
defined(AO_HAVE_double_compare_and_swap_acquire) \ && !defined(AO_HAVE_double_load_acquire) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE AO_double_t AO_double_load_acquire(const volatile AO_double_t *addr) { AO_double_t result; do { result = *(const AO_double_t *)addr; } while (AO_EXPECT_FALSE(!AO_double_compare_and_swap_acquire( (volatile AO_double_t *)addr, result, result))); return result; } # define AO_HAVE_double_load_acquire #endif #if defined(AO_HAVE_double_compare_and_swap) && !defined(AO_HAVE_double_load) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE AO_double_t AO_double_load(const volatile AO_double_t *addr) { AO_double_t result; do { result = *(const AO_double_t *)addr; } while (AO_EXPECT_FALSE(!AO_double_compare_and_swap( (volatile AO_double_t *)addr, result, result))); return result; } # define AO_HAVE_double_load #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_double_load_acquire_read) # define AO_double_load_dd_acquire_read(addr) \ AO_double_load_acquire_read(addr) # define AO_HAVE_double_load_dd_acquire_read # endif #else # if defined(AO_HAVE_double_load) # define AO_double_load_dd_acquire_read(addr) AO_double_load(addr) # define AO_HAVE_double_load_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* double_store */ #if defined(AO_HAVE_double_store_full) && !defined(AO_HAVE_double_store_release) # define AO_double_store_release(addr, val) AO_double_store_full(addr, val) # define AO_HAVE_double_store_release #endif #if defined(AO_HAVE_double_store_release) && !defined(AO_HAVE_double_store) # define AO_double_store(addr, val) AO_double_store_release(addr, val) # define AO_HAVE_double_store #endif #if defined(AO_HAVE_double_store_full) && !defined(AO_HAVE_double_store_write) # define AO_double_store_write(addr, val) AO_double_store_full(addr, val) # define AO_HAVE_double_store_write #endif #if defined(AO_HAVE_double_store_release) \ && !defined(AO_HAVE_double_store_release_write) # define AO_double_store_release_write(addr, val) \ 
AO_double_store_release(addr, val) # define AO_HAVE_double_store_release_write #endif #if defined(AO_HAVE_double_store_write) && !defined(AO_HAVE_double_store) # define AO_double_store(addr, val) AO_double_store_write(addr, val) # define AO_HAVE_double_store #endif #if defined(AO_HAVE_double_store) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_double_store_release) # define AO_double_store_release(addr, val) \ (AO_nop_full(), AO_double_store(addr, val)) # define AO_HAVE_double_store_release #endif #if defined(AO_HAVE_double_store) && defined(AO_HAVE_nop_write) \ && !defined(AO_HAVE_double_store_write) # define AO_double_store_write(addr, val) \ (AO_nop_write(), AO_double_store(addr, val)) # define AO_HAVE_double_store_write #endif #if defined(AO_HAVE_double_compare_and_swap_write) \ && !defined(AO_HAVE_double_store_write) AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_double_store_write(volatile AO_double_t *addr, AO_double_t new_val) { AO_double_t old_val; do { old_val = *(AO_double_t *)addr; } while (AO_EXPECT_FALSE(!AO_double_compare_and_swap_write(addr, old_val, new_val))); } # define AO_HAVE_double_store_write #endif #if defined(AO_HAVE_double_store_write) \ && !defined(AO_HAVE_double_store_release_write) # define AO_double_store_release_write(addr, val) \ AO_double_store_write(addr, val) # define AO_HAVE_double_store_release_write #endif #if defined(AO_HAVE_double_store_release) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_double_store_full) # define AO_double_store_full(addr, val) \ (AO_double_store_release(addr, val), \ AO_nop_full()) # define AO_HAVE_double_store_full #endif #if defined(AO_HAVE_double_compare_and_swap) && !defined(AO_HAVE_double_store) AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_double_store(volatile AO_double_t *addr, AO_double_t new_val) { AO_double_t old_val; do { old_val = *(AO_double_t *)addr; } while (AO_EXPECT_FALSE(!AO_double_compare_and_swap(addr, old_val, new_val))); 
} # define AO_HAVE_double_store #endif #if defined(AO_HAVE_double_compare_and_swap_release) \ && !defined(AO_HAVE_double_store_release) AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_double_store_release(volatile AO_double_t *addr, AO_double_t new_val) { AO_double_t old_val; do { old_val = *(AO_double_t *)addr; } while (AO_EXPECT_FALSE(!AO_double_compare_and_swap_release(addr, old_val, new_val))); } # define AO_HAVE_double_store_release #endif #if defined(AO_HAVE_double_compare_and_swap_full) \ && !defined(AO_HAVE_double_store_full) AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_double_store_full(volatile AO_double_t *addr, AO_double_t new_val) { AO_double_t old_val; do { old_val = *(AO_double_t *)addr; } while (AO_EXPECT_FALSE(!AO_double_compare_and_swap_full(addr, old_val, new_val))); } # define AO_HAVE_double_store_full #endif libatomic_ops-7.6.12/src/atomic_ops/generalize-small.template000066400000000000000000000507751411761111000243560ustar00rootroot00000000000000/* * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* XSIZE_fetch_compare_and_swap */ #if defined(AO_HAVE_XSIZE_fetch_compare_and_swap) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire) AO_INLINE XCTYPE AO_XSIZE_fetch_compare_and_swap_acquire(volatile XCTYPE *addr, XCTYPE old_val, XCTYPE new_val) { XCTYPE result = AO_XSIZE_fetch_compare_and_swap(addr, old_val, new_val); AO_nop_full(); return result; } # define AO_HAVE_XSIZE_fetch_compare_and_swap_acquire #endif #if defined(AO_HAVE_XSIZE_fetch_compare_and_swap) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_release) # define AO_XSIZE_fetch_compare_and_swap_release(addr, old_val, new_val) \ (AO_nop_full(), \ AO_XSIZE_fetch_compare_and_swap(addr, old_val, new_val)) # define AO_HAVE_XSIZE_fetch_compare_and_swap_release #endif #if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_full) # if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_release) # define AO_XSIZE_fetch_compare_and_swap_release(addr, old_val, new_val) \ AO_XSIZE_fetch_compare_and_swap_full(addr, old_val, new_val) # define AO_HAVE_XSIZE_fetch_compare_and_swap_release # endif # if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire) # define AO_XSIZE_fetch_compare_and_swap_acquire(addr, old_val, new_val) \ AO_XSIZE_fetch_compare_and_swap_full(addr, old_val, new_val) # define AO_HAVE_XSIZE_fetch_compare_and_swap_acquire # endif # if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_write) # define AO_XSIZE_fetch_compare_and_swap_write(addr, old_val, new_val) \ AO_XSIZE_fetch_compare_and_swap_full(addr, old_val, new_val) # define AO_HAVE_XSIZE_fetch_compare_and_swap_write # endif # if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_read) # define 
AO_XSIZE_fetch_compare_and_swap_read(addr, old_val, new_val) \ AO_XSIZE_fetch_compare_and_swap_full(addr, old_val, new_val) # define AO_HAVE_XSIZE_fetch_compare_and_swap_read # endif #endif /* AO_HAVE_XSIZE_fetch_compare_and_swap_full */ #if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap) \ && defined(AO_HAVE_XSIZE_fetch_compare_and_swap_release) # define AO_XSIZE_fetch_compare_and_swap(addr, old_val, new_val) \ AO_XSIZE_fetch_compare_and_swap_release(addr, old_val, new_val) # define AO_HAVE_XSIZE_fetch_compare_and_swap #endif #if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap) \ && defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire) # define AO_XSIZE_fetch_compare_and_swap(addr, old_val, new_val) \ AO_XSIZE_fetch_compare_and_swap_acquire(addr, old_val, new_val) # define AO_HAVE_XSIZE_fetch_compare_and_swap #endif #if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap) \ && defined(AO_HAVE_XSIZE_fetch_compare_and_swap_write) # define AO_XSIZE_fetch_compare_and_swap(addr, old_val, new_val) \ AO_XSIZE_fetch_compare_and_swap_write(addr, old_val, new_val) # define AO_HAVE_XSIZE_fetch_compare_and_swap #endif #if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap) \ && defined(AO_HAVE_XSIZE_fetch_compare_and_swap_read) # define AO_XSIZE_fetch_compare_and_swap(addr, old_val, new_val) \ AO_XSIZE_fetch_compare_and_swap_read(addr, old_val, new_val) # define AO_HAVE_XSIZE_fetch_compare_and_swap #endif #if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_full) # define AO_XSIZE_fetch_compare_and_swap_full(addr, old_val, new_val) \ (AO_nop_full(), \ AO_XSIZE_fetch_compare_and_swap_acquire(addr, old_val, new_val)) # define AO_HAVE_XSIZE_fetch_compare_and_swap_full #endif #if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_release_write) \ && defined(AO_HAVE_XSIZE_fetch_compare_and_swap_write) # define AO_XSIZE_fetch_compare_and_swap_release_write(addr,old_val,new_val) \ 
AO_XSIZE_fetch_compare_and_swap_write(addr, old_val, new_val) # define AO_HAVE_XSIZE_fetch_compare_and_swap_release_write #endif #if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_release_write) \ && defined(AO_HAVE_XSIZE_fetch_compare_and_swap_release) # define AO_XSIZE_fetch_compare_and_swap_release_write(addr,old_val,new_val) \ AO_XSIZE_fetch_compare_and_swap_release(addr, old_val, new_val) # define AO_HAVE_XSIZE_fetch_compare_and_swap_release_write #endif #if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire_read) \ && defined(AO_HAVE_XSIZE_fetch_compare_and_swap_read) # define AO_XSIZE_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \ AO_XSIZE_fetch_compare_and_swap_read(addr, old_val, new_val) # define AO_HAVE_XSIZE_fetch_compare_and_swap_acquire_read #endif #if !defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire_read) \ && defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire) # define AO_XSIZE_fetch_compare_and_swap_acquire_read(addr,old_val,new_val) \ AO_XSIZE_fetch_compare_and_swap_acquire(addr, old_val, new_val) # define AO_HAVE_XSIZE_fetch_compare_and_swap_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_XSIZE_fetch_compare_and_swap_acquire_read) # define AO_XSIZE_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \ AO_XSIZE_fetch_compare_and_swap_acquire_read(addr, old_val, new_val) # define AO_HAVE_XSIZE_fetch_compare_and_swap_dd_acquire_read # endif #else # if defined(AO_HAVE_XSIZE_fetch_compare_and_swap) # define AO_XSIZE_fetch_compare_and_swap_dd_acquire_read(addr,old_val,new_val) \ AO_XSIZE_fetch_compare_and_swap(addr, old_val, new_val) # define AO_HAVE_XSIZE_fetch_compare_and_swap_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* XSIZE_compare_and_swap */ #if defined(AO_HAVE_XSIZE_compare_and_swap) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_XSIZE_compare_and_swap_acquire) AO_INLINE int AO_XSIZE_compare_and_swap_acquire(volatile XCTYPE *addr, XCTYPE old, XCTYPE new_val) { int result = 
AO_XSIZE_compare_and_swap(addr, old, new_val); AO_nop_full(); return result; } # define AO_HAVE_XSIZE_compare_and_swap_acquire #endif #if defined(AO_HAVE_XSIZE_compare_and_swap) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_XSIZE_compare_and_swap_release) # define AO_XSIZE_compare_and_swap_release(addr, old, new_val) \ (AO_nop_full(), AO_XSIZE_compare_and_swap(addr, old, new_val)) # define AO_HAVE_XSIZE_compare_and_swap_release #endif #if defined(AO_HAVE_XSIZE_compare_and_swap_full) # if !defined(AO_HAVE_XSIZE_compare_and_swap_release) # define AO_XSIZE_compare_and_swap_release(addr, old, new_val) \ AO_XSIZE_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_XSIZE_compare_and_swap_release # endif # if !defined(AO_HAVE_XSIZE_compare_and_swap_acquire) # define AO_XSIZE_compare_and_swap_acquire(addr, old, new_val) \ AO_XSIZE_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_XSIZE_compare_and_swap_acquire # endif # if !defined(AO_HAVE_XSIZE_compare_and_swap_write) # define AO_XSIZE_compare_and_swap_write(addr, old, new_val) \ AO_XSIZE_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_XSIZE_compare_and_swap_write # endif # if !defined(AO_HAVE_XSIZE_compare_and_swap_read) # define AO_XSIZE_compare_and_swap_read(addr, old, new_val) \ AO_XSIZE_compare_and_swap_full(addr, old, new_val) # define AO_HAVE_XSIZE_compare_and_swap_read # endif #endif /* AO_HAVE_XSIZE_compare_and_swap_full */ #if !defined(AO_HAVE_XSIZE_compare_and_swap) \ && defined(AO_HAVE_XSIZE_compare_and_swap_release) # define AO_XSIZE_compare_and_swap(addr, old, new_val) \ AO_XSIZE_compare_and_swap_release(addr, old, new_val) # define AO_HAVE_XSIZE_compare_and_swap #endif #if !defined(AO_HAVE_XSIZE_compare_and_swap) \ && defined(AO_HAVE_XSIZE_compare_and_swap_acquire) # define AO_XSIZE_compare_and_swap(addr, old, new_val) \ AO_XSIZE_compare_and_swap_acquire(addr, old, new_val) # define AO_HAVE_XSIZE_compare_and_swap #endif #if !defined(AO_HAVE_XSIZE_compare_and_swap) \ && 
defined(AO_HAVE_XSIZE_compare_and_swap_write) # define AO_XSIZE_compare_and_swap(addr, old, new_val) \ AO_XSIZE_compare_and_swap_write(addr, old, new_val) # define AO_HAVE_XSIZE_compare_and_swap #endif #if !defined(AO_HAVE_XSIZE_compare_and_swap) \ && defined(AO_HAVE_XSIZE_compare_and_swap_read) # define AO_XSIZE_compare_and_swap(addr, old, new_val) \ AO_XSIZE_compare_and_swap_read(addr, old, new_val) # define AO_HAVE_XSIZE_compare_and_swap #endif #if defined(AO_HAVE_XSIZE_compare_and_swap_acquire) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_XSIZE_compare_and_swap_full) # define AO_XSIZE_compare_and_swap_full(addr, old, new_val) \ (AO_nop_full(), \ AO_XSIZE_compare_and_swap_acquire(addr, old, new_val)) # define AO_HAVE_XSIZE_compare_and_swap_full #endif #if !defined(AO_HAVE_XSIZE_compare_and_swap_release_write) \ && defined(AO_HAVE_XSIZE_compare_and_swap_write) # define AO_XSIZE_compare_and_swap_release_write(addr, old, new_val) \ AO_XSIZE_compare_and_swap_write(addr, old, new_val) # define AO_HAVE_XSIZE_compare_and_swap_release_write #endif #if !defined(AO_HAVE_XSIZE_compare_and_swap_release_write) \ && defined(AO_HAVE_XSIZE_compare_and_swap_release) # define AO_XSIZE_compare_and_swap_release_write(addr, old, new_val) \ AO_XSIZE_compare_and_swap_release(addr, old, new_val) # define AO_HAVE_XSIZE_compare_and_swap_release_write #endif #if !defined(AO_HAVE_XSIZE_compare_and_swap_acquire_read) \ && defined(AO_HAVE_XSIZE_compare_and_swap_read) # define AO_XSIZE_compare_and_swap_acquire_read(addr, old, new_val) \ AO_XSIZE_compare_and_swap_read(addr, old, new_val) # define AO_HAVE_XSIZE_compare_and_swap_acquire_read #endif #if !defined(AO_HAVE_XSIZE_compare_and_swap_acquire_read) \ && defined(AO_HAVE_XSIZE_compare_and_swap_acquire) # define AO_XSIZE_compare_and_swap_acquire_read(addr, old, new_val) \ AO_XSIZE_compare_and_swap_acquire(addr, old, new_val) # define AO_HAVE_XSIZE_compare_and_swap_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if 
defined(AO_HAVE_XSIZE_compare_and_swap_acquire_read) # define AO_XSIZE_compare_and_swap_dd_acquire_read(addr, old, new_val) \ AO_XSIZE_compare_and_swap_acquire_read(addr, old, new_val) # define AO_HAVE_XSIZE_compare_and_swap_dd_acquire_read # endif #else # if defined(AO_HAVE_XSIZE_compare_and_swap) # define AO_XSIZE_compare_and_swap_dd_acquire_read(addr, old, new_val) \ AO_XSIZE_compare_and_swap(addr, old, new_val) # define AO_HAVE_XSIZE_compare_and_swap_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* XSIZE_load */ #if defined(AO_HAVE_XSIZE_load_full) && !defined(AO_HAVE_XSIZE_load_acquire) # define AO_XSIZE_load_acquire(addr) AO_XSIZE_load_full(addr) # define AO_HAVE_XSIZE_load_acquire #endif #if defined(AO_HAVE_XSIZE_load_acquire) && !defined(AO_HAVE_XSIZE_load) # define AO_XSIZE_load(addr) AO_XSIZE_load_acquire(addr) # define AO_HAVE_XSIZE_load #endif #if defined(AO_HAVE_XSIZE_load_full) && !defined(AO_HAVE_XSIZE_load_read) # define AO_XSIZE_load_read(addr) AO_XSIZE_load_full(addr) # define AO_HAVE_XSIZE_load_read #endif #if !defined(AO_HAVE_XSIZE_load_acquire_read) \ && defined(AO_HAVE_XSIZE_load_acquire) # define AO_XSIZE_load_acquire_read(addr) AO_XSIZE_load_acquire(addr) # define AO_HAVE_XSIZE_load_acquire_read #endif #if defined(AO_HAVE_XSIZE_load) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_XSIZE_load_acquire) AO_INLINE XCTYPE AO_XSIZE_load_acquire(const volatile XCTYPE *addr) { XCTYPE result = AO_XSIZE_load(addr); /* Acquire barrier would be useless, since the load could be delayed */ /* beyond it. 
*/ AO_nop_full(); return result; } # define AO_HAVE_XSIZE_load_acquire #endif #if defined(AO_HAVE_XSIZE_load) && defined(AO_HAVE_nop_read) \ && !defined(AO_HAVE_XSIZE_load_read) AO_INLINE XCTYPE AO_XSIZE_load_read(const volatile XCTYPE *addr) { XCTYPE result = AO_XSIZE_load(addr); AO_nop_read(); return result; } # define AO_HAVE_XSIZE_load_read #endif #if defined(AO_HAVE_XSIZE_load_acquire) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_XSIZE_load_full) # define AO_XSIZE_load_full(addr) (AO_nop_full(), AO_XSIZE_load_acquire(addr)) # define AO_HAVE_XSIZE_load_full #endif #if defined(AO_HAVE_XSIZE_compare_and_swap_read) \ && !defined(AO_HAVE_XSIZE_load_read) # define AO_XSIZE_CAS_BASED_LOAD_READ AO_ATTR_NO_SANITIZE_THREAD AO_INLINE XCTYPE AO_XSIZE_load_read(const volatile XCTYPE *addr) { XCTYPE result; do { result = *(const XCTYPE *)addr; } while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_read( (volatile XCTYPE *)addr, result, result))); return result; } # define AO_HAVE_XSIZE_load_read #endif #if !defined(AO_HAVE_XSIZE_load_acquire_read) \ && defined(AO_HAVE_XSIZE_load_read) # define AO_XSIZE_load_acquire_read(addr) AO_XSIZE_load_read(addr) # define AO_HAVE_XSIZE_load_acquire_read #endif #if defined(AO_HAVE_XSIZE_load_acquire_read) && !defined(AO_HAVE_XSIZE_load) \ && (!defined(AO_XSIZE_CAS_BASED_LOAD_READ) \ || !defined(AO_HAVE_XSIZE_compare_and_swap)) # define AO_XSIZE_load(addr) AO_XSIZE_load_acquire_read(addr) # define AO_HAVE_XSIZE_load #endif #if defined(AO_HAVE_XSIZE_compare_and_swap_full) \ && !defined(AO_HAVE_XSIZE_load_full) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE XCTYPE AO_XSIZE_load_full(const volatile XCTYPE *addr) { XCTYPE result; do { result = *(const XCTYPE *)addr; } while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_full( (volatile XCTYPE *)addr, result, result))); return result; } # define AO_HAVE_XSIZE_load_full #endif #if defined(AO_HAVE_XSIZE_compare_and_swap_acquire) \ && !defined(AO_HAVE_XSIZE_load_acquire) AO_ATTR_NO_SANITIZE_THREAD 
AO_INLINE XCTYPE AO_XSIZE_load_acquire(const volatile XCTYPE *addr) { XCTYPE result; do { result = *(const XCTYPE *)addr; } while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_acquire( (volatile XCTYPE *)addr, result, result))); return result; } # define AO_HAVE_XSIZE_load_acquire #endif #if defined(AO_HAVE_XSIZE_compare_and_swap) && !defined(AO_HAVE_XSIZE_load) AO_ATTR_NO_SANITIZE_THREAD AO_INLINE XCTYPE AO_XSIZE_load(const volatile XCTYPE *addr) { XCTYPE result; do { result = *(const XCTYPE *)addr; } while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap( (volatile XCTYPE *)addr, result, result))); return result; } # define AO_HAVE_XSIZE_load #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_XSIZE_load_acquire_read) # define AO_XSIZE_load_dd_acquire_read(addr) \ AO_XSIZE_load_acquire_read(addr) # define AO_HAVE_XSIZE_load_dd_acquire_read # endif #else # if defined(AO_HAVE_XSIZE_load) # define AO_XSIZE_load_dd_acquire_read(addr) AO_XSIZE_load(addr) # define AO_HAVE_XSIZE_load_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* XSIZE_store */ #if defined(AO_HAVE_XSIZE_store_full) && !defined(AO_HAVE_XSIZE_store_release) # define AO_XSIZE_store_release(addr, val) AO_XSIZE_store_full(addr, val) # define AO_HAVE_XSIZE_store_release #endif #if defined(AO_HAVE_XSIZE_store_release) && !defined(AO_HAVE_XSIZE_store) # define AO_XSIZE_store(addr, val) AO_XSIZE_store_release(addr, val) # define AO_HAVE_XSIZE_store #endif #if defined(AO_HAVE_XSIZE_store_full) && !defined(AO_HAVE_XSIZE_store_write) # define AO_XSIZE_store_write(addr, val) AO_XSIZE_store_full(addr, val) # define AO_HAVE_XSIZE_store_write #endif #if defined(AO_HAVE_XSIZE_store_release) \ && !defined(AO_HAVE_XSIZE_store_release_write) # define AO_XSIZE_store_release_write(addr, val) \ AO_XSIZE_store_release(addr, val) # define AO_HAVE_XSIZE_store_release_write #endif #if defined(AO_HAVE_XSIZE_store_write) && !defined(AO_HAVE_XSIZE_store) # define AO_XSIZE_store(addr, val) AO_XSIZE_store_write(addr, val) # 
define AO_HAVE_XSIZE_store #endif #if defined(AO_HAVE_XSIZE_store) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_XSIZE_store_release) # define AO_XSIZE_store_release(addr, val) \ (AO_nop_full(), AO_XSIZE_store(addr, val)) # define AO_HAVE_XSIZE_store_release #endif #if defined(AO_HAVE_XSIZE_store) && defined(AO_HAVE_nop_write) \ && !defined(AO_HAVE_XSIZE_store_write) # define AO_XSIZE_store_write(addr, val) \ (AO_nop_write(), AO_XSIZE_store(addr, val)) # define AO_HAVE_XSIZE_store_write #endif #if defined(AO_HAVE_XSIZE_compare_and_swap_write) \ && !defined(AO_HAVE_XSIZE_store_write) AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_XSIZE_store_write(volatile XCTYPE *addr, XCTYPE new_val) { XCTYPE old_val; do { old_val = *(XCTYPE *)addr; } while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_write(addr, old_val, new_val))); } # define AO_HAVE_XSIZE_store_write #endif #if defined(AO_HAVE_XSIZE_store_write) \ && !defined(AO_HAVE_XSIZE_store_release_write) # define AO_XSIZE_store_release_write(addr, val) \ AO_XSIZE_store_write(addr, val) # define AO_HAVE_XSIZE_store_release_write #endif #if defined(AO_HAVE_XSIZE_store_release) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_XSIZE_store_full) # define AO_XSIZE_store_full(addr, val) \ (AO_XSIZE_store_release(addr, val), \ AO_nop_full()) # define AO_HAVE_XSIZE_store_full #endif #if defined(AO_HAVE_XSIZE_compare_and_swap) && !defined(AO_HAVE_XSIZE_store) AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_XSIZE_store(volatile XCTYPE *addr, XCTYPE new_val) { XCTYPE old_val; do { old_val = *(XCTYPE *)addr; } while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap(addr, old_val, new_val))); } # define AO_HAVE_XSIZE_store #endif #if defined(AO_HAVE_XSIZE_compare_and_swap_release) \ && !defined(AO_HAVE_XSIZE_store_release) AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_XSIZE_store_release(volatile XCTYPE *addr, XCTYPE new_val) { XCTYPE old_val; do { old_val = 
*(XCTYPE *)addr; } while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_release(addr, old_val, new_val))); } # define AO_HAVE_XSIZE_store_release #endif #if defined(AO_HAVE_XSIZE_compare_and_swap_full) \ && !defined(AO_HAVE_XSIZE_store_full) AO_ATTR_NO_SANITIZE_MEMORY AO_ATTR_NO_SANITIZE_THREAD AO_INLINE void AO_XSIZE_store_full(volatile XCTYPE *addr, XCTYPE new_val) { XCTYPE old_val; do { old_val = *(XCTYPE *)addr; } while (AO_EXPECT_FALSE(!AO_XSIZE_compare_and_swap_full(addr, old_val, new_val))); } # define AO_HAVE_XSIZE_store_full #endif libatomic_ops-7.6.12/src/atomic_ops/generalize.h000066400000000000000000000732061411761111000216560ustar00rootroot00000000000000/* * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* * Generalize atomic operations for atomic_ops.h. * Should not be included directly. 
* * We make no attempt to define useless operations, such as * AO_nop_acquire * AO_nop_release * * We have also so far neglected to define some others, which * do not appear likely to be useful, e.g. stores with acquire * or read barriers. * * This file is sometimes included twice by atomic_ops.h. * All definitions include explicit checks that we are not replacing * an earlier definition. In general, more desirable expansions * appear earlier so that we are more likely to use them. * * We only make safe generalizations, except that by default we define * the ...dd_acquire_read operations to be equivalent to those without * a barrier. On platforms for which this is unsafe, the platform-specific * file must define AO_NO_DD_ORDERING. */ #ifndef AO_ATOMIC_OPS_H # error This file should not be included directly. #endif /* Generate test_and_set_full, if necessary and possible. */ #if !defined(AO_HAVE_test_and_set) && !defined(AO_HAVE_test_and_set_release) \ && !defined(AO_HAVE_test_and_set_acquire) \ && !defined(AO_HAVE_test_and_set_read) \ && !defined(AO_HAVE_test_and_set_full) /* Emulate AO_compare_and_swap() via AO_fetch_compare_and_swap(). 
*/ # if defined(AO_HAVE_fetch_compare_and_swap) \ && !defined(AO_HAVE_compare_and_swap) AO_INLINE int AO_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val) { return AO_fetch_compare_and_swap(addr, old_val, new_val) == old_val; } # define AO_HAVE_compare_and_swap # endif # if defined(AO_HAVE_fetch_compare_and_swap_full) \ && !defined(AO_HAVE_compare_and_swap_full) AO_INLINE int AO_compare_and_swap_full(volatile AO_t *addr, AO_t old_val, AO_t new_val) { return AO_fetch_compare_and_swap_full(addr, old_val, new_val) == old_val; } # define AO_HAVE_compare_and_swap_full # endif # if defined(AO_HAVE_fetch_compare_and_swap_acquire) \ && !defined(AO_HAVE_compare_and_swap_acquire) AO_INLINE int AO_compare_and_swap_acquire(volatile AO_t *addr, AO_t old_val, AO_t new_val) { return AO_fetch_compare_and_swap_acquire(addr, old_val, new_val) == old_val; } # define AO_HAVE_compare_and_swap_acquire # endif # if defined(AO_HAVE_fetch_compare_and_swap_release) \ && !defined(AO_HAVE_compare_and_swap_release) AO_INLINE int AO_compare_and_swap_release(volatile AO_t *addr, AO_t old_val, AO_t new_val) { return AO_fetch_compare_and_swap_release(addr, old_val, new_val) == old_val; } # define AO_HAVE_compare_and_swap_release # endif # if defined(AO_CHAR_TS_T) # define AO_TS_COMPARE_AND_SWAP_FULL(a,o,n) \ AO_char_compare_and_swap_full(a,o,n) # define AO_TS_COMPARE_AND_SWAP_ACQUIRE(a,o,n) \ AO_char_compare_and_swap_acquire(a,o,n) # define AO_TS_COMPARE_AND_SWAP_RELEASE(a,o,n) \ AO_char_compare_and_swap_release(a,o,n) # define AO_TS_COMPARE_AND_SWAP(a,o,n) AO_char_compare_and_swap(a,o,n) # endif # if defined(AO_AO_TS_T) # define AO_TS_COMPARE_AND_SWAP_FULL(a,o,n) AO_compare_and_swap_full(a,o,n) # define AO_TS_COMPARE_AND_SWAP_ACQUIRE(a,o,n) \ AO_compare_and_swap_acquire(a,o,n) # define AO_TS_COMPARE_AND_SWAP_RELEASE(a,o,n) \ AO_compare_and_swap_release(a,o,n) # define AO_TS_COMPARE_AND_SWAP(a,o,n) AO_compare_and_swap(a,o,n) # endif # if (defined(AO_AO_TS_T) && 
defined(AO_HAVE_compare_and_swap_full)) \ || (defined(AO_CHAR_TS_T) && defined(AO_HAVE_char_compare_and_swap_full)) AO_INLINE AO_TS_VAL_t AO_test_and_set_full(volatile AO_TS_t *addr) { if (AO_TS_COMPARE_AND_SWAP_FULL(addr, AO_TS_CLEAR, AO_TS_SET)) return AO_TS_CLEAR; else return AO_TS_SET; } # define AO_HAVE_test_and_set_full # endif /* AO_HAVE_compare_and_swap_full */ # if (defined(AO_AO_TS_T) && defined(AO_HAVE_compare_and_swap_acquire)) \ || (defined(AO_CHAR_TS_T) \ && defined(AO_HAVE_char_compare_and_swap_acquire)) AO_INLINE AO_TS_VAL_t AO_test_and_set_acquire(volatile AO_TS_t *addr) { if (AO_TS_COMPARE_AND_SWAP_ACQUIRE(addr, AO_TS_CLEAR, AO_TS_SET)) return AO_TS_CLEAR; else return AO_TS_SET; } # define AO_HAVE_test_and_set_acquire # endif /* AO_HAVE_compare_and_swap_acquire */ # if (defined(AO_AO_TS_T) && defined(AO_HAVE_compare_and_swap_release)) \ || (defined(AO_CHAR_TS_T) \ && defined(AO_HAVE_char_compare_and_swap_release)) AO_INLINE AO_TS_VAL_t AO_test_and_set_release(volatile AO_TS_t *addr) { if (AO_TS_COMPARE_AND_SWAP_RELEASE(addr, AO_TS_CLEAR, AO_TS_SET)) return AO_TS_CLEAR; else return AO_TS_SET; } # define AO_HAVE_test_and_set_release # endif /* AO_HAVE_compare_and_swap_release */ # if (defined(AO_AO_TS_T) && defined(AO_HAVE_compare_and_swap)) \ || (defined(AO_CHAR_TS_T) && defined(AO_HAVE_char_compare_and_swap)) AO_INLINE AO_TS_VAL_t AO_test_and_set(volatile AO_TS_t *addr) { if (AO_TS_COMPARE_AND_SWAP(addr, AO_TS_CLEAR, AO_TS_SET)) return AO_TS_CLEAR; else return AO_TS_SET; } # define AO_HAVE_test_and_set # endif /* AO_HAVE_compare_and_swap */ #endif /* No prior test and set */ /* Nop */ #if !defined(AO_HAVE_nop) AO_INLINE void AO_nop(void) {} # define AO_HAVE_nop #endif #if defined(AO_HAVE_test_and_set_full) && !defined(AO_HAVE_nop_full) AO_INLINE void AO_nop_full(void) { AO_TS_t dummy = AO_TS_INITIALIZER; AO_test_and_set_full(&dummy); } # define AO_HAVE_nop_full #endif #if defined(AO_HAVE_nop_acquire) && !defined(CPPCHECK) # error AO_nop_acquire is 
useless: do not define. #endif #if defined(AO_HAVE_nop_release) && !defined(CPPCHECK) # error AO_nop_release is useless: do not define. #endif #if defined(AO_HAVE_nop_full) && !defined(AO_HAVE_nop_read) # define AO_nop_read() AO_nop_full() # define AO_HAVE_nop_read #endif #if defined(AO_HAVE_nop_full) && !defined(AO_HAVE_nop_write) # define AO_nop_write() AO_nop_full() # define AO_HAVE_nop_write #endif /* Test_and_set */ #if defined(AO_HAVE_test_and_set) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_test_and_set_release) # define AO_test_and_set_release(addr) (AO_nop_full(), AO_test_and_set(addr)) # define AO_HAVE_test_and_set_release #endif #if defined(AO_HAVE_test_and_set) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_test_and_set_acquire) AO_INLINE AO_TS_VAL_t AO_test_and_set_acquire(volatile AO_TS_t *addr) { AO_TS_VAL_t result = AO_test_and_set(addr); AO_nop_full(); return result; } # define AO_HAVE_test_and_set_acquire #endif #if defined(AO_HAVE_test_and_set_full) # if !defined(AO_HAVE_test_and_set_release) # define AO_test_and_set_release(addr) AO_test_and_set_full(addr) # define AO_HAVE_test_and_set_release # endif # if !defined(AO_HAVE_test_and_set_acquire) # define AO_test_and_set_acquire(addr) AO_test_and_set_full(addr) # define AO_HAVE_test_and_set_acquire # endif # if !defined(AO_HAVE_test_and_set_write) # define AO_test_and_set_write(addr) AO_test_and_set_full(addr) # define AO_HAVE_test_and_set_write # endif # if !defined(AO_HAVE_test_and_set_read) # define AO_test_and_set_read(addr) AO_test_and_set_full(addr) # define AO_HAVE_test_and_set_read # endif #endif /* AO_HAVE_test_and_set_full */ #if !defined(AO_HAVE_test_and_set) && defined(AO_HAVE_test_and_set_release) # define AO_test_and_set(addr) AO_test_and_set_release(addr) # define AO_HAVE_test_and_set #endif #if !defined(AO_HAVE_test_and_set) && defined(AO_HAVE_test_and_set_acquire) # define AO_test_and_set(addr) AO_test_and_set_acquire(addr) # define AO_HAVE_test_and_set #endif #if 
!defined(AO_HAVE_test_and_set) && defined(AO_HAVE_test_and_set_write) # define AO_test_and_set(addr) AO_test_and_set_write(addr) # define AO_HAVE_test_and_set #endif #if !defined(AO_HAVE_test_and_set) && defined(AO_HAVE_test_and_set_read) # define AO_test_and_set(addr) AO_test_and_set_read(addr) # define AO_HAVE_test_and_set #endif #if defined(AO_HAVE_test_and_set_acquire) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_test_and_set_full) # define AO_test_and_set_full(addr) \ (AO_nop_full(), AO_test_and_set_acquire(addr)) # define AO_HAVE_test_and_set_full #endif #if !defined(AO_HAVE_test_and_set_release_write) \ && defined(AO_HAVE_test_and_set_write) # define AO_test_and_set_release_write(addr) AO_test_and_set_write(addr) # define AO_HAVE_test_and_set_release_write #endif #if !defined(AO_HAVE_test_and_set_release_write) \ && defined(AO_HAVE_test_and_set_release) # define AO_test_and_set_release_write(addr) AO_test_and_set_release(addr) # define AO_HAVE_test_and_set_release_write #endif #if !defined(AO_HAVE_test_and_set_acquire_read) \ && defined(AO_HAVE_test_and_set_read) # define AO_test_and_set_acquire_read(addr) AO_test_and_set_read(addr) # define AO_HAVE_test_and_set_acquire_read #endif #if !defined(AO_HAVE_test_and_set_acquire_read) \ && defined(AO_HAVE_test_and_set_acquire) # define AO_test_and_set_acquire_read(addr) AO_test_and_set_acquire(addr) # define AO_HAVE_test_and_set_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_test_and_set_acquire_read) # define AO_test_and_set_dd_acquire_read(addr) \ AO_test_and_set_acquire_read(addr) # define AO_HAVE_test_and_set_dd_acquire_read # endif #else # if defined(AO_HAVE_test_and_set) # define AO_test_and_set_dd_acquire_read(addr) AO_test_and_set(addr) # define AO_HAVE_test_and_set_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ #include "generalize-small.h" #include "generalize-arithm.h" /* Compare_double_and_swap_double based on double_compare_and_swap. 
*/ #ifdef AO_HAVE_DOUBLE_PTR_STORAGE # if defined(AO_HAVE_double_compare_and_swap) \ && !defined(AO_HAVE_compare_double_and_swap_double) AO_INLINE int AO_compare_double_and_swap_double(volatile AO_double_t *addr, AO_t old_val1, AO_t old_val2, AO_t new_val1, AO_t new_val2) { AO_double_t old_w; AO_double_t new_w; old_w.AO_val1 = old_val1; old_w.AO_val2 = old_val2; new_w.AO_val1 = new_val1; new_w.AO_val2 = new_val2; return AO_double_compare_and_swap(addr, old_w, new_w); } # define AO_HAVE_compare_double_and_swap_double # endif # if defined(AO_HAVE_double_compare_and_swap_acquire) \ && !defined(AO_HAVE_compare_double_and_swap_double_acquire) AO_INLINE int AO_compare_double_and_swap_double_acquire(volatile AO_double_t *addr, AO_t old_val1, AO_t old_val2, AO_t new_val1, AO_t new_val2) { AO_double_t old_w; AO_double_t new_w; old_w.AO_val1 = old_val1; old_w.AO_val2 = old_val2; new_w.AO_val1 = new_val1; new_w.AO_val2 = new_val2; return AO_double_compare_and_swap_acquire(addr, old_w, new_w); } # define AO_HAVE_compare_double_and_swap_double_acquire # endif # if defined(AO_HAVE_double_compare_and_swap_release) \ && !defined(AO_HAVE_compare_double_and_swap_double_release) AO_INLINE int AO_compare_double_and_swap_double_release(volatile AO_double_t *addr, AO_t old_val1, AO_t old_val2, AO_t new_val1, AO_t new_val2) { AO_double_t old_w; AO_double_t new_w; old_w.AO_val1 = old_val1; old_w.AO_val2 = old_val2; new_w.AO_val1 = new_val1; new_w.AO_val2 = new_val2; return AO_double_compare_and_swap_release(addr, old_w, new_w); } # define AO_HAVE_compare_double_and_swap_double_release # endif # if defined(AO_HAVE_double_compare_and_swap_full) \ && !defined(AO_HAVE_compare_double_and_swap_double_full) AO_INLINE int AO_compare_double_and_swap_double_full(volatile AO_double_t *addr, AO_t old_val1, AO_t old_val2, AO_t new_val1, AO_t new_val2) { AO_double_t old_w; AO_double_t new_w; old_w.AO_val1 = old_val1; old_w.AO_val2 = old_val2; new_w.AO_val1 = new_val1; new_w.AO_val2 = new_val2; return 
AO_double_compare_and_swap_full(addr, old_w, new_w); } # define AO_HAVE_compare_double_and_swap_double_full # endif #endif /* AO_HAVE_DOUBLE_PTR_STORAGE */ /* Compare_double_and_swap_double */ #if defined(AO_HAVE_compare_double_and_swap_double) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_compare_double_and_swap_double_acquire) AO_INLINE int AO_compare_double_and_swap_double_acquire(volatile AO_double_t *addr, AO_t o1, AO_t o2, AO_t n1, AO_t n2) { int result = AO_compare_double_and_swap_double(addr, o1, o2, n1, n2); AO_nop_full(); return result; } # define AO_HAVE_compare_double_and_swap_double_acquire #endif #if defined(AO_HAVE_compare_double_and_swap_double) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_compare_double_and_swap_double_release) # define AO_compare_double_and_swap_double_release(addr,o1,o2,n1,n2) \ (AO_nop_full(), AO_compare_double_and_swap_double(addr,o1,o2,n1,n2)) # define AO_HAVE_compare_double_and_swap_double_release #endif #if defined(AO_HAVE_compare_double_and_swap_double_full) # if !defined(AO_HAVE_compare_double_and_swap_double_release) # define AO_compare_double_and_swap_double_release(addr,o1,o2,n1,n2) \ AO_compare_double_and_swap_double_full(addr,o1,o2,n1,n2) # define AO_HAVE_compare_double_and_swap_double_release # endif # if !defined(AO_HAVE_compare_double_and_swap_double_acquire) # define AO_compare_double_and_swap_double_acquire(addr,o1,o2,n1,n2) \ AO_compare_double_and_swap_double_full(addr,o1,o2,n1,n2) # define AO_HAVE_compare_double_and_swap_double_acquire # endif # if !defined(AO_HAVE_compare_double_and_swap_double_write) # define AO_compare_double_and_swap_double_write(addr,o1,o2,n1,n2) \ AO_compare_double_and_swap_double_full(addr,o1,o2,n1,n2) # define AO_HAVE_compare_double_and_swap_double_write # endif # if !defined(AO_HAVE_compare_double_and_swap_double_read) # define AO_compare_double_and_swap_double_read(addr,o1,o2,n1,n2) \ AO_compare_double_and_swap_double_full(addr,o1,o2,n1,n2) # define 
AO_HAVE_compare_double_and_swap_double_read # endif #endif /* AO_HAVE_compare_double_and_swap_double_full */ #if !defined(AO_HAVE_compare_double_and_swap_double) \ && defined(AO_HAVE_compare_double_and_swap_double_release) # define AO_compare_double_and_swap_double(addr,o1,o2,n1,n2) \ AO_compare_double_and_swap_double_release(addr,o1,o2,n1,n2) # define AO_HAVE_compare_double_and_swap_double #endif #if !defined(AO_HAVE_compare_double_and_swap_double) \ && defined(AO_HAVE_compare_double_and_swap_double_acquire) # define AO_compare_double_and_swap_double(addr,o1,o2,n1,n2) \ AO_compare_double_and_swap_double_acquire(addr,o1,o2,n1,n2) # define AO_HAVE_compare_double_and_swap_double #endif #if !defined(AO_HAVE_compare_double_and_swap_double) \ && defined(AO_HAVE_compare_double_and_swap_double_write) # define AO_compare_double_and_swap_double(addr,o1,o2,n1,n2) \ AO_compare_double_and_swap_double_write(addr,o1,o2,n1,n2) # define AO_HAVE_compare_double_and_swap_double #endif #if !defined(AO_HAVE_compare_double_and_swap_double) \ && defined(AO_HAVE_compare_double_and_swap_double_read) # define AO_compare_double_and_swap_double(addr,o1,o2,n1,n2) \ AO_compare_double_and_swap_double_read(addr,o1,o2,n1,n2) # define AO_HAVE_compare_double_and_swap_double #endif #if defined(AO_HAVE_compare_double_and_swap_double_acquire) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_compare_double_and_swap_double_full) # define AO_compare_double_and_swap_double_full(addr,o1,o2,n1,n2) \ (AO_nop_full(), \ AO_compare_double_and_swap_double_acquire(addr,o1,o2,n1,n2)) # define AO_HAVE_compare_double_and_swap_double_full #endif #if !defined(AO_HAVE_compare_double_and_swap_double_release_write) \ && defined(AO_HAVE_compare_double_and_swap_double_write) # define AO_compare_double_and_swap_double_release_write(addr,o1,o2,n1,n2) \ AO_compare_double_and_swap_double_write(addr,o1,o2,n1,n2) # define AO_HAVE_compare_double_and_swap_double_release_write #endif #if 
!defined(AO_HAVE_compare_double_and_swap_double_release_write) \ && defined(AO_HAVE_compare_double_and_swap_double_release) # define AO_compare_double_and_swap_double_release_write(addr,o1,o2,n1,n2) \ AO_compare_double_and_swap_double_release(addr,o1,o2,n1,n2) # define AO_HAVE_compare_double_and_swap_double_release_write #endif #if !defined(AO_HAVE_compare_double_and_swap_double_acquire_read) \ && defined(AO_HAVE_compare_double_and_swap_double_read) # define AO_compare_double_and_swap_double_acquire_read(addr,o1,o2,n1,n2) \ AO_compare_double_and_swap_double_read(addr,o1,o2,n1,n2) # define AO_HAVE_compare_double_and_swap_double_acquire_read #endif #if !defined(AO_HAVE_compare_double_and_swap_double_acquire_read) \ && defined(AO_HAVE_compare_double_and_swap_double_acquire) # define AO_compare_double_and_swap_double_acquire_read(addr,o1,o2,n1,n2) \ AO_compare_double_and_swap_double_acquire(addr,o1,o2,n1,n2) # define AO_HAVE_compare_double_and_swap_double_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_compare_double_and_swap_double_acquire_read) # define AO_compare_double_and_swap_double_dd_acquire_read(addr,o1,o2,n1,n2) \ AO_compare_double_and_swap_double_acquire_read(addr,o1,o2,n1,n2) # define AO_HAVE_compare_double_and_swap_double_dd_acquire_read # endif #else # if defined(AO_HAVE_compare_double_and_swap_double) # define AO_compare_double_and_swap_double_dd_acquire_read(addr,o1,o2,n1,n2) \ AO_compare_double_and_swap_double(addr,o1,o2,n1,n2) # define AO_HAVE_compare_double_and_swap_double_dd_acquire_read # endif #endif /* !AO_NO_DD_ORDERING */ /* Compare_and_swap_double */ #if defined(AO_HAVE_compare_and_swap_double) && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_compare_and_swap_double_acquire) AO_INLINE int AO_compare_and_swap_double_acquire(volatile AO_double_t *addr, AO_t o1, AO_t n1, AO_t n2) { int result = AO_compare_and_swap_double(addr, o1, n1, n2); AO_nop_full(); return result; } # define AO_HAVE_compare_and_swap_double_acquire #endif 
#if defined(AO_HAVE_compare_and_swap_double) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_compare_and_swap_double_release) # define AO_compare_and_swap_double_release(addr,o1,n1,n2) \ (AO_nop_full(), AO_compare_and_swap_double(addr,o1,n1,n2)) # define AO_HAVE_compare_and_swap_double_release #endif #if defined(AO_HAVE_compare_and_swap_double_full) # if !defined(AO_HAVE_compare_and_swap_double_release) # define AO_compare_and_swap_double_release(addr,o1,n1,n2) \ AO_compare_and_swap_double_full(addr,o1,n1,n2) # define AO_HAVE_compare_and_swap_double_release # endif # if !defined(AO_HAVE_compare_and_swap_double_acquire) # define AO_compare_and_swap_double_acquire(addr,o1,n1,n2) \ AO_compare_and_swap_double_full(addr,o1,n1,n2) # define AO_HAVE_compare_and_swap_double_acquire # endif # if !defined(AO_HAVE_compare_and_swap_double_write) # define AO_compare_and_swap_double_write(addr,o1,n1,n2) \ AO_compare_and_swap_double_full(addr,o1,n1,n2) # define AO_HAVE_compare_and_swap_double_write # endif # if !defined(AO_HAVE_compare_and_swap_double_read) # define AO_compare_and_swap_double_read(addr,o1,n1,n2) \ AO_compare_and_swap_double_full(addr,o1,n1,n2) # define AO_HAVE_compare_and_swap_double_read # endif #endif /* AO_HAVE_compare_and_swap_double_full */ #if !defined(AO_HAVE_compare_and_swap_double) \ && defined(AO_HAVE_compare_and_swap_double_release) # define AO_compare_and_swap_double(addr,o1,n1,n2) \ AO_compare_and_swap_double_release(addr,o1,n1,n2) # define AO_HAVE_compare_and_swap_double #endif #if !defined(AO_HAVE_compare_and_swap_double) \ && defined(AO_HAVE_compare_and_swap_double_acquire) # define AO_compare_and_swap_double(addr,o1,n1,n2) \ AO_compare_and_swap_double_acquire(addr,o1,n1,n2) # define AO_HAVE_compare_and_swap_double #endif #if !defined(AO_HAVE_compare_and_swap_double) \ && defined(AO_HAVE_compare_and_swap_double_write) # define AO_compare_and_swap_double(addr,o1,n1,n2) \ AO_compare_and_swap_double_write(addr,o1,n1,n2) # define 
AO_HAVE_compare_and_swap_double #endif #if !defined(AO_HAVE_compare_and_swap_double) \ && defined(AO_HAVE_compare_and_swap_double_read) # define AO_compare_and_swap_double(addr,o1,n1,n2) \ AO_compare_and_swap_double_read(addr,o1,n1,n2) # define AO_HAVE_compare_and_swap_double #endif #if defined(AO_HAVE_compare_and_swap_double_acquire) \ && defined(AO_HAVE_nop_full) \ && !defined(AO_HAVE_compare_and_swap_double_full) # define AO_compare_and_swap_double_full(addr,o1,n1,n2) \ (AO_nop_full(), AO_compare_and_swap_double_acquire(addr,o1,n1,n2)) # define AO_HAVE_compare_and_swap_double_full #endif #if !defined(AO_HAVE_compare_and_swap_double_release_write) \ && defined(AO_HAVE_compare_and_swap_double_write) # define AO_compare_and_swap_double_release_write(addr,o1,n1,n2) \ AO_compare_and_swap_double_write(addr,o1,n1,n2) # define AO_HAVE_compare_and_swap_double_release_write #endif #if !defined(AO_HAVE_compare_and_swap_double_release_write) \ && defined(AO_HAVE_compare_and_swap_double_release) # define AO_compare_and_swap_double_release_write(addr,o1,n1,n2) \ AO_compare_and_swap_double_release(addr,o1,n1,n2) # define AO_HAVE_compare_and_swap_double_release_write #endif #if !defined(AO_HAVE_compare_and_swap_double_acquire_read) \ && defined(AO_HAVE_compare_and_swap_double_read) # define AO_compare_and_swap_double_acquire_read(addr,o1,n1,n2) \ AO_compare_and_swap_double_read(addr,o1,n1,n2) # define AO_HAVE_compare_and_swap_double_acquire_read #endif #if !defined(AO_HAVE_compare_and_swap_double_acquire_read) \ && defined(AO_HAVE_compare_and_swap_double_acquire) # define AO_compare_and_swap_double_acquire_read(addr,o1,n1,n2) \ AO_compare_and_swap_double_acquire(addr,o1,n1,n2) # define AO_HAVE_compare_and_swap_double_acquire_read #endif #ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_compare_and_swap_double_acquire_read) # define AO_compare_and_swap_double_dd_acquire_read(addr,o1,n1,n2) \ AO_compare_and_swap_double_acquire_read(addr,o1,n1,n2) # define 
AO_HAVE_compare_and_swap_double_dd_acquire_read # endif #else # if defined(AO_HAVE_compare_and_swap_double) # define AO_compare_and_swap_double_dd_acquire_read(addr,o1,n1,n2) \ AO_compare_and_swap_double(addr,o1,n1,n2) # define AO_HAVE_compare_and_swap_double_dd_acquire_read # endif #endif /* Convenience functions for AO_double compare-and-swap which types and */ /* reads easier in code. */ #if defined(AO_HAVE_compare_double_and_swap_double) \ && !defined(AO_HAVE_double_compare_and_swap) AO_INLINE int AO_double_compare_and_swap(volatile AO_double_t *addr, AO_double_t old_val, AO_double_t new_val) { return AO_compare_double_and_swap_double(addr, old_val.AO_val1, old_val.AO_val2, new_val.AO_val1, new_val.AO_val2); } # define AO_HAVE_double_compare_and_swap #endif #if defined(AO_HAVE_compare_double_and_swap_double_release) \ && !defined(AO_HAVE_double_compare_and_swap_release) AO_INLINE int AO_double_compare_and_swap_release(volatile AO_double_t *addr, AO_double_t old_val, AO_double_t new_val) { return AO_compare_double_and_swap_double_release(addr, old_val.AO_val1, old_val.AO_val2, new_val.AO_val1, new_val.AO_val2); } # define AO_HAVE_double_compare_and_swap_release #endif #if defined(AO_HAVE_compare_double_and_swap_double_acquire) \ && !defined(AO_HAVE_double_compare_and_swap_acquire) AO_INLINE int AO_double_compare_and_swap_acquire(volatile AO_double_t *addr, AO_double_t old_val, AO_double_t new_val) { return AO_compare_double_and_swap_double_acquire(addr, old_val.AO_val1, old_val.AO_val2, new_val.AO_val1, new_val.AO_val2); } # define AO_HAVE_double_compare_and_swap_acquire #endif #if defined(AO_HAVE_compare_double_and_swap_double_read) \ && !defined(AO_HAVE_double_compare_and_swap_read) AO_INLINE int AO_double_compare_and_swap_read(volatile AO_double_t *addr, AO_double_t old_val, AO_double_t new_val) { return AO_compare_double_and_swap_double_read(addr, old_val.AO_val1, old_val.AO_val2, new_val.AO_val1, new_val.AO_val2); } # define 
AO_HAVE_double_compare_and_swap_read #endif #if defined(AO_HAVE_compare_double_and_swap_double_write) \ && !defined(AO_HAVE_double_compare_and_swap_write) AO_INLINE int AO_double_compare_and_swap_write(volatile AO_double_t *addr, AO_double_t old_val, AO_double_t new_val) { return AO_compare_double_and_swap_double_write(addr, old_val.AO_val1, old_val.AO_val2, new_val.AO_val1, new_val.AO_val2); } # define AO_HAVE_double_compare_and_swap_write #endif #if defined(AO_HAVE_compare_double_and_swap_double_release_write) \ && !defined(AO_HAVE_double_compare_and_swap_release_write) AO_INLINE int AO_double_compare_and_swap_release_write(volatile AO_double_t *addr, AO_double_t old_val, AO_double_t new_val) { return AO_compare_double_and_swap_double_release_write(addr, old_val.AO_val1, old_val.AO_val2, new_val.AO_val1, new_val.AO_val2); } # define AO_HAVE_double_compare_and_swap_release_write #endif #if defined(AO_HAVE_compare_double_and_swap_double_acquire_read) \ && !defined(AO_HAVE_double_compare_and_swap_acquire_read) AO_INLINE int AO_double_compare_and_swap_acquire_read(volatile AO_double_t *addr, AO_double_t old_val, AO_double_t new_val) { return AO_compare_double_and_swap_double_acquire_read(addr, old_val.AO_val1, old_val.AO_val2, new_val.AO_val1, new_val.AO_val2); } # define AO_HAVE_double_compare_and_swap_acquire_read #endif #if defined(AO_HAVE_compare_double_and_swap_double_full) \ && !defined(AO_HAVE_double_compare_and_swap_full) AO_INLINE int AO_double_compare_and_swap_full(volatile AO_double_t *addr, AO_double_t old_val, AO_double_t new_val) { return AO_compare_double_and_swap_double_full(addr, old_val.AO_val1, old_val.AO_val2, new_val.AO_val1, new_val.AO_val2); } # define AO_HAVE_double_compare_and_swap_full #endif #ifndef AO_HAVE_double_compare_and_swap_dd_acquire_read /* Duplicated from generalize-small because double CAS might be */ /* defined after the include. 
*/ # ifdef AO_NO_DD_ORDERING # if defined(AO_HAVE_double_compare_and_swap_acquire_read) # define AO_double_compare_and_swap_dd_acquire_read(addr, old, new_val) \ AO_double_compare_and_swap_acquire_read(addr, old, new_val) # define AO_HAVE_double_compare_and_swap_dd_acquire_read # endif # elif defined(AO_HAVE_double_compare_and_swap) # define AO_double_compare_and_swap_dd_acquire_read(addr, old, new_val) \ AO_double_compare_and_swap(addr, old, new_val) # define AO_HAVE_double_compare_and_swap_dd_acquire_read # endif /* !AO_NO_DD_ORDERING */ #endif libatomic_ops-7.6.12/src/atomic_ops/sysdeps/000077500000000000000000000000001411761111000210425ustar00rootroot00000000000000libatomic_ops-7.6.12/src/atomic_ops/sysdeps/README000066400000000000000000000005411411761111000217220ustar00rootroot00000000000000There are two kinds of entities in this directory: - Subdirectories corresponding to specific compilers (or compiler/OS combinations). Each of these includes one or more architecture-specific headers. - More generic header files corresponding to a particular ordering and/or atomicity property that might be shared by multiple hardware platforms. libatomic_ops-7.6.12/src/atomic_ops/sysdeps/all_acquire_release_volatile.h000066400000000000000000000030541411761111000270750ustar00rootroot00000000000000/* * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* Describes architectures on which volatile AO_t, unsigned char, */ /* unsigned short, and unsigned int loads and stores have */ /* acquire/release semantics for all normally legal alignments. */ #include "loadstore/acquire_release_volatile.h" #include "loadstore/char_acquire_release_volatile.h" #include "loadstore/short_acquire_release_volatile.h" #include "loadstore/int_acquire_release_volatile.h" libatomic_ops-7.6.12/src/atomic_ops/sysdeps/all_aligned_atomic_load_store.h000066400000000000000000000034751411761111000272260ustar00rootroot00000000000000/* * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* Describes architectures on which AO_t, unsigned char, unsigned */ /* short, and unsigned int loads and stores are atomic but only if data */ /* is suitably aligned. */ #if defined(__m68k__) && !defined(AO_ALIGNOF_SUPPORTED) /* Even though AO_t is redefined in m68k.h, some clients use AO */ /* pointer size primitives to access variables not declared as AO_t. */ /* Such variables may have 2-byte alignment, while their sizeof is 4. */ #else # define AO_ACCESS_CHECK_ALIGNED #endif /* Check for char type is a misnomer. */ #define AO_ACCESS_short_CHECK_ALIGNED #define AO_ACCESS_int_CHECK_ALIGNED #include "all_atomic_load_store.h" libatomic_ops-7.6.12/src/atomic_ops/sysdeps/all_atomic_load_store.h000066400000000000000000000030371411761111000255350ustar00rootroot00000000000000/* * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* Describes architectures on which AO_t, unsigned char, unsigned */ /* short, and unsigned int loads and stores are atomic for all normally */ /* legal alignments. */ #include "all_atomic_only_load.h" #include "loadstore/atomic_store.h" #include "loadstore/char_atomic_store.h" #include "loadstore/short_atomic_store.h" #include "loadstore/int_atomic_store.h" libatomic_ops-7.6.12/src/atomic_ops/sysdeps/all_atomic_only_load.h000066400000000000000000000027701411761111000253650ustar00rootroot00000000000000/* * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ /* Describes architectures on which AO_t, unsigned char, unsigned */ /* short, and unsigned int loads are atomic for all normally legal */ /* alignments. */ #include "loadstore/atomic_load.h" #include "loadstore/char_atomic_load.h" #include "loadstore/short_atomic_load.h" #include "loadstore/int_atomic_load.h" libatomic_ops-7.6.12/src/atomic_ops/sysdeps/ao_t_is_int.h000066400000000000000000000560401411761111000235070ustar00rootroot00000000000000/* * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* Inclusion of this file signifies that AO_t is in fact int. */ /* Hence any AO_... operation can also serve as AO_int_... operation. 
*/ #if defined(AO_HAVE_load) && !defined(AO_HAVE_int_load) # define AO_int_load(addr) \ (unsigned)AO_load((const volatile AO_t *)(addr)) # define AO_HAVE_int_load #endif #if defined(AO_HAVE_store) && !defined(AO_HAVE_int_store) # define AO_int_store(addr, val) \ AO_store((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_store #endif #if defined(AO_HAVE_fetch_and_add) \ && !defined(AO_HAVE_int_fetch_and_add) # define AO_int_fetch_and_add(addr, incr) \ (unsigned)AO_fetch_and_add((volatile AO_t *)(addr), \ (AO_t)(incr)) # define AO_HAVE_int_fetch_and_add #endif #if defined(AO_HAVE_fetch_and_add1) \ && !defined(AO_HAVE_int_fetch_and_add1) # define AO_int_fetch_and_add1(addr) \ (unsigned)AO_fetch_and_add1((volatile AO_t *)(addr)) # define AO_HAVE_int_fetch_and_add1 #endif #if defined(AO_HAVE_fetch_and_sub1) \ && !defined(AO_HAVE_int_fetch_and_sub1) # define AO_int_fetch_and_sub1(addr) \ (unsigned)AO_fetch_and_sub1((volatile AO_t *)(addr)) # define AO_HAVE_int_fetch_and_sub1 #endif #if defined(AO_HAVE_and) && !defined(AO_HAVE_int_and) # define AO_int_and(addr, val) \ AO_and((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_and #endif #if defined(AO_HAVE_or) && !defined(AO_HAVE_int_or) # define AO_int_or(addr, val) \ AO_or((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_or #endif #if defined(AO_HAVE_xor) && !defined(AO_HAVE_int_xor) # define AO_int_xor(addr, val) \ AO_xor((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_xor #endif #if defined(AO_HAVE_fetch_compare_and_swap) \ && !defined(AO_HAVE_int_fetch_compare_and_swap) # define AO_int_fetch_compare_and_swap(addr, old, new_val) \ (unsigned)AO_fetch_compare_and_swap((volatile AO_t *)(addr), \ (AO_t)(old), (AO_t)(new_val)) # define AO_HAVE_int_fetch_compare_and_swap #endif #if defined(AO_HAVE_compare_and_swap) \ && !defined(AO_HAVE_int_compare_and_swap) # define AO_int_compare_and_swap(addr, old, new_val) \ AO_compare_and_swap((volatile AO_t *)(addr), \ (AO_t)(old), (AO_t)(new_val)) 
# define AO_HAVE_int_compare_and_swap #endif /* * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* Inclusion of this file signifies that AO_t is in fact int. */ /* Hence any AO_... operation can also serve as AO_int_... operation. 
*/ #if defined(AO_HAVE_load_full) && !defined(AO_HAVE_int_load_full) # define AO_int_load_full(addr) \ (unsigned)AO_load_full((const volatile AO_t *)(addr)) # define AO_HAVE_int_load_full #endif #if defined(AO_HAVE_store_full) && !defined(AO_HAVE_int_store_full) # define AO_int_store_full(addr, val) \ AO_store_full((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_store_full #endif #if defined(AO_HAVE_fetch_and_add_full) \ && !defined(AO_HAVE_int_fetch_and_add_full) # define AO_int_fetch_and_add_full(addr, incr) \ (unsigned)AO_fetch_and_add_full((volatile AO_t *)(addr), \ (AO_t)(incr)) # define AO_HAVE_int_fetch_and_add_full #endif #if defined(AO_HAVE_fetch_and_add1_full) \ && !defined(AO_HAVE_int_fetch_and_add1_full) # define AO_int_fetch_and_add1_full(addr) \ (unsigned)AO_fetch_and_add1_full((volatile AO_t *)(addr)) # define AO_HAVE_int_fetch_and_add1_full #endif #if defined(AO_HAVE_fetch_and_sub1_full) \ && !defined(AO_HAVE_int_fetch_and_sub1_full) # define AO_int_fetch_and_sub1_full(addr) \ (unsigned)AO_fetch_and_sub1_full((volatile AO_t *)(addr)) # define AO_HAVE_int_fetch_and_sub1_full #endif #if defined(AO_HAVE_and_full) && !defined(AO_HAVE_int_and_full) # define AO_int_and_full(addr, val) \ AO_and_full((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_and_full #endif #if defined(AO_HAVE_or_full) && !defined(AO_HAVE_int_or_full) # define AO_int_or_full(addr, val) \ AO_or_full((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_or_full #endif #if defined(AO_HAVE_xor_full) && !defined(AO_HAVE_int_xor_full) # define AO_int_xor_full(addr, val) \ AO_xor_full((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_xor_full #endif #if defined(AO_HAVE_fetch_compare_and_swap_full) \ && !defined(AO_HAVE_int_fetch_compare_and_swap_full) # define AO_int_fetch_compare_and_swap_full(addr, old, new_val) \ (unsigned)AO_fetch_compare_and_swap_full((volatile AO_t *)(addr), \ (AO_t)(old), (AO_t)(new_val)) # define 
AO_HAVE_int_fetch_compare_and_swap_full #endif #if defined(AO_HAVE_compare_and_swap_full) \ && !defined(AO_HAVE_int_compare_and_swap_full) # define AO_int_compare_and_swap_full(addr, old, new_val) \ AO_compare_and_swap_full((volatile AO_t *)(addr), \ (AO_t)(old), (AO_t)(new_val)) # define AO_HAVE_int_compare_and_swap_full #endif /* * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* Inclusion of this file signifies that AO_t is in fact int. */ /* Hence any AO_... operation can also serve as AO_int_... operation. 
*/ #if defined(AO_HAVE_load_acquire) && !defined(AO_HAVE_int_load_acquire) # define AO_int_load_acquire(addr) \ (unsigned)AO_load_acquire((const volatile AO_t *)(addr)) # define AO_HAVE_int_load_acquire #endif #if defined(AO_HAVE_store_acquire) && !defined(AO_HAVE_int_store_acquire) # define AO_int_store_acquire(addr, val) \ AO_store_acquire((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_store_acquire #endif #if defined(AO_HAVE_fetch_and_add_acquire) \ && !defined(AO_HAVE_int_fetch_and_add_acquire) # define AO_int_fetch_and_add_acquire(addr, incr) \ (unsigned)AO_fetch_and_add_acquire((volatile AO_t *)(addr), \ (AO_t)(incr)) # define AO_HAVE_int_fetch_and_add_acquire #endif #if defined(AO_HAVE_fetch_and_add1_acquire) \ && !defined(AO_HAVE_int_fetch_and_add1_acquire) # define AO_int_fetch_and_add1_acquire(addr) \ (unsigned)AO_fetch_and_add1_acquire((volatile AO_t *)(addr)) # define AO_HAVE_int_fetch_and_add1_acquire #endif #if defined(AO_HAVE_fetch_and_sub1_acquire) \ && !defined(AO_HAVE_int_fetch_and_sub1_acquire) # define AO_int_fetch_and_sub1_acquire(addr) \ (unsigned)AO_fetch_and_sub1_acquire((volatile AO_t *)(addr)) # define AO_HAVE_int_fetch_and_sub1_acquire #endif #if defined(AO_HAVE_and_acquire) && !defined(AO_HAVE_int_and_acquire) # define AO_int_and_acquire(addr, val) \ AO_and_acquire((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_and_acquire #endif #if defined(AO_HAVE_or_acquire) && !defined(AO_HAVE_int_or_acquire) # define AO_int_or_acquire(addr, val) \ AO_or_acquire((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_or_acquire #endif #if defined(AO_HAVE_xor_acquire) && !defined(AO_HAVE_int_xor_acquire) # define AO_int_xor_acquire(addr, val) \ AO_xor_acquire((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_xor_acquire #endif #if defined(AO_HAVE_fetch_compare_and_swap_acquire) \ && !defined(AO_HAVE_int_fetch_compare_and_swap_acquire) # define AO_int_fetch_compare_and_swap_acquire(addr, old, new_val) \ 
(unsigned)AO_fetch_compare_and_swap_acquire((volatile AO_t *)(addr), \ (AO_t)(old), (AO_t)(new_val)) # define AO_HAVE_int_fetch_compare_and_swap_acquire #endif #if defined(AO_HAVE_compare_and_swap_acquire) \ && !defined(AO_HAVE_int_compare_and_swap_acquire) # define AO_int_compare_and_swap_acquire(addr, old, new_val) \ AO_compare_and_swap_acquire((volatile AO_t *)(addr), \ (AO_t)(old), (AO_t)(new_val)) # define AO_HAVE_int_compare_and_swap_acquire #endif /* * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* Inclusion of this file signifies that AO_t is in fact int. */ /* Hence any AO_... operation can also serve as AO_int_... operation. 
*/ #if defined(AO_HAVE_load_release) && !defined(AO_HAVE_int_load_release) # define AO_int_load_release(addr) \ (unsigned)AO_load_release((const volatile AO_t *)(addr)) # define AO_HAVE_int_load_release #endif #if defined(AO_HAVE_store_release) && !defined(AO_HAVE_int_store_release) # define AO_int_store_release(addr, val) \ AO_store_release((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_store_release #endif #if defined(AO_HAVE_fetch_and_add_release) \ && !defined(AO_HAVE_int_fetch_and_add_release) # define AO_int_fetch_and_add_release(addr, incr) \ (unsigned)AO_fetch_and_add_release((volatile AO_t *)(addr), \ (AO_t)(incr)) # define AO_HAVE_int_fetch_and_add_release #endif #if defined(AO_HAVE_fetch_and_add1_release) \ && !defined(AO_HAVE_int_fetch_and_add1_release) # define AO_int_fetch_and_add1_release(addr) \ (unsigned)AO_fetch_and_add1_release((volatile AO_t *)(addr)) # define AO_HAVE_int_fetch_and_add1_release #endif #if defined(AO_HAVE_fetch_and_sub1_release) \ && !defined(AO_HAVE_int_fetch_and_sub1_release) # define AO_int_fetch_and_sub1_release(addr) \ (unsigned)AO_fetch_and_sub1_release((volatile AO_t *)(addr)) # define AO_HAVE_int_fetch_and_sub1_release #endif #if defined(AO_HAVE_and_release) && !defined(AO_HAVE_int_and_release) # define AO_int_and_release(addr, val) \ AO_and_release((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_and_release #endif #if defined(AO_HAVE_or_release) && !defined(AO_HAVE_int_or_release) # define AO_int_or_release(addr, val) \ AO_or_release((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_or_release #endif #if defined(AO_HAVE_xor_release) && !defined(AO_HAVE_int_xor_release) # define AO_int_xor_release(addr, val) \ AO_xor_release((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_xor_release #endif #if defined(AO_HAVE_fetch_compare_and_swap_release) \ && !defined(AO_HAVE_int_fetch_compare_and_swap_release) # define AO_int_fetch_compare_and_swap_release(addr, old, new_val) \ 
(unsigned)AO_fetch_compare_and_swap_release((volatile AO_t *)(addr), \ (AO_t)(old), (AO_t)(new_val)) # define AO_HAVE_int_fetch_compare_and_swap_release #endif #if defined(AO_HAVE_compare_and_swap_release) \ && !defined(AO_HAVE_int_compare_and_swap_release) # define AO_int_compare_and_swap_release(addr, old, new_val) \ AO_compare_and_swap_release((volatile AO_t *)(addr), \ (AO_t)(old), (AO_t)(new_val)) # define AO_HAVE_int_compare_and_swap_release #endif /* * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* Inclusion of this file signifies that AO_t is in fact int. */ /* Hence any AO_... operation can also serve as AO_int_... operation. 
*/ #if defined(AO_HAVE_load_write) && !defined(AO_HAVE_int_load_write) # define AO_int_load_write(addr) \ (unsigned)AO_load_write((const volatile AO_t *)(addr)) # define AO_HAVE_int_load_write #endif #if defined(AO_HAVE_store_write) && !defined(AO_HAVE_int_store_write) # define AO_int_store_write(addr, val) \ AO_store_write((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_store_write #endif #if defined(AO_HAVE_fetch_and_add_write) \ && !defined(AO_HAVE_int_fetch_and_add_write) # define AO_int_fetch_and_add_write(addr, incr) \ (unsigned)AO_fetch_and_add_write((volatile AO_t *)(addr), \ (AO_t)(incr)) # define AO_HAVE_int_fetch_and_add_write #endif #if defined(AO_HAVE_fetch_and_add1_write) \ && !defined(AO_HAVE_int_fetch_and_add1_write) # define AO_int_fetch_and_add1_write(addr) \ (unsigned)AO_fetch_and_add1_write((volatile AO_t *)(addr)) # define AO_HAVE_int_fetch_and_add1_write #endif #if defined(AO_HAVE_fetch_and_sub1_write) \ && !defined(AO_HAVE_int_fetch_and_sub1_write) # define AO_int_fetch_and_sub1_write(addr) \ (unsigned)AO_fetch_and_sub1_write((volatile AO_t *)(addr)) # define AO_HAVE_int_fetch_and_sub1_write #endif #if defined(AO_HAVE_and_write) && !defined(AO_HAVE_int_and_write) # define AO_int_and_write(addr, val) \ AO_and_write((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_and_write #endif #if defined(AO_HAVE_or_write) && !defined(AO_HAVE_int_or_write) # define AO_int_or_write(addr, val) \ AO_or_write((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_or_write #endif #if defined(AO_HAVE_xor_write) && !defined(AO_HAVE_int_xor_write) # define AO_int_xor_write(addr, val) \ AO_xor_write((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_xor_write #endif #if defined(AO_HAVE_fetch_compare_and_swap_write) \ && !defined(AO_HAVE_int_fetch_compare_and_swap_write) # define AO_int_fetch_compare_and_swap_write(addr, old, new_val) \ (unsigned)AO_fetch_compare_and_swap_write((volatile AO_t *)(addr), \ (AO_t)(old), (AO_t)(new_val)) 
# define AO_HAVE_int_fetch_compare_and_swap_write #endif #if defined(AO_HAVE_compare_and_swap_write) \ && !defined(AO_HAVE_int_compare_and_swap_write) # define AO_int_compare_and_swap_write(addr, old, new_val) \ AO_compare_and_swap_write((volatile AO_t *)(addr), \ (AO_t)(old), (AO_t)(new_val)) # define AO_HAVE_int_compare_and_swap_write #endif /* * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* Inclusion of this file signifies that AO_t is in fact int. */ /* Hence any AO_... operation can also serve as AO_int_... operation. 
*/ #if defined(AO_HAVE_load_read) && !defined(AO_HAVE_int_load_read) # define AO_int_load_read(addr) \ (unsigned)AO_load_read((const volatile AO_t *)(addr)) # define AO_HAVE_int_load_read #endif #if defined(AO_HAVE_store_read) && !defined(AO_HAVE_int_store_read) # define AO_int_store_read(addr, val) \ AO_store_read((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_store_read #endif #if defined(AO_HAVE_fetch_and_add_read) \ && !defined(AO_HAVE_int_fetch_and_add_read) # define AO_int_fetch_and_add_read(addr, incr) \ (unsigned)AO_fetch_and_add_read((volatile AO_t *)(addr), \ (AO_t)(incr)) # define AO_HAVE_int_fetch_and_add_read #endif #if defined(AO_HAVE_fetch_and_add1_read) \ && !defined(AO_HAVE_int_fetch_and_add1_read) # define AO_int_fetch_and_add1_read(addr) \ (unsigned)AO_fetch_and_add1_read((volatile AO_t *)(addr)) # define AO_HAVE_int_fetch_and_add1_read #endif #if defined(AO_HAVE_fetch_and_sub1_read) \ && !defined(AO_HAVE_int_fetch_and_sub1_read) # define AO_int_fetch_and_sub1_read(addr) \ (unsigned)AO_fetch_and_sub1_read((volatile AO_t *)(addr)) # define AO_HAVE_int_fetch_and_sub1_read #endif #if defined(AO_HAVE_and_read) && !defined(AO_HAVE_int_and_read) # define AO_int_and_read(addr, val) \ AO_and_read((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_and_read #endif #if defined(AO_HAVE_or_read) && !defined(AO_HAVE_int_or_read) # define AO_int_or_read(addr, val) \ AO_or_read((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_or_read #endif #if defined(AO_HAVE_xor_read) && !defined(AO_HAVE_int_xor_read) # define AO_int_xor_read(addr, val) \ AO_xor_read((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_xor_read #endif #if defined(AO_HAVE_fetch_compare_and_swap_read) \ && !defined(AO_HAVE_int_fetch_compare_and_swap_read) # define AO_int_fetch_compare_and_swap_read(addr, old, new_val) \ (unsigned)AO_fetch_compare_and_swap_read((volatile AO_t *)(addr), \ (AO_t)(old), (AO_t)(new_val)) # define 
AO_HAVE_int_fetch_compare_and_swap_read #endif #if defined(AO_HAVE_compare_and_swap_read) \ && !defined(AO_HAVE_int_compare_and_swap_read) # define AO_int_compare_and_swap_read(addr, old, new_val) \ AO_compare_and_swap_read((volatile AO_t *)(addr), \ (AO_t)(old), (AO_t)(new_val)) # define AO_HAVE_int_compare_and_swap_read #endif libatomic_ops-7.6.12/src/atomic_ops/sysdeps/ao_t_is_int.template000066400000000000000000000075121411761111000250730ustar00rootroot00000000000000/* * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* Inclusion of this file signifies that AO_t is in fact int. */ /* Hence any AO_... operation can also serve as AO_int_... operation. 
*/ #if defined(AO_HAVE_load_XBAR) && !defined(AO_HAVE_int_load_XBAR) # define AO_int_load_XBAR(addr) \ (unsigned)AO_load_XBAR((const volatile AO_t *)(addr)) # define AO_HAVE_int_load_XBAR #endif #if defined(AO_HAVE_store_XBAR) && !defined(AO_HAVE_int_store_XBAR) # define AO_int_store_XBAR(addr, val) \ AO_store_XBAR((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_store_XBAR #endif #if defined(AO_HAVE_fetch_and_add_XBAR) \ && !defined(AO_HAVE_int_fetch_and_add_XBAR) # define AO_int_fetch_and_add_XBAR(addr, incr) \ (unsigned)AO_fetch_and_add_XBAR((volatile AO_t *)(addr), \ (AO_t)(incr)) # define AO_HAVE_int_fetch_and_add_XBAR #endif #if defined(AO_HAVE_fetch_and_add1_XBAR) \ && !defined(AO_HAVE_int_fetch_and_add1_XBAR) # define AO_int_fetch_and_add1_XBAR(addr) \ (unsigned)AO_fetch_and_add1_XBAR((volatile AO_t *)(addr)) # define AO_HAVE_int_fetch_and_add1_XBAR #endif #if defined(AO_HAVE_fetch_and_sub1_XBAR) \ && !defined(AO_HAVE_int_fetch_and_sub1_XBAR) # define AO_int_fetch_and_sub1_XBAR(addr) \ (unsigned)AO_fetch_and_sub1_XBAR((volatile AO_t *)(addr)) # define AO_HAVE_int_fetch_and_sub1_XBAR #endif #if defined(AO_HAVE_and_XBAR) && !defined(AO_HAVE_int_and_XBAR) # define AO_int_and_XBAR(addr, val) \ AO_and_XBAR((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_and_XBAR #endif #if defined(AO_HAVE_or_XBAR) && !defined(AO_HAVE_int_or_XBAR) # define AO_int_or_XBAR(addr, val) \ AO_or_XBAR((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_or_XBAR #endif #if defined(AO_HAVE_xor_XBAR) && !defined(AO_HAVE_int_xor_XBAR) # define AO_int_xor_XBAR(addr, val) \ AO_xor_XBAR((volatile AO_t *)(addr), (AO_t)(val)) # define AO_HAVE_int_xor_XBAR #endif #if defined(AO_HAVE_fetch_compare_and_swap_XBAR) \ && !defined(AO_HAVE_int_fetch_compare_and_swap_XBAR) # define AO_int_fetch_compare_and_swap_XBAR(addr, old, new_val) \ (unsigned)AO_fetch_compare_and_swap_XBAR((volatile AO_t *)(addr), \ (AO_t)(old), (AO_t)(new_val)) # define 
AO_HAVE_int_fetch_compare_and_swap_XBAR #endif #if defined(AO_HAVE_compare_and_swap_XBAR) \ && !defined(AO_HAVE_int_compare_and_swap_XBAR) # define AO_int_compare_and_swap_XBAR(addr, old, new_val) \ AO_compare_and_swap_XBAR((volatile AO_t *)(addr), \ (AO_t)(old), (AO_t)(new_val)) # define AO_HAVE_int_compare_and_swap_XBAR #endif libatomic_ops-7.6.12/src/atomic_ops/sysdeps/armcc/000077500000000000000000000000001411761111000221275ustar00rootroot00000000000000libatomic_ops-7.6.12/src/atomic_ops/sysdeps/armcc/arm_v6.h000066400000000000000000000162501411761111000234760ustar00rootroot00000000000000/* * Copyright (c) 2007 by NEC LE-IT: All rights reserved. * A transcription of ARMv6 atomic operations for the ARM Realview Toolchain. * This code works with armcc from RVDS 3.1 * This is based on work in gcc/arm.h by * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved. * * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. 
* */ #include "../test_and_set_t_is_ao_t.h" /* Probably suboptimal */ #if __TARGET_ARCH_ARM < 6 # if !defined(CPPCHECK) # error Do not use with ARM instruction sets lower than v6 # endif #else #define AO_ACCESS_CHECK_ALIGNED #define AO_ACCESS_short_CHECK_ALIGNED #define AO_ACCESS_int_CHECK_ALIGNED #include "../all_atomic_only_load.h" #include "../standard_ao_double_t.h" /* NEC LE-IT: ARMv6 is the first architecture providing support for simple LL/SC * A data memory barrier must be raised via CP15 command (see documentation). * * ARMv7 is compatible to ARMv6 but has a simpler command for issuing a * memory barrier (DMB). Raising it via CP15 should still work as told me by the * support engineers. If it turns out to be much quicker than we should implement * custom code for ARMv7 using the asm { dmb } command. * * If only a single processor is used, we can define AO_UNIPROCESSOR * and do not need to access CP15 for ensuring a DMB at all. */ AO_INLINE void AO_nop_full(void) { # ifndef AO_UNIPROCESSOR unsigned int dest=0; /* Issue a data memory barrier (keeps ordering of memory transactions */ /* before and after this operation). */ __asm { mcr p15,0,dest,c7,c10,5 }; # else AO_compiler_barrier(); # endif } #define AO_HAVE_nop_full /* NEC LE-IT: atomic "store" - according to ARM documentation this is * the only safe way to set variables also used in LL/SC environment. * A direct write won't be recognized by the LL/SC construct in other CPUs. * * HB: Based on subsequent discussion, I think it would be OK to use an * ordinary store here if we knew that interrupt handlers always cleared * the reservation. They should, but there is some doubt that this is * currently always the case for e.g. Linux. 
*/ AO_INLINE void AO_store(volatile AO_t *addr, AO_t value) { unsigned long tmp; retry: __asm { ldrex tmp, [addr] strex tmp, value, [addr] teq tmp, #0 bne retry }; } #define AO_HAVE_store /* NEC LE-IT: replace the SWAP as recommended by ARM: "Applies to: ARM11 Cores Though the SWP instruction will still work with ARM V6 cores, it is recommended to use the new V6 synchronization instructions. The SWP instruction produces locked read and write accesses which are atomic, i.e. another operation cannot be done between these locked accesses which ties up external bus (AHB,AXI) bandwidth and can increase worst case interrupt latencies. LDREX,STREX are more flexible, other instructions can be done between the LDREX and STREX accesses. " */ #ifndef AO_PREFER_GENERALIZED AO_INLINE AO_TS_VAL_t AO_test_and_set(volatile AO_TS_t *addr) { AO_TS_VAL_t oldval; unsigned long tmp; unsigned long one = 1; retry: __asm { ldrex oldval, [addr] strex tmp, one, [addr] teq tmp, #0 bne retry } return oldval; } #define AO_HAVE_test_and_set AO_INLINE AO_t AO_fetch_and_add(volatile AO_t *p, AO_t incr) { unsigned long tmp,tmp2; AO_t result; retry: __asm { ldrex result, [p] add tmp, incr, result strex tmp2, tmp, [p] teq tmp2, #0 bne retry } return result; } #define AO_HAVE_fetch_and_add AO_INLINE AO_t AO_fetch_and_add1(volatile AO_t *p) { unsigned long tmp,tmp2; AO_t result; retry: __asm { ldrex result, [p] add tmp, result, #1 strex tmp2, tmp, [p] teq tmp2, #0 bne retry } return result; } #define AO_HAVE_fetch_and_add1 AO_INLINE AO_t AO_fetch_and_sub1(volatile AO_t *p) { unsigned long tmp,tmp2; AO_t result; retry: __asm { ldrex result, [p] sub tmp, result, #1 strex tmp2, tmp, [p] teq tmp2, #0 bne retry } return result; } #define AO_HAVE_fetch_and_sub1 #endif /* !AO_PREFER_GENERALIZED */ #ifndef AO_GENERALIZE_ASM_BOOL_CAS /* Returns nonzero if the comparison succeeded. 
*/ AO_INLINE int AO_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val) { AO_t result, tmp; retry: __asm__ { mov result, #2 ldrex tmp, [addr] teq tmp, old_val # ifdef __thumb__ it eq # endif strexeq result, new_val, [addr] teq result, #1 beq retry } return !(result&2); } # define AO_HAVE_compare_and_swap #endif /* !AO_GENERALIZE_ASM_BOOL_CAS */ AO_INLINE AO_t AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val) { AO_t fetched_val, tmp; retry: __asm__ { mov tmp, #2 ldrex fetched_val, [addr] teq fetched_val, old_val # ifdef __thumb__ it eq # endif strexeq tmp, new_val, [addr] teq tmp, #1 beq retry } return fetched_val; } #define AO_HAVE_fetch_compare_and_swap /* helper functions for the Realview compiler: LDREXD is not usable * with inline assembler, so use the "embedded" assembler as * suggested by ARM Dev. support (June 2008). */ __asm inline double_ptr_storage AO_load_ex(const volatile AO_double_t *addr) { LDREXD r0,r1,[r0] } __asm inline int AO_store_ex(AO_t val1, AO_t val2, volatile AO_double_t *addr) { STREXD r3,r0,r1,[r2] MOV r0,r3 } AO_INLINE AO_double_t AO_double_load(const volatile AO_double_t *addr) { AO_double_t result; result.AO_whole = AO_load_ex(addr); return result; } #define AO_HAVE_double_load AO_INLINE int AO_compare_double_and_swap_double(volatile AO_double_t *addr, AO_t old_val1, AO_t old_val2, AO_t new_val1, AO_t new_val2) { double_ptr_storage old_val = ((double_ptr_storage)old_val2 << 32) | old_val1; double_ptr_storage tmp; int result; while(1) { tmp = AO_load_ex(addr); if(tmp != old_val) return 0; result = AO_store_ex(new_val1, new_val2, addr); if(!result) return 1; } } #define AO_HAVE_compare_double_and_swap_double #endif /* __TARGET_ARCH_ARM >= 6 */ #define AO_T_IS_INT libatomic_ops-7.6.12/src/atomic_ops/sysdeps/emul_cas.h000066400000000000000000000065531411761111000230140ustar00rootroot00000000000000/* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* * Ensure, if at all possible, that AO_compare_and_swap_full() is * available. The emulation should be brute-force signal-safe, even * though it actually blocks. * Including this file will generate an error if AO_compare_and_swap_full() * cannot be made available. * This will be included from platform-specific atomic_ops files * if appropriate, and if AO_REQUIRE_CAS is defined. It should not be * included directly, especially since it affects the implementation * of other atomic update primitives. * The implementation assumes that only AO_store_XXX and AO_test_and_set_XXX * variants are defined, and that AO_test_and_set_XXX is not used to * operate on compare_and_swap locations. */ #ifndef AO_ATOMIC_OPS_H # error This file should not be included directly. 
#endif #ifndef AO_HAVE_double_t # include "standard_ao_double_t.h" #endif #ifdef __cplusplus extern "C" { #endif AO_t AO_fetch_compare_and_swap_emulation(volatile AO_t *addr, AO_t old_val, AO_t new_val); int AO_compare_double_and_swap_double_emulation(volatile AO_double_t *addr, AO_t old_val1, AO_t old_val2, AO_t new_val1, AO_t new_val2); void AO_store_full_emulation(volatile AO_t *addr, AO_t val); #ifndef AO_HAVE_fetch_compare_and_swap_full # define AO_fetch_compare_and_swap_full(addr, old, newval) \ AO_fetch_compare_and_swap_emulation(addr, old, newval) # define AO_HAVE_fetch_compare_and_swap_full #endif #ifndef AO_HAVE_compare_double_and_swap_double_full # define AO_compare_double_and_swap_double_full(addr, old1, old2, \ newval1, newval2) \ AO_compare_double_and_swap_double_emulation(addr, old1, old2, \ newval1, newval2) # define AO_HAVE_compare_double_and_swap_double_full #endif #undef AO_store #undef AO_HAVE_store #undef AO_store_write #undef AO_HAVE_store_write #undef AO_store_release #undef AO_HAVE_store_release #undef AO_store_full #undef AO_HAVE_store_full #define AO_store_full(addr, val) AO_store_full_emulation(addr, val) #define AO_HAVE_store_full #ifdef __cplusplus } /* extern "C" */ #endif libatomic_ops-7.6.12/src/atomic_ops/sysdeps/gcc/000077500000000000000000000000001411761111000215765ustar00rootroot00000000000000libatomic_ops-7.6.12/src/atomic_ops/sysdeps/gcc/aarch64.h000066400000000000000000000220161411761111000232000ustar00rootroot00000000000000/* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved. * Copyright (c) 2013-2017 Ivan Maidanski * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. 
* Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. * */ /* As of clang-5.0 (and gcc-5.4), __atomic_thread_fence is always */ /* translated to DMB (which is inefficient for AO_nop_write). */ /* TODO: Update it for newer Clang and GCC releases. */ #if !defined(AO_PREFER_BUILTIN_ATOMICS) && !defined(AO_THREAD_SANITIZER) \ && !defined(AO_UNIPROCESSOR) AO_INLINE void AO_nop_write(void) { __asm__ __volatile__("dmb ishst" : : : "memory"); } # define AO_HAVE_nop_write #endif /* There were some bugs in the older clang releases (related to */ /* optimization of functions dealing with __int128 values, supposedly), */ /* so even asm-based implementation did not work correctly. */ #if !defined(__clang__) || AO_CLANG_PREREQ(3, 9) # include "../standard_ao_double_t.h" /* As of gcc-5.4, all built-in load/store and CAS atomics for double */ /* word require -latomic, are not lock-free and cause test_stack */ /* failure, so the asm-based implementation is used for now. */ /* TODO: Update it for newer GCC releases. */ #if (!defined(__ILP32__) && !defined(__clang__)) \ || defined(AO_AARCH64_ASM_LOAD_STORE_CAS) # ifndef AO_PREFER_GENERALIZED AO_INLINE AO_double_t AO_double_load(const volatile AO_double_t *addr) { AO_double_t result; int status; /* Note that STXP cannot be discarded because LD[A]XP is not */ /* single-copy atomic (unlike LDREXD for 32-bit ARM). 
*/ do { __asm__ __volatile__("//AO_double_load\n" # ifdef __ILP32__ " ldxp %w0, %w1, %3\n" " stxp %w2, %w0, %w1, %3" # else " ldxp %0, %1, %3\n" " stxp %w2, %0, %1, %3" # endif : "=&r" (result.AO_val1), "=&r" (result.AO_val2), "=&r" (status) : "Q" (*addr)); } while (AO_EXPECT_FALSE(status)); return result; } # define AO_HAVE_double_load AO_INLINE AO_double_t AO_double_load_acquire(const volatile AO_double_t *addr) { AO_double_t result; int status; do { __asm__ __volatile__("//AO_double_load_acquire\n" # ifdef __ILP32__ " ldaxp %w0, %w1, %3\n" " stxp %w2, %w0, %w1, %3" # else " ldaxp %0, %1, %3\n" " stxp %w2, %0, %1, %3" # endif : "=&r" (result.AO_val1), "=&r" (result.AO_val2), "=&r" (status) : "Q" (*addr)); } while (AO_EXPECT_FALSE(status)); return result; } # define AO_HAVE_double_load_acquire AO_INLINE void AO_double_store(volatile AO_double_t *addr, AO_double_t value) { AO_double_t old_val; int status; do { __asm__ __volatile__("//AO_double_store\n" # ifdef __ILP32__ " ldxp %w0, %w1, %3\n" " stxp %w2, %w4, %w5, %3" # else " ldxp %0, %1, %3\n" " stxp %w2, %4, %5, %3" # endif : "=&r" (old_val.AO_val1), "=&r" (old_val.AO_val2), "=&r" (status), "=Q" (*addr) : "r" (value.AO_val1), "r" (value.AO_val2)); /* Compared to the arm.h implementation, the 'cc' (flags) are */ /* not clobbered because A64 has no concept of conditional */ /* execution. 
*/ } while (AO_EXPECT_FALSE(status)); } # define AO_HAVE_double_store AO_INLINE void AO_double_store_release(volatile AO_double_t *addr, AO_double_t value) { AO_double_t old_val; int status; do { __asm__ __volatile__("//AO_double_store_release\n" # ifdef __ILP32__ " ldxp %w0, %w1, %3\n" " stlxp %w2, %w4, %w5, %3" # else " ldxp %0, %1, %3\n" " stlxp %w2, %4, %5, %3" # endif : "=&r" (old_val.AO_val1), "=&r" (old_val.AO_val2), "=&r" (status), "=Q" (*addr) : "r" (value.AO_val1), "r" (value.AO_val2)); } while (AO_EXPECT_FALSE(status)); } # define AO_HAVE_double_store_release # endif /* !AO_PREFER_GENERALIZED */ AO_INLINE int AO_double_compare_and_swap(volatile AO_double_t *addr, AO_double_t old_val, AO_double_t new_val) { AO_double_t tmp; int result = 1; do { __asm__ __volatile__("//AO_double_compare_and_swap\n" # ifdef __ILP32__ " ldxp %w0, %w1, %2\n" # else " ldxp %0, %1, %2\n" # endif : "=&r" (tmp.AO_val1), "=&r" (tmp.AO_val2) : "Q" (*addr)); if (tmp.AO_val1 != old_val.AO_val1 || tmp.AO_val2 != old_val.AO_val2) break; __asm__ __volatile__( # ifdef __ILP32__ " stxp %w0, %w2, %w3, %1\n" # else " stxp %w0, %2, %3, %1\n" # endif : "=&r" (result), "=Q" (*addr) : "r" (new_val.AO_val1), "r" (new_val.AO_val2)); } while (AO_EXPECT_FALSE(result)); return !result; } # define AO_HAVE_double_compare_and_swap AO_INLINE int AO_double_compare_and_swap_acquire(volatile AO_double_t *addr, AO_double_t old_val, AO_double_t new_val) { AO_double_t tmp; int result = 1; do { __asm__ __volatile__("//AO_double_compare_and_swap_acquire\n" # ifdef __ILP32__ " ldaxp %w0, %w1, %2\n" # else " ldaxp %0, %1, %2\n" # endif : "=&r" (tmp.AO_val1), "=&r" (tmp.AO_val2) : "Q" (*addr)); if (tmp.AO_val1 != old_val.AO_val1 || tmp.AO_val2 != old_val.AO_val2) break; __asm__ __volatile__( # ifdef __ILP32__ " stxp %w0, %w2, %w3, %1\n" # else " stxp %w0, %2, %3, %1\n" # endif : "=&r" (result), "=Q" (*addr) : "r" (new_val.AO_val1), "r" (new_val.AO_val2)); } while (AO_EXPECT_FALSE(result)); return !result; } # 
define AO_HAVE_double_compare_and_swap_acquire AO_INLINE int AO_double_compare_and_swap_release(volatile AO_double_t *addr, AO_double_t old_val, AO_double_t new_val) { AO_double_t tmp; int result = 1; do { __asm__ __volatile__("//AO_double_compare_and_swap_release\n" # ifdef __ILP32__ " ldxp %w0, %w1, %2\n" # else " ldxp %0, %1, %2\n" # endif : "=&r" (tmp.AO_val1), "=&r" (tmp.AO_val2) : "Q" (*addr)); if (tmp.AO_val1 != old_val.AO_val1 || tmp.AO_val2 != old_val.AO_val2) break; __asm__ __volatile__( # ifdef __ILP32__ " stlxp %w0, %w2, %w3, %1\n" # else " stlxp %w0, %2, %3, %1\n" # endif : "=&r" (result), "=Q" (*addr) : "r" (new_val.AO_val1), "r" (new_val.AO_val2)); } while (AO_EXPECT_FALSE(result)); return !result; } # define AO_HAVE_double_compare_and_swap_release AO_INLINE int AO_double_compare_and_swap_full(volatile AO_double_t *addr, AO_double_t old_val, AO_double_t new_val) { AO_double_t tmp; int result = 1; do { __asm__ __volatile__("//AO_double_compare_and_swap_full\n" # ifdef __ILP32__ " ldaxp %w0, %w1, %2\n" # else " ldaxp %0, %1, %2\n" # endif : "=&r" (tmp.AO_val1), "=&r" (tmp.AO_val2) : "Q" (*addr)); if (tmp.AO_val1 != old_val.AO_val1 || tmp.AO_val2 != old_val.AO_val2) break; __asm__ __volatile__( # ifdef __ILP32__ " stlxp %w0, %w2, %w3, %1\n" # else " stlxp %w0, %2, %3, %1\n" # endif : "=&r" (result), "=Q" (*addr) : "r" (new_val.AO_val1), "r" (new_val.AO_val2)); } while (AO_EXPECT_FALSE(result)); return !result; } # define AO_HAVE_double_compare_and_swap_full #endif /* !__ILP32__ && !__clang__ || AO_AARCH64_ASM_LOAD_STORE_CAS */ /* As of clang-5.0 and gcc-8.1, __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 */ /* macro is still missing (while the double-word CAS is available). */ # ifndef __ILP32__ # define AO_GCC_HAVE_double_SYNC_CAS # endif #endif /* !__clang__ || AO_CLANG_PREREQ(3, 9) */ #if (defined(__clang__) && !AO_CLANG_PREREQ(3, 8)) || defined(__APPLE_CC__) /* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n macros are missing. 
*/ # define AO_GCC_FORCE_HAVE_CAS #endif #include "generic.h" #undef AO_GCC_FORCE_HAVE_CAS #undef AO_GCC_HAVE_double_SYNC_CAS libatomic_ops-7.6.12/src/atomic_ops/sysdeps/gcc/alpha.h000066400000000000000000000040031411761111000230310ustar00rootroot00000000000000/* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. * */ #include "../loadstore/atomic_load.h" #include "../loadstore/atomic_store.h" #include "../test_and_set_t_is_ao_t.h" #define AO_NO_DD_ORDERING /* Data dependence does not imply read ordering. */ AO_INLINE void AO_nop_full(void) { __asm__ __volatile__("mb" : : : "memory"); } #define AO_HAVE_nop_full AO_INLINE void AO_nop_write(void) { __asm__ __volatile__("wmb" : : : "memory"); } #define AO_HAVE_nop_write /* mb should be used for AO_nop_read(). That's the default. */ /* TODO: implement AO_fetch_and_add explicitly. */ /* We believe that ldq_l ... stq_c does not imply any memory barrier. 
*/ AO_INLINE int AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val) { unsigned long was_equal; unsigned long temp; __asm__ __volatile__( "1: ldq_l %0,%1\n" " cmpeq %0,%4,%2\n" " mov %3,%0\n" " beq %2,2f\n" " stq_c %0,%1\n" " beq %0,1b\n" "2:\n" : "=&r" (temp), "+m" (*addr), "=&r" (was_equal) : "r" (new_val), "Ir" (old) :"memory"); return (int)was_equal; } #define AO_HAVE_compare_and_swap /* TODO: implement AO_fetch_compare_and_swap */ libatomic_ops-7.6.12/src/atomic_ops/sysdeps/gcc/arm.h000066400000000000000000000616211411761111000225340ustar00rootroot00000000000000/* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved. * Copyright (c) 2008-2017 Ivan Maidanski * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. * */ #if (AO_GNUC_PREREQ(4, 8) || AO_CLANG_PREREQ(3, 5)) \ && !defined(AO_DISABLE_GCC_ATOMICS) /* Probably, it could be enabled even for earlier gcc/clang versions. */ # define AO_GCC_ATOMIC_TEST_AND_SET #endif #ifdef __native_client__ /* Mask instruction should immediately precede access instruction. 
*/ # define AO_MASK_PTR(reg) " bical " reg ", " reg ", #0xc0000000\n" # define AO_BR_ALIGN " .align 4\n" #else # define AO_MASK_PTR(reg) /* empty */ # define AO_BR_ALIGN /* empty */ #endif #if defined(__thumb__) && !defined(__thumb2__) /* Thumb One mode does not have ARM "mcr", "swp" and some load/store */ /* instructions, so we temporarily switch to ARM mode and go back */ /* afterwards (clobbering "r3" register). */ # define AO_THUMB_GO_ARM \ " adr r3, 4f\n" \ " bx r3\n" \ " .align\n" \ " .arm\n" \ AO_BR_ALIGN \ "4:\n" # define AO_THUMB_RESTORE_MODE \ " adr r3, 5f + 1\n" \ " bx r3\n" \ " .thumb\n" \ AO_BR_ALIGN \ "5:\n" # define AO_THUMB_SWITCH_CLOBBERS "r3", #else # define AO_THUMB_GO_ARM /* empty */ # define AO_THUMB_RESTORE_MODE /* empty */ # define AO_THUMB_SWITCH_CLOBBERS /* empty */ #endif /* !__thumb__ */ /* NEC LE-IT: gcc has no way to easily check the arm architecture */ /* but it defines only one (or several) of __ARM_ARCH_x__ to be true. */ #if !defined(__ARM_ARCH_2__) && !defined(__ARM_ARCH_3__) \ && !defined(__ARM_ARCH_3M__) && !defined(__ARM_ARCH_4__) \ && !defined(__ARM_ARCH_4T__) \ && ((!defined(__ARM_ARCH_5__) && !defined(__ARM_ARCH_5E__) \ && !defined(__ARM_ARCH_5T__) && !defined(__ARM_ARCH_5TE__) \ && !defined(__ARM_ARCH_5TEJ__) && !defined(__ARM_ARCH_6M__)) \ || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \ || defined(__ARM_ARCH_8A__)) # define AO_ARM_HAVE_LDREX # if !defined(__ARM_ARCH_6__) && !defined(__ARM_ARCH_6J__) \ && !defined(__ARM_ARCH_6T2__) /* LDREXB/STREXB and LDREXH/STREXH are present in ARMv6K/Z+. */ # define AO_ARM_HAVE_LDREXBH # endif # if !defined(__ARM_ARCH_6__) && !defined(__ARM_ARCH_6J__) \ && !defined(__ARM_ARCH_6T2__) && !defined(__ARM_ARCH_6Z__) \ && !defined(__ARM_ARCH_6ZT2__) # if !defined(__ARM_ARCH_6K__) && !defined(__ARM_ARCH_6KZ__) \ && !defined(__ARM_ARCH_6ZK__) /* DMB is present in ARMv6M and ARMv7+. 
*/ # define AO_ARM_HAVE_DMB # endif # if (!defined(__thumb__) \ || (defined(__thumb2__) && !defined(__ARM_ARCH_7__) \ && !defined(__ARM_ARCH_7M__) && !defined(__ARM_ARCH_7EM__))) \ && (!defined(__clang__) || AO_CLANG_PREREQ(3, 3)) /* LDREXD/STREXD present in ARMv6K/M+ (see gas/config/tc-arm.c). */ /* In the Thumb mode, this works only starting from ARMv7 (except */ /* for the base and 'M' models). Clang3.2 (and earlier) does not */ /* allocate register pairs for LDREXD/STREXD properly (besides, */ /* Clang3.1 does not support "%H" operand specification). */ # define AO_ARM_HAVE_LDREXD # endif /* !thumb || ARMv7A || ARMv7R+ */ # endif /* ARMv7+ */ #endif /* ARMv6+ */ #if !defined(__ARM_ARCH_2__) && !defined(__ARM_ARCH_6M__) \ && !defined(__ARM_ARCH_8A__) && !defined(__thumb2__) # define AO_ARM_HAVE_SWP /* Note: ARMv6M is excluded due to no ARM mode support. */ /* Also, SWP is obsoleted for ARMv8+. */ #endif /* !__thumb2__ */ #if !defined(AO_UNIPROCESSOR) && defined(AO_ARM_HAVE_DMB) \ && !defined(AO_PREFER_BUILTIN_ATOMICS) AO_INLINE void AO_nop_write(void) { /* AO_THUMB_GO_ARM is empty. */ /* This will target the system domain and thus be overly */ /* conservative as the CPUs (even in case of big.LITTLE SoC) will */ /* occupy the inner shareable domain. */ /* The plain variant (dmb st) is theoretically slower, and should */ /* not be needed. That said, with limited experimentation, a CPU */ /* implementation for which it actually matters has not been found */ /* yet, though they should already exist. */ /* Anyway, note that the "st" and "ishst" barriers are actually */ /* quite weak and, as the libatomic_ops documentation states, */ /* usually not what you really want. 
*/ __asm__ __volatile__("dmb ishst" : : : "memory"); } # define AO_HAVE_nop_write #endif /* AO_ARM_HAVE_DMB */ #ifndef AO_GCC_ATOMIC_TEST_AND_SET #ifdef AO_UNIPROCESSOR /* If only a single processor (core) is used, AO_UNIPROCESSOR could */ /* be defined by the client to avoid unnecessary memory barrier. */ AO_INLINE void AO_nop_full(void) { AO_compiler_barrier(); } # define AO_HAVE_nop_full #elif defined(AO_ARM_HAVE_DMB) /* ARMv7 is compatible to ARMv6 but has a simpler command for issuing */ /* a memory barrier (DMB). Raising it via CP15 should still work */ /* (but slightly less efficient because it requires the use of */ /* a general-purpose register). */ AO_INLINE void AO_nop_full(void) { /* AO_THUMB_GO_ARM is empty. */ __asm__ __volatile__("dmb" : : : "memory"); } # define AO_HAVE_nop_full #elif defined(AO_ARM_HAVE_LDREX) /* ARMv6 is the first architecture providing support for a simple */ /* LL/SC. A data memory barrier must be raised via CP15 command. */ AO_INLINE void AO_nop_full(void) { unsigned dest = 0; /* Issue a data memory barrier (keeps ordering of memory */ /* transactions before and after this operation). */ __asm__ __volatile__("@AO_nop_full\n" AO_THUMB_GO_ARM " mcr p15,0,%0,c7,c10,5\n" AO_THUMB_RESTORE_MODE : "=&r"(dest) : /* empty */ : AO_THUMB_SWITCH_CLOBBERS "memory"); } # define AO_HAVE_nop_full #else /* AO_nop_full() is emulated using AO_test_and_set_full(). */ #endif /* !AO_UNIPROCESSOR && !AO_ARM_HAVE_LDREX */ #endif /* !AO_GCC_ATOMIC_TEST_AND_SET */ #ifdef AO_ARM_HAVE_LDREX /* "ARM Architecture Reference Manual" (chapter A3.5.3) says that the */ /* single-copy atomic processor accesses are all byte accesses, all */ /* halfword accesses to halfword-aligned locations, all word accesses */ /* to word-aligned locations. 
*/ /* There is only a single concern related to AO store operations: */ /* a direct write (by STR[B/H] instruction) will not be recognized */ /* by the LL/SC construct on the same CPU (i.e., according to ARM */ /* documentation, e.g., see CortexA8 TRM reference, point 8.5, */ /* atomic "store" (using LDREX/STREX[B/H]) is the only safe way to */ /* set variables also used in LL/SC environment). */ /* This is only a problem if interrupt handlers do not clear the */ /* reservation (by CLREX instruction or a dummy STREX one), as they */ /* almost certainly should (e.g., see restore_user_regs defined in */ /* arch/arm/kernel/entry-header.S of Linux. Nonetheless, there is */ /* a doubt this was properly implemented in some ancient OS releases. */ # ifdef AO_BROKEN_TASKSWITCH_CLREX # define AO_SKIPATOMIC_store # define AO_SKIPATOMIC_store_release # define AO_SKIPATOMIC_char_store # define AO_SKIPATOMIC_char_store_release # define AO_SKIPATOMIC_short_store # define AO_SKIPATOMIC_short_store_release # define AO_SKIPATOMIC_int_store # define AO_SKIPATOMIC_int_store_release # ifndef AO_PREFER_BUILTIN_ATOMICS AO_INLINE void AO_store(volatile AO_t *addr, AO_t value) { int flag; __asm__ __volatile__("@AO_store\n" AO_THUMB_GO_ARM AO_BR_ALIGN "1: " AO_MASK_PTR("%2") " ldrex %0, [%2]\n" AO_MASK_PTR("%2") " strex %0, %3, [%2]\n" " teq %0, #0\n" " bne 1b\n" AO_THUMB_RESTORE_MODE : "=&r" (flag), "+m" (*addr) : "r" (addr), "r" (value) : AO_THUMB_SWITCH_CLOBBERS "cc"); } # define AO_HAVE_store # ifdef AO_ARM_HAVE_LDREXBH AO_INLINE void AO_char_store(volatile unsigned char *addr, unsigned char value) { int flag; __asm__ __volatile__("@AO_char_store\n" AO_THUMB_GO_ARM AO_BR_ALIGN "1: " AO_MASK_PTR("%2") " ldrexb %0, [%2]\n" AO_MASK_PTR("%2") " strexb %0, %3, [%2]\n" " teq %0, #0\n" " bne 1b\n" AO_THUMB_RESTORE_MODE : "=&r" (flag), "+m" (*addr) : "r" (addr), "r" (value) : AO_THUMB_SWITCH_CLOBBERS "cc"); } # define AO_HAVE_char_store AO_INLINE void AO_short_store(volatile unsigned short 
*addr, unsigned short value) { int flag; __asm__ __volatile__("@AO_short_store\n" AO_THUMB_GO_ARM AO_BR_ALIGN "1: " AO_MASK_PTR("%2") " ldrexh %0, [%2]\n" AO_MASK_PTR("%2") " strexh %0, %3, [%2]\n" " teq %0, #0\n" " bne 1b\n" AO_THUMB_RESTORE_MODE : "=&r" (flag), "+m" (*addr) : "r" (addr), "r" (value) : AO_THUMB_SWITCH_CLOBBERS "cc"); } # define AO_HAVE_short_store # endif /* AO_ARM_HAVE_LDREXBH */ # endif /* !AO_PREFER_BUILTIN_ATOMICS */ # elif !defined(AO_GCC_ATOMIC_TEST_AND_SET) # include "../loadstore/atomic_store.h" /* AO_int_store is defined in ao_t_is_int.h. */ # endif /* !AO_BROKEN_TASKSWITCH_CLREX */ #endif /* AO_ARM_HAVE_LDREX */ #ifndef AO_GCC_ATOMIC_TEST_AND_SET # include "../test_and_set_t_is_ao_t.h" /* Probably suboptimal */ #ifdef AO_ARM_HAVE_LDREX /* AO_t/char/short/int load is simple reading. */ /* Unaligned accesses are not guaranteed to be atomic. */ # define AO_ACCESS_CHECK_ALIGNED # define AO_ACCESS_short_CHECK_ALIGNED # define AO_ACCESS_int_CHECK_ALIGNED # include "../all_atomic_only_load.h" # ifndef AO_HAVE_char_store # include "../loadstore/char_atomic_store.h" # include "../loadstore/short_atomic_store.h" # endif /* NEC LE-IT: replace the SWAP as recommended by ARM: "Applies to: ARM11 Cores Though the SWP instruction will still work with ARM V6 cores, it is recommended to use the new V6 synchronization instructions. The SWP instruction produces 'locked' read and write accesses which are atomic, i.e. another operation cannot be done between these locked accesses which ties up external bus (AHB, AXI) bandwidth and can increase worst case interrupt latencies. LDREX, STREX are more flexible, other instructions can be done between the LDREX and STREX accesses." */ #ifndef AO_PREFER_GENERALIZED #if !defined(AO_FORCE_USE_SWP) || !defined(AO_ARM_HAVE_SWP) /* But, on the other hand, there could be a considerable performance */ /* degradation in case of a race. 
Eg., test_atomic.c executing */ /* test_and_set test on a dual-core ARMv7 processor using LDREX/STREX */ /* showed around 35 times lower performance than that using SWP. */ /* To force use of SWP instruction, use -D AO_FORCE_USE_SWP option */ /* (the latter is ignored if SWP instruction is unsupported). */ AO_INLINE AO_TS_VAL_t AO_test_and_set(volatile AO_TS_t *addr) { AO_TS_VAL_t oldval; int flag; __asm__ __volatile__("@AO_test_and_set\n" AO_THUMB_GO_ARM AO_BR_ALIGN "1: " AO_MASK_PTR("%3") " ldrex %0, [%3]\n" AO_MASK_PTR("%3") " strex %1, %4, [%3]\n" " teq %1, #0\n" " bne 1b\n" AO_THUMB_RESTORE_MODE : "=&r"(oldval), "=&r"(flag), "+m"(*addr) : "r"(addr), "r"(1) : AO_THUMB_SWITCH_CLOBBERS "cc"); return oldval; } # define AO_HAVE_test_and_set #endif /* !AO_FORCE_USE_SWP */ AO_INLINE AO_t AO_fetch_and_add(volatile AO_t *p, AO_t incr) { AO_t result, tmp; int flag; __asm__ __volatile__("@AO_fetch_and_add\n" AO_THUMB_GO_ARM AO_BR_ALIGN "1: " AO_MASK_PTR("%5") " ldrex %0, [%5]\n" /* get original */ " add %2, %0, %4\n" /* sum up in incr */ AO_MASK_PTR("%5") " strex %1, %2, [%5]\n" /* store them */ " teq %1, #0\n" " bne 1b\n" AO_THUMB_RESTORE_MODE : "=&r"(result), "=&r"(flag), "=&r"(tmp), "+m"(*p) /* 0..3 */ : "r"(incr), "r"(p) /* 4..5 */ : AO_THUMB_SWITCH_CLOBBERS "cc"); return result; } #define AO_HAVE_fetch_and_add AO_INLINE AO_t AO_fetch_and_add1(volatile AO_t *p) { AO_t result, tmp; int flag; __asm__ __volatile__("@AO_fetch_and_add1\n" AO_THUMB_GO_ARM AO_BR_ALIGN "1: " AO_MASK_PTR("%4") " ldrex %0, [%4]\n" /* get original */ " add %1, %0, #1\n" /* increment */ AO_MASK_PTR("%4") " strex %2, %1, [%4]\n" /* store them */ " teq %2, #0\n" " bne 1b\n" AO_THUMB_RESTORE_MODE : "=&r"(result), "=&r"(tmp), "=&r"(flag), "+m"(*p) : "r"(p) : AO_THUMB_SWITCH_CLOBBERS "cc"); return result; } #define AO_HAVE_fetch_and_add1 AO_INLINE AO_t AO_fetch_and_sub1(volatile AO_t *p) { AO_t result, tmp; int flag; __asm__ __volatile__("@AO_fetch_and_sub1\n" AO_THUMB_GO_ARM AO_BR_ALIGN "1: " 
AO_MASK_PTR("%4") " ldrex %0, [%4]\n" /* get original */ " sub %1, %0, #1\n" /* decrement */ AO_MASK_PTR("%4") " strex %2, %1, [%4]\n" /* store them */ " teq %2, #0\n" " bne 1b\n" AO_THUMB_RESTORE_MODE : "=&r"(result), "=&r"(tmp), "=&r"(flag), "+m"(*p) : "r"(p) : AO_THUMB_SWITCH_CLOBBERS "cc"); return result; } #define AO_HAVE_fetch_and_sub1 AO_INLINE void AO_and(volatile AO_t *p, AO_t value) { AO_t tmp, result; __asm__ __volatile__("@AO_and\n" AO_THUMB_GO_ARM AO_BR_ALIGN "1: " AO_MASK_PTR("%4") " ldrex %0, [%4]\n" " and %1, %0, %3\n" AO_MASK_PTR("%4") " strex %0, %1, [%4]\n" " teq %0, #0\n" " bne 1b\n" AO_THUMB_RESTORE_MODE : "=&r" (tmp), "=&r" (result), "+m" (*p) : "r" (value), "r" (p) : AO_THUMB_SWITCH_CLOBBERS "cc"); } #define AO_HAVE_and AO_INLINE void AO_or(volatile AO_t *p, AO_t value) { AO_t tmp, result; __asm__ __volatile__("@AO_or\n" AO_THUMB_GO_ARM AO_BR_ALIGN "1: " AO_MASK_PTR("%4") " ldrex %0, [%4]\n" " orr %1, %0, %3\n" AO_MASK_PTR("%4") " strex %0, %1, [%4]\n" " teq %0, #0\n" " bne 1b\n" AO_THUMB_RESTORE_MODE : "=&r" (tmp), "=&r" (result), "+m" (*p) : "r" (value), "r" (p) : AO_THUMB_SWITCH_CLOBBERS "cc"); } #define AO_HAVE_or AO_INLINE void AO_xor(volatile AO_t *p, AO_t value) { AO_t tmp, result; __asm__ __volatile__("@AO_xor\n" AO_THUMB_GO_ARM AO_BR_ALIGN "1: " AO_MASK_PTR("%4") " ldrex %0, [%4]\n" " eor %1, %0, %3\n" AO_MASK_PTR("%4") " strex %0, %1, [%4]\n" " teq %0, #0\n" " bne 1b\n" AO_THUMB_RESTORE_MODE : "=&r" (tmp), "=&r" (result), "+m" (*p) : "r" (value), "r" (p) : AO_THUMB_SWITCH_CLOBBERS "cc"); } #define AO_HAVE_xor #endif /* !AO_PREFER_GENERALIZED */ #ifdef AO_ARM_HAVE_LDREXBH AO_INLINE unsigned char AO_char_fetch_and_add(volatile unsigned char *p, unsigned char incr) { unsigned result, tmp; int flag; __asm__ __volatile__("@AO_char_fetch_and_add\n" AO_THUMB_GO_ARM AO_BR_ALIGN "1: " AO_MASK_PTR("%5") " ldrexb %0, [%5]\n" " add %2, %0, %4\n" AO_MASK_PTR("%5") " strexb %1, %2, [%5]\n" " teq %1, #0\n" " bne 1b\n" AO_THUMB_RESTORE_MODE : "=&r" 
(result), "=&r" (flag), "=&r" (tmp), "+m" (*p) : "r" ((unsigned)incr), "r" (p) : AO_THUMB_SWITCH_CLOBBERS "cc"); return (unsigned char)result; } # define AO_HAVE_char_fetch_and_add AO_INLINE unsigned short AO_short_fetch_and_add(volatile unsigned short *p, unsigned short incr) { unsigned result, tmp; int flag; __asm__ __volatile__("@AO_short_fetch_and_add\n" AO_THUMB_GO_ARM AO_BR_ALIGN "1: " AO_MASK_PTR("%5") " ldrexh %0, [%5]\n" " add %2, %0, %4\n" AO_MASK_PTR("%5") " strexh %1, %2, [%5]\n" " teq %1, #0\n" " bne 1b\n" AO_THUMB_RESTORE_MODE : "=&r" (result), "=&r" (flag), "=&r" (tmp), "+m" (*p) : "r" ((unsigned)incr), "r" (p) : AO_THUMB_SWITCH_CLOBBERS "cc"); return (unsigned short)result; } # define AO_HAVE_short_fetch_and_add #endif /* AO_ARM_HAVE_LDREXBH */ #ifndef AO_GENERALIZE_ASM_BOOL_CAS /* Returns nonzero if the comparison succeeded. */ AO_INLINE int AO_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val) { AO_t result, tmp; __asm__ __volatile__("@AO_compare_and_swap\n" AO_THUMB_GO_ARM AO_BR_ALIGN "1: mov %0, #2\n" /* store a flag */ AO_MASK_PTR("%3") " ldrex %1, [%3]\n" /* get original */ " teq %1, %4\n" /* see if match */ AO_MASK_PTR("%3") # ifdef __thumb2__ /* TODO: Eliminate warning: it blocks containing wide Thumb */ /* instructions are deprecated in ARMv8. 
*/ " it eq\n" # endif " strexeq %0, %5, [%3]\n" /* store new one if matched */ " teq %0, #1\n" " beq 1b\n" /* if update failed, repeat */ AO_THUMB_RESTORE_MODE : "=&r"(result), "=&r"(tmp), "+m"(*addr) : "r"(addr), "r"(old_val), "r"(new_val) : AO_THUMB_SWITCH_CLOBBERS "cc"); return !(result&2); /* if succeeded then return 1 else 0 */ } # define AO_HAVE_compare_and_swap #endif /* !AO_GENERALIZE_ASM_BOOL_CAS */ AO_INLINE AO_t AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val) { AO_t fetched_val; int flag; __asm__ __volatile__("@AO_fetch_compare_and_swap\n" AO_THUMB_GO_ARM AO_BR_ALIGN "1: mov %0, #2\n" /* store a flag */ AO_MASK_PTR("%3") " ldrex %1, [%3]\n" /* get original */ " teq %1, %4\n" /* see if match */ AO_MASK_PTR("%3") # ifdef __thumb2__ " it eq\n" # endif " strexeq %0, %5, [%3]\n" /* store new one if matched */ " teq %0, #1\n" " beq 1b\n" /* if update failed, repeat */ AO_THUMB_RESTORE_MODE : "=&r"(flag), "=&r"(fetched_val), "+m"(*addr) : "r"(addr), "r"(old_val), "r"(new_val) : AO_THUMB_SWITCH_CLOBBERS "cc"); return fetched_val; } #define AO_HAVE_fetch_compare_and_swap #ifdef AO_ARM_HAVE_LDREXD # include "../standard_ao_double_t.h" /* "ARM Architecture Reference Manual ARMv7-A/R edition" (chapter */ /* A3.5.3) says that memory accesses caused by LDREXD and STREXD */ /* instructions to doubleword-aligned locations are single-copy */ /* atomic; accesses to 64-bit elements by other instructions might */ /* not be single-copy atomic as they are executed as a sequence of */ /* 32-bit accesses. */ AO_INLINE AO_double_t AO_double_load(const volatile AO_double_t *addr) { AO_double_t result; /* AO_THUMB_GO_ARM is empty. 
*/ __asm__ __volatile__("@AO_double_load\n" AO_MASK_PTR("%1") " ldrexd %0, %H0, [%1]" : "=&r" (result.AO_whole) : "r" (addr) /* : no clobber */); return result; } # define AO_HAVE_double_load AO_INLINE void AO_double_store(volatile AO_double_t *addr, AO_double_t new_val) { AO_double_t old_val; int status; do { /* AO_THUMB_GO_ARM is empty. */ __asm__ __volatile__("@AO_double_store\n" AO_MASK_PTR("%3") " ldrexd %0, %H0, [%3]\n" AO_MASK_PTR("%3") " strexd %1, %4, %H4, [%3]" : "=&r" (old_val.AO_whole), "=&r" (status), "+m" (*addr) : "r" (addr), "r" (new_val.AO_whole) : "cc"); } while (AO_EXPECT_FALSE(status)); } # define AO_HAVE_double_store AO_INLINE int AO_double_compare_and_swap(volatile AO_double_t *addr, AO_double_t old_val, AO_double_t new_val) { double_ptr_storage tmp; int result = 1; do { /* AO_THUMB_GO_ARM is empty. */ __asm__ __volatile__("@AO_double_compare_and_swap\n" AO_MASK_PTR("%1") " ldrexd %0, %H0, [%1]\n" /* get original to r1 & r2 */ : "=&r"(tmp) : "r"(addr) /* : no clobber */); if (tmp != old_val.AO_whole) break; __asm__ __volatile__( AO_MASK_PTR("%2") " strexd %0, %3, %H3, [%2]\n" /* store new one if matched */ : "=&r"(result), "+m"(*addr) : "r" (addr), "r" (new_val.AO_whole) : "cc"); } while (AO_EXPECT_FALSE(result)); return !result; /* if succeeded then return 1 else 0 */ } # define AO_HAVE_double_compare_and_swap #endif /* AO_ARM_HAVE_LDREXD */ #else /* pre ARMv6 architectures ... */ /* I found a slide set that, if I read it correctly, claims that */ /* Loads followed by either a Load or Store are ordered, but nothing */ /* else is. */ /* It appears that SWP is the only simple memory barrier. */ #include "../all_aligned_atomic_load_store.h" /* The code should run correctly on a multi-core ARMv6+ as well. 
*/ #endif /* !AO_ARM_HAVE_LDREX */ #if !defined(AO_HAVE_test_and_set_full) && !defined(AO_HAVE_test_and_set) \ && defined (AO_ARM_HAVE_SWP) && (!defined(AO_PREFER_GENERALIZED) \ || !defined(AO_HAVE_fetch_compare_and_swap)) AO_INLINE AO_TS_VAL_t AO_test_and_set_full(volatile AO_TS_t *addr) { AO_TS_VAL_t oldval; /* SWP on ARM is very similar to XCHG on x86. */ /* The first operand is the result, the second the value */ /* to be stored. Both registers must be different from addr. */ /* Make the address operand an early clobber output so it */ /* doesn't overlap with the other operands. The early clobber */ /* on oldval is necessary to prevent the compiler allocating */ /* them to the same register if they are both unused. */ __asm__ __volatile__("@AO_test_and_set_full\n" AO_THUMB_GO_ARM AO_MASK_PTR("%3") " swp %0, %2, [%3]\n" /* Ignore GCC "SWP is deprecated for this architecture" */ /* warning here (for ARMv6+). */ AO_THUMB_RESTORE_MODE : "=&r"(oldval), "=&r"(addr) : "r"(1), "1"(addr) : AO_THUMB_SWITCH_CLOBBERS "memory"); return oldval; } # define AO_HAVE_test_and_set_full #endif /* !AO_HAVE_test_and_set[_full] && AO_ARM_HAVE_SWP */ #define AO_T_IS_INT #else /* AO_GCC_ATOMIC_TEST_AND_SET */ # if defined(__clang__) && !defined(AO_ARM_HAVE_LDREX) /* As of clang-3.8, it cannot compile __atomic_and/or/xor_fetch */ /* library calls yet for pre ARMv6. 
*/ # define AO_SKIPATOMIC_ANY_and_ANY # define AO_SKIPATOMIC_ANY_or_ANY # define AO_SKIPATOMIC_ANY_xor_ANY # endif # ifdef AO_ARM_HAVE_LDREXD # include "../standard_ao_double_t.h" # endif # include "generic.h" #endif /* AO_GCC_ATOMIC_TEST_AND_SET */ #undef AO_ARM_HAVE_DMB #undef AO_ARM_HAVE_LDREX #undef AO_ARM_HAVE_LDREXBH #undef AO_ARM_HAVE_LDREXD #undef AO_ARM_HAVE_SWP #undef AO_BR_ALIGN #undef AO_MASK_PTR #undef AO_SKIPATOMIC_ANY_and_ANY #undef AO_SKIPATOMIC_ANY_or_ANY #undef AO_SKIPATOMIC_ANY_xor_ANY #undef AO_SKIPATOMIC_char_store #undef AO_SKIPATOMIC_char_store_release #undef AO_SKIPATOMIC_int_store #undef AO_SKIPATOMIC_int_store_release #undef AO_SKIPATOMIC_short_store #undef AO_SKIPATOMIC_short_store_release #undef AO_SKIPATOMIC_store #undef AO_SKIPATOMIC_store_release #undef AO_THUMB_GO_ARM #undef AO_THUMB_RESTORE_MODE #undef AO_THUMB_SWITCH_CLOBBERS libatomic_ops-7.6.12/src/atomic_ops/sysdeps/gcc/avr32.h000066400000000000000000000046151411761111000227120ustar00rootroot00000000000000/* * Copyright (C) 2009 Bradley Smith * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * */ #include "../all_atomic_load_store.h" #include "../ordered.h" /* There are no multiprocessor implementations. */ #include "../test_and_set_t_is_ao_t.h" #ifndef AO_PREFER_GENERALIZED AO_INLINE AO_TS_VAL_t AO_test_and_set_full(volatile AO_TS_t *addr) { register long ret; __asm__ __volatile__( "xchg %[oldval], %[mem], %[newval]" : [oldval] "=&r"(ret) : [mem] "r"(addr), [newval] "r"(1) : "memory"); return (AO_TS_VAL_t)ret; } # define AO_HAVE_test_and_set_full #endif /* !AO_PREFER_GENERALIZED */ AO_INLINE int AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) { register long ret; __asm__ __volatile__( "1: ssrf 5\n" " ld.w %[res], %[mem]\n" " eor %[res], %[oldval]\n" " brne 2f\n" " stcond %[mem], %[newval]\n" " brne 1b\n" "2:\n" : [res] "=&r"(ret), [mem] "=m"(*addr) : "m"(*addr), [newval] "r"(new_val), [oldval] "r"(old) : "cc", "memory"); return (int)ret; } #define AO_HAVE_compare_and_swap_full /* TODO: implement AO_fetch_compare_and_swap. */ #define AO_T_IS_INT libatomic_ops-7.6.12/src/atomic_ops/sysdeps/gcc/cris.h000066400000000000000000000052271411761111000227150ustar00rootroot00000000000000/* * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* FIXME: seems to be untested. */ #include "../all_atomic_load_store.h" #include "../ordered.h" /* There are no multiprocessor implementations. */ #include "../test_and_set_t_is_ao_t.h" /* * The architecture apparently supports an "f" flag which is * set on preemption. This essentially gives us load-locked, * store-conditional primitives, though I'm not quite sure how * this would work on a hypothetical multiprocessor. -HB * * For details, see * http://developer.axis.com/doc/hardware/etrax100lx/prog_man/ * 1_architectural_description.pdf * * TODO: Presumably many other primitives (notably CAS, including the double- * width versions) could be implemented in this manner, if someone got * around to it. */ AO_INLINE AO_TS_VAL_t AO_test_and_set_full(volatile AO_TS_t *addr) { /* Ripped from linuxthreads/sysdeps/cris/pt-machine.h */ register unsigned long int ret; /* Note the use of a dummy output of *addr to expose the write. 
The memory barrier is to stop *other* writes being moved past this code. */ __asm__ __volatile__("clearf\n" "0:\n\t" "movu.b [%2],%0\n\t" "ax\n\t" "move.b %3,[%2]\n\t" "bwf 0b\n\t" "clearf" : "=&r" (ret), "=m" (*addr) : "r" (addr), "r" ((int) 1), "m" (*addr) : "memory"); return ret; } #define AO_HAVE_test_and_set_full libatomic_ops-7.6.12/src/atomic_ops/sysdeps/gcc/generic-arithm.h000066400000000000000000000623341411761111000246550ustar00rootroot00000000000000/* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. 
* */ #ifndef AO_NO_char_ARITHM AO_INLINE unsigned/**/char AO_char_fetch_and_add(volatile unsigned/**/char *addr, unsigned/**/char incr) { return __atomic_fetch_add(addr, incr, __ATOMIC_RELAXED); } #define AO_HAVE_char_fetch_and_add #ifndef AO_SKIPATOMIC_ANY_and_ANY AO_INLINE void AO_char_and(volatile unsigned/**/char *addr, unsigned/**/char value) { (void)__atomic_and_fetch(addr, value, __ATOMIC_RELAXED); } # define AO_HAVE_char_and #endif #ifndef AO_SKIPATOMIC_ANY_or_ANY AO_INLINE void AO_char_or(volatile unsigned/**/char *addr, unsigned/**/char value) { (void)__atomic_or_fetch(addr, value, __ATOMIC_RELAXED); } # define AO_HAVE_char_or #endif #ifndef AO_SKIPATOMIC_ANY_xor_ANY AO_INLINE void AO_char_xor(volatile unsigned/**/char *addr, unsigned/**/char value) { (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELAXED); } # define AO_HAVE_char_xor #endif #endif /* !AO_NO_char_ARITHM */ /* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. 
* */ #ifndef AO_NO_short_ARITHM AO_INLINE unsigned/**/short AO_short_fetch_and_add(volatile unsigned/**/short *addr, unsigned/**/short incr) { return __atomic_fetch_add(addr, incr, __ATOMIC_RELAXED); } #define AO_HAVE_short_fetch_and_add #ifndef AO_SKIPATOMIC_ANY_and_ANY AO_INLINE void AO_short_and(volatile unsigned/**/short *addr, unsigned/**/short value) { (void)__atomic_and_fetch(addr, value, __ATOMIC_RELAXED); } # define AO_HAVE_short_and #endif #ifndef AO_SKIPATOMIC_ANY_or_ANY AO_INLINE void AO_short_or(volatile unsigned/**/short *addr, unsigned/**/short value) { (void)__atomic_or_fetch(addr, value, __ATOMIC_RELAXED); } # define AO_HAVE_short_or #endif #ifndef AO_SKIPATOMIC_ANY_xor_ANY AO_INLINE void AO_short_xor(volatile unsigned/**/short *addr, unsigned/**/short value) { (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELAXED); } # define AO_HAVE_short_xor #endif #endif /* !AO_NO_short_ARITHM */ /* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. 
* */ #ifndef AO_NO_int_ARITHM AO_INLINE unsigned AO_int_fetch_and_add(volatile unsigned *addr, unsigned incr) { return __atomic_fetch_add(addr, incr, __ATOMIC_RELAXED); } #define AO_HAVE_int_fetch_and_add #ifndef AO_SKIPATOMIC_ANY_and_ANY AO_INLINE void AO_int_and(volatile unsigned *addr, unsigned value) { (void)__atomic_and_fetch(addr, value, __ATOMIC_RELAXED); } # define AO_HAVE_int_and #endif #ifndef AO_SKIPATOMIC_ANY_or_ANY AO_INLINE void AO_int_or(volatile unsigned *addr, unsigned value) { (void)__atomic_or_fetch(addr, value, __ATOMIC_RELAXED); } # define AO_HAVE_int_or #endif #ifndef AO_SKIPATOMIC_ANY_xor_ANY AO_INLINE void AO_int_xor(volatile unsigned *addr, unsigned value) { (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELAXED); } # define AO_HAVE_int_xor #endif #endif /* !AO_NO_int_ARITHM */ /* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. 
* */ #ifndef AO_NO_ARITHM AO_INLINE AO_t AO_fetch_and_add(volatile AO_t *addr, AO_t incr) { return __atomic_fetch_add(addr, incr, __ATOMIC_RELAXED); } #define AO_HAVE_fetch_and_add #ifndef AO_SKIPATOMIC_ANY_and_ANY AO_INLINE void AO_and(volatile AO_t *addr, AO_t value) { (void)__atomic_and_fetch(addr, value, __ATOMIC_RELAXED); } # define AO_HAVE_and #endif #ifndef AO_SKIPATOMIC_ANY_or_ANY AO_INLINE void AO_or(volatile AO_t *addr, AO_t value) { (void)__atomic_or_fetch(addr, value, __ATOMIC_RELAXED); } # define AO_HAVE_or #endif #ifndef AO_SKIPATOMIC_ANY_xor_ANY AO_INLINE void AO_xor(volatile AO_t *addr, AO_t value) { (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELAXED); } # define AO_HAVE_xor #endif #endif /* !AO_NO_ARITHM */ /* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. 
* */ #ifndef AO_NO_char_ARITHM AO_INLINE unsigned/**/char AO_char_fetch_and_add_acquire(volatile unsigned/**/char *addr, unsigned/**/char incr) { return __atomic_fetch_add(addr, incr, __ATOMIC_ACQUIRE); } #define AO_HAVE_char_fetch_and_add_acquire #ifndef AO_SKIPATOMIC_ANY_and_ANY AO_INLINE void AO_char_and_acquire(volatile unsigned/**/char *addr, unsigned/**/char value) { (void)__atomic_and_fetch(addr, value, __ATOMIC_ACQUIRE); } # define AO_HAVE_char_and_acquire #endif #ifndef AO_SKIPATOMIC_ANY_or_ANY AO_INLINE void AO_char_or_acquire(volatile unsigned/**/char *addr, unsigned/**/char value) { (void)__atomic_or_fetch(addr, value, __ATOMIC_ACQUIRE); } # define AO_HAVE_char_or_acquire #endif #ifndef AO_SKIPATOMIC_ANY_xor_ANY AO_INLINE void AO_char_xor_acquire(volatile unsigned/**/char *addr, unsigned/**/char value) { (void)__atomic_xor_fetch(addr, value, __ATOMIC_ACQUIRE); } # define AO_HAVE_char_xor_acquire #endif #endif /* !AO_NO_char_ARITHM */ /* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. 
* */ #ifndef AO_NO_short_ARITHM AO_INLINE unsigned/**/short AO_short_fetch_and_add_acquire(volatile unsigned/**/short *addr, unsigned/**/short incr) { return __atomic_fetch_add(addr, incr, __ATOMIC_ACQUIRE); } #define AO_HAVE_short_fetch_and_add_acquire #ifndef AO_SKIPATOMIC_ANY_and_ANY AO_INLINE void AO_short_and_acquire(volatile unsigned/**/short *addr, unsigned/**/short value) { (void)__atomic_and_fetch(addr, value, __ATOMIC_ACQUIRE); } # define AO_HAVE_short_and_acquire #endif #ifndef AO_SKIPATOMIC_ANY_or_ANY AO_INLINE void AO_short_or_acquire(volatile unsigned/**/short *addr, unsigned/**/short value) { (void)__atomic_or_fetch(addr, value, __ATOMIC_ACQUIRE); } # define AO_HAVE_short_or_acquire #endif #ifndef AO_SKIPATOMIC_ANY_xor_ANY AO_INLINE void AO_short_xor_acquire(volatile unsigned/**/short *addr, unsigned/**/short value) { (void)__atomic_xor_fetch(addr, value, __ATOMIC_ACQUIRE); } # define AO_HAVE_short_xor_acquire #endif #endif /* !AO_NO_short_ARITHM */ /* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. 
* */ #ifndef AO_NO_int_ARITHM AO_INLINE unsigned AO_int_fetch_and_add_acquire(volatile unsigned *addr, unsigned incr) { return __atomic_fetch_add(addr, incr, __ATOMIC_ACQUIRE); } #define AO_HAVE_int_fetch_and_add_acquire #ifndef AO_SKIPATOMIC_ANY_and_ANY AO_INLINE void AO_int_and_acquire(volatile unsigned *addr, unsigned value) { (void)__atomic_and_fetch(addr, value, __ATOMIC_ACQUIRE); } # define AO_HAVE_int_and_acquire #endif #ifndef AO_SKIPATOMIC_ANY_or_ANY AO_INLINE void AO_int_or_acquire(volatile unsigned *addr, unsigned value) { (void)__atomic_or_fetch(addr, value, __ATOMIC_ACQUIRE); } # define AO_HAVE_int_or_acquire #endif #ifndef AO_SKIPATOMIC_ANY_xor_ANY AO_INLINE void AO_int_xor_acquire(volatile unsigned *addr, unsigned value) { (void)__atomic_xor_fetch(addr, value, __ATOMIC_ACQUIRE); } # define AO_HAVE_int_xor_acquire #endif #endif /* !AO_NO_int_ARITHM */ /* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. 
* */ #ifndef AO_NO_ARITHM AO_INLINE AO_t AO_fetch_and_add_acquire(volatile AO_t *addr, AO_t incr) { return __atomic_fetch_add(addr, incr, __ATOMIC_ACQUIRE); } #define AO_HAVE_fetch_and_add_acquire #ifndef AO_SKIPATOMIC_ANY_and_ANY AO_INLINE void AO_and_acquire(volatile AO_t *addr, AO_t value) { (void)__atomic_and_fetch(addr, value, __ATOMIC_ACQUIRE); } # define AO_HAVE_and_acquire #endif #ifndef AO_SKIPATOMIC_ANY_or_ANY AO_INLINE void AO_or_acquire(volatile AO_t *addr, AO_t value) { (void)__atomic_or_fetch(addr, value, __ATOMIC_ACQUIRE); } # define AO_HAVE_or_acquire #endif #ifndef AO_SKIPATOMIC_ANY_xor_ANY AO_INLINE void AO_xor_acquire(volatile AO_t *addr, AO_t value) { (void)__atomic_xor_fetch(addr, value, __ATOMIC_ACQUIRE); } # define AO_HAVE_xor_acquire #endif #endif /* !AO_NO_ARITHM */ /* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. 
* */ #ifndef AO_NO_char_ARITHM AO_INLINE unsigned/**/char AO_char_fetch_and_add_release(volatile unsigned/**/char *addr, unsigned/**/char incr) { return __atomic_fetch_add(addr, incr, __ATOMIC_RELEASE); } #define AO_HAVE_char_fetch_and_add_release #ifndef AO_SKIPATOMIC_ANY_and_ANY AO_INLINE void AO_char_and_release(volatile unsigned/**/char *addr, unsigned/**/char value) { (void)__atomic_and_fetch(addr, value, __ATOMIC_RELEASE); } # define AO_HAVE_char_and_release #endif #ifndef AO_SKIPATOMIC_ANY_or_ANY AO_INLINE void AO_char_or_release(volatile unsigned/**/char *addr, unsigned/**/char value) { (void)__atomic_or_fetch(addr, value, __ATOMIC_RELEASE); } # define AO_HAVE_char_or_release #endif #ifndef AO_SKIPATOMIC_ANY_xor_ANY AO_INLINE void AO_char_xor_release(volatile unsigned/**/char *addr, unsigned/**/char value) { (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELEASE); } # define AO_HAVE_char_xor_release #endif #endif /* !AO_NO_char_ARITHM */ /* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. 
* */ #ifndef AO_NO_short_ARITHM AO_INLINE unsigned/**/short AO_short_fetch_and_add_release(volatile unsigned/**/short *addr, unsigned/**/short incr) { return __atomic_fetch_add(addr, incr, __ATOMIC_RELEASE); } #define AO_HAVE_short_fetch_and_add_release #ifndef AO_SKIPATOMIC_ANY_and_ANY AO_INLINE void AO_short_and_release(volatile unsigned/**/short *addr, unsigned/**/short value) { (void)__atomic_and_fetch(addr, value, __ATOMIC_RELEASE); } # define AO_HAVE_short_and_release #endif #ifndef AO_SKIPATOMIC_ANY_or_ANY AO_INLINE void AO_short_or_release(volatile unsigned/**/short *addr, unsigned/**/short value) { (void)__atomic_or_fetch(addr, value, __ATOMIC_RELEASE); } # define AO_HAVE_short_or_release #endif #ifndef AO_SKIPATOMIC_ANY_xor_ANY AO_INLINE void AO_short_xor_release(volatile unsigned/**/short *addr, unsigned/**/short value) { (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELEASE); } # define AO_HAVE_short_xor_release #endif #endif /* !AO_NO_short_ARITHM */ /* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. 
* */ #ifndef AO_NO_int_ARITHM AO_INLINE unsigned AO_int_fetch_and_add_release(volatile unsigned *addr, unsigned incr) { return __atomic_fetch_add(addr, incr, __ATOMIC_RELEASE); } #define AO_HAVE_int_fetch_and_add_release #ifndef AO_SKIPATOMIC_ANY_and_ANY AO_INLINE void AO_int_and_release(volatile unsigned *addr, unsigned value) { (void)__atomic_and_fetch(addr, value, __ATOMIC_RELEASE); } # define AO_HAVE_int_and_release #endif #ifndef AO_SKIPATOMIC_ANY_or_ANY AO_INLINE void AO_int_or_release(volatile unsigned *addr, unsigned value) { (void)__atomic_or_fetch(addr, value, __ATOMIC_RELEASE); } # define AO_HAVE_int_or_release #endif #ifndef AO_SKIPATOMIC_ANY_xor_ANY AO_INLINE void AO_int_xor_release(volatile unsigned *addr, unsigned value) { (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELEASE); } # define AO_HAVE_int_xor_release #endif #endif /* !AO_NO_int_ARITHM */ /* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. 
* */ #ifndef AO_NO_ARITHM AO_INLINE AO_t AO_fetch_and_add_release(volatile AO_t *addr, AO_t incr) { return __atomic_fetch_add(addr, incr, __ATOMIC_RELEASE); } #define AO_HAVE_fetch_and_add_release #ifndef AO_SKIPATOMIC_ANY_and_ANY AO_INLINE void AO_and_release(volatile AO_t *addr, AO_t value) { (void)__atomic_and_fetch(addr, value, __ATOMIC_RELEASE); } # define AO_HAVE_and_release #endif #ifndef AO_SKIPATOMIC_ANY_or_ANY AO_INLINE void AO_or_release(volatile AO_t *addr, AO_t value) { (void)__atomic_or_fetch(addr, value, __ATOMIC_RELEASE); } # define AO_HAVE_or_release #endif #ifndef AO_SKIPATOMIC_ANY_xor_ANY AO_INLINE void AO_xor_release(volatile AO_t *addr, AO_t value) { (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELEASE); } # define AO_HAVE_xor_release #endif #endif /* !AO_NO_ARITHM */ /* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. 
* */ #ifndef AO_NO_char_ARITHM AO_INLINE unsigned/**/char AO_char_fetch_and_add_full(volatile unsigned/**/char *addr, unsigned/**/char incr) { return __atomic_fetch_add(addr, incr, __ATOMIC_SEQ_CST); } #define AO_HAVE_char_fetch_and_add_full #ifndef AO_SKIPATOMIC_ANY_and_ANY AO_INLINE void AO_char_and_full(volatile unsigned/**/char *addr, unsigned/**/char value) { (void)__atomic_and_fetch(addr, value, __ATOMIC_SEQ_CST); } # define AO_HAVE_char_and_full #endif #ifndef AO_SKIPATOMIC_ANY_or_ANY AO_INLINE void AO_char_or_full(volatile unsigned/**/char *addr, unsigned/**/char value) { (void)__atomic_or_fetch(addr, value, __ATOMIC_SEQ_CST); } # define AO_HAVE_char_or_full #endif #ifndef AO_SKIPATOMIC_ANY_xor_ANY AO_INLINE void AO_char_xor_full(volatile unsigned/**/char *addr, unsigned/**/char value) { (void)__atomic_xor_fetch(addr, value, __ATOMIC_SEQ_CST); } # define AO_HAVE_char_xor_full #endif #endif /* !AO_NO_char_ARITHM */ /* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. 
* */ #ifndef AO_NO_short_ARITHM AO_INLINE unsigned/**/short AO_short_fetch_and_add_full(volatile unsigned/**/short *addr, unsigned/**/short incr) { return __atomic_fetch_add(addr, incr, __ATOMIC_SEQ_CST); } #define AO_HAVE_short_fetch_and_add_full #ifndef AO_SKIPATOMIC_ANY_and_ANY AO_INLINE void AO_short_and_full(volatile unsigned/**/short *addr, unsigned/**/short value) { (void)__atomic_and_fetch(addr, value, __ATOMIC_SEQ_CST); } # define AO_HAVE_short_and_full #endif #ifndef AO_SKIPATOMIC_ANY_or_ANY AO_INLINE void AO_short_or_full(volatile unsigned/**/short *addr, unsigned/**/short value) { (void)__atomic_or_fetch(addr, value, __ATOMIC_SEQ_CST); } # define AO_HAVE_short_or_full #endif #ifndef AO_SKIPATOMIC_ANY_xor_ANY AO_INLINE void AO_short_xor_full(volatile unsigned/**/short *addr, unsigned/**/short value) { (void)__atomic_xor_fetch(addr, value, __ATOMIC_SEQ_CST); } # define AO_HAVE_short_xor_full #endif #endif /* !AO_NO_short_ARITHM */ /* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. 
* */ #ifndef AO_NO_int_ARITHM AO_INLINE unsigned AO_int_fetch_and_add_full(volatile unsigned *addr, unsigned incr) { return __atomic_fetch_add(addr, incr, __ATOMIC_SEQ_CST); } #define AO_HAVE_int_fetch_and_add_full #ifndef AO_SKIPATOMIC_ANY_and_ANY AO_INLINE void AO_int_and_full(volatile unsigned *addr, unsigned value) { (void)__atomic_and_fetch(addr, value, __ATOMIC_SEQ_CST); } # define AO_HAVE_int_and_full #endif #ifndef AO_SKIPATOMIC_ANY_or_ANY AO_INLINE void AO_int_or_full(volatile unsigned *addr, unsigned value) { (void)__atomic_or_fetch(addr, value, __ATOMIC_SEQ_CST); } # define AO_HAVE_int_or_full #endif #ifndef AO_SKIPATOMIC_ANY_xor_ANY AO_INLINE void AO_int_xor_full(volatile unsigned *addr, unsigned value) { (void)__atomic_xor_fetch(addr, value, __ATOMIC_SEQ_CST); } # define AO_HAVE_int_xor_full #endif #endif /* !AO_NO_int_ARITHM */ /* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. 
* */ #ifndef AO_NO_ARITHM AO_INLINE AO_t AO_fetch_and_add_full(volatile AO_t *addr, AO_t incr) { return __atomic_fetch_add(addr, incr, __ATOMIC_SEQ_CST); } #define AO_HAVE_fetch_and_add_full #ifndef AO_SKIPATOMIC_ANY_and_ANY AO_INLINE void AO_and_full(volatile AO_t *addr, AO_t value) { (void)__atomic_and_fetch(addr, value, __ATOMIC_SEQ_CST); } # define AO_HAVE_and_full #endif #ifndef AO_SKIPATOMIC_ANY_or_ANY AO_INLINE void AO_or_full(volatile AO_t *addr, AO_t value) { (void)__atomic_or_fetch(addr, value, __ATOMIC_SEQ_CST); } # define AO_HAVE_or_full #endif #ifndef AO_SKIPATOMIC_ANY_xor_ANY AO_INLINE void AO_xor_full(volatile AO_t *addr, AO_t value) { (void)__atomic_xor_fetch(addr, value, __ATOMIC_SEQ_CST); } # define AO_HAVE_xor_full #endif #endif /* !AO_NO_ARITHM */ libatomic_ops-7.6.12/src/atomic_ops/sysdeps/gcc/generic-arithm.template000066400000000000000000000030631411761111000262330ustar00rootroot00000000000000/* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. 
* */ #ifndef AO_NO_XSIZE_ARITHM AO_INLINE XCTYPE AO_XSIZE_fetch_and_add_XBAR(volatile XCTYPE *addr, XCTYPE incr) { return __atomic_fetch_add(addr, incr, __ATOMIC_XGCCBAR); } #define AO_HAVE_XSIZE_fetch_and_add_XBAR #ifndef AO_SKIPATOMIC_ANY_and_ANY AO_INLINE void AO_XSIZE_and_XBAR(volatile XCTYPE *addr, XCTYPE value) { (void)__atomic_and_fetch(addr, value, __ATOMIC_XGCCBAR); } # define AO_HAVE_XSIZE_and_XBAR #endif #ifndef AO_SKIPATOMIC_ANY_or_ANY AO_INLINE void AO_XSIZE_or_XBAR(volatile XCTYPE *addr, XCTYPE value) { (void)__atomic_or_fetch(addr, value, __ATOMIC_XGCCBAR); } # define AO_HAVE_XSIZE_or_XBAR #endif #ifndef AO_SKIPATOMIC_ANY_xor_ANY AO_INLINE void AO_XSIZE_xor_XBAR(volatile XCTYPE *addr, XCTYPE value) { (void)__atomic_xor_fetch(addr, value, __ATOMIC_XGCCBAR); } # define AO_HAVE_XSIZE_xor_XBAR #endif #endif /* !AO_NO_XSIZE_ARITHM */ libatomic_ops-7.6.12/src/atomic_ops/sysdeps/gcc/generic-small.h000066400000000000000000000563271411761111000245060ustar00rootroot00000000000000/* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. 
* */ #if !defined(AO_GCC_HAVE_char_SYNC_CAS) || !defined(AO_PREFER_GENERALIZED) AO_INLINE unsigned/**/char AO_char_load(const volatile unsigned/**/char *addr) { return __atomic_load_n(addr, __ATOMIC_RELAXED); } #define AO_HAVE_char_load AO_INLINE unsigned/**/char AO_char_load_acquire(const volatile unsigned/**/char *addr) { return __atomic_load_n(addr, __ATOMIC_ACQUIRE); } #define AO_HAVE_char_load_acquire /* char_load_read is defined using load and nop_read. */ /* TODO: Map it to ACQUIRE. We should be strengthening the read and */ /* write stuff to the more general acquire/release versions. It almost */ /* never makes a difference and is much less error-prone. */ /* char_load_full is generalized using load and nop_full. */ /* TODO: Map it to SEQ_CST and clarify the documentation. */ /* TODO: Map load_dd_acquire_read to ACQUIRE. Ideally it should be */ /* mapped to CONSUME, but the latter is currently broken. */ /* char_store_full definition is omitted similar to load_full reason. */ /* TODO: Map store_write to RELEASE. 
*/ #ifndef AO_SKIPATOMIC_char_store AO_INLINE void AO_char_store(volatile unsigned/**/char *addr, unsigned/**/char value) { __atomic_store_n(addr, value, __ATOMIC_RELAXED); } # define AO_HAVE_char_store #endif #ifndef AO_SKIPATOMIC_char_store_release AO_INLINE void AO_char_store_release(volatile unsigned/**/char *addr, unsigned/**/char value) { __atomic_store_n(addr, value, __ATOMIC_RELEASE); } # define AO_HAVE_char_store_release #endif #endif /* !AO_GCC_HAVE_char_SYNC_CAS || !AO_PREFER_GENERALIZED */ #ifdef AO_GCC_HAVE_char_SYNC_CAS AO_INLINE unsigned/**/char AO_char_fetch_compare_and_swap(volatile unsigned/**/char *addr, unsigned/**/char old_val, unsigned/**/char new_val) { (void)__atomic_compare_exchange_n(addr, &old_val /* p_expected */, new_val /* desired */, 0 /* is_weak: false */, __ATOMIC_RELAXED /* success */, __ATOMIC_RELAXED /* failure */); return old_val; } # define AO_HAVE_char_fetch_compare_and_swap AO_INLINE unsigned/**/char AO_char_fetch_compare_and_swap_acquire(volatile unsigned/**/char *addr, unsigned/**/char old_val, unsigned/**/char new_val) { (void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); return old_val; } # define AO_HAVE_char_fetch_compare_and_swap_acquire AO_INLINE unsigned/**/char AO_char_fetch_compare_and_swap_release(volatile unsigned/**/char *addr, unsigned/**/char old_val, unsigned/**/char new_val) { (void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED /* failure */); return old_val; } # define AO_HAVE_char_fetch_compare_and_swap_release AO_INLINE unsigned/**/char AO_char_fetch_compare_and_swap_full(volatile unsigned/**/char *addr, unsigned/**/char old_val, unsigned/**/char new_val) { (void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE /* failure */); return old_val; } # define AO_HAVE_char_fetch_compare_and_swap_full # ifndef AO_GENERALIZE_ASM_BOOL_CAS AO_INLINE int 
AO_char_compare_and_swap(volatile unsigned/**/char *addr, unsigned/**/char old_val, unsigned/**/char new_val) { return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED); } # define AO_HAVE_char_compare_and_swap AO_INLINE int AO_char_compare_and_swap_acquire(volatile unsigned/**/char *addr, unsigned/**/char old_val, unsigned/**/char new_val) { return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); } # define AO_HAVE_char_compare_and_swap_acquire AO_INLINE int AO_char_compare_and_swap_release(volatile unsigned/**/char *addr, unsigned/**/char old_val, unsigned/**/char new_val) { return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED /* failure */); } # define AO_HAVE_char_compare_and_swap_release AO_INLINE int AO_char_compare_and_swap_full(volatile unsigned/**/char *addr, unsigned/**/char old_val, unsigned/**/char new_val) { return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE /* failure */); } # define AO_HAVE_char_compare_and_swap_full # endif /* !AO_GENERALIZE_ASM_BOOL_CAS */ #endif /* AO_GCC_HAVE_char_SYNC_CAS */ /* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. 
* */ #if !defined(AO_GCC_HAVE_short_SYNC_CAS) || !defined(AO_PREFER_GENERALIZED) AO_INLINE unsigned/**/short AO_short_load(const volatile unsigned/**/short *addr) { return __atomic_load_n(addr, __ATOMIC_RELAXED); } #define AO_HAVE_short_load AO_INLINE unsigned/**/short AO_short_load_acquire(const volatile unsigned/**/short *addr) { return __atomic_load_n(addr, __ATOMIC_ACQUIRE); } #define AO_HAVE_short_load_acquire /* short_load_read is defined using load and nop_read. */ /* TODO: Map it to ACQUIRE. We should be strengthening the read and */ /* write stuff to the more general acquire/release versions. It almost */ /* never makes a difference and is much less error-prone. */ /* short_load_full is generalized using load and nop_full. */ /* TODO: Map it to SEQ_CST and clarify the documentation. */ /* TODO: Map load_dd_acquire_read to ACQUIRE. Ideally it should be */ /* mapped to CONSUME, but the latter is currently broken. */ /* short_store_full definition is omitted similar to load_full reason. */ /* TODO: Map store_write to RELEASE. 
*/ #ifndef AO_SKIPATOMIC_short_store AO_INLINE void AO_short_store(volatile unsigned/**/short *addr, unsigned/**/short value) { __atomic_store_n(addr, value, __ATOMIC_RELAXED); } # define AO_HAVE_short_store #endif #ifndef AO_SKIPATOMIC_short_store_release AO_INLINE void AO_short_store_release(volatile unsigned/**/short *addr, unsigned/**/short value) { __atomic_store_n(addr, value, __ATOMIC_RELEASE); } # define AO_HAVE_short_store_release #endif #endif /* !AO_GCC_HAVE_short_SYNC_CAS || !AO_PREFER_GENERALIZED */ #ifdef AO_GCC_HAVE_short_SYNC_CAS AO_INLINE unsigned/**/short AO_short_fetch_compare_and_swap(volatile unsigned/**/short *addr, unsigned/**/short old_val, unsigned/**/short new_val) { (void)__atomic_compare_exchange_n(addr, &old_val /* p_expected */, new_val /* desired */, 0 /* is_weak: false */, __ATOMIC_RELAXED /* success */, __ATOMIC_RELAXED /* failure */); return old_val; } # define AO_HAVE_short_fetch_compare_and_swap AO_INLINE unsigned/**/short AO_short_fetch_compare_and_swap_acquire(volatile unsigned/**/short *addr, unsigned/**/short old_val, unsigned/**/short new_val) { (void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); return old_val; } # define AO_HAVE_short_fetch_compare_and_swap_acquire AO_INLINE unsigned/**/short AO_short_fetch_compare_and_swap_release(volatile unsigned/**/short *addr, unsigned/**/short old_val, unsigned/**/short new_val) { (void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED /* failure */); return old_val; } # define AO_HAVE_short_fetch_compare_and_swap_release AO_INLINE unsigned/**/short AO_short_fetch_compare_and_swap_full(volatile unsigned/**/short *addr, unsigned/**/short old_val, unsigned/**/short new_val) { (void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE /* failure */); return old_val; } # define AO_HAVE_short_fetch_compare_and_swap_full # ifndef AO_GENERALIZE_ASM_BOOL_CAS AO_INLINE 
int AO_short_compare_and_swap(volatile unsigned/**/short *addr, unsigned/**/short old_val, unsigned/**/short new_val) { return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED); } # define AO_HAVE_short_compare_and_swap AO_INLINE int AO_short_compare_and_swap_acquire(volatile unsigned/**/short *addr, unsigned/**/short old_val, unsigned/**/short new_val) { return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); } # define AO_HAVE_short_compare_and_swap_acquire AO_INLINE int AO_short_compare_and_swap_release(volatile unsigned/**/short *addr, unsigned/**/short old_val, unsigned/**/short new_val) { return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED /* failure */); } # define AO_HAVE_short_compare_and_swap_release AO_INLINE int AO_short_compare_and_swap_full(volatile unsigned/**/short *addr, unsigned/**/short old_val, unsigned/**/short new_val) { return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE /* failure */); } # define AO_HAVE_short_compare_and_swap_full # endif /* !AO_GENERALIZE_ASM_BOOL_CAS */ #endif /* AO_GCC_HAVE_short_SYNC_CAS */ /* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. 
* */ #if !defined(AO_GCC_HAVE_int_SYNC_CAS) || !defined(AO_PREFER_GENERALIZED) AO_INLINE unsigned AO_int_load(const volatile unsigned *addr) { return __atomic_load_n(addr, __ATOMIC_RELAXED); } #define AO_HAVE_int_load AO_INLINE unsigned AO_int_load_acquire(const volatile unsigned *addr) { return __atomic_load_n(addr, __ATOMIC_ACQUIRE); } #define AO_HAVE_int_load_acquire /* int_load_read is defined using load and nop_read. */ /* TODO: Map it to ACQUIRE. We should be strengthening the read and */ /* write stuff to the more general acquire/release versions. It almost */ /* never makes a difference and is much less error-prone. */ /* int_load_full is generalized using load and nop_full. */ /* TODO: Map it to SEQ_CST and clarify the documentation. */ /* TODO: Map load_dd_acquire_read to ACQUIRE. Ideally it should be */ /* mapped to CONSUME, but the latter is currently broken. */ /* int_store_full definition is omitted similar to load_full reason. */ /* TODO: Map store_write to RELEASE. */ #ifndef AO_SKIPATOMIC_int_store AO_INLINE void AO_int_store(volatile unsigned *addr, unsigned value) { __atomic_store_n(addr, value, __ATOMIC_RELAXED); } # define AO_HAVE_int_store #endif #ifndef AO_SKIPATOMIC_int_store_release AO_INLINE void AO_int_store_release(volatile unsigned *addr, unsigned value) { __atomic_store_n(addr, value, __ATOMIC_RELEASE); } # define AO_HAVE_int_store_release #endif #endif /* !AO_GCC_HAVE_int_SYNC_CAS || !AO_PREFER_GENERALIZED */ #ifdef AO_GCC_HAVE_int_SYNC_CAS AO_INLINE unsigned AO_int_fetch_compare_and_swap(volatile unsigned *addr, unsigned old_val, unsigned new_val) { (void)__atomic_compare_exchange_n(addr, &old_val /* p_expected */, new_val /* desired */, 0 /* is_weak: false */, __ATOMIC_RELAXED /* success */, __ATOMIC_RELAXED /* failure */); return old_val; } # define AO_HAVE_int_fetch_compare_and_swap AO_INLINE unsigned AO_int_fetch_compare_and_swap_acquire(volatile unsigned *addr, unsigned old_val, unsigned new_val) { 
(void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); return old_val; } # define AO_HAVE_int_fetch_compare_and_swap_acquire AO_INLINE unsigned AO_int_fetch_compare_and_swap_release(volatile unsigned *addr, unsigned old_val, unsigned new_val) { (void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED /* failure */); return old_val; } # define AO_HAVE_int_fetch_compare_and_swap_release AO_INLINE unsigned AO_int_fetch_compare_and_swap_full(volatile unsigned *addr, unsigned old_val, unsigned new_val) { (void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE /* failure */); return old_val; } # define AO_HAVE_int_fetch_compare_and_swap_full # ifndef AO_GENERALIZE_ASM_BOOL_CAS AO_INLINE int AO_int_compare_and_swap(volatile unsigned *addr, unsigned old_val, unsigned new_val) { return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED); } # define AO_HAVE_int_compare_and_swap AO_INLINE int AO_int_compare_and_swap_acquire(volatile unsigned *addr, unsigned old_val, unsigned new_val) { return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); } # define AO_HAVE_int_compare_and_swap_acquire AO_INLINE int AO_int_compare_and_swap_release(volatile unsigned *addr, unsigned old_val, unsigned new_val) { return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED /* failure */); } # define AO_HAVE_int_compare_and_swap_release AO_INLINE int AO_int_compare_and_swap_full(volatile unsigned *addr, unsigned old_val, unsigned new_val) { return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE /* failure */); } # define AO_HAVE_int_compare_and_swap_full # endif /* !AO_GENERALIZE_ASM_BOOL_CAS */ #endif /* AO_GCC_HAVE_int_SYNC_CAS */ /* * Copyright (c) 1991-1994 by Xerox Corporation. 
All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. * */ #if !defined(AO_GCC_HAVE_SYNC_CAS) || !defined(AO_PREFER_GENERALIZED) AO_INLINE AO_t AO_load(const volatile AO_t *addr) { return __atomic_load_n(addr, __ATOMIC_RELAXED); } #define AO_HAVE_load AO_INLINE AO_t AO_load_acquire(const volatile AO_t *addr) { return __atomic_load_n(addr, __ATOMIC_ACQUIRE); } #define AO_HAVE_load_acquire /* load_read is defined using load and nop_read. */ /* TODO: Map it to ACQUIRE. We should be strengthening the read and */ /* write stuff to the more general acquire/release versions. It almost */ /* never makes a difference and is much less error-prone. */ /* load_full is generalized using load and nop_full. */ /* TODO: Map it to SEQ_CST and clarify the documentation. */ /* TODO: Map load_dd_acquire_read to ACQUIRE. Ideally it should be */ /* mapped to CONSUME, but the latter is currently broken. */ /* store_full definition is omitted similar to load_full reason. */ /* TODO: Map store_write to RELEASE. 
*/ #ifndef AO_SKIPATOMIC_store AO_INLINE void AO_store(volatile AO_t *addr, AO_t value) { __atomic_store_n(addr, value, __ATOMIC_RELAXED); } # define AO_HAVE_store #endif #ifndef AO_SKIPATOMIC_store_release AO_INLINE void AO_store_release(volatile AO_t *addr, AO_t value) { __atomic_store_n(addr, value, __ATOMIC_RELEASE); } # define AO_HAVE_store_release #endif #endif /* !AO_GCC_HAVE_SYNC_CAS || !AO_PREFER_GENERALIZED */ #ifdef AO_GCC_HAVE_SYNC_CAS AO_INLINE AO_t AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val) { (void)__atomic_compare_exchange_n(addr, &old_val /* p_expected */, new_val /* desired */, 0 /* is_weak: false */, __ATOMIC_RELAXED /* success */, __ATOMIC_RELAXED /* failure */); return old_val; } # define AO_HAVE_fetch_compare_and_swap AO_INLINE AO_t AO_fetch_compare_and_swap_acquire(volatile AO_t *addr, AO_t old_val, AO_t new_val) { (void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); return old_val; } # define AO_HAVE_fetch_compare_and_swap_acquire AO_INLINE AO_t AO_fetch_compare_and_swap_release(volatile AO_t *addr, AO_t old_val, AO_t new_val) { (void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED /* failure */); return old_val; } # define AO_HAVE_fetch_compare_and_swap_release AO_INLINE AO_t AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val, AO_t new_val) { (void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE /* failure */); return old_val; } # define AO_HAVE_fetch_compare_and_swap_full # ifndef AO_GENERALIZE_ASM_BOOL_CAS AO_INLINE int AO_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val) { return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED); } # define AO_HAVE_compare_and_swap AO_INLINE int AO_compare_and_swap_acquire(volatile AO_t *addr, AO_t old_val, AO_t new_val) { return (int)__atomic_compare_exchange_n(addr, 
&old_val, new_val, 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); } # define AO_HAVE_compare_and_swap_acquire AO_INLINE int AO_compare_and_swap_release(volatile AO_t *addr, AO_t old_val, AO_t new_val) { return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED /* failure */); } # define AO_HAVE_compare_and_swap_release AO_INLINE int AO_compare_and_swap_full(volatile AO_t *addr, AO_t old_val, AO_t new_val) { return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE /* failure */); } # define AO_HAVE_compare_and_swap_full # endif /* !AO_GENERALIZE_ASM_BOOL_CAS */ #endif /* AO_GCC_HAVE_SYNC_CAS */ libatomic_ops-7.6.12/src/atomic_ops/sysdeps/gcc/generic-small.template000066400000000000000000000133031411761111000260550ustar00rootroot00000000000000/* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. * */ #if !defined(AO_GCC_HAVE_XSIZE_SYNC_CAS) || !defined(AO_PREFER_GENERALIZED) AO_INLINE XCTYPE AO_XSIZE_load(const volatile XCTYPE *addr) { return __atomic_load_n(addr, __ATOMIC_RELAXED); } #define AO_HAVE_XSIZE_load AO_INLINE XCTYPE AO_XSIZE_load_acquire(const volatile XCTYPE *addr) { return __atomic_load_n(addr, __ATOMIC_ACQUIRE); } #define AO_HAVE_XSIZE_load_acquire /* XSIZE_load_read is defined using load and nop_read. */ /* TODO: Map it to ACQUIRE. 
We should be strengthening the read and */ /* write stuff to the more general acquire/release versions. It almost */ /* never makes a difference and is much less error-prone. */ /* XSIZE_load_full is generalized using load and nop_full. */ /* TODO: Map it to SEQ_CST and clarify the documentation. */ /* TODO: Map load_dd_acquire_read to ACQUIRE. Ideally it should be */ /* mapped to CONSUME, but the latter is currently broken. */ /* XSIZE_store_full definition is omitted similar to load_full reason. */ /* TODO: Map store_write to RELEASE. */ #ifndef AO_SKIPATOMIC_XSIZE_store AO_INLINE void AO_XSIZE_store(volatile XCTYPE *addr, XCTYPE value) { __atomic_store_n(addr, value, __ATOMIC_RELAXED); } # define AO_HAVE_XSIZE_store #endif #ifndef AO_SKIPATOMIC_XSIZE_store_release AO_INLINE void AO_XSIZE_store_release(volatile XCTYPE *addr, XCTYPE value) { __atomic_store_n(addr, value, __ATOMIC_RELEASE); } # define AO_HAVE_XSIZE_store_release #endif #endif /* !AO_GCC_HAVE_XSIZE_SYNC_CAS || !AO_PREFER_GENERALIZED */ #ifdef AO_GCC_HAVE_XSIZE_SYNC_CAS AO_INLINE XCTYPE AO_XSIZE_fetch_compare_and_swap(volatile XCTYPE *addr, XCTYPE old_val, XCTYPE new_val) { (void)__atomic_compare_exchange_n(addr, &old_val /* p_expected */, new_val /* desired */, 0 /* is_weak: false */, __ATOMIC_RELAXED /* success */, __ATOMIC_RELAXED /* failure */); return old_val; } # define AO_HAVE_XSIZE_fetch_compare_and_swap AO_INLINE XCTYPE AO_XSIZE_fetch_compare_and_swap_acquire(volatile XCTYPE *addr, XCTYPE old_val, XCTYPE new_val) { (void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); return old_val; } # define AO_HAVE_XSIZE_fetch_compare_and_swap_acquire AO_INLINE XCTYPE AO_XSIZE_fetch_compare_and_swap_release(volatile XCTYPE *addr, XCTYPE old_val, XCTYPE new_val) { (void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED /* failure */); return old_val; } # define AO_HAVE_XSIZE_fetch_compare_and_swap_release AO_INLINE 
XCTYPE AO_XSIZE_fetch_compare_and_swap_full(volatile XCTYPE *addr, XCTYPE old_val, XCTYPE new_val) { (void)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE /* failure */); return old_val; } # define AO_HAVE_XSIZE_fetch_compare_and_swap_full # ifndef AO_GENERALIZE_ASM_BOOL_CAS AO_INLINE int AO_XSIZE_compare_and_swap(volatile XCTYPE *addr, XCTYPE old_val, XCTYPE new_val) { return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED); } # define AO_HAVE_XSIZE_compare_and_swap AO_INLINE int AO_XSIZE_compare_and_swap_acquire(volatile XCTYPE *addr, XCTYPE old_val, XCTYPE new_val) { return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); } # define AO_HAVE_XSIZE_compare_and_swap_acquire AO_INLINE int AO_XSIZE_compare_and_swap_release(volatile XCTYPE *addr, XCTYPE old_val, XCTYPE new_val) { return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED /* failure */); } # define AO_HAVE_XSIZE_compare_and_swap_release AO_INLINE int AO_XSIZE_compare_and_swap_full(volatile XCTYPE *addr, XCTYPE old_val, XCTYPE new_val) { return (int)__atomic_compare_exchange_n(addr, &old_val, new_val, 0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE /* failure */); } # define AO_HAVE_XSIZE_compare_and_swap_full # endif /* !AO_GENERALIZE_ASM_BOOL_CAS */ #endif /* AO_GCC_HAVE_XSIZE_SYNC_CAS */ libatomic_ops-7.6.12/src/atomic_ops/sysdeps/gcc/generic.h000066400000000000000000000172201411761111000233650ustar00rootroot00000000000000/* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * Copyright (c) 2013-2017 Ivan Maidanski * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. 
* * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. * */ /* The following implementation assumes GCC 4.7 or later. */ /* For the details, see GNU Manual, chapter 6.52 (Built-in functions */ /* for memory model aware atomic operations). */ #define AO_GCC_ATOMIC_TEST_AND_SET #include "../test_and_set_t_is_char.h" #if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1) \ || defined(AO_GCC_FORCE_HAVE_CAS) # define AO_GCC_HAVE_char_SYNC_CAS #endif #if (__SIZEOF_SHORT__ == 2 && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)) \ || defined(AO_GCC_FORCE_HAVE_CAS) # define AO_GCC_HAVE_short_SYNC_CAS #endif #if (__SIZEOF_INT__ == 4 && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)) \ || (__SIZEOF_INT__ == 8 && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)) \ || defined(AO_GCC_FORCE_HAVE_CAS) # define AO_GCC_HAVE_int_SYNC_CAS #endif #if (__SIZEOF_SIZE_T__ == 4 && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)) \ || (__SIZEOF_SIZE_T__ == 8 \ && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)) \ || defined(AO_GCC_FORCE_HAVE_CAS) # define AO_GCC_HAVE_SYNC_CAS #endif #undef AO_compiler_barrier #define AO_compiler_barrier() __atomic_signal_fence(__ATOMIC_SEQ_CST) #ifdef AO_UNIPROCESSOR /* If only a single processor (core) is used, AO_UNIPROCESSOR could */ /* be defined by the client to avoid unnecessary memory barrier. 
*/ AO_INLINE void AO_nop_full(void) { AO_compiler_barrier(); } # define AO_HAVE_nop_full #else AO_INLINE void AO_nop_read(void) { __atomic_thread_fence(__ATOMIC_ACQUIRE); } # define AO_HAVE_nop_read # ifndef AO_HAVE_nop_write AO_INLINE void AO_nop_write(void) { __atomic_thread_fence(__ATOMIC_RELEASE); } # define AO_HAVE_nop_write # endif AO_INLINE void AO_nop_full(void) { /* __sync_synchronize() could be used instead. */ __atomic_thread_fence(__ATOMIC_SEQ_CST); } # define AO_HAVE_nop_full #endif /* !AO_UNIPROCESSOR */ #include "generic-small.h" #ifndef AO_PREFER_GENERALIZED # include "generic-arithm.h" # define AO_CLEAR(addr) __atomic_clear(addr, __ATOMIC_RELEASE) # define AO_HAVE_CLEAR AO_INLINE AO_TS_VAL_t AO_test_and_set(volatile AO_TS_t *addr) { return (AO_TS_VAL_t)__atomic_test_and_set(addr, __ATOMIC_RELAXED); } # define AO_HAVE_test_and_set AO_INLINE AO_TS_VAL_t AO_test_and_set_acquire(volatile AO_TS_t *addr) { return (AO_TS_VAL_t)__atomic_test_and_set(addr, __ATOMIC_ACQUIRE); } # define AO_HAVE_test_and_set_acquire AO_INLINE AO_TS_VAL_t AO_test_and_set_release(volatile AO_TS_t *addr) { return (AO_TS_VAL_t)__atomic_test_and_set(addr, __ATOMIC_RELEASE); } # define AO_HAVE_test_and_set_release AO_INLINE AO_TS_VAL_t AO_test_and_set_full(volatile AO_TS_t *addr) { return (AO_TS_VAL_t)__atomic_test_and_set(addr, __ATOMIC_SEQ_CST); } # define AO_HAVE_test_and_set_full #endif /* !AO_PREFER_GENERALIZED */ #ifdef AO_HAVE_DOUBLE_PTR_STORAGE # if ((__SIZEOF_SIZE_T__ == 4 \ && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)) \ || (__SIZEOF_SIZE_T__ == 8 /* half of AO_double_t */ \ && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16))) \ && !defined(AO_SKIPATOMIC_double_compare_and_swap_ANY) # define AO_GCC_HAVE_double_SYNC_CAS # endif # if !defined(AO_GCC_HAVE_double_SYNC_CAS) || !defined(AO_PREFER_GENERALIZED) # if !defined(AO_HAVE_double_load) && !defined(AO_SKIPATOMIC_double_load) AO_INLINE AO_double_t AO_double_load(const volatile AO_double_t *addr) { AO_double_t result; 
result.AO_whole = __atomic_load_n(&addr->AO_whole, __ATOMIC_RELAXED); return result; } # define AO_HAVE_double_load # endif # if !defined(AO_HAVE_double_load_acquire) \ && !defined(AO_SKIPATOMIC_double_load_acquire) AO_INLINE AO_double_t AO_double_load_acquire(const volatile AO_double_t *addr) { AO_double_t result; result.AO_whole = __atomic_load_n(&addr->AO_whole, __ATOMIC_ACQUIRE); return result; } # define AO_HAVE_double_load_acquire # endif # if !defined(AO_HAVE_double_store) && !defined(AO_SKIPATOMIC_double_store) AO_INLINE void AO_double_store(volatile AO_double_t *addr, AO_double_t value) { __atomic_store_n(&addr->AO_whole, value.AO_whole, __ATOMIC_RELAXED); } # define AO_HAVE_double_store # endif # if !defined(AO_HAVE_double_store_release) \ && !defined(AO_SKIPATOMIC_double_store_release) AO_INLINE void AO_double_store_release(volatile AO_double_t *addr, AO_double_t value) { __atomic_store_n(&addr->AO_whole, value.AO_whole, __ATOMIC_RELEASE); } # define AO_HAVE_double_store_release # endif #endif /* !AO_GCC_HAVE_double_SYNC_CAS || !AO_PREFER_GENERALIZED */ #endif /* AO_HAVE_DOUBLE_PTR_STORAGE */ #ifdef AO_GCC_HAVE_double_SYNC_CAS # ifndef AO_HAVE_double_compare_and_swap AO_INLINE int AO_double_compare_and_swap(volatile AO_double_t *addr, AO_double_t old_val, AO_double_t new_val) { return (int)__atomic_compare_exchange_n(&addr->AO_whole, &old_val.AO_whole /* p_expected */, new_val.AO_whole /* desired */, 0 /* is_weak: false */, __ATOMIC_RELAXED /* success */, __ATOMIC_RELAXED /* failure */); } # define AO_HAVE_double_compare_and_swap # endif # ifndef AO_HAVE_double_compare_and_swap_acquire AO_INLINE int AO_double_compare_and_swap_acquire(volatile AO_double_t *addr, AO_double_t old_val, AO_double_t new_val) { return (int)__atomic_compare_exchange_n(&addr->AO_whole, &old_val.AO_whole, new_val.AO_whole, 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); } # define AO_HAVE_double_compare_and_swap_acquire # endif # ifndef AO_HAVE_double_compare_and_swap_release AO_INLINE 
int AO_double_compare_and_swap_release(volatile AO_double_t *addr, AO_double_t old_val, AO_double_t new_val) { return (int)__atomic_compare_exchange_n(&addr->AO_whole, &old_val.AO_whole, new_val.AO_whole, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED /* failure */); } # define AO_HAVE_double_compare_and_swap_release # endif # ifndef AO_HAVE_double_compare_and_swap_full AO_INLINE int AO_double_compare_and_swap_full(volatile AO_double_t *addr, AO_double_t old_val, AO_double_t new_val) { return (int)__atomic_compare_exchange_n(&addr->AO_whole, &old_val.AO_whole, new_val.AO_whole, 0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE /* failure */); } # define AO_HAVE_double_compare_and_swap_full # endif #endif /* AO_GCC_HAVE_double_SYNC_CAS */ libatomic_ops-7.6.12/src/atomic_ops/sysdeps/gcc/hexagon.h000066400000000000000000000111531411761111000234010ustar00rootroot00000000000000/* * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. */ #if AO_CLANG_PREREQ(3, 9) && !defined(AO_DISABLE_GCC_ATOMICS) /* Probably, it could be enabled for earlier clang versions as well. */ /* As of clang-3.9, __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n are missing. */ # define AO_GCC_FORCE_HAVE_CAS # define AO_GCC_HAVE_double_SYNC_CAS # include "../standard_ao_double_t.h" # include "generic.h" #else /* AO_DISABLE_GCC_ATOMICS */ #include "../all_aligned_atomic_load_store.h" #include "../test_and_set_t_is_ao_t.h" /* There's also "isync" and "barrier"; however, for all current CPU */ /* versions, "syncht" should suffice. 
Likewise, it seems that the */ /* auto-defined versions of *_acquire, *_release or *_full suffice for */ /* all current ISA implementations. */ AO_INLINE void AO_nop_full(void) { __asm__ __volatile__("syncht" : : : "memory"); } #define AO_HAVE_nop_full /* The Hexagon has load-locked, store-conditional primitives, and so */ /* resulting code is very nearly identical to that of PowerPC. */ #ifndef AO_PREFER_GENERALIZED AO_INLINE AO_t AO_fetch_and_add(volatile AO_t *addr, AO_t incr) { AO_t oldval; AO_t newval; __asm__ __volatile__( "1:\n" " %0 = memw_locked(%3);\n" /* load and reserve */ " %1 = add (%0,%4);\n" /* increment */ " memw_locked(%3,p1) = %1;\n" /* store conditional */ " if (!p1) jump 1b;\n" /* retry if lost reservation */ : "=&r"(oldval), "=&r"(newval), "+m"(*addr) : "r"(addr), "r"(incr) : "memory", "p1"); return oldval; } #define AO_HAVE_fetch_and_add AO_INLINE AO_TS_VAL_t AO_test_and_set(volatile AO_TS_t *addr) { int oldval; int locked_value = 1; __asm__ __volatile__( "1:\n" " %0 = memw_locked(%2);\n" /* load and reserve */ " {\n" " p2 = cmp.eq(%0,#0);\n" /* if load is not zero, */ " if (!p2.new) jump:nt 2f;\n" /* we are done */ " }\n" " memw_locked(%2,p1) = %3;\n" /* else store conditional */ " if (!p1) jump 1b;\n" /* retry if lost reservation */ "2:\n" /* oldval is zero if we set */ : "=&r"(oldval), "+m"(*addr) : "r"(addr), "r"(locked_value) : "memory", "p1", "p2"); return (AO_TS_VAL_t)oldval; } #define AO_HAVE_test_and_set #endif /* !AO_PREFER_GENERALIZED */ #ifndef AO_GENERALIZE_ASM_BOOL_CAS AO_INLINE int AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val) { AO_t __oldval; int result = 0; __asm__ __volatile__( "1:\n" " %0 = memw_locked(%3);\n" /* load and reserve */ " {\n" " p2 = cmp.eq(%0,%4);\n" /* if load is not equal to */ " if (!p2.new) jump:nt 2f;\n" /* old, fail */ " }\n" " memw_locked(%3,p1) = %5;\n" /* else store conditional */ " if (!p1) jump 1b;\n" /* retry if lost reservation */ " %1 = #1\n" /* success, result = 1 */ "2:\n" : 
"=&r" (__oldval), "+r" (result), "+m"(*addr) : "r" (addr), "r" (old), "r" (new_val) : "p1", "p2", "memory" ); return result; } # define AO_HAVE_compare_and_swap #endif /* !AO_GENERALIZE_ASM_BOOL_CAS */ AO_INLINE AO_t AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val) { AO_t __oldval; __asm__ __volatile__( "1:\n" " %0 = memw_locked(%2);\n" /* load and reserve */ " {\n" " p2 = cmp.eq(%0,%3);\n" /* if load is not equal to */ " if (!p2.new) jump:nt 2f;\n" /* old_val, fail */ " }\n" " memw_locked(%2,p1) = %4;\n" /* else store conditional */ " if (!p1) jump 1b;\n" /* retry if lost reservation */ "2:\n" : "=&r" (__oldval), "+m"(*addr) : "r" (addr), "r" (old_val), "r" (new_val) : "p1", "p2", "memory" ); return __oldval; } #define AO_HAVE_fetch_compare_and_swap #define AO_T_IS_INT #endif /* AO_DISABLE_GCC_ATOMICS */ #undef AO_GCC_FORCE_HAVE_CAS #undef AO_GCC_HAVE_double_SYNC_CAS libatomic_ops-7.6.12/src/atomic_ops/sysdeps/gcc/hppa.h000066400000000000000000000075751411761111000227150ustar00rootroot00000000000000/* * Copyright (c) 2003 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "../all_atomic_load_store.h" /* Some architecture set descriptions include special "ordered" memory */ /* operations. As far as we can tell, no existing processors actually */ /* require those. Nor does it appear likely that future processors */ /* will. */ #include "../ordered.h" /* GCC will not guarantee the alignment we need, use four lock words */ /* and select the correctly aligned datum. See the glibc 2.3.2 */ /* linuxthread port for the original implementation. */ struct AO_pa_clearable_loc { int data[4]; }; #undef AO_TS_INITIALIZER #define AO_TS_t struct AO_pa_clearable_loc #define AO_TS_INITIALIZER { { 1, 1, 1, 1 } } /* Switch meaning of set and clear, since we only have an atomic clear */ /* instruction. */ typedef enum {AO_PA_TS_set = 0, AO_PA_TS_clear = 1} AO_PA_TS_val; #define AO_TS_VAL_t AO_PA_TS_val #define AO_TS_CLEAR AO_PA_TS_clear #define AO_TS_SET AO_PA_TS_set /* The hppa only has one atomic read and modify memory operation, */ /* load and clear, so hppa spinlocks must use zero to signify that */ /* someone is holding the lock. The address used for the ldcw */ /* semaphore must be 16-byte aligned. */ #define AO_ldcw(a, ret) \ __asm__ __volatile__("ldcw 0(%2), %0" \ : "=r" (ret), "=m" (*(a)) : "r" (a)) /* Because malloc only guarantees 8-byte alignment for malloc'd data, */ /* and GCC only guarantees 8-byte alignment for stack locals, we can't */ /* be assured of 16-byte alignment for atomic lock data even if we */ /* specify "__attribute ((aligned(16)))" in the type declaration. So, */ /* we use a struct containing an array of four ints for the atomic lock */ /* type and dynamically select the 16-byte aligned int from the array */ /* for the semaphore. 
*/ #define AO_PA_LDCW_ALIGNMENT 16 #define AO_ldcw_align(addr) \ ((volatile unsigned *)(((unsigned long)(addr) \ + (AO_PA_LDCW_ALIGNMENT - 1)) \ & ~(AO_PA_LDCW_ALIGNMENT - 1))) /* Works on PA 1.1 and PA 2.0 systems */ AO_INLINE AO_TS_VAL_t AO_test_and_set_full(volatile AO_TS_t * addr) { volatile unsigned int ret; volatile unsigned *a = AO_ldcw_align(addr); AO_ldcw(a, ret); return (AO_TS_VAL_t)ret; } #define AO_HAVE_test_and_set_full AO_INLINE void AO_pa_clear(volatile AO_TS_t * addr) { volatile unsigned *a = AO_ldcw_align(addr); AO_compiler_barrier(); *a = 1; } #define AO_CLEAR(addr) AO_pa_clear(addr) #define AO_HAVE_CLEAR #undef AO_PA_LDCW_ALIGNMENT #undef AO_ldcw #undef AO_ldcw_align libatomic_ops-7.6.12/src/atomic_ops/sysdeps/gcc/ia64.h000066400000000000000000000234661411761111000225250ustar00rootroot00000000000000/* * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include "../all_atomic_load_store.h" #include "../all_acquire_release_volatile.h" #include "../test_and_set_t_is_char.h" #ifdef _ILP32 /* 32-bit HP/UX code. */ /* This requires pointer "swizzling". Pointers need to be expanded */ /* to 64 bits using the addp4 instruction before use. This makes it */ /* hard to share code, but we try anyway. */ # define AO_LEN "4" /* We assume that addr always appears in argument position 1 in asm */ /* code. If it is clobbered due to swizzling, we also need it in */ /* second position. Any later arguments are referenced symbolically, */ /* so that we don't have to worry about their position. This requires*/ /* gcc 3.1, but you shouldn't be using anything older than that on */ /* IA64 anyway. */ /* The AO_MASK macro is a workaround for the fact that HP/UX gcc */ /* appears to otherwise store 64-bit pointers in ar.ccv, i.e. it */ /* doesn't appear to clear high bits in a pointer value we pass into */ /* assembly code, even if it is supposedly of type AO_t. 
*/ # define AO_IN_ADDR "1"(addr) # define AO_OUT_ADDR , "=r"(addr) # define AO_SWIZZLE "addp4 %1=0,%1;;\n" # define AO_MASK(ptr) __asm__ __volatile__("zxt4 %1=%1": "=r"(ptr) : "0"(ptr)) #else # define AO_LEN "8" # define AO_IN_ADDR "r"(addr) # define AO_OUT_ADDR # define AO_SWIZZLE # define AO_MASK(ptr) /* empty */ #endif /* !_ILP32 */ AO_INLINE void AO_nop_full(void) { __asm__ __volatile__("mf" : : : "memory"); } #define AO_HAVE_nop_full #ifndef AO_PREFER_GENERALIZED AO_INLINE AO_t AO_fetch_and_add1_acquire (volatile AO_t *addr) { AO_t result; __asm__ __volatile__ (AO_SWIZZLE "fetchadd" AO_LEN ".acq %0=[%1],1": "=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory"); return result; } #define AO_HAVE_fetch_and_add1_acquire AO_INLINE AO_t AO_fetch_and_add1_release (volatile AO_t *addr) { AO_t result; __asm__ __volatile__ (AO_SWIZZLE "fetchadd" AO_LEN ".rel %0=[%1],1": "=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory"); return result; } #define AO_HAVE_fetch_and_add1_release AO_INLINE AO_t AO_fetch_and_sub1_acquire (volatile AO_t *addr) { AO_t result; __asm__ __volatile__ (AO_SWIZZLE "fetchadd" AO_LEN ".acq %0=[%1],-1": "=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory"); return result; } #define AO_HAVE_fetch_and_sub1_acquire AO_INLINE AO_t AO_fetch_and_sub1_release (volatile AO_t *addr) { AO_t result; __asm__ __volatile__ (AO_SWIZZLE "fetchadd" AO_LEN ".rel %0=[%1],-1": "=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory"); return result; } #define AO_HAVE_fetch_and_sub1_release #endif /* !AO_PREFER_GENERALIZED */ AO_INLINE AO_t AO_fetch_compare_and_swap_acquire(volatile AO_t *addr, AO_t old, AO_t new_val) { AO_t fetched_val; AO_MASK(old); __asm__ __volatile__(AO_SWIZZLE "mov ar.ccv=%[old] ;; cmpxchg" AO_LEN ".acq %0=[%1],%[new_val],ar.ccv" : "=r"(fetched_val) AO_OUT_ADDR : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"(old) : "memory"); return fetched_val; } #define AO_HAVE_fetch_compare_and_swap_acquire AO_INLINE AO_t AO_fetch_compare_and_swap_release(volatile AO_t *addr, AO_t old, 
AO_t new_val) { AO_t fetched_val; AO_MASK(old); __asm__ __volatile__(AO_SWIZZLE "mov ar.ccv=%[old] ;; cmpxchg" AO_LEN ".rel %0=[%1],%[new_val],ar.ccv" : "=r"(fetched_val) AO_OUT_ADDR : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"(old) : "memory"); return fetched_val; } #define AO_HAVE_fetch_compare_and_swap_release AO_INLINE unsigned char AO_char_fetch_compare_and_swap_acquire(volatile unsigned char *addr, unsigned char old, unsigned char new_val) { unsigned char fetched_val; __asm__ __volatile__(AO_SWIZZLE "mov ar.ccv=%[old] ;; cmpxchg1.acq %0=[%1],%[new_val],ar.ccv" : "=r"(fetched_val) AO_OUT_ADDR : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old) : "memory"); return fetched_val; } #define AO_HAVE_char_fetch_compare_and_swap_acquire AO_INLINE unsigned char AO_char_fetch_compare_and_swap_release(volatile unsigned char *addr, unsigned char old, unsigned char new_val) { unsigned char fetched_val; __asm__ __volatile__(AO_SWIZZLE "mov ar.ccv=%[old] ;; cmpxchg1.rel %0=[%1],%[new_val],ar.ccv" : "=r"(fetched_val) AO_OUT_ADDR : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old) : "memory"); return fetched_val; } #define AO_HAVE_char_fetch_compare_and_swap_release AO_INLINE unsigned short AO_short_fetch_compare_and_swap_acquire(volatile unsigned short *addr, unsigned short old, unsigned short new_val) { unsigned short fetched_val; __asm__ __volatile__(AO_SWIZZLE "mov ar.ccv=%[old] ;; cmpxchg2.acq %0=[%1],%[new_val],ar.ccv" : "=r"(fetched_val) AO_OUT_ADDR : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old) : "memory"); return fetched_val; } #define AO_HAVE_short_fetch_compare_and_swap_acquire AO_INLINE unsigned short AO_short_fetch_compare_and_swap_release(volatile unsigned short *addr, unsigned short old, unsigned short new_val) { unsigned short fetched_val; __asm__ __volatile__(AO_SWIZZLE "mov ar.ccv=%[old] ;; cmpxchg2.rel %0=[%1],%[new_val],ar.ccv" : "=r"(fetched_val) AO_OUT_ADDR : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old) : "memory"); return 
fetched_val; } #define AO_HAVE_short_fetch_compare_and_swap_release #ifdef _ILP32 # define AO_T_IS_INT /* TODO: Add compare_double_and_swap_double for the _ILP32 case. */ #else # ifndef AO_PREFER_GENERALIZED AO_INLINE unsigned int AO_int_fetch_and_add1_acquire(volatile unsigned int *addr) { unsigned int result; __asm__ __volatile__("fetchadd4.acq %0=[%1],1" : "=r" (result) : AO_IN_ADDR : "memory"); return result; } # define AO_HAVE_int_fetch_and_add1_acquire AO_INLINE unsigned int AO_int_fetch_and_add1_release(volatile unsigned int *addr) { unsigned int result; __asm__ __volatile__("fetchadd4.rel %0=[%1],1" : "=r" (result) : AO_IN_ADDR : "memory"); return result; } # define AO_HAVE_int_fetch_and_add1_release AO_INLINE unsigned int AO_int_fetch_and_sub1_acquire(volatile unsigned int *addr) { unsigned int result; __asm__ __volatile__("fetchadd4.acq %0=[%1],-1" : "=r" (result) : AO_IN_ADDR : "memory"); return result; } # define AO_HAVE_int_fetch_and_sub1_acquire AO_INLINE unsigned int AO_int_fetch_and_sub1_release(volatile unsigned int *addr) { unsigned int result; __asm__ __volatile__("fetchadd4.rel %0=[%1],-1" : "=r" (result) : AO_IN_ADDR : "memory"); return result; } # define AO_HAVE_int_fetch_and_sub1_release # endif /* !AO_PREFER_GENERALIZED */ AO_INLINE unsigned int AO_int_fetch_compare_and_swap_acquire(volatile unsigned int *addr, unsigned int old, unsigned int new_val) { unsigned int fetched_val; __asm__ __volatile__("mov ar.ccv=%3 ;; cmpxchg4.acq %0=[%1],%2,ar.ccv" : "=r"(fetched_val) : AO_IN_ADDR, "r"(new_val), "r"((AO_t)old) : "memory"); return fetched_val; } # define AO_HAVE_int_fetch_compare_and_swap_acquire AO_INLINE unsigned int AO_int_fetch_compare_and_swap_release(volatile unsigned int *addr, unsigned int old, unsigned int new_val) { unsigned int fetched_val; __asm__ __volatile__("mov ar.ccv=%3 ;; cmpxchg4.rel %0=[%1],%2,ar.ccv" : "=r"(fetched_val) : AO_IN_ADDR, "r"(new_val), "r"((AO_t)old) : "memory"); return fetched_val; } # define 
AO_HAVE_int_fetch_compare_and_swap_release #endif /* !_ILP32 */ /* TODO: Add compare_and_swap_double as soon as there is widely */ /* available hardware that implements it. */ #undef AO_IN_ADDR #undef AO_LEN #undef AO_MASK #undef AO_OUT_ADDR #undef AO_SWIZZLE libatomic_ops-7.6.12/src/atomic_ops/sysdeps/gcc/m68k.h000066400000000000000000000042171411761111000225400ustar00rootroot00000000000000/* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. * */ /* The cas instruction causes an emulation trap for the */ /* 060 with a misaligned pointer, so let's avoid this. */ #undef AO_t typedef unsigned long AO_t __attribute__((__aligned__(4))); /* FIXME. Very incomplete. */ #include "../all_aligned_atomic_load_store.h" /* Are there any m68k multiprocessors still around? */ /* AFAIK, Alliants were sequentially consistent. */ #include "../ordered.h" #include "../test_and_set_t_is_char.h" AO_INLINE AO_TS_VAL_t AO_test_and_set_full(volatile AO_TS_t *addr) { AO_TS_t oldval; /* The value at addr is semi-phony. */ /* 'tas' sets bit 7 while the return */ /* value pretends all bits were set, */ /* which at least matches AO_TS_SET. */ __asm__ __volatile__( "tas %1; sne %0" : "=d" (oldval), "=m" (*addr) : "m" (*addr) : "memory"); /* This cast works due to the above. 
*/ return (AO_TS_VAL_t)oldval; } #define AO_HAVE_test_and_set_full /* Returns nonzero if the comparison succeeded. */ AO_INLINE int AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) { char result; __asm__ __volatile__( "cas.l %3,%4,%1; seq %0" : "=d" (result), "=m" (*addr) : "m" (*addr), "d" (old), "d" (new_val) : "memory"); return -result; } #define AO_HAVE_compare_and_swap_full /* TODO: implement AO_fetch_compare_and_swap. */ #define AO_T_IS_INT libatomic_ops-7.6.12/src/atomic_ops/sysdeps/gcc/mips.h000066400000000000000000000131421411761111000227200ustar00rootroot00000000000000/* * Copyright (c) 2005,2007 Thiemo Seufer * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. */ /* * FIXME: This should probably make finer distinctions. SGI MIPS is * much more strongly ordered, and in fact closer to sequentially * consistent. This is really aimed at modern embedded implementations. */ /* Data dependence does not imply read ordering. */ #define AO_NO_DD_ORDERING /* #include "../standard_ao_double_t.h" */ /* TODO: Implement double-wide operations if available. */ #if (AO_GNUC_PREREQ(4, 9) || AO_CLANG_PREREQ(3, 5)) \ && !defined(AO_DISABLE_GCC_ATOMICS) /* Probably, it could be enabled even for earlier gcc/clang versions. */ /* As of clang-3.6/mips[64], __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n missing. 
*/ # if defined(__clang__) # define AO_GCC_FORCE_HAVE_CAS # endif # include "generic.h" #else /* AO_DISABLE_GCC_ATOMICS */ # include "../test_and_set_t_is_ao_t.h" # include "../all_aligned_atomic_load_store.h" # if !defined(_ABI64) || _MIPS_SIM != _ABI64 # define AO_T_IS_INT # if __mips_isa_rev >= 6 /* Encoding of ll/sc in mips rel6 differs from that of mips2/3. */ # define AO_MIPS_SET_ISA "" # else # define AO_MIPS_SET_ISA " .set mips2\n" # endif # define AO_MIPS_LL_1(args) " ll " args "\n" # define AO_MIPS_SC(args) " sc " args "\n" # else # if __mips_isa_rev >= 6 # define AO_MIPS_SET_ISA "" # else # define AO_MIPS_SET_ISA " .set mips3\n" # endif # define AO_MIPS_LL_1(args) " lld " args "\n" # define AO_MIPS_SC(args) " scd " args "\n" # endif /* _MIPS_SIM == _ABI64 */ #ifdef AO_ICE9A1_LLSC_WAR /* ICE9 rev A1 chip (used in very few systems) is reported to */ /* have a low-frequency bug that causes LL to fail. */ /* To workaround, just issue the second 'LL'. */ # define AO_MIPS_LL(args) AO_MIPS_LL_1(args) AO_MIPS_LL_1(args) #else # define AO_MIPS_LL(args) AO_MIPS_LL_1(args) #endif AO_INLINE void AO_nop_full(void) { __asm__ __volatile__( " .set push\n" AO_MIPS_SET_ISA " .set noreorder\n" " .set nomacro\n" " sync\n" " .set pop" : : : "memory"); } #define AO_HAVE_nop_full #ifndef AO_PREFER_GENERALIZED AO_INLINE AO_t AO_fetch_and_add(volatile AO_t *addr, AO_t incr) { register int result; register int temp; __asm__ __volatile__( " .set push\n" AO_MIPS_SET_ISA " .set noreorder\n" " .set nomacro\n" "1: " AO_MIPS_LL("%0, %2") " addu %1, %0, %3\n" AO_MIPS_SC("%1, %2") " beqz %1, 1b\n" " nop\n" " .set pop" : "=&r" (result), "=&r" (temp), "+m" (*addr) : "Ir" (incr) : "memory"); return (AO_t)result; } #define AO_HAVE_fetch_and_add AO_INLINE AO_TS_VAL_t AO_test_and_set(volatile AO_TS_t *addr) { register int oldval; register int temp; __asm__ __volatile__( " .set push\n" AO_MIPS_SET_ISA " .set noreorder\n" " .set nomacro\n" "1: " AO_MIPS_LL("%0, %2") " move %1, %3\n" 
AO_MIPS_SC("%1, %2") " beqz %1, 1b\n" " nop\n" " .set pop" : "=&r" (oldval), "=&r" (temp), "+m" (*addr) : "r" (1) : "memory"); return (AO_TS_VAL_t)oldval; } #define AO_HAVE_test_and_set /* TODO: Implement AO_and/or/xor primitives directly. */ #endif /* !AO_PREFER_GENERALIZED */ #ifndef AO_GENERALIZE_ASM_BOOL_CAS AO_INLINE int AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val) { register int was_equal = 0; register int temp; __asm__ __volatile__( " .set push\n" AO_MIPS_SET_ISA " .set noreorder\n" " .set nomacro\n" "1: " AO_MIPS_LL("%0, %1") " bne %0, %4, 2f\n" " move %0, %3\n" AO_MIPS_SC("%0, %1") " .set pop\n" " beqz %0, 1b\n" " li %2, 1\n" "2:" : "=&r" (temp), "+m" (*addr), "+r" (was_equal) : "r" (new_val), "r" (old) : "memory"); return was_equal; } # define AO_HAVE_compare_and_swap #endif /* !AO_GENERALIZE_ASM_BOOL_CAS */ AO_INLINE AO_t AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val) { register int fetched_val; register int temp; __asm__ __volatile__( " .set push\n" AO_MIPS_SET_ISA " .set noreorder\n" " .set nomacro\n" "1: " AO_MIPS_LL("%0, %2") " bne %0, %4, 2f\n" " move %1, %3\n" AO_MIPS_SC("%1, %2") " beqz %1, 1b\n" " nop\n" " .set pop\n" "2:" : "=&r" (fetched_val), "=&r" (temp), "+m" (*addr) : "r" (new_val), "Jr" (old) : "memory"); return (AO_t)fetched_val; } #define AO_HAVE_fetch_compare_and_swap #endif /* AO_DISABLE_GCC_ATOMICS */ /* CAS primitives with acquire, release and full semantics are */ /* generated automatically (and AO_int_... primitives are */ /* defined properly after the first generalization pass). */ #undef AO_GCC_FORCE_HAVE_CAS #undef AO_MIPS_LL #undef AO_MIPS_LL_1 #undef AO_MIPS_SC #undef AO_MIPS_SET_ISA libatomic_ops-7.6.12/src/atomic_ops/sysdeps/gcc/powerpc.h000066400000000000000000000256701411761111000234400ustar00rootroot00000000000000/* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. 
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. * */ /* Memory model documented at http://www-106.ibm.com/developerworks/ */ /* eserver/articles/archguide.html and (clearer) */ /* http://www-106.ibm.com/developerworks/eserver/articles/powerpc.html. */ /* There appears to be no implicit ordering between any kind of */ /* independent memory references. */ /* TODO: Implement double-wide operations if available. */ #if (AO_GNUC_PREREQ(4, 8) || AO_CLANG_PREREQ(3, 8)) \ && !defined(AO_DISABLE_GCC_ATOMICS) /* Probably, it could be enabled even for earlier gcc/clang versions. */ /* TODO: As of clang-3.8.1, it emits lwsync in AO_load_acquire */ /* (i.e., the code is less efficient than the one given below). */ # include "generic.h" #else /* AO_DISABLE_GCC_ATOMICS */ /* Architecture enforces some ordering based on control dependence. */ /* I don't know if that could help. */ /* Data-dependent loads are always ordered. */ /* Based on the above references, eieio is intended for use on */ /* uncached memory, which we don't support. It does not order loads */ /* from cached memory. */ #include "../all_aligned_atomic_load_store.h" #include "../test_and_set_t_is_ao_t.h" /* There seems to be no byte equivalent of lwarx, so this */ /* may really be what we want, at least in the 32-bit case. */ AO_INLINE void AO_nop_full(void) { __asm__ __volatile__("sync" : : : "memory"); } #define AO_HAVE_nop_full /* lwsync apparently works for everything but a StoreLoad barrier. 
*/ AO_INLINE void AO_lwsync(void) { #ifdef __NO_LWSYNC__ __asm__ __volatile__("sync" : : : "memory"); #else __asm__ __volatile__("lwsync" : : : "memory"); #endif } #define AO_nop_write() AO_lwsync() #define AO_HAVE_nop_write #define AO_nop_read() AO_lwsync() #define AO_HAVE_nop_read #if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__) /* ppc64 uses ld not lwz */ # define AO_PPC_LD "ld" # define AO_PPC_LxARX "ldarx" # define AO_PPC_CMPx "cmpd" # define AO_PPC_STxCXd "stdcx." # define AO_PPC_LOAD_CLOBBER "cr0" #else # define AO_PPC_LD "lwz" # define AO_PPC_LxARX "lwarx" # define AO_PPC_CMPx "cmpw" # define AO_PPC_STxCXd "stwcx." # define AO_PPC_LOAD_CLOBBER "cc" /* FIXME: We should get gcc to allocate one of the condition */ /* registers. I always got "impossible constraint" when I */ /* tried the "y" constraint. */ # define AO_T_IS_INT #endif #ifdef _AIX /* Labels are not supported on AIX. */ /* ppc64 has same size of instructions as 32-bit one. */ # define AO_PPC_L(label) /* empty */ # define AO_PPC_BR_A(labelBF, addr) addr #else # define AO_PPC_L(label) label ": " # define AO_PPC_BR_A(labelBF, addr) labelBF #endif /* We explicitly specify load_acquire, since it is important, and can */ /* be implemented relatively cheaply. It could be implemented */ /* with an ordinary load followed by a lwsync. But the general wisdom */ /* seems to be that a data dependent branch followed by an isync is */ /* cheaper. And the documentation is fairly explicit that this also */ /* has acquire semantics. */ AO_INLINE AO_t AO_load_acquire(const volatile AO_t *addr) { AO_t result; __asm__ __volatile__ ( AO_PPC_LD "%U1%X1 %0,%1\n" "cmpw %0,%0\n" "bne- " AO_PPC_BR_A("1f", "$+4") "\n" AO_PPC_L("1") "isync\n" : "=r" (result) : "m"(*addr) : "memory", AO_PPC_LOAD_CLOBBER); return result; } #define AO_HAVE_load_acquire /* We explicitly specify store_release, since it relies */ /* on the fact that lwsync is also a LoadStore barrier. 
*/ AO_INLINE void AO_store_release(volatile AO_t *addr, AO_t value) { AO_lwsync(); *addr = value; } #define AO_HAVE_store_release #ifndef AO_PREFER_GENERALIZED /* This is similar to the code in the garbage collector. Deleting */ /* this and having it synthesized from compare_and_swap would probably */ /* only cost us a load immediate instruction. */ AO_INLINE AO_TS_VAL_t AO_test_and_set(volatile AO_TS_t *addr) { /* TODO: And we should be using smaller objects anyway. */ AO_t oldval; AO_t temp = 1; /* locked value */ __asm__ __volatile__( AO_PPC_L("1") AO_PPC_LxARX " %0,0,%1\n" /* load and reserve */ AO_PPC_CMPx "i %0, 0\n" /* if load is */ "bne " AO_PPC_BR_A("2f", "$+12") "\n" /* non-zero, return already set */ AO_PPC_STxCXd " %2,0,%1\n" /* else store conditional */ "bne- " AO_PPC_BR_A("1b", "$-16") "\n" /* retry if lost reservation */ AO_PPC_L("2") "\n" /* oldval is zero if we set */ : "=&r"(oldval) : "r"(addr), "r"(temp) : "memory", "cr0"); return (AO_TS_VAL_t)oldval; } #define AO_HAVE_test_and_set AO_INLINE AO_TS_VAL_t AO_test_and_set_acquire(volatile AO_TS_t *addr) { AO_TS_VAL_t result = AO_test_and_set(addr); AO_lwsync(); return result; } #define AO_HAVE_test_and_set_acquire AO_INLINE AO_TS_VAL_t AO_test_and_set_release(volatile AO_TS_t *addr) { AO_lwsync(); return AO_test_and_set(addr); } #define AO_HAVE_test_and_set_release AO_INLINE AO_TS_VAL_t AO_test_and_set_full(volatile AO_TS_t *addr) { AO_TS_VAL_t result; AO_lwsync(); result = AO_test_and_set(addr); AO_lwsync(); return result; } #define AO_HAVE_test_and_set_full #endif /* !AO_PREFER_GENERALIZED */ #ifndef AO_GENERALIZE_ASM_BOOL_CAS AO_INLINE int AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val) { AO_t oldval; int result = 0; __asm__ __volatile__( AO_PPC_L("1") AO_PPC_LxARX " %0,0,%2\n" /* load and reserve */ AO_PPC_CMPx " %0, %4\n" /* if load is not equal to */ "bne " AO_PPC_BR_A("2f", "$+16") "\n" /* old, fail */ AO_PPC_STxCXd " %3,0,%2\n" /* else store conditional */ "bne- " 
AO_PPC_BR_A("1b", "$-16") "\n" /* retry if lost reservation */ "li %1,1\n" /* result = 1; */ AO_PPC_L("2") "\n" : "=&r"(oldval), "=&r"(result) : "r"(addr), "r"(new_val), "r"(old), "1"(result) : "memory", "cr0"); return result; } # define AO_HAVE_compare_and_swap AO_INLINE int AO_compare_and_swap_acquire(volatile AO_t *addr, AO_t old, AO_t new_val) { int result = AO_compare_and_swap(addr, old, new_val); AO_lwsync(); return result; } # define AO_HAVE_compare_and_swap_acquire AO_INLINE int AO_compare_and_swap_release(volatile AO_t *addr, AO_t old, AO_t new_val) { AO_lwsync(); return AO_compare_and_swap(addr, old, new_val); } # define AO_HAVE_compare_and_swap_release AO_INLINE int AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) { int result; AO_lwsync(); result = AO_compare_and_swap(addr, old, new_val); if (result) AO_lwsync(); return result; } # define AO_HAVE_compare_and_swap_full #endif /* !AO_GENERALIZE_ASM_BOOL_CAS */ AO_INLINE AO_t AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val) { AO_t fetched_val; __asm__ __volatile__( AO_PPC_L("1") AO_PPC_LxARX " %0,0,%1\n" /* load and reserve */ AO_PPC_CMPx " %0, %3\n" /* if load is not equal to */ "bne " AO_PPC_BR_A("2f", "$+12") "\n" /* old_val, fail */ AO_PPC_STxCXd " %2,0,%1\n" /* else store conditional */ "bne- " AO_PPC_BR_A("1b", "$-16") "\n" /* retry if lost reservation */ AO_PPC_L("2") "\n" : "=&r"(fetched_val) : "r"(addr), "r"(new_val), "r"(old_val) : "memory", "cr0"); return fetched_val; } #define AO_HAVE_fetch_compare_and_swap AO_INLINE AO_t AO_fetch_compare_and_swap_acquire(volatile AO_t *addr, AO_t old_val, AO_t new_val) { AO_t result = AO_fetch_compare_and_swap(addr, old_val, new_val); AO_lwsync(); return result; } #define AO_HAVE_fetch_compare_and_swap_acquire AO_INLINE AO_t AO_fetch_compare_and_swap_release(volatile AO_t *addr, AO_t old_val, AO_t new_val) { AO_lwsync(); return AO_fetch_compare_and_swap(addr, old_val, new_val); } #define 
AO_HAVE_fetch_compare_and_swap_release AO_INLINE AO_t AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val, AO_t new_val) { AO_t result; AO_lwsync(); result = AO_fetch_compare_and_swap(addr, old_val, new_val); if (result == old_val) AO_lwsync(); return result; } #define AO_HAVE_fetch_compare_and_swap_full #ifndef AO_PREFER_GENERALIZED AO_INLINE AO_t AO_fetch_and_add(volatile AO_t *addr, AO_t incr) { AO_t oldval; AO_t newval; __asm__ __volatile__( AO_PPC_L("1") AO_PPC_LxARX " %0,0,%2\n" /* load and reserve */ "add %1,%0,%3\n" /* increment */ AO_PPC_STxCXd " %1,0,%2\n" /* store conditional */ "bne- " AO_PPC_BR_A("1b", "$-12") "\n" /* retry if lost reservation */ : "=&r"(oldval), "=&r"(newval) : "r"(addr), "r"(incr) : "memory", "cr0"); return oldval; } #define AO_HAVE_fetch_and_add AO_INLINE AO_t AO_fetch_and_add_acquire(volatile AO_t *addr, AO_t incr) { AO_t result = AO_fetch_and_add(addr, incr); AO_lwsync(); return result; } #define AO_HAVE_fetch_and_add_acquire AO_INLINE AO_t AO_fetch_and_add_release(volatile AO_t *addr, AO_t incr) { AO_lwsync(); return AO_fetch_and_add(addr, incr); } #define AO_HAVE_fetch_and_add_release AO_INLINE AO_t AO_fetch_and_add_full(volatile AO_t *addr, AO_t incr) { AO_t result; AO_lwsync(); result = AO_fetch_and_add(addr, incr); AO_lwsync(); return result; } #define AO_HAVE_fetch_and_add_full #endif /* !AO_PREFER_GENERALIZED */ #undef AO_PPC_BR_A #undef AO_PPC_CMPx #undef AO_PPC_L #undef AO_PPC_LD #undef AO_PPC_LOAD_CLOBBER #undef AO_PPC_LxARX #undef AO_PPC_STxCXd #endif /* AO_DISABLE_GCC_ATOMICS */ libatomic_ops-7.6.12/src/atomic_ops/sysdeps/gcc/riscv.h000066400000000000000000000022301411761111000230720ustar00rootroot00000000000000/* * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. 
* Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. */ #if defined(__clang__) || defined(AO_PREFER_BUILTIN_ATOMICS) /* All __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n macros are still missing. */ /* The operations are lock-free even for the types smaller than word. */ # define AO_GCC_FORCE_HAVE_CAS #else /* As of gcc-7.5, CAS and arithmetic atomic operations for char and */ /* short are supported by the compiler but require -latomic flag. */ # if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1) # define AO_NO_char_ARITHM # endif # if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2) # define AO_NO_short_ARITHM # endif #endif /* !__clang__ */ #include "generic.h" #undef AO_GCC_FORCE_HAVE_CAS #undef AO_NO_char_ARITHM #undef AO_NO_short_ARITHM libatomic_ops-7.6.12/src/atomic_ops/sysdeps/gcc/s390.h000066400000000000000000000063371411761111000224560ustar00rootroot00000000000000/* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. * */ #if (AO_GNUC_PREREQ(5, 4) || AO_CLANG_PREREQ(8, 0)) && defined(__s390x__) \ && !defined(AO_DISABLE_GCC_ATOMICS) /* Probably, it could be enabled for earlier clang/gcc versions. */ /* But, e.g., clang-3.8.0 produces a backend error for AtomicFence. 
*/ # include "generic.h" #else /* AO_DISABLE_GCC_ATOMICS */ /* The relevant documentation appears to be at */ /* http://publibz.boulder.ibm.com/epubs/pdf/dz9zr003.pdf */ /* around page 5-96. Apparently: */ /* - Memory references in general are atomic only for a single */ /* byte. But it appears that the most common load/store */ /* instructions also guarantee atomicity for aligned */ /* operands of standard types. WE FOOLISHLY ASSUME that */ /* compilers only generate those. If that turns out to be */ /* wrong, we need inline assembly code for AO_load and */ /* AO_store. */ /* - A store followed by a load is unordered since the store */ /* may be delayed. Otherwise everything is ordered. */ /* - There is a hardware compare-and-swap (CS) instruction. */ #include "../all_aligned_atomic_load_store.h" #include "../ordered_except_wr.h" #include "../test_and_set_t_is_ao_t.h" /* TODO: Is there a way to do byte-sized test-and-set? */ /* TODO: AO_nop_full should probably be implemented directly. */ /* It appears that certain BCR instructions have that effect. */ /* Presumably they're cheaper than CS? */ #ifndef AO_GENERALIZE_ASM_BOOL_CAS AO_INLINE int AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) { int retval; __asm__ __volatile__ ( # ifndef __s390x__ " cs %1,%2,0(%3)\n" # else " csg %1,%2,0(%3)\n" # endif " ipm %0\n" " srl %0,28\n" : "=&d" (retval), "+d" (old) : "d" (new_val), "a" (addr) : "cc", "memory"); return retval == 0; } #define AO_HAVE_compare_and_swap_full #endif /* !AO_GENERALIZE_ASM_BOOL_CAS */ AO_INLINE AO_t AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) { __asm__ __volatile__ ( # ifndef __s390x__ " cs %0,%2,%1\n" # else " csg %0,%2,%1\n" # endif : "+d" (old), "=Q" (*addr) : "d" (new_val), "m" (*addr) : "cc", "memory"); return old; } #define AO_HAVE_fetch_compare_and_swap_full #endif /* AO_DISABLE_GCC_ATOMICS */ /* TODO: Add double-wide operations for 32-bit executables. 
*/ libatomic_ops-7.6.12/src/atomic_ops/sysdeps/gcc/sh.h000066400000000000000000000017451411761111000223700ustar00rootroot00000000000000/* * Copyright (c) 2009 by Takashi YOSHII. All rights reserved. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. */ #include "../all_atomic_load_store.h" #include "../ordered.h" /* sh has tas.b(byte) only */ #include "../test_and_set_t_is_char.h" AO_INLINE AO_TS_VAL_t AO_test_and_set_full(volatile AO_TS_t *addr) { int oldval; __asm__ __volatile__( "tas.b @%1; movt %0" : "=r" (oldval) : "r" (addr) : "t", "memory"); return oldval? AO_TS_CLEAR : AO_TS_SET; } #define AO_HAVE_test_and_set_full /* TODO: Very incomplete. */ libatomic_ops-7.6.12/src/atomic_ops/sysdeps/gcc/sparc.h000066400000000000000000000062701411761111000230640ustar00rootroot00000000000000/* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved. * * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. * */ /* TODO: Very incomplete; Add support for sparc64. */ /* Non-ancient SPARCs provide compare-and-swap (casa). 
*/ #include "../all_atomic_load_store.h" /* Real SPARC code uses TSO: */ #include "../ordered_except_wr.h" /* Test_and_set location is just a byte. */ #include "../test_and_set_t_is_char.h" AO_INLINE AO_TS_VAL_t AO_test_and_set_full(volatile AO_TS_t *addr) { AO_TS_VAL_t oldval; __asm__ __volatile__("ldstub %1,%0" : "=r"(oldval), "=m"(*addr) : "m"(*addr) : "memory"); return oldval; } #define AO_HAVE_test_and_set_full #ifndef AO_NO_SPARC_V9 # ifndef AO_GENERALIZE_ASM_BOOL_CAS /* Returns nonzero if the comparison succeeded. */ AO_INLINE int AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) { AO_t ret; __asm__ __volatile__ ("membar #StoreLoad | #LoadLoad\n\t" # if defined(__arch64__) "casx [%2],%0,%1\n\t" # else "cas [%2],%0,%1\n\t" /* 32-bit version */ # endif "membar #StoreLoad | #StoreStore\n\t" "cmp %0,%1\n\t" "be,a 0f\n\t" "mov 1,%0\n\t"/* one insn after branch always executed */ "clr %0\n\t" "0:\n\t" : "=r" (ret), "+r" (new_val) : "r" (addr), "0" (old) : "memory", "cc"); return (int)ret; } # define AO_HAVE_compare_and_swap_full # endif /* !AO_GENERALIZE_ASM_BOOL_CAS */ AO_INLINE AO_t AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) { __asm__ __volatile__ ("membar #StoreLoad | #LoadLoad\n\t" # if defined(__arch64__) "casx [%1],%2,%0\n\t" # else "cas [%1],%2,%0\n\t" /* 32-bit version */ # endif "membar #StoreLoad | #StoreStore\n\t" : "+r" (new_val) : "r" (addr), "r" (old) : "memory"); return new_val; } #define AO_HAVE_fetch_compare_and_swap_full #endif /* !AO_NO_SPARC_V9 */ /* TODO: Extend this for SPARC v8 and v9 (V8 also has swap, V9 has CAS, */ /* there are barriers like membar #LoadStore, CASA (32-bit) and */ /* CASXA (64-bit) instructions added in V9). */ libatomic_ops-7.6.12/src/atomic_ops/sysdeps/gcc/tile.h000066400000000000000000000025101411761111000227020ustar00rootroot00000000000000/* * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. 
* * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. */ /* Minimal support for tile. */ #if (AO_GNUC_PREREQ(4, 8) || AO_CLANG_PREREQ(3, 4)) \ && !defined(AO_DISABLE_GCC_ATOMICS) # include "generic.h" #else /* AO_DISABLE_GCC_ATOMICS */ # include "../all_atomic_load_store.h" # include "../test_and_set_t_is_ao_t.h" AO_INLINE void AO_nop_full(void) { __sync_synchronize(); } # define AO_HAVE_nop_full AO_INLINE AO_t AO_fetch_and_add_full(volatile AO_t *p, AO_t incr) { return __sync_fetch_and_add(p, incr); } # define AO_HAVE_fetch_and_add_full AO_INLINE AO_t AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val, AO_t new_val) { return __sync_val_compare_and_swap(addr, old_val, new_val /* empty protection list */); } # define AO_HAVE_fetch_compare_and_swap_full #endif /* AO_DISABLE_GCC_ATOMICS */ libatomic_ops-7.6.12/src/atomic_ops/sysdeps/gcc/x86.h000066400000000000000000000617111411761111000224020ustar00rootroot00000000000000/* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved. * Copyright (c) 2008-2018 Ivan Maidanski * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. 
* * Some of the machine specific code was borrowed from our GC distribution. */ #if (AO_GNUC_PREREQ(4, 8) || AO_CLANG_PREREQ(3, 4)) \ && !defined(__INTEL_COMPILER) /* TODO: test and enable icc */ \ && !defined(AO_DISABLE_GCC_ATOMICS) # define AO_GCC_ATOMIC_TEST_AND_SET # if defined(__APPLE_CC__) /* OS X 10.7 clang-425 lacks __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n */ /* predefined macro (unlike e.g. OS X 10.11 clang-703). */ # define AO_GCC_FORCE_HAVE_CAS # ifdef __x86_64__ # if !AO_CLANG_PREREQ(9, 0) /* < Apple clang-900 */ /* Older Apple clang (e.g., clang-600 based on LLVM 3.5svn) had */ /* some bug in the double word CAS implementation for x64. */ # define AO_SKIPATOMIC_double_compare_and_swap_ANY # endif # elif defined(__MACH__) /* OS X 10.8 lacks __atomic_load/store symbols for arch i386 */ /* (even with a non-Apple clang). */ # ifndef MAC_OS_X_VERSION_MIN_REQUIRED /* Include this header just to import the version macro. */ # include # endif # if MAC_OS_X_VERSION_MIN_REQUIRED < 1090 /* MAC_OS_X_VERSION_10_9 */ # define AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY # endif # endif /* __i386__ */ # elif defined(__clang__) # if !defined(__x86_64__) # if !defined(AO_PREFER_BUILTIN_ATOMICS) && !defined(__CYGWIN__) \ && !AO_CLANG_PREREQ(5, 0) /* At least clang-3.8/i686 (from NDK r11c) required to specify */ /* -latomic in case of a double-word atomic operation use. */ # define AO_SKIPATOMIC_double_compare_and_swap_ANY # define AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY # endif /* !AO_PREFER_BUILTIN_ATOMICS */ # elif !defined(__ILP32__) # if (!AO_CLANG_PREREQ(3, 5) && !defined(AO_PREFER_BUILTIN_ATOMICS)) \ || (!AO_CLANG_PREREQ(4, 0) && defined(AO_ADDRESS_SANITIZER)) \ || defined(AO_THREAD_SANITIZER) /* clang-3.4/x64 required -latomic. clang-3.9/x64 seems to */ /* pass double-wide arguments to atomic operations incorrectly */ /* in case of ASan/TSan. */ /* TODO: As of clang-4.0, lock-free test_stack fails if TSan. 
*/ # define AO_SKIPATOMIC_double_compare_and_swap_ANY # define AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY # endif # endif /* __x86_64__ */ # elif AO_GNUC_PREREQ(7, 0) && !defined(AO_PREFER_BUILTIN_ATOMICS) \ && !defined(AO_THREAD_SANITIZER) && !defined(__MINGW32__) /* gcc-7.x/x64 (gcc-7.2, at least) requires -latomic flag in case */ /* of double-word atomic operations use (but not in case of TSan). */ /* TODO: Revise it for the future gcc-7 releases. */ # define AO_SKIPATOMIC_double_compare_and_swap_ANY # define AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY # endif /* __GNUC__ && !__clang__ */ # ifdef AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY # define AO_SKIPATOMIC_double_load # define AO_SKIPATOMIC_double_load_acquire # define AO_SKIPATOMIC_double_store # define AO_SKIPATOMIC_double_store_release # undef AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY # endif #else /* AO_DISABLE_GCC_ATOMICS */ /* The following really assume we have a 486 or better. Unfortunately */ /* gcc doesn't define a suitable feature test macro based on command */ /* line options. */ /* We should perhaps test dynamically. */ #include "../all_aligned_atomic_load_store.h" #include "../test_and_set_t_is_char.h" #if defined(__SSE2__) && !defined(AO_USE_PENTIUM4_INSTRS) /* "mfence" is a part of SSE2 set (introduced on Intel Pentium 4). */ # define AO_USE_PENTIUM4_INSTRS #endif #if defined(AO_USE_PENTIUM4_INSTRS) AO_INLINE void AO_nop_full(void) { __asm__ __volatile__("mfence" : : : "memory"); } # define AO_HAVE_nop_full #else /* We could use the cpuid instruction. But that seems to be slower */ /* than the default implementation based on test_and_set_full. Thus */ /* we omit that bit of misinformation here. */ #endif /* !AO_USE_PENTIUM4_INSTRS */ /* As far as we can tell, the lfence and sfence instructions are not */ /* currently needed or useful for cached memory accesses. 
*/ /* Really only works for 486 and later */ #ifndef AO_PREFER_GENERALIZED AO_INLINE AO_t AO_fetch_and_add_full (volatile AO_t *p, AO_t incr) { AO_t result; __asm__ __volatile__ ("lock; xadd %0, %1" : "=r" (result), "+m" (*p) : "0" (incr) : "memory"); return result; } # define AO_HAVE_fetch_and_add_full #endif /* !AO_PREFER_GENERALIZED */ AO_INLINE unsigned char AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr) { unsigned char result; __asm__ __volatile__ ("lock; xaddb %0, %1" : "=q" (result), "+m" (*p) : "0" (incr) : "memory"); return result; } #define AO_HAVE_char_fetch_and_add_full AO_INLINE unsigned short AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr) { unsigned short result; __asm__ __volatile__ ("lock; xaddw %0, %1" : "=r" (result), "+m" (*p) : "0" (incr) : "memory"); return result; } #define AO_HAVE_short_fetch_and_add_full #ifndef AO_PREFER_GENERALIZED AO_INLINE void AO_and_full (volatile AO_t *p, AO_t value) { __asm__ __volatile__ ("lock; and %1, %0" : "+m" (*p) : "r" (value) : "memory"); } # define AO_HAVE_and_full AO_INLINE void AO_or_full (volatile AO_t *p, AO_t value) { __asm__ __volatile__ ("lock; or %1, %0" : "+m" (*p) : "r" (value) : "memory"); } # define AO_HAVE_or_full AO_INLINE void AO_xor_full (volatile AO_t *p, AO_t value) { __asm__ __volatile__ ("lock; xor %1, %0" : "+m" (*p) : "r" (value) : "memory"); } # define AO_HAVE_xor_full /* AO_store_full could be implemented directly using "xchg" but it */ /* could be generalized efficiently as an ordinary store accomplished */ /* with AO_nop_full ("mfence" instruction). 
*/ AO_INLINE void AO_char_and_full (volatile unsigned char *p, unsigned char value) { __asm__ __volatile__ ("lock; andb %1, %0" : "+m" (*p) : "r" (value) : "memory"); } #define AO_HAVE_char_and_full AO_INLINE void AO_char_or_full (volatile unsigned char *p, unsigned char value) { __asm__ __volatile__ ("lock; orb %1, %0" : "+m" (*p) : "r" (value) : "memory"); } #define AO_HAVE_char_or_full AO_INLINE void AO_char_xor_full (volatile unsigned char *p, unsigned char value) { __asm__ __volatile__ ("lock; xorb %1, %0" : "+m" (*p) : "r" (value) : "memory"); } #define AO_HAVE_char_xor_full AO_INLINE void AO_short_and_full (volatile unsigned short *p, unsigned short value) { __asm__ __volatile__ ("lock; andw %1, %0" : "+m" (*p) : "r" (value) : "memory"); } #define AO_HAVE_short_and_full AO_INLINE void AO_short_or_full (volatile unsigned short *p, unsigned short value) { __asm__ __volatile__ ("lock; orw %1, %0" : "+m" (*p) : "r" (value) : "memory"); } #define AO_HAVE_short_or_full AO_INLINE void AO_short_xor_full (volatile unsigned short *p, unsigned short value) { __asm__ __volatile__ ("lock; xorw %1, %0" : "+m" (*p) : "r" (value) : "memory"); } #define AO_HAVE_short_xor_full #endif /* !AO_PREFER_GENERALIZED */ AO_INLINE AO_TS_VAL_t AO_test_and_set_full(volatile AO_TS_t *addr) { unsigned char oldval; /* Note: the "xchg" instruction does not need a "lock" prefix */ __asm__ __volatile__ ("xchgb %0, %1" : "=q" (oldval), "+m" (*addr) : "0" ((unsigned char)0xff) : "memory"); return (AO_TS_VAL_t)oldval; } #define AO_HAVE_test_and_set_full #ifndef AO_GENERALIZE_ASM_BOOL_CAS /* Returns nonzero if the comparison succeeded. */ AO_INLINE int AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) { # ifdef AO_USE_SYNC_CAS_BUILTIN return (int)__sync_bool_compare_and_swap(addr, old, new_val /* empty protection list */); /* Note: an empty list of variables protected by the */ /* memory barrier should mean all globally accessible */ /* variables are protected. 
*/ # else char result; # if defined(__GCC_ASM_FLAG_OUTPUTS__) AO_t dummy; __asm__ __volatile__ ("lock; cmpxchg %3, %0" : "+m" (*addr), "=@ccz" (result), "=a" (dummy) : "r" (new_val), "a" (old) : "memory"); # else __asm__ __volatile__ ("lock; cmpxchg %2, %0; setz %1" : "+m" (*addr), "=a" (result) : "r" (new_val), "a" (old) : "memory"); # endif return (int)result; # endif } # define AO_HAVE_compare_and_swap_full #endif /* !AO_GENERALIZE_ASM_BOOL_CAS */ AO_INLINE AO_t AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val, AO_t new_val) { # ifdef AO_USE_SYNC_CAS_BUILTIN return __sync_val_compare_and_swap(addr, old_val, new_val /* empty protection list */); # else AO_t fetched_val; __asm__ __volatile__ ("lock; cmpxchg %3, %1" : "=a" (fetched_val), "+m" (*addr) : "a" (old_val), "r" (new_val) : "memory"); return fetched_val; # endif } #define AO_HAVE_fetch_compare_and_swap_full AO_INLINE unsigned char AO_char_fetch_compare_and_swap_full(volatile unsigned char *addr, unsigned char old_val, unsigned char new_val) { # ifdef AO_USE_SYNC_CAS_BUILTIN return __sync_val_compare_and_swap(addr, old_val, new_val /* empty protection list */); # else unsigned char fetched_val; __asm__ __volatile__ ("lock; cmpxchgb %3, %1" : "=a" (fetched_val), "+m" (*addr) : "a" (old_val), "q" (new_val) : "memory"); return fetched_val; # endif } # define AO_HAVE_char_fetch_compare_and_swap_full AO_INLINE unsigned short AO_short_fetch_compare_and_swap_full(volatile unsigned short *addr, unsigned short old_val, unsigned short new_val) { # ifdef AO_USE_SYNC_CAS_BUILTIN return __sync_val_compare_and_swap(addr, old_val, new_val /* empty protection list */); # else unsigned short fetched_val; __asm__ __volatile__ ("lock; cmpxchgw %3, %1" : "=a" (fetched_val), "+m" (*addr) : "a" (old_val), "r" (new_val) : "memory"); return fetched_val; # endif } # define AO_HAVE_short_fetch_compare_and_swap_full # if defined(__x86_64__) && !defined(__ILP32__) AO_INLINE unsigned int 
AO_int_fetch_compare_and_swap_full(volatile unsigned int *addr, unsigned int old_val, unsigned int new_val) { # ifdef AO_USE_SYNC_CAS_BUILTIN return __sync_val_compare_and_swap(addr, old_val, new_val /* empty protection list */); # else unsigned int fetched_val; __asm__ __volatile__ ("lock; cmpxchgl %3, %1" : "=a" (fetched_val), "+m" (*addr) : "a" (old_val), "r" (new_val) : "memory"); return fetched_val; # endif } # define AO_HAVE_int_fetch_compare_and_swap_full # ifndef AO_PREFER_GENERALIZED AO_INLINE unsigned int AO_int_fetch_and_add_full (volatile unsigned int *p, unsigned int incr) { unsigned int result; __asm__ __volatile__ ("lock; xaddl %0, %1" : "=r" (result), "+m" (*p) : "0" (incr) : "memory"); return result; } # define AO_HAVE_int_fetch_and_add_full AO_INLINE void AO_int_and_full (volatile unsigned int *p, unsigned int value) { __asm__ __volatile__ ("lock; andl %1, %0" : "+m" (*p) : "r" (value) : "memory"); } # define AO_HAVE_int_and_full AO_INLINE void AO_int_or_full (volatile unsigned int *p, unsigned int value) { __asm__ __volatile__ ("lock; orl %1, %0" : "+m" (*p) : "r" (value) : "memory"); } # define AO_HAVE_int_or_full AO_INLINE void AO_int_xor_full (volatile unsigned int *p, unsigned int value) { __asm__ __volatile__ ("lock; xorl %1, %0" : "+m" (*p) : "r" (value) : "memory"); } # define AO_HAVE_int_xor_full # endif /* !AO_PREFER_GENERALIZED */ # else # define AO_T_IS_INT # endif /* !x86_64 || ILP32 */ /* Real X86 implementations, except for some old 32-bit WinChips, */ /* appear to enforce ordering between memory operations, EXCEPT that */ /* a later read can pass earlier writes, presumably due to the */ /* visible presence of store buffers. */ /* We ignore both the WinChips and the fact that the official specs */ /* seem to be much weaker (and arguably too weak to be usable). 
*/ # include "../ordered_except_wr.h" #endif /* AO_DISABLE_GCC_ATOMICS */ #if defined(AO_GCC_ATOMIC_TEST_AND_SET) \ && !defined(AO_SKIPATOMIC_double_compare_and_swap_ANY) # if defined(__ILP32__) || !defined(__x86_64__) /* 32-bit AO_t */ \ || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) /* 64-bit AO_t */ # include "../standard_ao_double_t.h" # endif #elif !defined(__x86_64__) && (!defined(AO_USE_SYNC_CAS_BUILTIN) \ || defined(AO_GCC_ATOMIC_TEST_AND_SET)) # include "../standard_ao_double_t.h" /* Reading or writing a quadword aligned on a 64-bit boundary is */ /* always carried out atomically on at least a Pentium according to */ /* Chapter 8.1.1 of Volume 3A Part 1 of Intel processor manuals. */ # ifndef AO_PREFER_GENERALIZED # define AO_ACCESS_double_CHECK_ALIGNED # include "../loadstore/double_atomic_load_store.h" # endif /* Returns nonzero if the comparison succeeded. */ /* Really requires at least a Pentium. */ AO_INLINE int AO_compare_double_and_swap_double_full(volatile AO_double_t *addr, AO_t old_val1, AO_t old_val2, AO_t new_val1, AO_t new_val2) { char result; # if defined(__PIC__) && !(AO_GNUC_PREREQ(5, 1) || AO_CLANG_PREREQ(4, 0)) AO_t saved_ebx; AO_t dummy; /* The following applies to an ancient GCC (and, probably, it was */ /* never needed for Clang): */ /* If PIC is turned on, we cannot use ebx as it is reserved for the */ /* GOT pointer. We should save and restore ebx. The proposed */ /* solution is not so efficient as the older alternatives using */ /* push ebx or edi as new_val1 (w/o clobbering edi and temporary */ /* local variable usage) but it is more portable (it works even if */ /* ebx is not used as GOT pointer, and it works for the buggy GCC */ /* releases that incorrectly evaluate memory operands offset in the */ /* inline assembly after push). 
*/ # ifdef __OPTIMIZE__ __asm__ __volatile__("mov %%ebx, %2\n\t" /* save ebx */ "lea %0, %%edi\n\t" /* in case addr is in ebx */ "mov %7, %%ebx\n\t" /* load new_val1 */ "lock; cmpxchg8b (%%edi)\n\t" "mov %2, %%ebx\n\t" /* restore ebx */ "setz %1" : "+m" (*addr), "=a" (result), "=m" (saved_ebx), "=d" (dummy) : "d" (old_val2), "a" (old_val1), "c" (new_val2), "m" (new_val1) : "%edi", "memory"); # else /* A less-efficient code manually preserving edi if GCC invoked */ /* with -O0 option (otherwise it fails while finding a register */ /* in class 'GENERAL_REGS'). */ AO_t saved_edi; __asm__ __volatile__("mov %%edi, %3\n\t" /* save edi */ "mov %%ebx, %2\n\t" /* save ebx */ "lea %0, %%edi\n\t" /* in case addr is in ebx */ "mov %8, %%ebx\n\t" /* load new_val1 */ "lock; cmpxchg8b (%%edi)\n\t" "mov %2, %%ebx\n\t" /* restore ebx */ "mov %3, %%edi\n\t" /* restore edi */ "setz %1" : "+m" (*addr), "=a" (result), "=m" (saved_ebx), "=m" (saved_edi), "=d" (dummy) : "d" (old_val2), "a" (old_val1), "c" (new_val2), "m" (new_val1) : "memory"); # endif # else /* For non-PIC mode, this operation could be simplified (and be */ /* faster) by using ebx as new_val1. Reuse of the PIC hard */ /* register, instead of using a fixed register, is implemented */ /* in Clang and GCC 5.1+, at least. (Older GCC refused to compile */ /* such code for PIC mode). 
*/ # if defined(__GCC_ASM_FLAG_OUTPUTS__) __asm__ __volatile__ ("lock; cmpxchg8b %0" : "+m" (*addr), "=@ccz" (result), "+d" (old_val2), "+a" (old_val1) : "c" (new_val2), "b" (new_val1) : "memory"); # else AO_t dummy; /* an output for clobbered edx */ __asm__ __volatile__ ("lock; cmpxchg8b %0; setz %1" : "+m" (*addr), "=a" (result), "=d" (dummy) : "d" (old_val2), "a" (old_val1), "c" (new_val2), "b" (new_val1) : "memory"); # endif # endif return (int) result; } # define AO_HAVE_compare_double_and_swap_double_full #elif defined(__ILP32__) || !defined(__x86_64__) # include "../standard_ao_double_t.h" /* Reading or writing a quadword aligned on a 64-bit boundary is */ /* always carried out atomically (requires at least a Pentium). */ # ifndef AO_PREFER_GENERALIZED # define AO_ACCESS_double_CHECK_ALIGNED # include "../loadstore/double_atomic_load_store.h" # endif /* X32 has native support for 64-bit integer operations (AO_double_t */ /* is a 64-bit integer and we could use 64-bit cmpxchg). */ /* This primitive is used by compare_double_and_swap_double_full. */ AO_INLINE int AO_double_compare_and_swap_full(volatile AO_double_t *addr, AO_double_t old_val, AO_double_t new_val) { /* It is safe to use __sync CAS built-in here. 
*/ return __sync_bool_compare_and_swap(&addr->AO_whole, old_val.AO_whole, new_val.AO_whole /* empty protection list */); } # define AO_HAVE_double_compare_and_swap_full #elif defined(AO_CMPXCHG16B_AVAILABLE) \ || (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) \ && !defined(AO_THREAD_SANITIZER)) # include "../standard_ao_double_t.h" /* The Intel and AMD Architecture Programmer Manuals state roughly */ /* the following: */ /* - CMPXCHG16B (with a LOCK prefix) can be used to perform 16-byte */ /* atomic accesses in 64-bit mode (with certain alignment */ /* restrictions); */ /* - SSE instructions that access data larger than a quadword (like */ /* MOVDQA) may be implemented using multiple memory accesses; */ /* - LOCK prefix causes an invalid-opcode exception when used with */ /* 128-bit media (SSE) instructions. */ /* Thus, currently, the only way to implement lock-free double_load */ /* and double_store on x86_64 is to use CMPXCHG16B (if available). */ /* NEC LE-IT: older AMD Opterons are missing this instruction. */ /* On these machines SIGILL will be thrown. */ /* Define AO_WEAK_DOUBLE_CAS_EMULATION to have an emulated (lock */ /* based) version available. */ /* HB: Changed this to not define either by default. There are */ /* enough machines and tool chains around on which cmpxchg16b */ /* doesn't work. And the emulation is unsafe by our usual rules. */ /* However both are clearly useful in certain cases. 
*/ AO_INLINE int AO_compare_double_and_swap_double_full(volatile AO_double_t *addr, AO_t old_val1, AO_t old_val2, AO_t new_val1, AO_t new_val2) { char result; # if defined(__GCC_ASM_FLAG_OUTPUTS__) __asm__ __volatile__("lock; cmpxchg16b %0" : "+m" (*addr), "=@ccz" (result), "+d" (old_val2), "+a" (old_val1) : "c" (new_val2), "b" (new_val1) : "memory"); # else AO_t dummy; /* an output for clobbered rdx */ __asm__ __volatile__("lock; cmpxchg16b %0; setz %1" : "+m" (*addr), "=a" (result), "=d" (dummy) : "d" (old_val2), "a" (old_val1), "c" (new_val2), "b" (new_val1) : "memory"); # endif return (int) result; } # define AO_HAVE_compare_double_and_swap_double_full #elif defined(AO_WEAK_DOUBLE_CAS_EMULATION) # include "../standard_ao_double_t.h" # ifdef __cplusplus extern "C" { # endif /* This one provides spinlock based emulation of CAS implemented in */ /* atomic_ops.c. We probably do not want to do this here, since it */ /* is not atomic with respect to other kinds of updates of *addr. */ /* On the other hand, this may be a useful facility on occasion. 
*/ int AO_compare_double_and_swap_double_emulation( volatile AO_double_t *addr, AO_t old_val1, AO_t old_val2, AO_t new_val1, AO_t new_val2); # ifdef __cplusplus } /* extern "C" */ # endif AO_INLINE int AO_compare_double_and_swap_double_full(volatile AO_double_t *addr, AO_t old_val1, AO_t old_val2, AO_t new_val1, AO_t new_val2) { return AO_compare_double_and_swap_double_emulation(addr, old_val1, old_val2, new_val1, new_val2); } # define AO_HAVE_compare_double_and_swap_double_full #endif /* x86_64 && !ILP32 && CAS_EMULATION && !AO_CMPXCHG16B_AVAILABLE */ #ifdef AO_GCC_ATOMIC_TEST_AND_SET # include "generic.h" #endif #undef AO_GCC_FORCE_HAVE_CAS #undef AO_SKIPATOMIC_double_compare_and_swap_ANY #undef AO_SKIPATOMIC_double_load #undef AO_SKIPATOMIC_double_load_acquire #undef AO_SKIPATOMIC_double_store #undef AO_SKIPATOMIC_double_store_release libatomic_ops-7.6.12/src/atomic_ops/sysdeps/generic_pthread.h000066400000000000000000000261151411761111000243430ustar00rootroot00000000000000/* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* The following is useful primarily for debugging and documentation. */ /* We define various atomic operations by acquiring a global pthread */ /* lock. The resulting implementation will perform poorly, but should */ /* be correct unless it is used from signal handlers. */ /* We assume that all pthread operations act like full memory barriers. */ /* (We believe that is the intent of the specification.) */ #include #include "test_and_set_t_is_ao_t.h" /* This is not necessarily compatible with the native */ /* implementation. But those can't be safely mixed anyway. */ #ifdef __cplusplus extern "C" { #endif /* We define only the full barrier variants, and count on the */ /* generalization section below to fill in the rest. */ extern pthread_mutex_t AO_pt_lock; #ifdef __cplusplus } /* extern "C" */ #endif AO_INLINE void AO_nop_full(void) { pthread_mutex_lock(&AO_pt_lock); pthread_mutex_unlock(&AO_pt_lock); } #define AO_HAVE_nop_full AO_INLINE AO_t AO_load_full(const volatile AO_t *addr) { AO_t result; pthread_mutex_lock(&AO_pt_lock); result = *addr; pthread_mutex_unlock(&AO_pt_lock); return result; } #define AO_HAVE_load_full AO_INLINE void AO_store_full(volatile AO_t *addr, AO_t val) { pthread_mutex_lock(&AO_pt_lock); *addr = val; pthread_mutex_unlock(&AO_pt_lock); } #define AO_HAVE_store_full AO_INLINE unsigned char AO_char_load_full(const volatile unsigned char *addr) { unsigned char result; pthread_mutex_lock(&AO_pt_lock); result = *addr; pthread_mutex_unlock(&AO_pt_lock); return result; } #define AO_HAVE_char_load_full AO_INLINE void AO_char_store_full(volatile unsigned char *addr, unsigned char val) { pthread_mutex_lock(&AO_pt_lock); *addr = val; pthread_mutex_unlock(&AO_pt_lock); } #define 
AO_HAVE_char_store_full AO_INLINE unsigned short AO_short_load_full(const volatile unsigned short *addr) { unsigned short result; pthread_mutex_lock(&AO_pt_lock); result = *addr; pthread_mutex_unlock(&AO_pt_lock); return result; } #define AO_HAVE_short_load_full AO_INLINE void AO_short_store_full(volatile unsigned short *addr, unsigned short val) { pthread_mutex_lock(&AO_pt_lock); *addr = val; pthread_mutex_unlock(&AO_pt_lock); } #define AO_HAVE_short_store_full AO_INLINE unsigned int AO_int_load_full(const volatile unsigned int *addr) { unsigned int result; pthread_mutex_lock(&AO_pt_lock); result = *addr; pthread_mutex_unlock(&AO_pt_lock); return result; } #define AO_HAVE_int_load_full AO_INLINE void AO_int_store_full(volatile unsigned int *addr, unsigned int val) { pthread_mutex_lock(&AO_pt_lock); *addr = val; pthread_mutex_unlock(&AO_pt_lock); } #define AO_HAVE_int_store_full AO_INLINE AO_TS_VAL_t AO_test_and_set_full(volatile AO_TS_t *addr) { AO_TS_VAL_t result; pthread_mutex_lock(&AO_pt_lock); result = (AO_TS_VAL_t)(*addr); *addr = AO_TS_SET; pthread_mutex_unlock(&AO_pt_lock); assert(result == AO_TS_SET || result == AO_TS_CLEAR); return result; } #define AO_HAVE_test_and_set_full AO_INLINE AO_t AO_fetch_and_add_full(volatile AO_t *p, AO_t incr) { AO_t old_val; pthread_mutex_lock(&AO_pt_lock); old_val = *p; *p = old_val + incr; pthread_mutex_unlock(&AO_pt_lock); return old_val; } #define AO_HAVE_fetch_and_add_full AO_INLINE unsigned char AO_char_fetch_and_add_full(volatile unsigned char *p, unsigned char incr) { unsigned char old_val; pthread_mutex_lock(&AO_pt_lock); old_val = *p; *p = old_val + incr; pthread_mutex_unlock(&AO_pt_lock); return old_val; } #define AO_HAVE_char_fetch_and_add_full AO_INLINE unsigned short AO_short_fetch_and_add_full(volatile unsigned short *p, unsigned short incr) { unsigned short old_val; pthread_mutex_lock(&AO_pt_lock); old_val = *p; *p = old_val + incr; pthread_mutex_unlock(&AO_pt_lock); return old_val; } #define 
AO_HAVE_short_fetch_and_add_full AO_INLINE unsigned int AO_int_fetch_and_add_full(volatile unsigned int *p, unsigned int incr) { unsigned int old_val; pthread_mutex_lock(&AO_pt_lock); old_val = *p; *p = old_val + incr; pthread_mutex_unlock(&AO_pt_lock); return old_val; } #define AO_HAVE_int_fetch_and_add_full AO_INLINE void AO_and_full(volatile AO_t *p, AO_t value) { pthread_mutex_lock(&AO_pt_lock); *p &= value; pthread_mutex_unlock(&AO_pt_lock); } #define AO_HAVE_and_full AO_INLINE void AO_or_full(volatile AO_t *p, AO_t value) { pthread_mutex_lock(&AO_pt_lock); *p |= value; pthread_mutex_unlock(&AO_pt_lock); } #define AO_HAVE_or_full AO_INLINE void AO_xor_full(volatile AO_t *p, AO_t value) { pthread_mutex_lock(&AO_pt_lock); *p ^= value; pthread_mutex_unlock(&AO_pt_lock); } #define AO_HAVE_xor_full AO_INLINE void AO_char_and_full(volatile unsigned char *p, unsigned char value) { pthread_mutex_lock(&AO_pt_lock); *p &= value; pthread_mutex_unlock(&AO_pt_lock); } #define AO_HAVE_char_and_full AO_INLINE void AO_char_or_full(volatile unsigned char *p, unsigned char value) { pthread_mutex_lock(&AO_pt_lock); *p |= value; pthread_mutex_unlock(&AO_pt_lock); } #define AO_HAVE_char_or_full AO_INLINE void AO_char_xor_full(volatile unsigned char *p, unsigned char value) { pthread_mutex_lock(&AO_pt_lock); *p ^= value; pthread_mutex_unlock(&AO_pt_lock); } #define AO_HAVE_char_xor_full AO_INLINE void AO_short_and_full(volatile unsigned short *p, unsigned short value) { pthread_mutex_lock(&AO_pt_lock); *p &= value; pthread_mutex_unlock(&AO_pt_lock); } #define AO_HAVE_short_and_full AO_INLINE void AO_short_or_full(volatile unsigned short *p, unsigned short value) { pthread_mutex_lock(&AO_pt_lock); *p |= value; pthread_mutex_unlock(&AO_pt_lock); } #define AO_HAVE_short_or_full AO_INLINE void AO_short_xor_full(volatile unsigned short *p, unsigned short value) { pthread_mutex_lock(&AO_pt_lock); *p ^= value; pthread_mutex_unlock(&AO_pt_lock); } #define AO_HAVE_short_xor_full AO_INLINE 
void AO_int_and_full(volatile unsigned *p, unsigned value) { pthread_mutex_lock(&AO_pt_lock); *p &= value; pthread_mutex_unlock(&AO_pt_lock); } #define AO_HAVE_int_and_full AO_INLINE void AO_int_or_full(volatile unsigned *p, unsigned value) { pthread_mutex_lock(&AO_pt_lock); *p |= value; pthread_mutex_unlock(&AO_pt_lock); } #define AO_HAVE_int_or_full AO_INLINE void AO_int_xor_full(volatile unsigned *p, unsigned value) { pthread_mutex_lock(&AO_pt_lock); *p ^= value; pthread_mutex_unlock(&AO_pt_lock); } #define AO_HAVE_int_xor_full AO_INLINE AO_t AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val, AO_t new_val) { AO_t fetched_val; pthread_mutex_lock(&AO_pt_lock); fetched_val = *addr; if (fetched_val == old_val) *addr = new_val; pthread_mutex_unlock(&AO_pt_lock); return fetched_val; } #define AO_HAVE_fetch_compare_and_swap_full AO_INLINE unsigned char AO_char_fetch_compare_and_swap_full(volatile unsigned char *addr, unsigned char old_val, unsigned char new_val) { unsigned char fetched_val; pthread_mutex_lock(&AO_pt_lock); fetched_val = *addr; if (fetched_val == old_val) *addr = new_val; pthread_mutex_unlock(&AO_pt_lock); return fetched_val; } #define AO_HAVE_char_fetch_compare_and_swap_full AO_INLINE unsigned short AO_short_fetch_compare_and_swap_full(volatile unsigned short *addr, unsigned short old_val, unsigned short new_val) { unsigned short fetched_val; pthread_mutex_lock(&AO_pt_lock); fetched_val = *addr; if (fetched_val == old_val) *addr = new_val; pthread_mutex_unlock(&AO_pt_lock); return fetched_val; } #define AO_HAVE_short_fetch_compare_and_swap_full AO_INLINE unsigned AO_int_fetch_compare_and_swap_full(volatile unsigned *addr, unsigned old_val, unsigned new_val) { unsigned fetched_val; pthread_mutex_lock(&AO_pt_lock); fetched_val = *addr; if (fetched_val == old_val) *addr = new_val; pthread_mutex_unlock(&AO_pt_lock); return fetched_val; } #define AO_HAVE_int_fetch_compare_and_swap_full /* Unlike real architectures, we define both double-width 
CAS variants. */ typedef struct { AO_t AO_val1; AO_t AO_val2; } AO_double_t; #define AO_HAVE_double_t #define AO_DOUBLE_T_INITIALIZER { (AO_t)0, (AO_t)0 } AO_INLINE AO_double_t AO_double_load_full(const volatile AO_double_t *addr) { AO_double_t result; pthread_mutex_lock(&AO_pt_lock); result.AO_val1 = addr->AO_val1; result.AO_val2 = addr->AO_val2; pthread_mutex_unlock(&AO_pt_lock); return result; } #define AO_HAVE_double_load_full AO_INLINE void AO_double_store_full(volatile AO_double_t *addr, AO_double_t value) { pthread_mutex_lock(&AO_pt_lock); addr->AO_val1 = value.AO_val1; addr->AO_val2 = value.AO_val2; pthread_mutex_unlock(&AO_pt_lock); } #define AO_HAVE_double_store_full AO_INLINE int AO_compare_double_and_swap_double_full(volatile AO_double_t *addr, AO_t old1, AO_t old2, AO_t new1, AO_t new2) { pthread_mutex_lock(&AO_pt_lock); if (addr -> AO_val1 == old1 && addr -> AO_val2 == old2) { addr -> AO_val1 = new1; addr -> AO_val2 = new2; pthread_mutex_unlock(&AO_pt_lock); return 1; } else pthread_mutex_unlock(&AO_pt_lock); return 0; } #define AO_HAVE_compare_double_and_swap_double_full AO_INLINE int AO_compare_and_swap_double_full(volatile AO_double_t *addr, AO_t old1, AO_t new1, AO_t new2) { pthread_mutex_lock(&AO_pt_lock); if (addr -> AO_val1 == old1) { addr -> AO_val1 = new1; addr -> AO_val2 = new2; pthread_mutex_unlock(&AO_pt_lock); return 1; } else pthread_mutex_unlock(&AO_pt_lock); return 0; } #define AO_HAVE_compare_and_swap_double_full /* We can't use hardware loads and stores, since they don't */ /* interact correctly with atomic updates. */ libatomic_ops-7.6.12/src/atomic_ops/sysdeps/hpc/000077500000000000000000000000001411761111000216145ustar00rootroot00000000000000libatomic_ops-7.6.12/src/atomic_ops/sysdeps/hpc/hppa.h000066400000000000000000000104211411761111000227130ustar00rootroot00000000000000/* * Copyright (c) 2003 Hewlett-Packard Development Company, L.P. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Derived from the corresponding header file for gcc. */ #include "../loadstore/atomic_load.h" #include "../loadstore/atomic_store.h" /* Some architecture set descriptions include special "ordered" memory */ /* operations. As far as we can tell, no existing processors actually */ /* require those. Nor does it appear likely that future processors */ /* will. */ /* FIXME: The PA emulator on Itanium may obey weaker restrictions. */ /* There should be a mode in which we don't assume sequential */ /* consistency here. */ #include "../ordered.h" #include /* GCC will not guarantee the alignment we need, use four lock words */ /* and select the correctly aligned datum. See the glibc 2.3.2 */ /* linuxthread port for the original implementation. 
*/ struct AO_pa_clearable_loc { int data[4]; }; #undef AO_TS_INITIALIZER #define AO_TS_t struct AO_pa_clearable_loc #define AO_TS_INITIALIZER {1,1,1,1} /* Switch meaning of set and clear, since we only have an atomic clear */ /* instruction. */ typedef enum {AO_PA_TS_set = 0, AO_PA_TS_clear = 1} AO_PA_TS_val; #define AO_TS_VAL_t AO_PA_TS_val #define AO_TS_CLEAR AO_PA_TS_clear #define AO_TS_SET AO_PA_TS_set /* The hppa only has one atomic read and modify memory operation, */ /* load and clear, so hppa spinlocks must use zero to signify that */ /* someone is holding the lock. The address used for the ldcw */ /* semaphore must be 16-byte aligned. */ #define AO_ldcw(a, ret) \ _LDCWX(0 /* index */, 0 /* s */, a /* base */, ret) /* Because malloc only guarantees 8-byte alignment for malloc'd data, */ /* and GCC only guarantees 8-byte alignment for stack locals, we can't */ /* be assured of 16-byte alignment for atomic lock data even if we */ /* specify "__attribute ((aligned(16)))" in the type declaration. So, */ /* we use a struct containing an array of four ints for the atomic lock */ /* type and dynamically select the 16-byte aligned int from the array */ /* for the semaphore. 
*/ #define AO_PA_LDCW_ALIGNMENT 16 #define AO_ldcw_align(addr) \ ((volatile unsigned *)(((unsigned long)(addr) \ + (AO_PA_LDCW_ALIGNMENT - 1)) \ & ~(AO_PA_LDCW_ALIGNMENT - 1))) /* Works on PA 1.1 and PA 2.0 systems */ AO_INLINE AO_TS_VAL_t AO_test_and_set_full(volatile AO_TS_t * addr) { register unsigned int ret; register unsigned long a = (unsigned long)AO_ldcw_align(addr); # if defined(CPPCHECK) ret = 0; /* to void 'uninitialized variable' warning */ # endif AO_ldcw(a, ret); return (AO_TS_VAL_t)ret; } #define AO_HAVE_test_and_set_full AO_INLINE void AO_pa_clear(volatile AO_TS_t * addr) { volatile unsigned *a = AO_ldcw_align(addr); AO_compiler_barrier(); *a = 1; } #define AO_CLEAR(addr) AO_pa_clear(addr) #define AO_HAVE_CLEAR #undef AO_PA_LDCW_ALIGNMENT #undef AO_ldcw #undef AO_ldcw_align libatomic_ops-7.6.12/src/atomic_ops/sysdeps/hpc/ia64.h000066400000000000000000000116671411761111000225430ustar00rootroot00000000000000/* * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* * This file specifies Itanimum primitives for use with the HP compiler * under HP/UX. We use intrinsics instead of the inline assembly code in the * gcc file. */ #include "../all_atomic_load_store.h" #include "../all_acquire_release_volatile.h" #include "../test_and_set_t_is_char.h" #include #ifdef __LP64__ # define AO_T_FASIZE _FASZ_D # define AO_T_SIZE _SZ_D #else # define AO_T_FASIZE _FASZ_W # define AO_T_SIZE _SZ_W #endif AO_INLINE void AO_nop_full(void) { _Asm_mf(); } #define AO_HAVE_nop_full #ifndef AO_PREFER_GENERALIZED AO_INLINE AO_t AO_fetch_and_add1_acquire (volatile AO_t *p) { return _Asm_fetchadd(AO_T_FASIZE, _SEM_ACQ, p, 1, _LDHINT_NONE, _DOWN_MEM_FENCE); } #define AO_HAVE_fetch_and_add1_acquire AO_INLINE AO_t AO_fetch_and_add1_release (volatile AO_t *p) { return _Asm_fetchadd(AO_T_FASIZE, _SEM_REL, p, 1, _LDHINT_NONE, _UP_MEM_FENCE); } #define AO_HAVE_fetch_and_add1_release AO_INLINE AO_t AO_fetch_and_sub1_acquire (volatile AO_t *p) { return _Asm_fetchadd(AO_T_FASIZE, _SEM_ACQ, p, -1, _LDHINT_NONE, _DOWN_MEM_FENCE); } #define AO_HAVE_fetch_and_sub1_acquire AO_INLINE AO_t AO_fetch_and_sub1_release (volatile AO_t *p) { return _Asm_fetchadd(AO_T_FASIZE, _SEM_REL, p, -1, _LDHINT_NONE, _UP_MEM_FENCE); } #define AO_HAVE_fetch_and_sub1_release #endif /* !AO_PREFER_GENERALIZED */ AO_INLINE AO_t AO_fetch_compare_and_swap_acquire(volatile AO_t *addr, AO_t old_val, AO_t new_val) { _Asm_mov_to_ar(_AREG_CCV, old_val, _DOWN_MEM_FENCE); return _Asm_cmpxchg(AO_T_SIZE, _SEM_ACQ, addr, new_val, _LDHINT_NONE, _DOWN_MEM_FENCE); } #define AO_HAVE_fetch_compare_and_swap_acquire AO_INLINE AO_t AO_fetch_compare_and_swap_release(volatile AO_t *addr, AO_t old_val, AO_t new_val) { _Asm_mov_to_ar(_AREG_CCV, 
old_val, _UP_MEM_FENCE); return _Asm_cmpxchg(AO_T_SIZE, _SEM_REL, addr, new_val, _LDHINT_NONE, _UP_MEM_FENCE); } #define AO_HAVE_fetch_compare_and_swap_release AO_INLINE unsigned char AO_char_fetch_compare_and_swap_acquire(volatile unsigned char *addr, unsigned char old_val, unsigned char new_val) { _Asm_mov_to_ar(_AREG_CCV, old_val, _DOWN_MEM_FENCE); return _Asm_cmpxchg(_SZ_B, _SEM_ACQ, addr, new_val, _LDHINT_NONE, _DOWN_MEM_FENCE); } #define AO_HAVE_char_fetch_compare_and_swap_acquire AO_INLINE unsigned char AO_char_fetch_compare_and_swap_release(volatile unsigned char *addr, unsigned char old_val, unsigned char new_val) { _Asm_mov_to_ar(_AREG_CCV, old_val, _UP_MEM_FENCE); return _Asm_cmpxchg(_SZ_B, _SEM_REL, addr, new_val, _LDHINT_NONE, _UP_MEM_FENCE); } #define AO_HAVE_char_fetch_compare_and_swap_release AO_INLINE unsigned short AO_short_fetch_compare_and_swap_acquire(volatile unsigned short *addr, unsigned short old_val, unsigned short new_val) { _Asm_mov_to_ar(_AREG_CCV, old_val, _DOWN_MEM_FENCE); return _Asm_cmpxchg(_SZ_B, _SEM_ACQ, addr, new_val, _LDHINT_NONE, _DOWN_MEM_FENCE); } #define AO_HAVE_short_fetch_compare_and_swap_acquire AO_INLINE unsigned short AO_short_fetch_compare_and_swap_release(volatile unsigned short *addr, unsigned short old_val, unsigned short new_val) { _Asm_mov_to_ar(_AREG_CCV, old_val, _UP_MEM_FENCE); return _Asm_cmpxchg(_SZ_B, _SEM_REL, addr, new_val, _LDHINT_NONE, _UP_MEM_FENCE); } #define AO_HAVE_short_fetch_compare_and_swap_release #ifndef __LP64__ # define AO_T_IS_INT #endif #undef AO_T_FASIZE #undef AO_T_SIZE libatomic_ops-7.6.12/src/atomic_ops/sysdeps/ibmc/000077500000000000000000000000001411761111000217545ustar00rootroot00000000000000libatomic_ops-7.6.12/src/atomic_ops/sysdeps/ibmc/powerpc.h000066400000000000000000000142471411761111000236140ustar00rootroot00000000000000 /* Memory model documented at http://www-106.ibm.com/developerworks/ */ /* eserver/articles/archguide.html and (clearer) */ /* 
http://www-106.ibm.com/developerworks/eserver/articles/powerpc.html. */ /* There appears to be no implicit ordering between any kind of */ /* independent memory references. */ /* Architecture enforces some ordering based on control dependence. */ /* I don't know if that could help. */ /* Data-dependent loads are always ordered. */ /* Based on the above references, eieio is intended for use on */ /* uncached memory, which we don't support. It does not order loads */ /* from cached memory. */ /* Thanks to Maged Michael, Doug Lea, and Roger Hoover for helping to */ /* track some of this down and correcting my misunderstandings. -HB */ #include "../all_aligned_atomic_load_store.h" #include "../test_and_set_t_is_ao_t.h" void AO_sync(void); #pragma mc_func AO_sync { "7c0004ac" } #ifdef __NO_LWSYNC__ # define AO_lwsync AO_sync #else void AO_lwsync(void); #pragma mc_func AO_lwsync { "7c2004ac" } #endif #define AO_nop_write() AO_lwsync() #define AO_HAVE_nop_write #define AO_nop_read() AO_lwsync() #define AO_HAVE_nop_read /* We explicitly specify load_acquire and store_release, since these */ /* rely on the fact that lwsync is also a LoadStore barrier. */ AO_INLINE AO_t AO_load_acquire(const volatile AO_t *addr) { AO_t result = *addr; AO_lwsync(); return result; } #define AO_HAVE_load_acquire AO_INLINE void AO_store_release(volatile AO_t *addr, AO_t value) { AO_lwsync(); *addr = value; } #define AO_HAVE_store_release #ifndef AO_PREFER_GENERALIZED /* This is similar to the code in the garbage collector. Deleting */ /* this and having it synthesized from compare_and_swap would probably */ /* only cost us a load immediate instruction. */ AO_INLINE AO_TS_VAL_t AO_test_and_set(volatile AO_TS_t *addr) { #if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__) /* Completely untested. And we should be using smaller objects anyway. 
*/ unsigned long oldval; unsigned long temp = 1; /* locked value */ __asm__ __volatile__( "1:ldarx %0,0,%1\n" /* load and reserve */ "cmpdi %0, 0\n" /* if load is */ "bne 2f\n" /* non-zero, return already set */ "stdcx. %2,0,%1\n" /* else store conditional */ "bne- 1b\n" /* retry if lost reservation */ "2:\n" /* oldval is zero if we set */ : "=&r"(oldval) : "r"(addr), "r"(temp) : "memory", "cr0"); #else int oldval; int temp = 1; /* locked value */ __asm__ __volatile__( "1:lwarx %0,0,%1\n" /* load and reserve */ "cmpwi %0, 0\n" /* if load is */ "bne 2f\n" /* non-zero, return already set */ "stwcx. %2,0,%1\n" /* else store conditional */ "bne- 1b\n" /* retry if lost reservation */ "2:\n" /* oldval is zero if we set */ : "=&r"(oldval) : "r"(addr), "r"(temp) : "memory", "cr0"); #endif return (AO_TS_VAL_t)oldval; } #define AO_HAVE_test_and_set AO_INLINE AO_TS_VAL_t AO_test_and_set_acquire(volatile AO_TS_t *addr) { AO_TS_VAL_t result = AO_test_and_set(addr); AO_lwsync(); return result; } #define AO_HAVE_test_and_set_acquire AO_INLINE AO_TS_VAL_t AO_test_and_set_release(volatile AO_TS_t *addr) { AO_lwsync(); return AO_test_and_set(addr); } #define AO_HAVE_test_and_set_release AO_INLINE AO_TS_VAL_t AO_test_and_set_full(volatile AO_TS_t *addr) { AO_TS_VAL_t result; AO_lwsync(); result = AO_test_and_set(addr); AO_lwsync(); return result; } #define AO_HAVE_test_and_set_full #endif /* !AO_PREFER_GENERALIZED */ AO_INLINE AO_t AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val) { AO_t fetched_val; # if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__) __asm__ __volatile__( "1:ldarx %0,0,%1\n" /* load and reserve */ "cmpd %0, %3\n" /* if load is not equal to */ "bne 2f\n" /* old_val, fail */ "stdcx. 
%2,0,%1\n" /* else store conditional */ "bne- 1b\n" /* retry if lost reservation */ "2:\n" : "=&r"(fetched_val) : "r"(addr), "r"(new_val), "r"(old_val) : "memory", "cr0"); # else __asm__ __volatile__( "1:lwarx %0,0,%1\n" /* load and reserve */ "cmpw %0, %3\n" /* if load is not equal to */ "bne 2f\n" /* old_val, fail */ "stwcx. %2,0,%1\n" /* else store conditional */ "bne- 1b\n" /* retry if lost reservation */ "2:\n" : "=&r"(fetched_val) : "r"(addr), "r"(new_val), "r"(old_val) : "memory", "cr0"); # endif return fetched_val; } #define AO_HAVE_fetch_compare_and_swap AO_INLINE AO_t AO_fetch_compare_and_swap_acquire(volatile AO_t *addr, AO_t old_val, AO_t new_val) { AO_t result = AO_fetch_compare_and_swap(addr, old_val, new_val); AO_lwsync(); return result; } #define AO_HAVE_fetch_compare_and_swap_acquire AO_INLINE AO_t AO_fetch_compare_and_swap_release(volatile AO_t *addr, AO_t old_val, AO_t new_val) { AO_lwsync(); return AO_fetch_compare_and_swap(addr, old_val, new_val); } #define AO_HAVE_fetch_compare_and_swap_release AO_INLINE AO_t AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val, AO_t new_val) { AO_t result; AO_lwsync(); result = AO_fetch_compare_and_swap(addr, old_val, new_val); AO_lwsync(); return result; } #define AO_HAVE_fetch_compare_and_swap_full /* TODO: Implement AO_fetch_and_add, AO_and/or/xor directly. */ libatomic_ops-7.6.12/src/atomic_ops/sysdeps/icc/000077500000000000000000000000001411761111000216005ustar00rootroot00000000000000libatomic_ops-7.6.12/src/atomic_ops/sysdeps/icc/ia64.h000066400000000000000000000144611411761111000225220ustar00rootroot00000000000000/* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* * This file specifies Itanimum primitives for use with the Intel (ecc) * compiler. We use intrinsics instead of the inline assembly code in the * gcc file. */ #include "../all_atomic_load_store.h" #include "../test_and_set_t_is_char.h" #include /* The acquire release semantics of volatile can be turned off. And volatile */ /* operations in icc9 don't imply ordering with respect to other nonvolatile */ /* operations. 
*/ #define AO_INTEL_PTR_t void * AO_INLINE AO_t AO_load_acquire(const volatile AO_t *p) { return (AO_t)(__ld8_acq((AO_INTEL_PTR_t)p)); } #define AO_HAVE_load_acquire AO_INLINE void AO_store_release(volatile AO_t *p, AO_t val) { __st8_rel((AO_INTEL_PTR_t)p, (__int64)val); } #define AO_HAVE_store_release AO_INLINE unsigned char AO_char_load_acquire(const volatile unsigned char *p) { /* A normal volatile load generates an ld.acq */ return (__ld1_acq((AO_INTEL_PTR_t)p)); } #define AO_HAVE_char_load_acquire AO_INLINE void AO_char_store_release(volatile unsigned char *p, unsigned char val) { __st1_rel((AO_INTEL_PTR_t)p, val); } #define AO_HAVE_char_store_release AO_INLINE unsigned short AO_short_load_acquire(const volatile unsigned short *p) { /* A normal volatile load generates an ld.acq */ return (__ld2_acq((AO_INTEL_PTR_t)p)); } #define AO_HAVE_short_load_acquire AO_INLINE void AO_short_store_release(volatile unsigned short *p, unsigned short val) { __st2_rel((AO_INTEL_PTR_t)p, val); } #define AO_HAVE_short_store_release AO_INLINE unsigned int AO_int_load_acquire(const volatile unsigned int *p) { /* A normal volatile load generates an ld.acq */ return (__ld4_acq((AO_INTEL_PTR_t)p)); } #define AO_HAVE_int_load_acquire AO_INLINE void AO_int_store_release(volatile unsigned int *p, unsigned int val) { __st4_rel((AO_INTEL_PTR_t)p, val); } #define AO_HAVE_int_store_release AO_INLINE void AO_nop_full(void) { __mf(); } #define AO_HAVE_nop_full #ifndef AO_PREFER_GENERALIZED AO_INLINE AO_t AO_fetch_and_add1_acquire(volatile AO_t *p) { return __fetchadd8_acq((unsigned __int64 *)p, 1); } #define AO_HAVE_fetch_and_add1_acquire AO_INLINE AO_t AO_fetch_and_add1_release(volatile AO_t *p) { return __fetchadd8_rel((unsigned __int64 *)p, 1); } #define AO_HAVE_fetch_and_add1_release AO_INLINE AO_t AO_fetch_and_sub1_acquire(volatile AO_t *p) { return __fetchadd8_acq((unsigned __int64 *)p, -1); } #define AO_HAVE_fetch_and_sub1_acquire AO_INLINE AO_t AO_fetch_and_sub1_release(volatile AO_t 
*p) { return __fetchadd8_rel((unsigned __int64 *)p, -1); } #define AO_HAVE_fetch_and_sub1_release #endif /* !AO_PREFER_GENERALIZED */ AO_INLINE AO_t AO_fetch_compare_and_swap_acquire(volatile AO_t *addr, AO_t old_val, AO_t new_val) { return _InterlockedCompareExchange64_acq(addr, new_val, old_val); } #define AO_HAVE_fetch_compare_and_swap_acquire AO_INLINE AO_t AO_fetch_compare_and_swap_release(volatile AO_t *addr, AO_t old_val, AO_t new_val) { return _InterlockedCompareExchange64_rel(addr, new_val, old_val); } #define AO_HAVE_fetch_compare_and_swap_release AO_INLINE unsigned char AO_char_fetch_compare_and_swap_acquire(volatile unsigned char *addr, unsigned char old_val, unsigned char new_val) { return _InterlockedCompareExchange8_acq(addr, new_val, old_val); } #define AO_HAVE_char_fetch_compare_and_swap_acquire AO_INLINE unsigned char AO_char_fetch_compare_and_swap_release(volatile unsigned char *addr, unsigned char old_val, unsigned char new_val) { return _InterlockedCompareExchange8_rel(addr, new_val, old_val); } #define AO_HAVE_char_fetch_compare_and_swap_release AO_INLINE unsigned short AO_short_fetch_compare_and_swap_acquire(volatile unsigned short *addr, unsigned short old_val, unsigned short new_val) { return _InterlockedCompareExchange16_acq(addr, new_val, old_val); } #define AO_HAVE_short_fetch_compare_and_swap_acquire AO_INLINE unsigned short AO_short_fetch_compare_and_swap_release(volatile unsigned short *addr, unsigned short old_val, unsigned short new_val) { return _InterlockedCompareExchange16_rel(addr, new_val, old_val); } #define AO_HAVE_short_fetch_compare_and_swap_release AO_INLINE unsigned int AO_int_fetch_compare_and_swap_acquire(volatile unsigned int *addr, unsigned int old_val, unsigned int new_val) { return _InterlockedCompareExchange_acq(addr, new_val, old_val); } #define AO_HAVE_int_fetch_compare_and_swap_acquire AO_INLINE unsigned int AO_int_fetch_compare_and_swap_release(volatile unsigned int *addr, unsigned int old_val, unsigned int 
new_val) { return _InterlockedCompareExchange_rel(addr, new_val, old_val); } #define AO_HAVE_int_fetch_compare_and_swap_release #undef AO_INTEL_PTR_t libatomic_ops-7.6.12/src/atomic_ops/sysdeps/loadstore/000077500000000000000000000000001411761111000230365ustar00rootroot00000000000000libatomic_ops-7.6.12/src/atomic_ops/sysdeps/loadstore/acquire_release_volatile.h000066400000000000000000000050421411761111000302400ustar00rootroot00000000000000/* * Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* This file adds definitions appropriate for environments in which */ /* volatile load of a given type has acquire semantics, and volatile */ /* store of a given type has release semantics. This is arguably */ /* supposed to be true with the standard Itanium software conventions. */ /* Empirically gcc/ia64 does some reordering of ordinary operations */ /* around volatiles even when we think it should not. 
GCC v3.3 and */ /* earlier could reorder a volatile store with another store. As of */ /* March 2005, gcc pre-4 reuses some previously computed common */ /* subexpressions across a volatile load; hence, we now add compiler */ /* barriers for gcc. */ #ifndef AO_HAVE_GCC_BARRIER /* TODO: Check GCC version (if workaround not needed for modern GCC). */ # if defined(__GNUC__) # define AO_GCC_BARRIER() AO_compiler_barrier() # else # define AO_GCC_BARRIER() (void)0 # endif # define AO_HAVE_GCC_BARRIER #endif AO_INLINE AO_t AO_load_acquire(const volatile AO_t *addr) { AO_t result = *addr; /* A normal volatile load generates an ld.acq (on IA-64). */ AO_GCC_BARRIER(); return result; } #define AO_HAVE_load_acquire AO_INLINE void AO_store_release(volatile AO_t *addr, AO_t new_val) { AO_GCC_BARRIER(); /* A normal volatile store generates an st.rel (on IA-64). */ *addr = new_val; } #define AO_HAVE_store_release libatomic_ops-7.6.12/src/atomic_ops/sysdeps/loadstore/acquire_release_volatile.template000066400000000000000000000051041411761111000316230ustar00rootroot00000000000000/* * Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* This file adds definitions appropriate for environments in which */ /* volatile load of a given type has acquire semantics, and volatile */ /* store of a given type has release semantics. This is arguably */ /* supposed to be true with the standard Itanium software conventions. */ /* Empirically gcc/ia64 does some reordering of ordinary operations */ /* around volatiles even when we think it should not. GCC v3.3 and */ /* earlier could reorder a volatile store with another store. As of */ /* March 2005, gcc pre-4 reuses some previously computed common */ /* subexpressions across a volatile load; hence, we now add compiler */ /* barriers for gcc. */ #ifndef AO_HAVE_GCC_BARRIER /* TODO: Check GCC version (if workaround not needed for modern GCC). */ # if defined(__GNUC__) # define AO_GCC_BARRIER() AO_compiler_barrier() # else # define AO_GCC_BARRIER() (void)0 # endif # define AO_HAVE_GCC_BARRIER #endif AO_INLINE XCTYPE AO_XSIZE_load_acquire(const volatile XCTYPE *addr) { XCTYPE result = *addr; /* A normal volatile load generates an ld.acq (on IA-64). */ AO_GCC_BARRIER(); return result; } #define AO_HAVE_XSIZE_load_acquire AO_INLINE void AO_XSIZE_store_release(volatile XCTYPE *addr, XCTYPE new_val) { AO_GCC_BARRIER(); /* A normal volatile store generates an st.rel (on IA-64). */ *addr = new_val; } #define AO_HAVE_XSIZE_store_release libatomic_ops-7.6.12/src/atomic_ops/sysdeps/loadstore/atomic_load.h000066400000000000000000000032261411761111000254650ustar00rootroot00000000000000/* * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* Definitions for architectures on which loads of given type are */ /* atomic (either for suitably aligned data only or for any legal */ /* alignment). */ AO_INLINE AO_t AO_load(const volatile AO_t *addr) { # ifdef AO_ACCESS_CHECK_ALIGNED AO_ASSERT_ADDR_ALIGNED(addr); # endif /* Cast away the volatile for architectures like IA64 where */ /* volatile adds barrier (fence) semantics. */ return *(const AO_t *)addr; } #define AO_HAVE_load libatomic_ops-7.6.12/src/atomic_ops/sysdeps/loadstore/atomic_load.template000066400000000000000000000032561411761111000270540ustar00rootroot00000000000000/* * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* Definitions for architectures on which loads of given type are */ /* atomic (either for suitably aligned data only or for any legal */ /* alignment). */ AO_INLINE XCTYPE AO_XSIZE_load(const volatile XCTYPE *addr) { # ifdef AO_ACCESS_XSIZE_CHECK_ALIGNED AO_ASSERT_ADDR_ALIGNED(addr); # endif /* Cast away the volatile for architectures like IA64 where */ /* volatile adds barrier (fence) semantics. */ return *(const XCTYPE *)addr; } #define AO_HAVE_XSIZE_load libatomic_ops-7.6.12/src/atomic_ops/sysdeps/loadstore/atomic_store.h000066400000000000000000000030271411761111000257010ustar00rootroot00000000000000/* * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* Definitions for architectures on which stores of given type are */ /* atomic (either for suitably aligned data only or for any legal */ /* alignment). */ AO_INLINE void AO_store(volatile AO_t *addr, AO_t new_val) { # ifdef AO_ACCESS_CHECK_ALIGNED AO_ASSERT_ADDR_ALIGNED(addr); # endif *(AO_t *)addr = new_val; } #define AO_HAVE_store libatomic_ops-7.6.12/src/atomic_ops/sysdeps/loadstore/atomic_store.template000066400000000000000000000030571411761111000272700ustar00rootroot00000000000000/* * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* Definitions for architectures on which stores of given type are */ /* atomic (either for suitably aligned data only or for any legal */ /* alignment). */ AO_INLINE void AO_XSIZE_store(volatile XCTYPE *addr, XCTYPE new_val) { # ifdef AO_ACCESS_XSIZE_CHECK_ALIGNED AO_ASSERT_ADDR_ALIGNED(addr); # endif *(XCTYPE *)addr = new_val; } #define AO_HAVE_XSIZE_store libatomic_ops-7.6.12/src/atomic_ops/sysdeps/loadstore/char_acquire_release_volatile.h000066400000000000000000000051621411761111000312400ustar00rootroot00000000000000/* * Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* This file adds definitions appropriate for environments in which */ /* volatile load of a given type has acquire semantics, and volatile */ /* store of a given type has release semantics. This is arguably */ /* supposed to be true with the standard Itanium software conventions. */ /* Empirically gcc/ia64 does some reordering of ordinary operations */ /* around volatiles even when we think it should not. GCC v3.3 and */ /* earlier could reorder a volatile store with another store. As of */ /* March 2005, gcc pre-4 reuses some previously computed common */ /* subexpressions across a volatile load; hence, we now add compiler */ /* barriers for gcc. */ #ifndef AO_HAVE_GCC_BARRIER /* TODO: Check GCC version (if workaround not needed for modern GCC). 
*/ # if defined(__GNUC__) # define AO_GCC_BARRIER() AO_compiler_barrier() # else # define AO_GCC_BARRIER() (void)0 # endif # define AO_HAVE_GCC_BARRIER #endif AO_INLINE unsigned/**/char AO_char_load_acquire(const volatile unsigned/**/char *addr) { unsigned/**/char result = *addr; /* A normal volatile load generates an ld.acq (on IA-64). */ AO_GCC_BARRIER(); return result; } #define AO_HAVE_char_load_acquire AO_INLINE void AO_char_store_release(volatile unsigned/**/char *addr, unsigned/**/char new_val) { AO_GCC_BARRIER(); /* A normal volatile store generates an st.rel (on IA-64). */ *addr = new_val; } #define AO_HAVE_char_store_release libatomic_ops-7.6.12/src/atomic_ops/sysdeps/loadstore/char_atomic_load.h000066400000000000000000000033111411761111000264550ustar00rootroot00000000000000/* * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ /* Definitions for architectures on which loads of given type are */ /* atomic (either for suitably aligned data only or for any legal */ /* alignment). */ AO_INLINE unsigned/**/char AO_char_load(const volatile unsigned/**/char *addr) { # ifdef AO_ACCESS_char_CHECK_ALIGNED AO_ASSERT_ADDR_ALIGNED(addr); # endif /* Cast away the volatile for architectures like IA64 where */ /* volatile adds barrier (fence) semantics. */ return *(const unsigned/**/char *)addr; } #define AO_HAVE_char_load libatomic_ops-7.6.12/src/atomic_ops/sysdeps/loadstore/char_atomic_store.h000066400000000000000000000031121411761111000266710ustar00rootroot00000000000000/* * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* Definitions for architectures on which stores of given type are */ /* atomic (either for suitably aligned data only or for any legal */ /* alignment). 
*/ AO_INLINE void AO_char_store(volatile unsigned/**/char *addr, unsigned/**/char new_val) { # ifdef AO_ACCESS_char_CHECK_ALIGNED AO_ASSERT_ADDR_ALIGNED(addr); # endif *(unsigned/**/char *)addr = new_val; } #define AO_HAVE_char_store libatomic_ops-7.6.12/src/atomic_ops/sysdeps/loadstore/double_atomic_load_store.h000066400000000000000000000037271411761111000302410ustar00rootroot00000000000000/* * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. * Copyright (c) 2013 Ivan Maidanski * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* Definitions for architectures on which AO_double_t loads and stores */ /* are atomic (either for suitably aligned data only or for any legal */ /* alignment). */ AO_INLINE AO_double_t AO_double_load(const volatile AO_double_t *addr) { AO_double_t result; # ifdef AO_ACCESS_double_CHECK_ALIGNED AO_ASSERT_ADDR_ALIGNED(addr); # endif /* Cast away the volatile in case it adds fence semantics. 
*/ result.AO_whole = ((const AO_double_t *)addr)->AO_whole; return result; } #define AO_HAVE_double_load AO_INLINE void AO_double_store(volatile AO_double_t *addr, AO_double_t new_val) { # ifdef AO_ACCESS_double_CHECK_ALIGNED AO_ASSERT_ADDR_ALIGNED(addr); # endif ((AO_double_t *)addr)->AO_whole = new_val.AO_whole; } #define AO_HAVE_double_store libatomic_ops-7.6.12/src/atomic_ops/sysdeps/loadstore/int_acquire_release_volatile.h000066400000000000000000000051061411761111000311130ustar00rootroot00000000000000/* * Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* This file adds definitions appropriate for environments in which */ /* volatile load of a given type has acquire semantics, and volatile */ /* store of a given type has release semantics. This is arguably */ /* supposed to be true with the standard Itanium software conventions. 
*/ /* Empirically gcc/ia64 does some reordering of ordinary operations */ /* around volatiles even when we think it should not. GCC v3.3 and */ /* earlier could reorder a volatile store with another store. As of */ /* March 2005, gcc pre-4 reuses some previously computed common */ /* subexpressions across a volatile load; hence, we now add compiler */ /* barriers for gcc. */ #ifndef AO_HAVE_GCC_BARRIER /* TODO: Check GCC version (if workaround not needed for modern GCC). */ # if defined(__GNUC__) # define AO_GCC_BARRIER() AO_compiler_barrier() # else # define AO_GCC_BARRIER() (void)0 # endif # define AO_HAVE_GCC_BARRIER #endif AO_INLINE unsigned AO_int_load_acquire(const volatile unsigned *addr) { unsigned result = *addr; /* A normal volatile load generates an ld.acq (on IA-64). */ AO_GCC_BARRIER(); return result; } #define AO_HAVE_int_load_acquire AO_INLINE void AO_int_store_release(volatile unsigned *addr, unsigned new_val) { AO_GCC_BARRIER(); /* A normal volatile store generates an st.rel (on IA-64). */ *addr = new_val; } #define AO_HAVE_int_store_release libatomic_ops-7.6.12/src/atomic_ops/sysdeps/loadstore/int_atomic_load.h000066400000000000000000000032561411761111000263420ustar00rootroot00000000000000/* * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* Definitions for architectures on which loads of given type are */ /* atomic (either for suitably aligned data only or for any legal */ /* alignment). */ AO_INLINE unsigned AO_int_load(const volatile unsigned *addr) { # ifdef AO_ACCESS_int_CHECK_ALIGNED AO_ASSERT_ADDR_ALIGNED(addr); # endif /* Cast away the volatile for architectures like IA64 where */ /* volatile adds barrier (fence) semantics. */ return *(const unsigned *)addr; } #define AO_HAVE_int_load libatomic_ops-7.6.12/src/atomic_ops/sysdeps/loadstore/int_atomic_store.h000066400000000000000000000030571411761111000265560ustar00rootroot00000000000000/* * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* Definitions for architectures on which stores of given type are */ /* atomic (either for suitably aligned data only or for any legal */ /* alignment). */ AO_INLINE void AO_int_store(volatile unsigned *addr, unsigned new_val) { # ifdef AO_ACCESS_int_CHECK_ALIGNED AO_ASSERT_ADDR_ALIGNED(addr); # endif *(unsigned *)addr = new_val; } #define AO_HAVE_int_store libatomic_ops-7.6.12/src/atomic_ops/sysdeps/loadstore/ordered_loads_only.h000066400000000000000000000150351411761111000270620ustar00rootroot00000000000000/* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifdef AO_HAVE_char_load /* char_load_read is defined in generalize-small. 
*/ # define AO_char_load_acquire(addr) AO_char_load_read(addr) # define AO_HAVE_char_load_acquire #endif /* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifdef AO_HAVE_short_load /* short_load_read is defined in generalize-small. */ # define AO_short_load_acquire(addr) AO_short_load_read(addr) # define AO_HAVE_short_load_acquire #endif /* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifdef AO_HAVE_int_load /* int_load_read is defined in generalize-small. */ # define AO_int_load_acquire(addr) AO_int_load_read(addr) # define AO_HAVE_int_load_acquire #endif /* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifdef AO_HAVE_load /* load_read is defined in generalize-small. */ # define AO_load_acquire(addr) AO_load_read(addr) # define AO_HAVE_load_acquire #endif /* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifdef AO_HAVE_double_load /* double_load_read is defined in generalize-small. 
*/ # define AO_double_load_acquire(addr) AO_double_load_read(addr) # define AO_HAVE_double_load_acquire #endif libatomic_ops-7.6.12/src/atomic_ops/sysdeps/loadstore/ordered_loads_only.template000066400000000000000000000025011411761111000304400ustar00rootroot00000000000000/* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifdef AO_HAVE_XSIZE_load /* XSIZE_load_read is defined in generalize-small. */ # define AO_XSIZE_load_acquire(addr) AO_XSIZE_load_read(addr) # define AO_HAVE_XSIZE_load_acquire #endif libatomic_ops-7.6.12/src/atomic_ops/sysdeps/loadstore/ordered_stores_only.h000066400000000000000000000150571411761111000273030ustar00rootroot00000000000000/* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifdef AO_HAVE_char_store # define AO_char_store_release(addr, val) \ (AO_nop_write(), AO_char_store(addr, val)) # define AO_HAVE_char_store_release #endif /* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifdef AO_HAVE_short_store # define AO_short_store_release(addr, val) \ (AO_nop_write(), AO_short_store(addr, val)) # define AO_HAVE_short_store_release #endif /* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifdef AO_HAVE_int_store # define AO_int_store_release(addr, val) \ (AO_nop_write(), AO_int_store(addr, val)) # define AO_HAVE_int_store_release #endif /* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifdef AO_HAVE_store # define AO_store_release(addr, val) \ (AO_nop_write(), AO_store(addr, val)) # define AO_HAVE_store_release #endif /* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifdef AO_HAVE_double_store # define AO_double_store_release(addr, val) \ (AO_nop_write(), AO_double_store(addr, val)) # define AO_HAVE_double_store_release #endif libatomic_ops-7.6.12/src/atomic_ops/sysdeps/loadstore/ordered_stores_only.template000066400000000000000000000025031411761111000306570ustar00rootroot00000000000000/* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #ifdef AO_HAVE_XSIZE_store # define AO_XSIZE_store_release(addr, val) \ (AO_nop_write(), AO_XSIZE_store(addr, val)) # define AO_HAVE_XSIZE_store_release #endif libatomic_ops-7.6.12/src/atomic_ops/sysdeps/loadstore/short_acquire_release_volatile.h000066400000000000000000000051731411761111000314640ustar00rootroot00000000000000/* * Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* This file adds definitions appropriate for environments in which */ /* volatile load of a given type has acquire semantics, and volatile */ /* store of a given type has release semantics. This is arguably */ /* supposed to be true with the standard Itanium software conventions. */ /* Empirically gcc/ia64 does some reordering of ordinary operations */ /* around volatiles even when we think it should not. GCC v3.3 and */ /* earlier could reorder a volatile store with another store. 
As of */ /* March 2005, gcc pre-4 reuses some previously computed common */ /* subexpressions across a volatile load; hence, we now add compiler */ /* barriers for gcc. */ #ifndef AO_HAVE_GCC_BARRIER /* TODO: Check GCC version (if workaround not needed for modern GCC). */ # if defined(__GNUC__) # define AO_GCC_BARRIER() AO_compiler_barrier() # else # define AO_GCC_BARRIER() (void)0 # endif # define AO_HAVE_GCC_BARRIER #endif AO_INLINE unsigned/**/short AO_short_load_acquire(const volatile unsigned/**/short *addr) { unsigned/**/short result = *addr; /* A normal volatile load generates an ld.acq (on IA-64). */ AO_GCC_BARRIER(); return result; } #define AO_HAVE_short_load_acquire AO_INLINE void AO_short_store_release(volatile unsigned/**/short *addr, unsigned/**/short new_val) { AO_GCC_BARRIER(); /* A normal volatile store generates an st.rel (on IA-64). */ *addr = new_val; } #define AO_HAVE_short_store_release libatomic_ops-7.6.12/src/atomic_ops/sysdeps/loadstore/short_atomic_load.h000066400000000000000000000033171411761111000267050ustar00rootroot00000000000000/* * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* Definitions for architectures on which loads of given type are */ /* atomic (either for suitably aligned data only or for any legal */ /* alignment). */ AO_INLINE unsigned/**/short AO_short_load(const volatile unsigned/**/short *addr) { # ifdef AO_ACCESS_short_CHECK_ALIGNED AO_ASSERT_ADDR_ALIGNED(addr); # endif /* Cast away the volatile for architectures like IA64 where */ /* volatile adds barrier (fence) semantics. */ return *(const unsigned/**/short *)addr; } #define AO_HAVE_short_load libatomic_ops-7.6.12/src/atomic_ops/sysdeps/loadstore/short_atomic_store.h000066400000000000000000000031201411761111000271120ustar00rootroot00000000000000/* * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* Definitions for architectures on which stores of given type are */ /* atomic (either for suitably aligned data only or for any legal */ /* alignment). */ AO_INLINE void AO_short_store(volatile unsigned/**/short *addr, unsigned/**/short new_val) { # ifdef AO_ACCESS_short_CHECK_ALIGNED AO_ASSERT_ADDR_ALIGNED(addr); # endif *(unsigned/**/short *)addr = new_val; } #define AO_HAVE_short_store libatomic_ops-7.6.12/src/atomic_ops/sysdeps/msftc/000077500000000000000000000000001411761111000221565ustar00rootroot00000000000000libatomic_ops-7.6.12/src/atomic_ops/sysdeps/msftc/arm.h000066400000000000000000000045261411761111000231150ustar00rootroot00000000000000/* * Copyright (c) 2003 Hewlett-Packard Development Company, L.P. * Copyright (c) 2009-2017 Ivan Maidanski * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef AO_ASSUME_WINDOWS98 /* CAS is always available */ # define AO_ASSUME_WINDOWS98 #endif #include "common32_defs.h" #include "../test_and_set_t_is_ao_t.h" /* AO_test_and_set_full() is emulated using CAS. */ /* Some ARM slide set, if it has been read correctly, claims that Loads */ /* followed by either a Load or a Store are ordered, but nothing else. */ /* It is assumed that Windows interrupt handlers clear the LL/SC flag. */ /* Unaligned accesses are not guaranteed to be atomic. */ #include "../all_aligned_atomic_load_store.h" /* If only a single processor is used, we can define AO_UNIPROCESSOR. */ #ifdef AO_UNIPROCESSOR AO_INLINE void AO_nop_full(void) { AO_compiler_barrier(); } # define AO_HAVE_nop_full #else /* AO_nop_full() is emulated using AO_test_and_set_full(). */ #endif #if _M_ARM >= 6 /* ARMv6 is the first architecture providing support for simple LL/SC. */ /* #include "../standard_ao_double_t.h" */ /* TODO: implement double-wide operations (similar to x86). */ #else /* _M_ARM < 6 */ /* TODO: implement AO_test_and_set_full using SWP. */ #endif /* _M_ARM < 6 */ #define AO_T_IS_INT libatomic_ops-7.6.12/src/atomic_ops/sysdeps/msftc/common32_defs.h000066400000000000000000000166251411761111000247770ustar00rootroot00000000000000/* * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. 
* Copyright (c) 2009-2018 Ivan Maidanski * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* This file contains AO primitives based on VC++ built-in intrinsic */ /* functions commonly available across 32-bit architectures. */ /* This file should be included from arch-specific header files. */ /* Define AO_USE_INTERLOCKED_INTRINSICS if _Interlocked primitives */ /* (used below) are available as intrinsic ones for a target arch */ /* (otherwise "Interlocked" functions family is used instead). */ /* Define AO_ASSUME_WINDOWS98 if CAS is available. */ #if _MSC_VER <= 1400 || !defined(AO_USE_INTERLOCKED_INTRINSICS) \ || defined(_WIN32_WCE) # include /* Seems like over-kill, but that's what MSDN recommends. */ /* And apparently winbase.h is not always self-contained. 
*/ #endif #if _MSC_VER < 1310 || !defined(AO_USE_INTERLOCKED_INTRINSICS) # define _InterlockedIncrement InterlockedIncrement # define _InterlockedDecrement InterlockedDecrement # define _InterlockedExchangeAdd InterlockedExchangeAdd # define _InterlockedCompareExchange InterlockedCompareExchange # define AO_INTERLOCKED_VOLATILE /**/ #else /* elif _MSC_VER >= 1310 */ # if _MSC_VER >= 1400 # ifndef _WIN32_WCE # include # endif # else /* elif _MSC_VER < 1400 */ # ifdef __cplusplus extern "C" { # endif LONG __cdecl _InterlockedIncrement(LONG volatile *); LONG __cdecl _InterlockedDecrement(LONG volatile *); LONG __cdecl _InterlockedExchangeAdd(LONG volatile *, LONG); LONG __cdecl _InterlockedCompareExchange(LONG volatile *, LONG /* Exchange */, LONG /* Comp */); # ifdef __cplusplus } /* extern "C" */ # endif # endif /* _MSC_VER < 1400 */ # if !defined(AO_PREFER_GENERALIZED) || !defined(AO_ASSUME_WINDOWS98) # pragma intrinsic (_InterlockedIncrement) # pragma intrinsic (_InterlockedDecrement) # pragma intrinsic (_InterlockedExchangeAdd) # endif /* !AO_PREFER_GENERALIZED */ # pragma intrinsic (_InterlockedCompareExchange) # define AO_INTERLOCKED_VOLATILE volatile #endif /* _MSC_VER >= 1310 */ #if !defined(AO_PREFER_GENERALIZED) || !defined(AO_ASSUME_WINDOWS98) AO_INLINE AO_t AO_fetch_and_add_full(volatile AO_t *p, AO_t incr) { return _InterlockedExchangeAdd((long AO_INTERLOCKED_VOLATILE *)p, incr); } #define AO_HAVE_fetch_and_add_full AO_INLINE AO_t AO_fetch_and_add1_full(volatile AO_t *p) { return _InterlockedIncrement((long AO_INTERLOCKED_VOLATILE *)p) - 1; } #define AO_HAVE_fetch_and_add1_full AO_INLINE AO_t AO_fetch_and_sub1_full(volatile AO_t *p) { return _InterlockedDecrement((long AO_INTERLOCKED_VOLATILE *)p) + 1; } #define AO_HAVE_fetch_and_sub1_full #endif /* !AO_PREFER_GENERALIZED */ #ifdef AO_ASSUME_WINDOWS98 AO_INLINE AO_t AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val, AO_t new_val) { # ifdef AO_OLD_STYLE_INTERLOCKED_COMPARE_EXCHANGE return 
(AO_t)_InterlockedCompareExchange( (void *AO_INTERLOCKED_VOLATILE *)addr, (void *)new_val, (void *)old_val); # else return _InterlockedCompareExchange((long AO_INTERLOCKED_VOLATILE *)addr, new_val, old_val); # endif } # define AO_HAVE_fetch_compare_and_swap_full #endif /* AO_ASSUME_WINDOWS98 */ #if (_MSC_VER > 1400) && (!defined(_M_ARM) || _MSC_VER >= 1800) # pragma intrinsic (_InterlockedAnd8) # pragma intrinsic (_InterlockedCompareExchange16) # pragma intrinsic (_InterlockedOr8) # pragma intrinsic (_InterlockedXor8) AO_INLINE void AO_char_and_full(volatile unsigned char *p, unsigned char value) { _InterlockedAnd8((char volatile *)p, value); } # define AO_HAVE_char_and_full AO_INLINE void AO_char_or_full(volatile unsigned char *p, unsigned char value) { _InterlockedOr8((char volatile *)p, value); } # define AO_HAVE_char_or_full AO_INLINE void AO_char_xor_full(volatile unsigned char *p, unsigned char value) { _InterlockedXor8((char volatile *)p, value); } # define AO_HAVE_char_xor_full AO_INLINE unsigned short AO_short_fetch_compare_and_swap_full(volatile unsigned short *addr, unsigned short old_val, unsigned short new_val) { return _InterlockedCompareExchange16((short volatile *)addr, new_val, old_val); } # define AO_HAVE_short_fetch_compare_and_swap_full # ifndef AO_PREFER_GENERALIZED # pragma intrinsic (_InterlockedIncrement16) # pragma intrinsic (_InterlockedDecrement16) AO_INLINE unsigned short AO_short_fetch_and_add1_full(volatile unsigned short *p) { return _InterlockedIncrement16((short volatile *)p) - 1; } # define AO_HAVE_short_fetch_and_add1_full AO_INLINE unsigned short AO_short_fetch_and_sub1_full(volatile unsigned short *p) { return _InterlockedDecrement16((short volatile *)p) + 1; } # define AO_HAVE_short_fetch_and_sub1_full # endif /* !AO_PREFER_GENERALIZED */ #endif /* _MSC_VER > 1400 */ #if _MSC_VER >= 1800 /* Visual Studio 2013+ */ # pragma intrinsic (_InterlockedCompareExchange8) AO_INLINE unsigned char 
AO_char_fetch_compare_and_swap_full(volatile unsigned char *addr, unsigned char old_val, unsigned char new_val) { return _InterlockedCompareExchange8((char volatile *)addr, new_val, old_val); } # define AO_HAVE_char_fetch_compare_and_swap_full # if !defined(AO_PREFER_GENERALIZED) && !defined(_M_ARM) # pragma intrinsic (_InterlockedExchangeAdd16) # pragma intrinsic (_InterlockedExchangeAdd8) AO_INLINE unsigned char AO_char_fetch_and_add_full(volatile unsigned char *p, unsigned char incr) { return _InterlockedExchangeAdd8((char volatile *)p, incr); } # define AO_HAVE_char_fetch_and_add_full AO_INLINE unsigned short AO_short_fetch_and_add_full(volatile unsigned short *p, unsigned short incr) { return _InterlockedExchangeAdd16((short volatile *)p, incr); } # define AO_HAVE_short_fetch_and_add_full # endif /* !AO_PREFER_GENERALIZED && !_M_ARM */ #endif /* _MSC_VER >= 1800 */ libatomic_ops-7.6.12/src/atomic_ops/sysdeps/msftc/x86.h000066400000000000000000000122071411761111000227560ustar00rootroot00000000000000/* * Copyright (c) 2003 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* If AO_ASSUME_VISTA is defined, we assume Windows Server 2003, Vista */ /* or later. */ #include "../all_aligned_atomic_load_store.h" #include "../test_and_set_t_is_char.h" #if !defined(AO_ASSUME_WINDOWS98) \ && (defined(AO_ASSUME_VISTA) || _MSC_VER >= 1400) /* Visual Studio 2005 (MS VC++ 8.0) discontinued support of Windows 95. */ # define AO_ASSUME_WINDOWS98 #endif #ifndef AO_USE_INTERLOCKED_INTRINSICS /* _Interlocked primitives (Inc, Dec, Xchg, Add) are always available */ # define AO_USE_INTERLOCKED_INTRINSICS #endif #include "common32_defs.h" /* As far as we can tell, the lfence and sfence instructions are not */ /* currently needed or useful for cached memory accesses. */ /* Unfortunately mfence doesn't exist everywhere. */ /* IsProcessorFeaturePresent(PF_COMPARE_EXCHANGE128) is */ /* probably a conservative test for it? */ #if defined(AO_USE_PENTIUM4_INSTRS) AO_INLINE void AO_nop_full(void) { __asm { mfence } } #define AO_HAVE_nop_full #else /* We could use the cpuid instruction. But that seems to be slower */ /* than the default implementation based on test_and_set_full. Thus */ /* we omit that bit of misinformation here. */ #endif #if !defined(AO_NO_ASM_XADD) && !defined(AO_HAVE_char_fetch_and_add_full) AO_INLINE unsigned char AO_char_fetch_and_add_full(volatile unsigned char *p, unsigned char incr) { __asm { mov al, incr mov ebx, p lock xadd byte ptr [ebx], al } /* Ignore possible "missing return value" warning here. */ } # define AO_HAVE_char_fetch_and_add_full AO_INLINE unsigned short AO_short_fetch_and_add_full(volatile unsigned short *p, unsigned short incr) { __asm { mov ax, incr mov ebx, p lock xadd word ptr [ebx], ax } /* Ignore possible "missing return value" warning here. 
*/ } # define AO_HAVE_short_fetch_and_add_full #endif /* !AO_NO_ASM_XADD */ AO_INLINE AO_TS_VAL_t AO_test_and_set_full(volatile AO_TS_t *addr) { __asm { mov eax,0xff ; /* AO_TS_SET */ mov ebx,addr ; xchg byte ptr [ebx],al ; } /* Ignore possible "missing return value" warning here. */ } #define AO_HAVE_test_and_set_full #if defined(_WIN64) && !defined(CPPCHECK) # error wrong architecture #endif #ifdef AO_ASSUME_VISTA # include "../standard_ao_double_t.h" /* Reading or writing a quadword aligned on a 64-bit boundary is */ /* always carried out atomically (requires at least a Pentium). */ # define AO_ACCESS_double_CHECK_ALIGNED # include "../loadstore/double_atomic_load_store.h" /* Whenever we run on a Pentium class machine, we have that certain */ /* function. */ # pragma intrinsic (_InterlockedCompareExchange64) /* Returns nonzero if the comparison succeeded. */ AO_INLINE int AO_double_compare_and_swap_full(volatile AO_double_t *addr, AO_double_t old_val, AO_double_t new_val) { AO_ASSERT_ADDR_ALIGNED(addr); return (double_ptr_storage)_InterlockedCompareExchange64( (__int64 volatile *)addr, new_val.AO_whole /* exchange */, old_val.AO_whole) == old_val.AO_whole; } # define AO_HAVE_double_compare_and_swap_full #endif /* AO_ASSUME_VISTA */ #define AO_T_IS_INT /* Real X86 implementations, except for some old WinChips, appear */ /* to enforce ordering between memory operations, EXCEPT that a later */ /* read can pass earlier writes, presumably due to the visible */ /* presence of store buffers. */ /* We ignore both the WinChips, and the fact that the official specs */ /* seem to be much weaker (and arguably too weak to be usable). */ #include "../ordered_except_wr.h" libatomic_ops-7.6.12/src/atomic_ops/sysdeps/msftc/x86_64.h000066400000000000000000000234051411761111000232710ustar00rootroot00000000000000/* * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "../all_aligned_atomic_load_store.h" /* Real X86 implementations appear */ /* to enforce ordering between memory operations, EXCEPT that a later */ /* read can pass earlier writes, presumably due to the visible */ /* presence of store buffers. */ /* We ignore the fact that the official specs */ /* seem to be much weaker (and arguably too weak to be usable). 
*/ #include "../ordered_except_wr.h" #ifdef AO_ASM_X64_AVAILABLE # include "../test_and_set_t_is_char.h" #else # include "../test_and_set_t_is_ao_t.h" #endif /* Assume _MSC_VER >= 1400 */ #include #pragma intrinsic (_InterlockedCompareExchange) #pragma intrinsic (_InterlockedCompareExchange64) #ifndef AO_PREFER_GENERALIZED # pragma intrinsic (_InterlockedIncrement) # pragma intrinsic (_InterlockedIncrement64) # pragma intrinsic (_InterlockedDecrement) # pragma intrinsic (_InterlockedDecrement64) # pragma intrinsic (_InterlockedExchangeAdd) # pragma intrinsic (_InterlockedExchangeAdd64) AO_INLINE AO_t AO_fetch_and_add_full (volatile AO_t *p, AO_t incr) { return _InterlockedExchangeAdd64((__int64 volatile *)p, incr); } #define AO_HAVE_fetch_and_add_full AO_INLINE AO_t AO_fetch_and_add1_full (volatile AO_t *p) { return _InterlockedIncrement64((__int64 volatile *)p) - 1; } #define AO_HAVE_fetch_and_add1_full AO_INLINE AO_t AO_fetch_and_sub1_full (volatile AO_t *p) { return _InterlockedDecrement64((__int64 volatile *)p) + 1; } #define AO_HAVE_fetch_and_sub1_full #endif /* !AO_PREFER_GENERALIZED */ AO_INLINE AO_t AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val, AO_t new_val) { return (AO_t)_InterlockedCompareExchange64((__int64 volatile *)addr, new_val, old_val); } #define AO_HAVE_fetch_compare_and_swap_full AO_INLINE unsigned int AO_int_fetch_compare_and_swap_full(volatile unsigned int *addr, unsigned int old_val, unsigned int new_val) { return _InterlockedCompareExchange((long volatile *)addr, new_val, old_val); } #define AO_HAVE_int_fetch_compare_and_swap_full #ifndef AO_PREFER_GENERALIZED AO_INLINE unsigned int AO_int_fetch_and_add_full(volatile unsigned int *p, unsigned int incr) { return _InterlockedExchangeAdd((long volatile *)p, incr); } #define AO_HAVE_int_fetch_and_add_full AO_INLINE unsigned int AO_int_fetch_and_add1_full(volatile unsigned int *p) { return _InterlockedIncrement((long volatile *)p) - 1; } # define 
AO_HAVE_int_fetch_and_add1_full AO_INLINE unsigned int AO_int_fetch_and_sub1_full(volatile unsigned int *p) { return _InterlockedDecrement((long volatile *)p) + 1; } # define AO_HAVE_int_fetch_and_sub1_full #endif /* !AO_PREFER_GENERALIZED */ #if _MSC_VER > 1400 # pragma intrinsic (_InterlockedAnd8) # pragma intrinsic (_InterlockedCompareExchange16) # pragma intrinsic (_InterlockedOr8) # pragma intrinsic (_InterlockedXor8) AO_INLINE void AO_char_and_full(volatile unsigned char *p, unsigned char value) { _InterlockedAnd8((char volatile *)p, value); } # define AO_HAVE_char_and_full AO_INLINE void AO_char_or_full(volatile unsigned char *p, unsigned char value) { _InterlockedOr8((char volatile *)p, value); } # define AO_HAVE_char_or_full AO_INLINE void AO_char_xor_full(volatile unsigned char *p, unsigned char value) { _InterlockedXor8((char volatile *)p, value); } # define AO_HAVE_char_xor_full AO_INLINE unsigned short AO_short_fetch_compare_and_swap_full(volatile unsigned short *addr, unsigned short old_val, unsigned short new_val) { return _InterlockedCompareExchange16((short volatile *)addr, new_val, old_val); } # define AO_HAVE_short_fetch_compare_and_swap_full # ifndef AO_PREFER_GENERALIZED # pragma intrinsic (_InterlockedIncrement16) # pragma intrinsic (_InterlockedDecrement16) AO_INLINE unsigned short AO_short_fetch_and_add1_full(volatile unsigned short *p) { return _InterlockedIncrement16((short volatile *)p) - 1; } # define AO_HAVE_short_fetch_and_add1_full AO_INLINE unsigned short AO_short_fetch_and_sub1_full(volatile unsigned short *p) { return _InterlockedDecrement16((short volatile *)p) + 1; } # define AO_HAVE_short_fetch_and_sub1_full # endif /* !AO_PREFER_GENERALIZED */ #endif /* _MSC_VER > 1400 */ #if _MSC_VER >= 1800 /* Visual Studio 2013+ */ # pragma intrinsic (_InterlockedCompareExchange8) AO_INLINE unsigned char AO_char_fetch_compare_and_swap_full(volatile unsigned char *addr, unsigned char old_val, unsigned char new_val) { return 
_InterlockedCompareExchange8((char volatile *)addr, new_val, old_val); } # define AO_HAVE_char_fetch_compare_and_swap_full # ifndef AO_PREFER_GENERALIZED # pragma intrinsic (_InterlockedExchangeAdd16) # pragma intrinsic (_InterlockedExchangeAdd8) AO_INLINE unsigned char AO_char_fetch_and_add_full(volatile unsigned char *p, unsigned char incr) { return _InterlockedExchangeAdd8((char volatile *)p, incr); } # define AO_HAVE_char_fetch_and_add_full AO_INLINE unsigned short AO_short_fetch_and_add_full(volatile unsigned short *p, unsigned short incr) { return _InterlockedExchangeAdd16((short volatile *)p, incr); } # define AO_HAVE_short_fetch_and_add_full # endif /* !AO_PREFER_GENERALIZED */ #elif defined(AO_ASM_X64_AVAILABLE) AO_INLINE unsigned char AO_char_fetch_and_add_full(volatile unsigned char *p, unsigned char incr) { __asm { mov al, incr mov rbx, p lock xadd byte ptr [rbx], al } } # define AO_HAVE_char_fetch_and_add_full AO_INLINE unsigned short AO_short_fetch_and_add_full(volatile unsigned short *p, unsigned short incr) { __asm { mov ax, incr mov rbx, p lock xadd word ptr [rbx], ax } } # define AO_HAVE_short_fetch_and_add_full #endif /* _MSC_VER < 1800 && AO_ASM_X64_AVAILABLE */ #ifdef AO_ASM_X64_AVAILABLE /* As far as we can tell, the lfence and sfence instructions are not */ /* currently needed or useful for cached memory accesses. */ AO_INLINE void AO_nop_full(void) { /* Note: "mfence" (SSE2) is supported on all x86_64/amd64 chips. */ __asm { mfence } } # define AO_HAVE_nop_full AO_INLINE AO_TS_VAL_t AO_test_and_set_full(volatile AO_TS_t *addr) { __asm { mov rax,AO_TS_SET ; mov rbx,addr ; xchg byte ptr [rbx],al ; } } # define AO_HAVE_test_and_set_full #endif /* AO_ASM_X64_AVAILABLE */ #ifdef AO_CMPXCHG16B_AVAILABLE /* AO_compare_double_and_swap_double_full needs implementation for Win64. * Also see ../gcc/x86.h for partial old Opteron workaround. 
*/ # if _MSC_VER >= 1500 # include "../standard_ao_double_t.h" # pragma intrinsic (_InterlockedCompareExchange128) AO_INLINE int AO_compare_double_and_swap_double_full(volatile AO_double_t *addr, AO_t old_val1, AO_t old_val2, AO_t new_val1, AO_t new_val2) { __int64 comparandResult[2]; AO_ASSERT_ADDR_ALIGNED(addr); comparandResult[0] = old_val1; /* low */ comparandResult[1] = old_val2; /* high */ return _InterlockedCompareExchange128((volatile __int64 *)addr, new_val2 /* high */, new_val1 /* low */, comparandResult); } # define AO_HAVE_compare_double_and_swap_double_full # elif defined(AO_ASM_X64_AVAILABLE) # include "../standard_ao_double_t.h" /* If there is no intrinsic _InterlockedCompareExchange128 then we */ /* need basically what's given below. */ AO_INLINE int AO_compare_double_and_swap_double_full(volatile AO_double_t *addr, AO_t old_val1, AO_t old_val2, AO_t new_val1, AO_t new_val2) { __asm { mov rdx,QWORD PTR [old_val2] ; mov rax,QWORD PTR [old_val1] ; mov rcx,QWORD PTR [new_val2] ; mov rbx,QWORD PTR [new_val1] ; lock cmpxchg16b [addr] ; setz rax ; } } # define AO_HAVE_compare_double_and_swap_double_full # endif /* AO_ASM_X64_AVAILABLE && (_MSC_VER < 1500) */ #endif /* AO_CMPXCHG16B_AVAILABLE */ libatomic_ops-7.6.12/src/atomic_ops/sysdeps/ordered.h000066400000000000000000000025711411761111000226440ustar00rootroot00000000000000/* * Copyright (c) 2003 Hewlett-Packard Development Company, L.P. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* These are common definitions for architectures that provide */ /* processor ordered memory operations. */ #include "ordered_except_wr.h" AO_INLINE void AO_nop_full(void) { AO_compiler_barrier(); } #define AO_HAVE_nop_full libatomic_ops-7.6.12/src/atomic_ops/sysdeps/ordered_except_wr.h000066400000000000000000000033651411761111000247260ustar00rootroot00000000000000/* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* * These are common definitions for architectures that provide processor * ordered memory operations except that a later read may pass an * earlier write. Real x86 implementations seem to be in this category, * except apparently for some IDT WinChips, which we ignore. */ #include "read_ordered.h" AO_INLINE void AO_nop_write(void) { /* AO_nop_write implementation is the same as of AO_nop_read. */ AO_compiler_barrier(); /* sfence according to Intel docs. Pentium 3 and up. */ /* Unnecessary for cached accesses? */ } #define AO_HAVE_nop_write #include "loadstore/ordered_stores_only.h" libatomic_ops-7.6.12/src/atomic_ops/sysdeps/read_ordered.h000066400000000000000000000030361411761111000236340ustar00rootroot00000000000000/* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* * These are common definitions for architectures that provide processor * ordered memory operations except that a later read may pass an * earlier write. Real x86 implementations seem to be in this category, * except apparently for some IDT WinChips, which we ignore. */ AO_INLINE void AO_nop_read(void) { AO_compiler_barrier(); } #define AO_HAVE_nop_read #include "loadstore/ordered_loads_only.h" libatomic_ops-7.6.12/src/atomic_ops/sysdeps/standard_ao_double_t.h000066400000000000000000000076121411761111000253550ustar00rootroot00000000000000/* * Copyright (c) 2004-2011 Hewlett-Packard Development Company, L.P. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* For 64-bit systems, we expect the double type to hold two int64's. */ #if ((defined(__x86_64__) && defined(AO_GCC_ATOMIC_TEST_AND_SET)) \ || defined(__aarch64__)) && !defined(__ILP32__) /* x86-64: __m128 is not applicable to atomic intrinsics. */ # if AO_GNUC_PREREQ(4, 7) || AO_CLANG_PREREQ(3, 6) # pragma GCC diagnostic push /* Suppress warning about __int128 type. */ # if defined(__clang__) || AO_GNUC_PREREQ(6, 4) # pragma GCC diagnostic ignored "-Wpedantic" # else /* GCC before ~4.8 does not accept "-Wpedantic" quietly. 
*/ # pragma GCC diagnostic ignored "-pedantic" # endif typedef unsigned __int128 double_ptr_storage; # pragma GCC diagnostic pop # else /* pragma diagnostic is not supported */ typedef unsigned __int128 double_ptr_storage; # endif #elif ((defined(__x86_64__) && AO_GNUC_PREREQ(4, 0)) || defined(_WIN64)) \ && !defined(__ILP32__) /* x86-64 (except for x32): __m128 serves as a placeholder which also */ /* requires the compiler to align it on 16-byte boundary (as required */ /* by cmpxchg16b). */ /* Similar things could be done for PPC 64-bit using a VMX data type. */ # include typedef __m128 double_ptr_storage; #elif defined(_WIN32) && !defined(__GNUC__) typedef unsigned __int64 double_ptr_storage; #elif defined(__i386__) && defined(__GNUC__) typedef unsigned long long double_ptr_storage __attribute__((__aligned__(8))); #else typedef unsigned long long double_ptr_storage; #endif # define AO_HAVE_DOUBLE_PTR_STORAGE typedef union { struct { AO_t AO_v1; AO_t AO_v2; } AO_parts; /* Note that AO_v1 corresponds to the low or the high part of */ /* AO_whole depending on the machine endianness. */ double_ptr_storage AO_whole; /* AO_whole is now (starting from v7.3alpha3) the 2nd element */ /* of this union to make AO_DOUBLE_T_INITIALIZER portable */ /* (because __m128 definition could vary from a primitive type */ /* to a structure or array/vector). */ } AO_double_t; #define AO_HAVE_double_t /* Note: AO_double_t volatile variables are not intended to be local */ /* ones (at least those which are passed to AO double-wide primitives */ /* as the first argument), otherwise it is the client responsibility to */ /* ensure they have double-word alignment. */ /* Dummy declaration as a compile-time assertion for AO_double_t size. */ struct AO_double_t_size_static_assert { char dummy[sizeof(AO_double_t) == 2 * sizeof(AO_t) ? 
1 : -1]; }; #define AO_DOUBLE_T_INITIALIZER { { (AO_t)0, (AO_t)0 } } #define AO_val1 AO_parts.AO_v1 #define AO_val2 AO_parts.AO_v2 libatomic_ops-7.6.12/src/atomic_ops/sysdeps/sunc/000077500000000000000000000000001411761111000220125ustar00rootroot00000000000000libatomic_ops-7.6.12/src/atomic_ops/sysdeps/sunc/sparc.S000066400000000000000000000002011411761111000232370ustar00rootroot00000000000000 .seg "text" .globl AO_test_and_set_full AO_test_and_set_full: retl ldstub [%o0],%o0 libatomic_ops-7.6.12/src/atomic_ops/sysdeps/sunc/sparc.h000066400000000000000000000032231411761111000232730ustar00rootroot00000000000000/* * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "../all_atomic_load_store.h" /* Real SPARC code uses TSO: */ #include "../ordered_except_wr.h" /* Test_and_set location is just a byte. 
*/ #include "../test_and_set_t_is_char.h" #ifdef __cplusplus extern "C" { #endif extern AO_TS_VAL_t AO_test_and_set_full(volatile AO_TS_t *addr); /* Implemented in separate .S file, for now. */ #define AO_HAVE_test_and_set_full /* TODO: Like the gcc version, extend this for V8 and V9. */ #ifdef __cplusplus } /* extern "C" */ #endif libatomic_ops-7.6.12/src/atomic_ops/sysdeps/sunc/x86.h000066400000000000000000000175031411761111000226160ustar00rootroot00000000000000/* * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved. * Copyright (c) 2009-2016 Ivan Maidanski * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. * * Some of the machine specific code was borrowed from our GC distribution. */ /* The following really assume we have a 486 or better. */ #include "../all_aligned_atomic_load_store.h" #include "../test_and_set_t_is_char.h" #if !defined(AO_USE_PENTIUM4_INSTRS) && !defined(__i386) /* "mfence" (SSE2) is supported on all x86_64/amd64 chips. */ # define AO_USE_PENTIUM4_INSTRS #endif #if defined(AO_USE_PENTIUM4_INSTRS) AO_INLINE void AO_nop_full(void) { __asm__ __volatile__ ("mfence" : : : "memory"); } # define AO_HAVE_nop_full #else /* We could use the cpuid instruction. But that seems to be slower */ /* than the default implementation based on test_and_set_full. Thus */ /* we omit that bit of misinformation here. 
*/ #endif /* !AO_USE_PENTIUM4_INSTRS */ /* As far as we can tell, the lfence and sfence instructions are not */ /* currently needed or useful for cached memory accesses. */ /* Really only works for 486 and later */ #ifndef AO_PREFER_GENERALIZED AO_INLINE AO_t AO_fetch_and_add_full (volatile AO_t *p, AO_t incr) { AO_t result; __asm__ __volatile__ ("lock; xadd %0, %1" : "=r" (result), "+m" (*p) : "0" (incr) : "memory"); return result; } # define AO_HAVE_fetch_and_add_full #endif /* !AO_PREFER_GENERALIZED */ AO_INLINE unsigned char AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr) { unsigned char result; __asm__ __volatile__ ("lock; xaddb %0, %1" : "=q" (result), "+m" (*p) : "0" (incr) : "memory"); return result; } #define AO_HAVE_char_fetch_and_add_full AO_INLINE unsigned short AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr) { unsigned short result; __asm__ __volatile__ ("lock; xaddw %0, %1" : "=r" (result), "+m" (*p) : "0" (incr) : "memory"); return result; } #define AO_HAVE_short_fetch_and_add_full #ifndef AO_PREFER_GENERALIZED AO_INLINE void AO_and_full (volatile AO_t *p, AO_t value) { __asm__ __volatile__ ("lock; and %1, %0" : "+m" (*p) : "r" (value) : "memory"); } # define AO_HAVE_and_full AO_INLINE void AO_or_full (volatile AO_t *p, AO_t value) { __asm__ __volatile__ ("lock; or %1, %0" : "+m" (*p) : "r" (value) : "memory"); } # define AO_HAVE_or_full AO_INLINE void AO_xor_full (volatile AO_t *p, AO_t value) { __asm__ __volatile__ ("lock; xor %1, %0" : "+m" (*p) : "r" (value) : "memory"); } # define AO_HAVE_xor_full #endif /* !AO_PREFER_GENERALIZED */ AO_INLINE AO_TS_VAL_t AO_test_and_set_full (volatile AO_TS_t *addr) { AO_TS_t oldval; /* Note: the "xchg" instruction does not need a "lock" prefix */ __asm__ __volatile__ ("xchg %b0, %1" : "=q" (oldval), "+m" (*addr) : "0" (0xff) : "memory"); return (AO_TS_VAL_t)oldval; } #define AO_HAVE_test_and_set_full #ifndef AO_GENERALIZE_ASM_BOOL_CAS /* Returns nonzero 
if the comparison succeeded. */ AO_INLINE int AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) { char result; __asm__ __volatile__ ("lock; cmpxchg %2, %0; setz %1" : "+m" (*addr), "=a" (result) : "r" (new_val), "a" (old) : "memory"); return (int) result; } # define AO_HAVE_compare_and_swap_full #endif /* !AO_GENERALIZE_ASM_BOOL_CAS */ AO_INLINE AO_t AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val, AO_t new_val) { AO_t fetched_val; __asm__ __volatile__ ("lock; cmpxchg %2, %0" : "+m" (*addr), "=a" (fetched_val) : "r" (new_val), "a" (old_val) : "memory"); return fetched_val; } #define AO_HAVE_fetch_compare_and_swap_full #if defined(__i386) # ifndef AO_NO_CMPXCHG8B # include "../standard_ao_double_t.h" /* Reading or writing a quadword aligned on a 64-bit boundary is */ /* always carried out atomically (requires at least a Pentium). */ # define AO_ACCESS_double_CHECK_ALIGNED # include "../loadstore/double_atomic_load_store.h" /* Returns nonzero if the comparison succeeded. */ /* Really requires at least a Pentium. 
*/ AO_INLINE int AO_compare_double_and_swap_double_full(volatile AO_double_t *addr, AO_t old_val1, AO_t old_val2, AO_t new_val1, AO_t new_val2) { AO_t dummy; /* an output for clobbered edx */ char result; __asm__ __volatile__ ("lock; cmpxchg8b %0; setz %1" : "+m" (*addr), "=a" (result), "=d" (dummy) : "d" (old_val2), "a" (old_val1), "c" (new_val2), "b" (new_val1) : "memory"); return (int) result; } # define AO_HAVE_compare_double_and_swap_double_full # endif /* !AO_NO_CMPXCHG8B */ # define AO_T_IS_INT #else /* x64 */ AO_INLINE unsigned int AO_int_fetch_and_add_full (volatile unsigned int *p, unsigned int incr) { unsigned int result; __asm__ __volatile__ ("lock; xaddl %0, %1" : "=r" (result), "+m" (*p) : "0" (incr) : "memory"); return result; } # define AO_HAVE_int_fetch_and_add_full # ifdef AO_CMPXCHG16B_AVAILABLE # include "../standard_ao_double_t.h" /* Older AMD Opterons are missing this instruction (SIGILL should */ /* be thrown in this case). */ AO_INLINE int AO_compare_double_and_swap_double_full (volatile AO_double_t *addr, AO_t old_val1, AO_t old_val2, AO_t new_val1, AO_t new_val2) { AO_t dummy; char result; __asm__ __volatile__ ("lock; cmpxchg16b %0; setz %1" : "+m" (*addr), "=a" (result), "=d" (dummy) : "d" (old_val2), "a" (old_val1), "c" (new_val2), "b" (new_val1) : "memory"); return (int) result; } # define AO_HAVE_compare_double_and_swap_double_full # endif /* !AO_CMPXCHG16B_AVAILABLE */ #endif /* x64 */ /* Real X86 implementations, except for some old 32-bit WinChips, */ /* appear to enforce ordering between memory operations, EXCEPT that */ /* a later read can pass earlier writes, presumably due to the visible */ /* presence of store buffers. */ /* We ignore both the WinChips and the fact that the official specs */ /* seem to be much weaker (and arguably too weak to be usable). 
*/ #include "../ordered_except_wr.h" libatomic_ops-7.6.12/src/atomic_ops/sysdeps/test_and_set_t_is_ao_t.h000066400000000000000000000031341411761111000257100ustar00rootroot00000000000000/* * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* * These are common definitions for architectures on which test_and_set * operates on pointer-sized quantities, the "clear" value contains * all zeroes, and the "set" value contains only one lowest bit set. * This can be used if test_and_set is synthesized from compare_and_swap. */ typedef enum {AO_TS_clear = 0, AO_TS_set = 1} AO_TS_val; #define AO_TS_VAL_t AO_TS_val #define AO_TS_CLEAR AO_TS_clear #define AO_TS_SET AO_TS_set #define AO_TS_t AO_t #define AO_AO_TS_T 1 libatomic_ops-7.6.12/src/atomic_ops/sysdeps/test_and_set_t_is_char.h000066400000000000000000000035671411761111000257150ustar00rootroot00000000000000/* * Copyright (c) 2004 Hewlett-Packard Development Company, L.P. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* * These are common definitions for architectures on which test_and_set * operates on byte sized quantities, the "clear" value contains * all zeroes, and the "set" value contains all ones typically. */ #ifndef AO_GCC_ATOMIC_TEST_AND_SET # define AO_TS_SET_TRUEVAL 0xff #elif defined(__GCC_ATOMIC_TEST_AND_SET_TRUEVAL) \ && !defined(AO_PREFER_GENERALIZED) # define AO_TS_SET_TRUEVAL __GCC_ATOMIC_TEST_AND_SET_TRUEVAL #else # define AO_TS_SET_TRUEVAL 1 /* true */ #endif typedef enum { AO_BYTE_TS_clear = 0, AO_BYTE_TS_set = AO_TS_SET_TRUEVAL } AO_BYTE_TS_val; #define AO_TS_VAL_t AO_BYTE_TS_val #define AO_TS_CLEAR AO_BYTE_TS_clear #define AO_TS_SET AO_BYTE_TS_set #define AO_TS_t unsigned char #define AO_CHAR_TS_T 1 #undef AO_TS_SET_TRUEVAL libatomic_ops-7.6.12/src/atomic_ops_malloc.c000066400000000000000000000243421411761111000210500ustar00rootroot00000000000000/* * Copyright (c) 2005 Hewlett-Packard Development Company, L.P. 
* * This file may be redistributed and/or modified under the * terms of the GNU General Public License as published by the Free Software * Foundation; either version 2, or (at your option) any later version. * * It is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU General Public License in the * file COPYING for more details. */ #if defined(HAVE_CONFIG_H) # include "config.h" #endif #ifdef DONT_USE_MMAP /* for testing */ # undef HAVE_MMAP #endif #define AO_REQUIRE_CAS #include "atomic_ops_malloc.h" #include /* for ffs, which is assumed reentrant. */ #include #include #ifdef AO_TRACE_MALLOC # include # include #endif #if defined(AO_ADDRESS_SANITIZER) && !defined(AO_NO_MALLOC_POISON) /* #include "sanitizer/asan_interface.h" */ void __asan_poison_memory_region(void *, size_t); void __asan_unpoison_memory_region(void *, size_t); # define ASAN_POISON_MEMORY_REGION(addr, size) \ __asan_poison_memory_region(addr, size) # define ASAN_UNPOISON_MEMORY_REGION(addr, size) \ __asan_unpoison_memory_region(addr, size) #else # define ASAN_POISON_MEMORY_REGION(addr, size) (void)0 # define ASAN_UNPOISON_MEMORY_REGION(addr, size) (void)0 #endif /* !AO_ADDRESS_SANITIZER */ #if (defined(_WIN32_WCE) || defined(__MINGW32CE__)) && !defined(AO_HAVE_abort) # define abort() _exit(-1) /* there is no abort() in WinCE */ #endif /* * We round up each allocation request to the next power of two * minus one word. * We keep one stack of free objects for each size. Each object * has an initial word (offset -sizeof(AO_t) from the visible pointer) * which contains either * The binary log of the object size in bytes (small objects) * The object size (a multiple of CHUNK_SIZE) for large objects. * The second case only arises if mmap-based allocation is supported. * We align the user-visible part of each object on a GRANULARITY * byte boundary. 
That means that the actual (hidden) start of * the object starts a word before this boundary. */ #ifndef LOG_MAX_SIZE # define LOG_MAX_SIZE 16 /* We assume that 2**LOG_MAX_SIZE is a multiple of page size. */ #endif #ifndef ALIGNMENT # define ALIGNMENT 16 /* Assumed to be at least sizeof(AO_t). */ #endif #define CHUNK_SIZE (1 << LOG_MAX_SIZE) #ifndef AO_INITIAL_HEAP_SIZE # define AO_INITIAL_HEAP_SIZE (2*(LOG_MAX_SIZE+1)*CHUNK_SIZE) #endif char AO_initial_heap[AO_INITIAL_HEAP_SIZE]; static volatile AO_t initial_heap_ptr = (AO_t)AO_initial_heap; #if defined(HAVE_MMAP) #include #include #include #include #if defined(MAP_ANONYMOUS) || defined(MAP_ANON) # define USE_MMAP_ANON #endif #ifdef USE_MMAP_FIXED # define GC_MMAP_FLAGS (MAP_FIXED | MAP_PRIVATE) /* Seems to yield better performance on Solaris 2, but can */ /* be unreliable if something is already mapped at the address. */ #else # define GC_MMAP_FLAGS MAP_PRIVATE #endif #ifdef USE_MMAP_ANON # if defined(CPPCHECK) # define OPT_MAP_ANON 0x20 /* taken from linux */ # elif defined(MAP_ANONYMOUS) # define OPT_MAP_ANON MAP_ANONYMOUS # else # define OPT_MAP_ANON MAP_ANON # endif #else # include /* for close() */ # define OPT_MAP_ANON 0 #endif static volatile AO_t mmap_enabled = 0; void AO_malloc_enable_mmap(void) { # if defined(__sun) AO_store_release(&mmap_enabled, 1); /* Workaround for Sun CC */ # else AO_store(&mmap_enabled, 1); # endif } static char *get_mmaped(size_t sz) { char * result; # ifdef USE_MMAP_ANON # define zero_fd -1 # else int zero_fd; # endif assert(!(sz & (CHUNK_SIZE - 1))); if (!mmap_enabled) return 0; # ifndef USE_MMAP_ANON zero_fd = open("/dev/zero", O_RDONLY); if (zero_fd == -1) return 0; # endif result = (char *)mmap(0, sz, PROT_READ | PROT_WRITE, GC_MMAP_FLAGS | OPT_MAP_ANON, zero_fd, 0 /* offset */); # ifndef USE_MMAP_ANON close(zero_fd); # endif if (AO_EXPECT_FALSE(result == MAP_FAILED)) result = NULL; return result; } #ifndef SIZE_MAX # include #endif #if defined(SIZE_MAX) && 
!defined(CPPCHECK) # define AO_SIZE_MAX ((size_t)SIZE_MAX) /* Extra cast to workaround some buggy SIZE_MAX definitions. */ #else # define AO_SIZE_MAX (~(size_t)0) #endif /* Saturated addition of size_t values. Used to avoid value wrap */ /* around on overflow. The arguments should have no side effects. */ #define SIZET_SAT_ADD(a, b) \ (AO_EXPECT_FALSE((a) >= AO_SIZE_MAX - (b)) ? AO_SIZE_MAX : (a) + (b)) /* Allocate an object of size (incl. header) of size > CHUNK_SIZE. */ /* sz includes space for an AO_t-sized header. */ static char * AO_malloc_large(size_t sz) { char *result; /* The header will force us to waste ALIGNMENT bytes, incl. header. */ /* Round to multiple of CHUNK_SIZE. */ sz = SIZET_SAT_ADD(sz, ALIGNMENT + CHUNK_SIZE - 1) & ~(CHUNK_SIZE - 1); assert(sz > LOG_MAX_SIZE); result = get_mmaped(sz); if (AO_EXPECT_FALSE(NULL == result)) return NULL; result += ALIGNMENT; ((AO_t *)result)[-1] = (AO_t)sz; return result; } static void AO_free_large(char * p) { AO_t sz = ((AO_t *)p)[-1]; if (munmap(p - ALIGNMENT, (size_t)sz) != 0) abort(); /* Programmer error. Not really async-signal-safe, but ... */ } #else /* No MMAP */ void AO_malloc_enable_mmap(void) { } #define get_mmaped(sz) ((char*)0) #define AO_malloc_large(sz) ((char*)0) #define AO_free_large(p) abort() /* Programmer error. Not really async-signal-safe, but ... */ #endif /* No MMAP */ static char * get_chunk(void) { char *my_chunk_ptr; for (;;) { char *initial_ptr = (char *)AO_load(&initial_heap_ptr); my_chunk_ptr = (char *)(((AO_t)initial_ptr + (ALIGNMENT - 1)) & ~(ALIGNMENT - 1)); if (initial_ptr != my_chunk_ptr) { /* Align correctly. If this fails, someone else did it for us. */ (void)AO_compare_and_swap_acquire(&initial_heap_ptr, (AO_t)initial_ptr, (AO_t)my_chunk_ptr); } if (AO_EXPECT_FALSE((AO_t)my_chunk_ptr > (AO_t)(AO_initial_heap + AO_INITIAL_HEAP_SIZE - CHUNK_SIZE))) { /* We failed. The initial heap is used up. 
*/ my_chunk_ptr = get_mmaped(CHUNK_SIZE); # if !defined(CPPCHECK) assert(((AO_t)my_chunk_ptr & (ALIGNMENT-1)) == 0); # endif break; } if (AO_compare_and_swap(&initial_heap_ptr, (AO_t)my_chunk_ptr, (AO_t)(my_chunk_ptr + CHUNK_SIZE))) { break; } } return my_chunk_ptr; } /* Object free lists. Ith entry corresponds to objects */ /* of total size 2**i bytes. */ AO_stack_t AO_free_list[LOG_MAX_SIZE+1]; /* Break up the chunk, and add it to the object free list for */ /* the given size. We have exclusive access to chunk. */ static void add_chunk_as(void * chunk, unsigned log_sz) { size_t ofs, limit; size_t sz = (size_t)1 << log_sz; assert (CHUNK_SIZE >= sz); limit = (size_t)CHUNK_SIZE - sz; for (ofs = ALIGNMENT - sizeof(AO_t); ofs <= limit; ofs += sz) { ASAN_POISON_MEMORY_REGION((char *)chunk + ofs + sizeof(AO_t), sz - sizeof(AO_t)); AO_stack_push(&AO_free_list[log_sz], (AO_t *)((char *)chunk + ofs)); } } static const unsigned char msbs[16] = { 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4 }; /* Return the position of the most significant set bit in the */ /* argument. */ /* We follow the conventions of ffs(), i.e. the least */ /* significant bit is number one. */ static unsigned msb(size_t s) { unsigned result = 0; if ((s & 0xff) != s) { # if (__SIZEOF_SIZE_T__ == 8) && !defined(CPPCHECK) unsigned v = (unsigned)(s >> 32); if (AO_EXPECT_FALSE(v != 0)) { s = v; result += 32; } # elif __SIZEOF_SIZE_T__ == 4 /* No op. */ # else unsigned v; /* The following is a tricky code ought to be equivalent to */ /* "(v = s >> 32) != 0" but suppresses warnings on 32-bit arch's. */ # define SIZEOF_SIZE_T_GT_4 (sizeof(size_t) > 4) if (SIZEOF_SIZE_T_GT_4 && (v = (unsigned)(s >> (SIZEOF_SIZE_T_GT_4 ? 
32 : 0))) != 0) { s = v; result += 32; } # endif /* !defined(__SIZEOF_SIZE_T__) */ if (AO_EXPECT_FALSE((s >> 16) != 0)) { s >>= 16; result += 16; } if ((s >> 8) != 0) { s >>= 8; result += 8; } } if (s > 15) { s >>= 4; result += 4; } result += msbs[s]; return result; } void * AO_malloc(size_t sz) { AO_t *result; unsigned log_sz; if (AO_EXPECT_FALSE(sz > CHUNK_SIZE - sizeof(AO_t))) return AO_malloc_large(sz); log_sz = msb(sz + (sizeof(AO_t) - 1)); assert(log_sz <= LOG_MAX_SIZE); assert(((size_t)1 << log_sz) >= sz + sizeof(AO_t)); result = AO_stack_pop(AO_free_list+log_sz); while (AO_EXPECT_FALSE(NULL == result)) { void * chunk = get_chunk(); if (AO_EXPECT_FALSE(NULL == chunk)) return NULL; add_chunk_as(chunk, log_sz); result = AO_stack_pop(AO_free_list+log_sz); } *result = log_sz; # ifdef AO_TRACE_MALLOC fprintf(stderr, "%p: AO_malloc(%lu) = %p\n", (void *)pthread_self(), (unsigned long)sz, (void *)(result + 1)); # endif ASAN_UNPOISON_MEMORY_REGION(result + 1, sz); return result + 1; } void AO_free(void *p) { AO_t *base; int log_sz; if (AO_EXPECT_FALSE(NULL == p)) return; base = (AO_t *)p - 1; log_sz = (int)(*base); # ifdef AO_TRACE_MALLOC fprintf(stderr, "%p: AO_free(%p sz:%lu)\n", (void *)pthread_self(), p, log_sz > LOG_MAX_SIZE ? (unsigned)log_sz : 1UL << log_sz); # endif if (AO_EXPECT_FALSE(log_sz > LOG_MAX_SIZE)) { AO_free_large((char *)p); } else { ASAN_POISON_MEMORY_REGION(base + 1, ((size_t)1 << log_sz) - sizeof(AO_t)); AO_stack_push(AO_free_list + log_sz, base); } } libatomic_ops-7.6.12/src/atomic_ops_malloc.h000066400000000000000000000052061411761111000210530ustar00rootroot00000000000000/* * Copyright (c) 2005 Hewlett-Packard Development Company, L.P. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* Almost lock-free malloc implementation based on stack implementation. */ /* See doc/README_malloc.txt file for detailed usage rules. 
*/ #ifndef AO_MALLOC_H #define AO_MALLOC_H #include "atomic_ops_stack.h" #include /* for size_t */ #ifdef __cplusplus extern "C" { #endif #ifdef AO_STACK_IS_LOCK_FREE # define AO_MALLOC_IS_LOCK_FREE #endif #ifndef AO_ATTR_MALLOC # if AO_GNUC_PREREQ(3, 1) # define AO_ATTR_MALLOC __attribute__((__malloc__)) # elif defined(_MSC_VER) && (_MSC_VER >= 1900) && !defined(__EDG__) # define AO_ATTR_MALLOC \ __declspec(allocator) __declspec(noalias) __declspec(restrict) # elif defined(_MSC_VER) && _MSC_VER >= 1400 # define AO_ATTR_MALLOC __declspec(noalias) __declspec(restrict) # else # define AO_ATTR_MALLOC /* empty */ # endif #endif #ifndef AO_ATTR_ALLOC_SIZE # ifdef __clang__ # if __has_attribute(__alloc_size__) # define AO_ATTR_ALLOC_SIZE(argnum) \ __attribute__((__alloc_size__(argnum))) # else # define AO_ATTR_ALLOC_SIZE(argnum) /* empty */ # endif # elif AO_GNUC_PREREQ(4, 3) && !defined(__ICC) # define AO_ATTR_ALLOC_SIZE(argnum) __attribute__((__alloc_size__(argnum))) # else # define AO_ATTR_ALLOC_SIZE(argnum) /* empty */ # endif #endif void AO_free(void *); AO_ATTR_MALLOC AO_ATTR_ALLOC_SIZE(1) void * AO_malloc(size_t); /* Allow use of mmap to grow the heap. No-op on some platforms. */ void AO_malloc_enable_mmap(void); #ifdef __cplusplus } /* extern "C" */ #endif #endif /* !AO_MALLOC_H */ libatomic_ops-7.6.12/src/atomic_ops_stack.c000066400000000000000000000303551411761111000207070ustar00rootroot00000000000000/* * Copyright (c) 2005 Hewlett-Packard Development Company, L.P. * * This file may be redistributed and/or modified under the * terms of the GNU General Public License as published by the Free Software * Foundation; either version 2, or (at your option) any later version. * * It is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU General Public License in the * file COPYING for more details. 
*/ #if defined(HAVE_CONFIG_H) # include "config.h" #endif #include #include #include #define AO_REQUIRE_CAS #include "atomic_ops_stack.h" /* This function call must be a part of a do-while loop with a CAS */ /* designating the condition of the loop (see the use cases below). */ #ifdef AO_THREAD_SANITIZER AO_ATTR_NO_SANITIZE_THREAD static void store_before_cas(AO_t *addr, AO_t value) { *addr = value; } #else # define store_before_cas(addr, value) (void)(*(addr) = (value)) #endif #ifdef AO_USE_ALMOST_LOCK_FREE void AO_pause(int); /* defined in atomic_ops.c */ /* LIFO linked lists based on compare-and-swap. We need to avoid */ /* the case of a node deletion and reinsertion while I'm deleting */ /* it, since that may cause my CAS to succeed eventhough the next */ /* pointer is now wrong. Our solution is not fully lock-free, but it */ /* is good enough for signal handlers, provided we have a suitably low */ /* bound on the number of recursive signal handler reentries. */ /* A list consists of a first pointer and a blacklist */ /* of pointer values that are currently being removed. No list element */ /* on the blacklist may be inserted. If we would otherwise do so, we */ /* are allowed to insert a variant that differs only in the least */ /* significant, ignored, bits. If the list is full, we wait. */ /* Crucial observation: A particular padded pointer x (i.e. pointer */ /* plus arbitrary low order bits) can never be newly inserted into */ /* a list while it's in the corresponding auxiliary data structure. */ /* The second argument is a pointer to the link field of the element */ /* to be inserted. */ /* Both list headers and link fields contain "perturbed" pointers, i.e. */ /* pointers with extra bits "or"ed into the low order bits. */ void AO_stack_push_explicit_aux_release(volatile AO_t *list, AO_t *x, AO_stack_aux *a) { AO_t x_bits = (AO_t)x; AO_t next; /* No deletions of x can start here, since x is not currently in the */ /* list. 
*/ retry: # if AO_BL_SIZE == 2 { /* Start all loads as close to concurrently as possible. */ AO_t entry1 = AO_load(&a->AO_stack_bl[0]); AO_t entry2 = AO_load(&a->AO_stack_bl[1]); if (entry1 == x_bits || entry2 == x_bits) { /* Entry is currently being removed. Change it a little. */ ++x_bits; if ((x_bits & AO_BIT_MASK) == 0) /* Version count overflowed; */ /* EXTREMELY unlikely, but possible. */ x_bits = (AO_t)x; goto retry; } } # else { int i; for (i = 0; i < AO_BL_SIZE; ++i) { if (AO_load(&a->AO_stack_bl[i]) == x_bits) { /* Entry is currently being removed. Change it a little. */ ++x_bits; if ((x_bits & AO_BIT_MASK) == 0) /* Version count overflowed; */ /* EXTREMELY unlikely, but possible. */ x_bits = (AO_t)x; goto retry; } } } # endif /* x_bits is not currently being deleted */ do { next = AO_load(list); store_before_cas(x, next); } while (AO_EXPECT_FALSE(!AO_compare_and_swap_release(list, next, x_bits))); } /* * I concluded experimentally that checking a value first before * performing a compare-and-swap is usually beneficial on X86, but * slows things down appreciably with contention on Itanium. * Since the Itanium behavior makes more sense to me (more cache line * movement unless we're mostly reading, but back-off should guard * against that), we take Itanium as the default. Measurements on * other multiprocessor architectures would be useful. (On a uniprocessor, * the initial check is almost certainly a very small loss.) - HB */ #ifdef __i386__ # define PRECHECK(a) (a) == 0 && #else # define PRECHECK(a) #endif /* This function is used before CAS in the below AO_stack_pop() and the */ /* data race (reported by TSan) is OK because it results in a retry. */ #ifdef AO_THREAD_SANITIZER AO_ATTR_NO_SANITIZE_THREAD static AO_t AO_load_next(const volatile AO_t *first_ptr) { /* Assuming an architecture on which loads of word type are atomic. */ /* AO_load cannot be used here because it cannot be instructed to */ /* suppress the warning about the race. 
*/ return *first_ptr; } #else # define AO_load_next AO_load #endif AO_t * AO_stack_pop_explicit_aux_acquire(volatile AO_t *list, AO_stack_aux * a) { unsigned i; int j = 0; AO_t first; AO_t * first_ptr; AO_t next; retry: first = AO_load(list); if (0 == first) return 0; /* Insert first into aux black list. */ /* This may spin if more than AO_BL_SIZE removals using auxiliary */ /* structure a are currently in progress. */ for (i = 0; ; ) { if (PRECHECK(a -> AO_stack_bl[i]) AO_compare_and_swap_acquire(a->AO_stack_bl+i, 0, first)) break; ++i; if ( i >= AO_BL_SIZE ) { i = 0; AO_pause(++j); } } assert(i < AO_BL_SIZE); # ifndef AO_THREAD_SANITIZER assert(a -> AO_stack_bl[i] == first); /* No actual race with the above CAS. */ # endif /* First is on the auxiliary black list. It may be removed by */ /* another thread before we get to it, but a new insertion of x */ /* cannot be started here. */ /* Only we can remove it from the black list. */ /* We need to make sure that first is still the first entry on the */ /* list. Otherwise it's possible that a reinsertion of it was */ /* already started before we added the black list entry. */ # if defined(__alpha__) && (__GNUC__ == 4) if (first != AO_load_acquire(list)) /* Workaround __builtin_expect bug found in */ /* gcc-4.6.3/alpha causing test_stack failure. */ # else if (AO_EXPECT_FALSE(first != AO_load_acquire(list))) /* Workaround test failure on AIX, at least, by */ /* using acquire ordering semantics for this */ /* load. Probably, it is not the right fix. */ # endif { AO_store_release(a->AO_stack_bl+i, 0); goto retry; } first_ptr = AO_REAL_NEXT_PTR(first); next = AO_load_next(first_ptr); # if defined(__alpha__) && (__GNUC__ == 4) if (!AO_compare_and_swap_release(list, first, next)) # else if (AO_EXPECT_FALSE(!AO_compare_and_swap_release(list, first, next))) # endif { AO_store_release(a->AO_stack_bl+i, 0); goto retry; } # ifndef AO_THREAD_SANITIZER assert(*list != first); /* No actual race with the above CAS. 
*/ # endif /* Since we never insert an entry on the black list, this cannot have */ /* succeeded unless first remained on the list while we were running. */ /* Thus its next link cannot have changed out from under us, and we */ /* removed exactly one entry and preserved the rest of the list. */ /* Note that it is quite possible that an additional entry was */ /* inserted and removed while we were running; this is OK since the */ /* part of the list following first must have remained unchanged, and */ /* first must again have been at the head of the list when the */ /* compare_and_swap succeeded. */ AO_store_release(a->AO_stack_bl+i, 0); return first_ptr; } #else /* ! USE_ALMOST_LOCK_FREE */ /* The functionality is the same as of AO_load_next but the atomicity */ /* is not needed. The usage is similar to that of store_before_cas. */ #if defined(AO_THREAD_SANITIZER) \ && (defined(AO_HAVE_compare_and_swap_double) \ || defined(AO_HAVE_compare_double_and_swap_double)) /* TODO: If compiled by Clang (as of clang-4.0) with -O3 flag, */ /* no_sanitize attribute is ignored unless the argument is volatile. 
*/ # if defined(__clang__) # define LOAD_BEFORE_CAS_VOLATILE volatile # else # define LOAD_BEFORE_CAS_VOLATILE /* empty */ # endif AO_ATTR_NO_SANITIZE_THREAD static AO_t load_before_cas(const LOAD_BEFORE_CAS_VOLATILE AO_t *addr) { return *addr; } #else # define load_before_cas(addr) (*(addr)) #endif /* Better names for fields in AO_stack_t */ #define ptr AO_val2 #define version AO_val1 #if defined(AO_HAVE_compare_double_and_swap_double) \ && !(defined(AO_STACK_PREFER_CAS_DOUBLE) \ && defined(AO_HAVE_compare_and_swap_double)) #ifdef LINT2 volatile /* non-static */ AO_t AO_noop_sink; #endif void AO_stack_push_release(AO_stack_t *list, AO_t *element) { AO_t next; do { next = AO_load(&(list -> ptr)); store_before_cas(element, next); } while (AO_EXPECT_FALSE(!AO_compare_and_swap_release(&(list -> ptr), next, (AO_t)element))); /* This uses a narrow CAS here, an old optimization suggested */ /* by Treiber. Pop is still safe, since we run into the ABA */ /* problem only if there were both intervening "pop"s and "push"es. */ /* In that case we still see a change in the version number. */ # ifdef LINT2 /* Instruct static analyzer that element is not lost. */ AO_noop_sink = (AO_t)element; # endif } AO_t *AO_stack_pop_acquire(AO_stack_t *list) { # if defined(__clang__) && !AO_CLANG_PREREQ(3, 5) AO_t *volatile cptr; /* Use volatile to workaround a bug in */ /* clang-1.1/x86 causing test_stack failure. */ # else AO_t *cptr; # endif AO_t next; AO_t cversion; do { /* Version must be loaded first. */ cversion = AO_load_acquire(&(list -> version)); cptr = (AO_t *)AO_load(&(list -> ptr)); if (NULL == cptr) return NULL; next = load_before_cas((AO_t *)cptr); } while (AO_EXPECT_FALSE(!AO_compare_double_and_swap_double_release(list, cversion, (AO_t)cptr, cversion+1, (AO_t)next))); return cptr; } #elif defined(AO_HAVE_compare_and_swap_double) /* Needed for future IA64 processors. No current clients? */ /* TODO: Not tested thoroughly. 
*/ /* We have a wide CAS, but only does an AO_t-wide comparison. */ /* We can't use the Treiber optimization, since we only check */ /* for an unchanged version number, not an unchanged pointer. */ void AO_stack_push_release(AO_stack_t *list, AO_t *element) { AO_t version; do { AO_t next_ptr; /* Again version must be loaded first, for different reason. */ version = AO_load_acquire(&(list -> version)); next_ptr = AO_load(&(list -> ptr)); store_before_cas(element, next_ptr); } while (!AO_compare_and_swap_double_release( list, version, version+1, (AO_t) element)); } AO_t *AO_stack_pop_acquire(AO_stack_t *list) { AO_t *cptr; AO_t next; AO_t cversion; do { cversion = AO_load_acquire(&(list -> version)); cptr = (AO_t *)AO_load(&(list -> ptr)); if (NULL == cptr) return NULL; next = load_before_cas(cptr); } while (!AO_compare_double_and_swap_double_release(list, cversion, (AO_t)cptr, cversion+1, next)); return cptr; } #endif /* AO_HAVE_compare_and_swap_double */ #endif /* ! USE_ALMOST_LOCK_FREE */ libatomic_ops-7.6.12/src/atomic_ops_stack.h000066400000000000000000000160441411761111000207130ustar00rootroot00000000000000/* * The implementation of the routines described here is covered by the GPL. * This header file is covered by the following license: */ /* * Copyright (c) 2005 Hewlett-Packard Development Company, L.P. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* Almost lock-free LIFO linked lists (linked stacks). */ #ifndef AO_STACK_H #define AO_STACK_H #include "atomic_ops.h" #ifdef __cplusplus extern "C" { #endif #ifdef AO_USE_ALMOST_LOCK_FREE /* Use the almost-non-blocking implementation regardless of the */ /* double-word CAS availability. */ #elif !defined(AO_HAVE_compare_double_and_swap_double) \ && !defined(AO_HAVE_compare_double_and_swap) \ && defined(AO_HAVE_compare_and_swap) # define AO_USE_ALMOST_LOCK_FREE #else /* If we have no compare-and-swap operation defined, we assume */ /* that we will actually be using CAS emulation. If we do that, */ /* it's cheaper to use the version-based implementation. */ # define AO_STACK_IS_LOCK_FREE #endif /* * These are not guaranteed to be completely lock-free. * List insertion may spin under extremely unlikely conditions. * It cannot deadlock due to recursive reentry unless AO_list_remove * is called while at least AO_BL_SIZE activations of * AO_list_remove are currently active in the same thread, i.e. * we must have at least AO_BL_SIZE recursive signal handler * invocations. * * All operations take an AO_list_aux argument. It is safe to * share a single AO_list_aux structure among all lists, but that * may increase contention. Any given list must always be accessed * with the same AO_list_aux structure. * * We make some machine-dependent assumptions: * - We have a compare-and-swap operation. * - At least _AO_N_BITS low order bits in pointers are * zero and normally unused. 
* - size_t and pointers have the same size. * * We do use a fully lock-free implementation if double-width * compare-and-swap operations are available. */ #ifdef AO_USE_ALMOST_LOCK_FREE /* The number of low order pointer bits we can use for a small */ /* version number. */ # if defined(__LP64__) || defined(_LP64) || defined(_WIN64) /* WIN64 isn't really supported yet. */ # define AO_N_BITS 3 # else # define AO_N_BITS 2 # endif # define AO_BIT_MASK ((1 << AO_N_BITS) - 1) /* * AO_stack_aux should be treated as opaque. * It is fully defined here, so it can be allocated, and to facilitate * debugging. */ #ifndef AO_BL_SIZE # define AO_BL_SIZE 2 #endif #if AO_BL_SIZE > (1 << AO_N_BITS) # error AO_BL_SIZE too big #endif typedef struct AO__stack_aux { volatile AO_t AO_stack_bl[AO_BL_SIZE]; } AO_stack_aux; /* The stack implementation knows only about the location of */ /* link fields in nodes, and nothing about the rest of the */ /* stack elements. Link fields hold an AO_t, which is not */ /* necessarily a real pointer. This converts the AO_t to a */ /* real (AO_t *) which is either NULL, or points at the link */ /* field in the next node. */ #define AO_REAL_NEXT_PTR(x) (AO_t *)((x) & ~AO_BIT_MASK) /* The following two routines should not normally be used directly. */ /* We make them visible here for the rare cases in which it makes sense */ /* to share the an AO_stack_aux between stacks. 
*/ void AO_stack_push_explicit_aux_release(volatile AO_t *list, AO_t *x, AO_stack_aux *); AO_t * AO_stack_pop_explicit_aux_acquire(volatile AO_t *list, AO_stack_aux *); /* And now AO_stack_t for the real interface: */ typedef struct AO__stack { volatile AO_t AO_ptr; AO_stack_aux AO_aux; } AO_stack_t; #define AO_STACK_INITIALIZER {0,{{0}}} AO_INLINE void AO_stack_init(AO_stack_t *list) { # if AO_BL_SIZE == 2 list -> AO_aux.AO_stack_bl[0] = 0; list -> AO_aux.AO_stack_bl[1] = 0; # else int i; for (i = 0; i < AO_BL_SIZE; ++i) list -> AO_aux.AO_stack_bl[i] = 0; # endif list -> AO_ptr = 0; } /* Convert an AO_stack_t to a pointer to the link field in */ /* the first element. */ #define AO_REAL_HEAD_PTR(x) AO_REAL_NEXT_PTR((x).AO_ptr) #define AO_stack_push_release(l, e) \ AO_stack_push_explicit_aux_release(&((l)->AO_ptr), e, &((l)->AO_aux)) #define AO_HAVE_stack_push_release #define AO_stack_pop_acquire(l) \ AO_stack_pop_explicit_aux_acquire(&((l)->AO_ptr), &((l)->AO_aux)) #define AO_HAVE_stack_pop_acquire # else /* Use fully non-blocking data structure, wide CAS */ #ifndef AO_HAVE_double_t /* Can happen if we're using CAS emulation, since we don't want to */ /* force that here, in case other atomic_ops clients don't want it. */ # ifdef __cplusplus } /* extern "C" */ # endif # include "atomic_ops/sysdeps/standard_ao_double_t.h" # ifdef __cplusplus extern "C" { # endif #endif typedef volatile AO_double_t AO_stack_t; /* AO_val1 is version, AO_val2 is pointer. */ /* Note: AO_stack_t variables are not intended to be local ones, */ /* otherwise it is the client responsibility to ensure they have */ /* double-word alignment. 
*/ #define AO_STACK_INITIALIZER AO_DOUBLE_T_INITIALIZER AO_INLINE void AO_stack_init(AO_stack_t *list) { list -> AO_val1 = 0; list -> AO_val2 = 0; } #define AO_REAL_HEAD_PTR(x) (AO_t *)((x).AO_val2) #define AO_REAL_NEXT_PTR(x) (AO_t *)(x) void AO_stack_push_release(AO_stack_t *list, AO_t *new_element); #define AO_HAVE_stack_push_release AO_t * AO_stack_pop_acquire(AO_stack_t *list); #define AO_HAVE_stack_pop_acquire #endif /* Wide CAS case */ #if defined(AO_HAVE_stack_push_release) && !defined(AO_HAVE_stack_push) # define AO_stack_push(l, e) AO_stack_push_release(l, e) # define AO_HAVE_stack_push #endif #if defined(AO_HAVE_stack_pop_acquire) && !defined(AO_HAVE_stack_pop) # define AO_stack_pop(l) AO_stack_pop_acquire(l) # define AO_HAVE_stack_pop #endif #ifdef __cplusplus } /* extern "C" */ #endif #endif /* !AO_STACK_H */ libatomic_ops-7.6.12/src/atomic_ops_sysdeps.S000066400000000000000000000004621411761111000212500ustar00rootroot00000000000000/* * Include the appropriate system-dependent assembly file, if any. * This is used only if the platform supports neither inline assembly * code, nor appropriate compiler intrinsics. */ #if !defined(__GNUC__) && (defined(sparc) || defined(__sparc)) # include "atomic_ops/sysdeps/sunc/sparc.S" #endif libatomic_ops-7.6.12/tests/000077500000000000000000000000001411761111000155665ustar00rootroot00000000000000libatomic_ops-7.6.12/tests/Makefile.am000066400000000000000000000145751411761111000176360ustar00rootroot00000000000000EXTRA_DIST=test_atomic_include.template list_atomic.template run_parallel.h \ test_atomic_include.h list_atomic.c # We distribute test_atomic_include.h and list_atomic.c, since it is hard # to regenerate them on Windows without sed. 
BUILT_SOURCES = test_atomic_include.h list_atomic.i list_atomic.o CLEANFILES = list_atomic.i list_atomic.o AM_CPPFLAGS = \ -I$(top_builddir)/src -I$(top_srcdir)/src \ -I$(top_builddir)/tests -I$(top_srcdir)/tests CFLAGS += $(CFLAGS_EXTRA) TESTS = test_atomic$(EXEEXT) test_atomic_generalized$(EXEEXT) \ test_stack$(EXEEXT) test_malloc$(EXEEXT) TEST_OBJS = test_atomic.o test_atomic_generalized-test_atomic.o \ test_stack.o test_malloc.o check_PROGRAMS = test_atomic test_atomic_generalized \ test_stack test_malloc if HAVE_PTHREAD_H TESTS += test_atomic_pthreads$(EXEEXT) TEST_OBJS += test_atomic_pthreads-test_atomic.o check_PROGRAMS += test_atomic_pthreads test_atomic_pthreads_SOURCES=$(test_atomic_SOURCES) test_atomic_pthreads_CPPFLAGS=-DAO_USE_PTHREAD_DEFS $(AM_CPPFLAGS) test_atomic_pthreads_LDADD=$(test_atomic_LDADD) endif test_atomic_SOURCES=test_atomic.c test_atomic_LDADD = $(THREADDLLIBS) $(top_builddir)/src/libatomic_ops.la test_atomic_generalized_SOURCES=$(test_atomic_SOURCES) test_atomic_generalized_CPPFLAGS= \ -DAO_PREFER_GENERALIZED -DAO_TEST_EMULATION $(AM_CPPFLAGS) test_atomic_generalized_LDADD=$(test_atomic_LDADD) test_stack_SOURCES=test_stack.c test_stack_LDADD = $(THREADDLLIBS) \ $(top_builddir)/src/libatomic_ops_gpl.la test_malloc_SOURCES=test_malloc.c test_malloc_LDADD = $(THREADDLLIBS) \ $(top_builddir)/src/libatomic_ops_gpl.la ## In case of static libraries build, libatomic_ops.a is already referenced ## in dependency_libs attribute of libatomic_ops_gpl.la file. if ENABLE_SHARED test_malloc_LDADD += $(top_builddir)/src/libatomic_ops.la test_stack_LDADD += $(top_builddir)/src/libatomic_ops.la endif # Run the tests directly (without test-driver): .PHONY: check-without-test-driver check-without-test-driver: $(TESTS) @echo "The following will print some 'Missing ...' messages" ./test_atomic$(EXEEXT) ./test_atomic_generalized$(EXEEXT) ./test_stack$(EXEEXT) ./test_malloc$(EXEEXT) test ! 
-f test_atomic_pthreads$(EXEEXT) || ./test_atomic_pthreads$(EXEEXT) test_atomic_include.h: test_atomic_include.template mkdir -p `dirname $@` sed -e s:XX::g $? > $@ sed -e s:XX:_release:g $? >> $@ sed -e s:XX:_acquire:g $? >> $@ sed -e s:XX:_read:g $? >> $@ sed -e s:XX:_write:g $? >> $@ sed -e s:XX:_full:g $? >> $@ sed -e s:XX:_release_write:g $? >> $@ sed -e s:XX:_acquire_read:g $? >> $@ sed -e s:XX:_dd_acquire_read:g $? >> $@ list_atomic.c: list_atomic.template mkdir -p `dirname $@` echo "#include \"atomic_ops.h\"" > $@ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g -e s:XX::g $? >> $@ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g -e s:XX:_release:g $? >> $@ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g -e s:XX:_acquire:g $? >> $@ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g -e s:XX:_read:g $? >> $@ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g -e s:XX:_write:g $? >> $@ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g -e s:XX:_full:g $? >> $@ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g -e s:XX:_release_write:g $? >> $@ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g -e s:XX:_acquire_read:g $? >> $@ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g -e s:XX:_dd_acquire_read:g $? >> $@ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g -e s:XX::g $? >> $@ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g -e s:XX:_release:g $? >> $@ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g -e s:XX:_acquire:g $? >> $@ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g -e s:XX:_read:g $? >> $@ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g -e s:XX:_write:g $? >> $@ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g -e s:XX:_full:g $? >> $@ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g -e s:XX:_release_write:g $? >> $@ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g -e s:XX:_acquire_read:g $? >> $@ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g -e s:XX:_dd_acquire_read:g $? >> $@ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g -e s:XX::g $? 
>> $@ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g -e s:XX:_release:g $? >> $@ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g -e s:XX:_acquire:g $? >> $@ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g -e s:XX:_read:g $? >> $@ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g -e s:XX:_write:g $? >> $@ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g -e s:XX:_full:g $? >> $@ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g -e s:XX:_release_write:g $? >> $@ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g -e s:XX:_acquire_read:g $? >> $@ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g -e s:XX:_dd_acquire_read:g $? >> $@ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g -e s:XX::g $? >> $@ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g -e s:XX:_release:g $? >> $@ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g -e s:XX:_acquire:g $? >> $@ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g -e s:XX:_read:g $? >> $@ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g -e s:XX:_write:g $? >> $@ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g -e s:XX:_full:g $? >> $@ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g -e s:XX:_release_write:g $? >> $@ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g -e s:XX:_acquire_read:g $? >> $@ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g -e s:XX:_dd_acquire_read:g $? >> $@ sed -e s:XSIZE:double:g -e s:XCTYPE:AO_double_t:g -e s:XX::g $? >> $@ sed -e s:XSIZE:double:g -e s:XCTYPE:AO_double_t:g -e s:XX:_release:g $? >> $@ sed -e s:XSIZE:double:g -e s:XCTYPE:AO_double_t:g -e s:XX:_acquire:g $? >> $@ sed -e s:XSIZE:double:g -e s:XCTYPE:AO_double_t:g -e s:XX:_read:g $? >> $@ sed -e s:XSIZE:double:g -e s:XCTYPE:AO_double_t:g -e s:XX:_write:g $? >> $@ sed -e s:XSIZE:double:g -e s:XCTYPE:AO_double_t:g -e s:XX:_full:g $? >> $@ sed -e s:XSIZE:double:g -e s:XCTYPE:AO_double_t:g -e s:XX:_release_write:g $? >> $@ sed -e s:XSIZE:double:g -e s:XCTYPE:AO_double_t:g -e s:XX:_acquire_read:g $? 
>> $@ sed -e s:XSIZE:double:g -e s:XCTYPE:AO_double_t:g -e s:XX:_dd_acquire_read:g $? >> $@ list_atomic.i: list_atomic.c mkdir -p `dirname $@` $(COMPILE) $? -E > $@ # Verify list_atomic.c syntax: list_atomic.o: list_atomic.c $(COMPILE) -c -o $@ $? # Just compile all tests (without linking and execution): check-nolink-local: $(TEST_OBJS) libatomic_ops-7.6.12/tests/list_atomic.template000066400000000000000000000072051411761111000216360ustar00rootroot00000000000000/* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. * * This file is covered by the GNU general public license, version 2. * see COPYING for details. */ /* This generates a compilable program. But it is really meant to be */ /* be used only with cc -E, to inspect the expensions generated by */ /* primitives. */ /* The result will not link or run. */ #include /* for exit() */ void XSIZE_list_atomicXX(void) { # if defined(AO_HAVE_XSIZE_loadXX) || defined(AO_HAVE_XSIZE_storeXX) \ || defined(AO_HAVE_XSIZE_fetch_and_addXX) \ || defined(AO_HAVE_XSIZE_fetch_and_add1XX) \ || defined(AO_HAVE_XSIZE_andXX) \ || defined(AO_HAVE_XSIZE_compare_and_swapXX) \ || defined(AO_HAVE_XSIZE_fetch_compare_and_swapXX) static volatile XCTYPE val /* = 0 */; # endif # if defined(AO_HAVE_XSIZE_compare_and_swapXX) \ || defined(AO_HAVE_XSIZE_fetch_compare_and_swapXX) static XCTYPE oldval /* = 0 */; # endif # if defined(AO_HAVE_XSIZE_storeXX) \ || defined(AO_HAVE_XSIZE_compare_and_swapXX) \ || defined(AO_HAVE_XSIZE_fetch_compare_and_swapXX) static XCTYPE newval /* = 0 */; # endif # if defined(AO_HAVE_test_and_setXX) AO_TS_t ts = AO_TS_INITIALIZER; # endif # if defined(AO_HAVE_XSIZE_fetch_and_addXX) || defined(AO_HAVE_XSIZE_andXX) \ || defined(AO_HAVE_XSIZE_orXX) || defined(AO_HAVE_XSIZE_xorXX) static XCTYPE incr /* = 0 */; # endif # if defined(AO_HAVE_nopXX) (void)"AO_nopXX(): "; AO_nopXX(); # else (void)"No AO_nopXX"; # endif # ifdef AO_HAVE_XSIZE_loadXX (void)"AO_XSIZE_loadXX(&val):"; (void)AO_XSIZE_loadXX(&val); # 
else (void)"No AO_XSIZE_loadXX"; # endif # ifdef AO_HAVE_XSIZE_storeXX (void)"AO_XSIZE_storeXX(&val, newval):"; AO_XSIZE_storeXX(&val, newval); # else (void)"No AO_XSIZE_storeXX"; # endif # ifdef AO_HAVE_XSIZE_fetch_and_addXX (void)"AO_XSIZE_fetch_and_addXX(&val, incr):"; (void)AO_XSIZE_fetch_and_addXX(&val, incr); # else (void)"No AO_XSIZE_fetch_and_addXX"; # endif # ifdef AO_HAVE_XSIZE_fetch_and_add1XX (void)"AO_XSIZE_fetch_and_add1XX(&val):"; (void)AO_XSIZE_fetch_and_add1XX(&val); # else (void)"No AO_XSIZE_fetch_and_add1XX"; # endif # ifdef AO_HAVE_XSIZE_fetch_and_sub1XX (void)"AO_XSIZE_fetch_and_sub1XX(&val):"; (void)AO_XSIZE_fetch_and_sub1XX(&val); # else (void)"No AO_XSIZE_fetch_and_sub1XX"; # endif # ifdef AO_HAVE_XSIZE_andXX (void)"AO_XSIZE_andXX(&val, incr):"; AO_XSIZE_andXX(&val, incr); # else (void)"No AO_XSIZE_andXX"; # endif # ifdef AO_HAVE_XSIZE_orXX (void)"AO_XSIZE_orXX(&val, incr):"; AO_XSIZE_orXX(&val, incr); # else (void)"No AO_XSIZE_orXX"; # endif # ifdef AO_HAVE_XSIZE_xorXX (void)"AO_XSIZE_xorXX(&val, incr):"; AO_XSIZE_xorXX(&val, incr); # else (void)"No AO_XSIZE_xorXX"; # endif # ifdef AO_HAVE_XSIZE_compare_and_swapXX (void)"AO_XSIZE_compare_and_swapXX(&val, oldval, newval):"; if (!AO_XSIZE_compare_and_swapXX(&val, oldval, newval)) exit(1); # else (void)"No AO_XSIZE_compare_and_swapXX"; # endif /* TODO: Add AO_compare_double_and_swap_doubleXX */ /* TODO: Add AO_compare_and_swap_doubleXX */ # ifdef AO_HAVE_XSIZE_fetch_compare_and_swapXX (void)"AO_XSIZE_fetch_compare_and_swapXX(&val, oldval, newval):"; if (AO_XSIZE_fetch_compare_and_swapXX(&val, oldval, newval) != oldval) exit(1); # else (void)"No AO_XSIZE_fetch_compare_and_swapXX"; # endif # if defined(AO_HAVE_test_and_setXX) (void)"AO_test_and_setXX(&ts):"; (void)AO_test_and_setXX(&ts); # else (void)"No AO_test_and_setXX"; # endif } libatomic_ops-7.6.12/tests/run_parallel.h000066400000000000000000000113211411761111000204150ustar00rootroot00000000000000/* * Copyright (c) 2003-2005 
Hewlett-Packard Development Company, L.P. * * This file is covered by the GNU general public license, version 2. * see COPYING for details. */ #if defined(_MSC_VER) || \ defined(_WIN32) && !defined(__CYGWIN32__) && !defined(__CYGWIN__) || \ defined(_WIN32_WINCE) # define USE_WINTHREADS #elif defined(__vxworks) # define USE_VXTHREADS #else # define USE_PTHREADS #endif #include #include #ifdef USE_PTHREADS # include #endif #ifdef USE_VXTHREADS # include # include #endif #ifdef USE_WINTHREADS # include #endif #include "atomic_ops.h" #if !defined(AO_ATOMIC_OPS_H) && !defined(CPPCHECK) # error Wrong atomic_ops.h included. #endif #if (defined(_WIN32_WCE) || defined(__MINGW32CE__)) && !defined(AO_HAVE_abort) # define abort() _exit(-1) /* there is no abort() in WinCE */ #endif #ifndef AO_PTRDIFF_T # define AO_PTRDIFF_T ptrdiff_t #endif #ifndef MAX_NTHREADS # define MAX_NTHREADS 100 #endif typedef void * (* thr_func)(void *); typedef int (* test_func)(void); /* Returns != 0 on success */ void * run_parallel(int nthreads, thr_func f1, test_func t, const char *name); #ifdef USE_PTHREADS void * run_parallel(int nthreads, thr_func f1, test_func t, const char *name) { pthread_attr_t attr; pthread_t thr[MAX_NTHREADS]; int i; printf("Testing %s\n", name); if (nthreads > MAX_NTHREADS) { fprintf(stderr, "run_parallel: requested too many threads\n"); abort(); } # ifdef _HPUX_SOURCE /* Default stack size is too small, especially with the 64 bit ABI */ /* Increase it. */ if (pthread_default_stacksize_np(1024*1024, 0) != 0) { fprintf(stderr, "pthread_default_stacksize_np failed. 
" "OK after first call.\n"); } # endif pthread_attr_init(&attr); for (i = 0; i < nthreads; ++i) { int code = pthread_create(thr + i, &attr, f1, (void *)(long)i); if (code != 0) { fprintf(stderr, "pthread_create returned %d, thread %d\n", code, i); abort(); } } for (i = 0; i < nthreads; ++i) { int code = pthread_join(thr[i], NULL); if (code != 0) { fprintf(stderr, "pthread_join returned %d, thread %d\n", code, i); abort(); } } if (t()) { printf("Succeeded\n"); } else { fprintf(stderr, "Failed\n"); abort(); } return 0; } #endif /* USE_PTHREADS */ #ifdef USE_VXTHREADS void * run_parallel(int nthreads, thr_func f1, test_func t, const char *name) { int thr[MAX_NTHREADS]; int i; printf("Testing %s\n", name); if (nthreads > MAX_NTHREADS) { fprintf(stderr, "run_parallel: requested too many threads\n"); taskSuspend(0); } for (i = 0; i < nthreads; ++i) { thr[i] = taskSpawn((char*) name, 180, 0, 32768, (FUNCPTR) f1, i, 1, 2, 3, 4, 5, 6, 7, 8, 9); if (thr[i] == ERROR) { fprintf(stderr, "taskSpawn failed with %d, thread %d\n", errno, i); taskSuspend(0); } } for (i = 0; i < nthreads; ++i) { while (taskIdVerify(thr[i]) == OK) taskDelay(60); } if (t()) { printf("Succeeded\n"); } else { fprintf(stderr, "Failed\n"); taskSuspend(0); } return 0; } #endif /* USE_VXTHREADS */ #ifdef USE_WINTHREADS struct tramp_args { thr_func fn; long arg; }; DWORD WINAPI tramp(LPVOID param) { struct tramp_args *args = (struct tramp_args *)param; return (DWORD)(AO_PTRDIFF_T)(*args->fn)((LPVOID)(AO_PTRDIFF_T)args->arg); } void * run_parallel(int nthreads, thr_func f1, test_func t, const char *name) { HANDLE thr[MAX_NTHREADS]; struct tramp_args args[MAX_NTHREADS]; int i; printf("Testing %s\n", name); if (nthreads > MAX_NTHREADS) { fprintf(stderr, "run_parallel: requested too many threads\n"); abort(); } for (i = 0; i < nthreads; ++i) { args[i].fn = f1; args[i].arg = i; if ((thr[i] = CreateThread(NULL, 0, tramp, (LPVOID)(args+i), 0, NULL)) == NULL) { fprintf(stderr, "CreateThread failed with %lu, thread 
%d\n", (unsigned long)GetLastError(), i); abort(); } } for (i = 0; i < nthreads; ++i) { DWORD code = WaitForSingleObject(thr[i], INFINITE); if (code != WAIT_OBJECT_0) { fprintf(stderr, "WaitForSingleObject returned %lu, thread %d\n", (unsigned long)code, i); abort(); } } if (t()) { printf("Succeeded\n"); } else { fprintf(stderr, "Failed\n"); abort(); } return 0; } #endif /* USE_WINTHREADS */ libatomic_ops-7.6.12/tests/test_atomic.c000066400000000000000000000156171411761111000202570ustar00rootroot00000000000000/* * Copyright (c) 2003-2005 Hewlett-Packard Development Company, L.P. * * This file may be redistributed and/or modified under the * terms of the GNU General Public License as published by the Free Software * Foundation; either version 2, or (at your option) any later version. * * It is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU General Public License in the * file COPYING for more details. 
*/ #if defined(HAVE_CONFIG_H) # include "config.h" #endif #if (defined(AO_NO_PTHREADS) || defined(__MINGW32__)) \ && defined(AO_USE_PTHREAD_DEFS) # include int main(void) { printf("test skipped\n"); return 0; } #else #include "run_parallel.h" #include "test_atomic_include.h" #if defined(AO_USE_PTHREAD_DEFS) || defined(AO_PREFER_GENERALIZED) # define NITERS 100000 #else # define NITERS 10000000 #endif void * add1sub1_thr(void * id); int add1sub1_test(void); void * acqrel_thr(void *id); int acqrel_test(void); void * test_and_set_thr(void * id); int test_and_set_test(void); #if defined(AO_HAVE_fetch_and_add1) && defined(AO_HAVE_fetch_and_sub1) AO_t counter = 0; void * add1sub1_thr(void * id) { int me = (int)(AO_PTRDIFF_T)id; int i; for (i = 0; i < NITERS; ++i) if ((me & 1) != 0) { (void)AO_fetch_and_sub1(&counter); } else { (void)AO_fetch_and_add1(&counter); } return 0; } int add1sub1_test(void) { return counter == 0; } #endif /* defined(AO_HAVE_fetch_and_add1) && defined(AO_HAVE_fetch_and_sub1) */ #if defined(AO_HAVE_store_release_write) && defined(AO_HAVE_load_acquire_read) /* Invariant: counter1 >= counter2 */ AO_t counter1 = 0; AO_t counter2 = 0; void * acqrel_thr(void *id) { int me = (int)(AO_PTRDIFF_T)id; int i; for (i = 0; i < NITERS; ++i) if (me & 1) { AO_t my_counter1; if (me != 1) { fprintf(stderr, "acqrel test: too many threads\n"); abort(); } my_counter1 = AO_load(&counter1); AO_store(&counter1, my_counter1 + 1); AO_store_release_write(&counter2, my_counter1 + 1); } else { AO_t my_counter1a, my_counter2a; AO_t my_counter1b, my_counter2b; my_counter2a = AO_load_acquire_read(&counter2); my_counter1a = AO_load(&counter1); /* Redo this, to make sure that the second load of counter1 */ /* is not viewed as a common subexpression. 
*/ my_counter2b = AO_load_acquire_read(&counter2); my_counter1b = AO_load(&counter1); if (my_counter1a < my_counter2a) { fprintf(stderr, "Saw release store out of order: %lu < %lu\n", (unsigned long)my_counter1a, (unsigned long)my_counter2a); abort(); } if (my_counter1b < my_counter2b) { fprintf(stderr, "Saw release store out of order (bad CSE?): %lu < %lu\n", (unsigned long)my_counter1b, (unsigned long)my_counter2b); abort(); } } return 0; } int acqrel_test(void) { return counter1 == NITERS && counter2 == NITERS; } #endif /* AO_HAVE_store_release_write && AO_HAVE_load_acquire_read */ #if defined(AO_HAVE_test_and_set_acquire) AO_TS_t lock = AO_TS_INITIALIZER; unsigned long locked_counter; volatile unsigned long junk = 13; AO_ATTR_NO_SANITIZE_THREAD void do_junk(void) { junk *= 17; junk *= 19; } void * test_and_set_thr(void * id) { unsigned long i; for (i = 0; i < NITERS/10; ++i) { while (AO_test_and_set_acquire(&lock) != AO_TS_CLEAR); ++locked_counter; if (locked_counter != 1) { fprintf(stderr, "Test and set failure 1, counter = %ld, id = %d\n", (long)locked_counter, (int)(AO_PTRDIFF_T)id); abort(); } locked_counter *= 2; locked_counter -= 1; locked_counter *= 5; locked_counter -= 4; if (locked_counter != 1) { fprintf(stderr, "Test and set failure 2, counter = %ld, id = %d\n", (long)locked_counter, (int)(AO_PTRDIFF_T)id); abort(); } --locked_counter; AO_CLEAR(&lock); /* Spend a bit of time outside the lock. 
*/ do_junk(); } return 0; } int test_and_set_test(void) { return locked_counter == 0; } #endif /* defined(AO_HAVE_test_and_set_acquire) */ #if (!defined(_MSC_VER) && !defined(__MINGW32__) && !defined(__BORLANDC__) \ || defined(AO_USE_NO_SIGNALS) || defined(AO_USE_WIN32_PTHREADS)) \ && defined(AO_TEST_EMULATION) # ifdef __cplusplus extern "C" { # endif void AO_store_full_emulation(volatile AO_t *addr, AO_t val); AO_t AO_fetch_compare_and_swap_emulation(volatile AO_t *addr, AO_t old_val, AO_t new_val); # ifdef AO_HAVE_double_t int AO_compare_double_and_swap_double_emulation(volatile AO_double_t *, AO_t old_val1, AO_t old_val2, AO_t new_val1, AO_t new_val2); # endif # ifdef __cplusplus } /* extern "C" */ # endif void test_atomic_emulation(void) { AO_t x; # ifdef AO_HAVE_double_t AO_double_t w; /* double-word alignment not needed */ w.AO_val1 = 0; w.AO_val2 = 0; TA_assert(!AO_compare_double_and_swap_double_emulation(&w, 4116, 2121, 8537, 6410)); TA_assert(w.AO_val1 == 0 && w.AO_val2 == 0); TA_assert(AO_compare_double_and_swap_double_emulation(&w, 0, 0, 8537, 6410)); TA_assert(w.AO_val1 == 8537 && w.AO_val2 == 6410); # endif AO_store_full_emulation(&x, 1314); TA_assert(x == 1314); TA_assert(AO_fetch_compare_and_swap_emulation(&x, 14, 13117) == 1314); TA_assert(x == 1314); TA_assert(AO_fetch_compare_and_swap_emulation(&x, 1314, 14117) == 1314); TA_assert(x == 14117); } #else # define test_atomic_emulation() (void)0 #endif /* _MSC_VER && !AO_USE_NO_SIGNALS || !AO_TEST_EMULATION */ int main(void) { test_atomic(); test_atomic_acquire(); test_atomic_release(); test_atomic_read(); test_atomic_write(); test_atomic_full(); test_atomic_release_write(); test_atomic_acquire_read(); test_atomic_dd_acquire_read(); # if defined(AO_HAVE_fetch_and_add1) && defined(AO_HAVE_fetch_and_sub1) run_parallel(4, add1sub1_thr, add1sub1_test, "add1/sub1"); # endif # if defined(AO_HAVE_store_release_write) && defined(AO_HAVE_load_acquire_read) run_parallel(3, acqrel_thr, acqrel_test, 
"store_release_write/load_acquire_read"); # endif # if defined(AO_HAVE_test_and_set_acquire) run_parallel(5, test_and_set_thr, test_and_set_test, "test_and_set"); # endif test_atomic_emulation(); return 0; } #endif /* !AO_NO_PTHREADS || !AO_USE_PTHREAD_DEFS */ libatomic_ops-7.6.12/tests/test_atomic_include.template000066400000000000000000000501231411761111000233420ustar00rootroot00000000000000/* * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved. * * This file is covered by the GNU general public license, version 2. * see COPYING for details. */ /* Some basic sanity tests. These do not test the barrier semantics. */ #undef TA_assert #define TA_assert(e) \ if (!(e)) { fprintf(stderr, "Assertion failed %s:%d (barrier: XX)\n", \ __FILE__, __LINE__), exit(1); } #undef MISSING #define MISSING(name) \ printf("Missing: %s\n", #name "XX") #if defined(CPPCHECK) void list_atomicXX(void); void char_list_atomicXX(void); void short_list_atomicXX(void); void int_list_atomicXX(void); void double_list_atomicXX(void); #endif void test_atomicXX(void) { AO_t x; unsigned char b; unsigned short s; unsigned int zz; # if defined(AO_HAVE_test_and_setXX) AO_TS_t z = AO_TS_INITIALIZER; # endif # if defined(AO_HAVE_double_compare_and_swapXX) \ || defined(AO_HAVE_double_loadXX) \ || defined(AO_HAVE_double_storeXX) static AO_double_t old_w; /* static to avoid misalignment */ AO_double_t new_w; # endif # if defined(AO_HAVE_compare_and_swap_doubleXX) \ || defined(AO_HAVE_compare_double_and_swap_doubleXX) \ || defined(AO_HAVE_double_compare_and_swapXX) static AO_double_t w; /* static to avoid misalignment */ w.AO_val1 = 0; w.AO_val2 = 0; # endif # if defined(CPPCHECK) list_atomicXX(); char_list_atomicXX(); short_list_atomicXX(); int_list_atomicXX(); double_list_atomicXX(); # endif # if defined(AO_HAVE_nopXX) AO_nopXX(); # elif !defined(AO_HAVE_nop) || !defined(AO_HAVE_nop_full) \ || !defined(AO_HAVE_nop_read) || !defined(AO_HAVE_nop_write) MISSING(AO_nop); # endif # if 
defined(AO_HAVE_storeXX) # if (defined(AO_MEMORY_SANITIZER) || defined(LINT2)) \ && defined(AO_PREFER_GENERALIZED) *(volatile AO_t *)&x = 0; /* initialize to avoid false warning */ # endif AO_storeXX(&x, 13); TA_assert(x == 13); # else # if !defined(AO_HAVE_store) || !defined(AO_HAVE_store_full) \ || !defined(AO_HAVE_store_release) \ || !defined(AO_HAVE_store_release_write) \ || !defined(AO_HAVE_store_write) MISSING(AO_store); # endif x = 13; # endif # if defined(AO_HAVE_loadXX) TA_assert(AO_loadXX(&x) == 13); # elif !defined(AO_HAVE_load) || !defined(AO_HAVE_load_acquire) \ || !defined(AO_HAVE_load_acquire_read) \ || !defined(AO_HAVE_load_dd_acquire_read) \ || !defined(AO_HAVE_load_full) || !defined(AO_HAVE_load_read) MISSING(AO_load); # endif # if defined(AO_HAVE_test_and_setXX) TA_assert(AO_test_and_setXX(&z) == AO_TS_CLEAR); TA_assert(AO_test_and_setXX(&z) == AO_TS_SET); TA_assert(AO_test_and_setXX(&z) == AO_TS_SET); AO_CLEAR(&z); # else MISSING(AO_test_and_set); # endif # if defined(AO_HAVE_fetch_and_addXX) TA_assert(AO_fetch_and_addXX(&x, 42) == 13); TA_assert(AO_fetch_and_addXX(&x, (AO_t)(-42)) == 55); # else MISSING(AO_fetch_and_add); # endif # if defined(AO_HAVE_fetch_and_add1XX) TA_assert(AO_fetch_and_add1XX(&x) == 13); # else MISSING(AO_fetch_and_add1); ++x; # endif # if defined(AO_HAVE_fetch_and_sub1XX) TA_assert(AO_fetch_and_sub1XX(&x) == 14); # else MISSING(AO_fetch_and_sub1); --x; # endif # if defined(AO_HAVE_short_storeXX) # if (defined(AO_MEMORY_SANITIZER) || defined(LINT2)) \ && defined(AO_PREFER_GENERALIZED) *(volatile short *)&s = 0; # endif AO_short_storeXX(&s, 13); # else # if !defined(AO_HAVE_short_store) || !defined(AO_HAVE_short_store_full) \ || !defined(AO_HAVE_short_store_release) \ || !defined(AO_HAVE_short_store_release_write) \ || !defined(AO_HAVE_short_store_write) MISSING(AO_short_store); # endif s = 13; # endif # if defined(AO_HAVE_short_loadXX) TA_assert(AO_short_load(&s) == 13); # elif !defined(AO_HAVE_short_load) || 
!defined(AO_HAVE_short_load_acquire) \ || !defined(AO_HAVE_short_load_acquire_read) \ || !defined(AO_HAVE_short_load_dd_acquire_read) \ || !defined(AO_HAVE_short_load_full) \ || !defined(AO_HAVE_short_load_read) MISSING(AO_short_load); # endif # if defined(AO_HAVE_short_fetch_and_addXX) TA_assert(AO_short_fetch_and_addXX(&s, 42) == 13); TA_assert(AO_short_fetch_and_addXX(&s, (unsigned short)-42) == 55); # else MISSING(AO_short_fetch_and_add); # endif # if defined(AO_HAVE_short_fetch_and_add1XX) TA_assert(AO_short_fetch_and_add1XX(&s) == 13); # else MISSING(AO_short_fetch_and_add1); ++s; # endif # if defined(AO_HAVE_short_fetch_and_sub1XX) TA_assert(AO_short_fetch_and_sub1XX(&s) == 14); # else MISSING(AO_short_fetch_and_sub1); --s; # endif TA_assert(*(volatile short *)&s == 13); # if defined(AO_HAVE_char_storeXX) # if (defined(AO_MEMORY_SANITIZER) || defined(LINT2)) \ && defined(AO_PREFER_GENERALIZED) *(volatile char *)&b = 0; # endif AO_char_storeXX(&b, 13); # else # if !defined(AO_HAVE_char_store) || !defined(AO_HAVE_char_store_full) \ || !defined(AO_HAVE_char_store_release) \ || !defined(AO_HAVE_char_store_release_write) \ || !defined(AO_HAVE_char_store_write) MISSING(AO_char_store); # endif b = 13; # endif # if defined(AO_HAVE_char_loadXX) TA_assert(AO_char_load(&b) == 13); # elif !defined(AO_HAVE_char_load) || !defined(AO_HAVE_char_load_acquire) \ || !defined(AO_HAVE_char_load_acquire_read) \ || !defined(AO_HAVE_char_load_dd_acquire_read) \ || !defined(AO_HAVE_char_load_full) || !defined(AO_HAVE_char_load_read) MISSING(AO_char_load); # endif # if defined(AO_HAVE_char_fetch_and_addXX) TA_assert(AO_char_fetch_and_addXX(&b, 42) == 13); TA_assert(AO_char_fetch_and_addXX(&b, (unsigned char)-42) == 55); # else MISSING(AO_char_fetch_and_add); # endif # if defined(AO_HAVE_char_fetch_and_add1XX) TA_assert(AO_char_fetch_and_add1XX(&b) == 13); # else MISSING(AO_char_fetch_and_add1); ++b; # endif # if defined(AO_HAVE_char_fetch_and_sub1XX) 
TA_assert(AO_char_fetch_and_sub1XX(&b) == 14); # else MISSING(AO_char_fetch_and_sub1); --b; # endif TA_assert(*(volatile char *)&b == 13); # if defined(AO_HAVE_int_storeXX) # if (defined(AO_MEMORY_SANITIZER) || defined(LINT2)) \ && defined(AO_PREFER_GENERALIZED) *(volatile int *)&zz = 0; # endif AO_int_storeXX(&zz, 13); # else # if !defined(AO_HAVE_int_store) || !defined(AO_HAVE_int_store_full) \ || !defined(AO_HAVE_int_store_release) \ || !defined(AO_HAVE_int_store_release_write) \ || !defined(AO_HAVE_int_store_write) MISSING(AO_int_store); # endif zz = 13; # endif # if defined(AO_HAVE_int_loadXX) TA_assert(AO_int_load(&zz) == 13); # elif !defined(AO_HAVE_int_load) || !defined(AO_HAVE_int_load_acquire) \ || !defined(AO_HAVE_int_load_acquire_read) \ || !defined(AO_HAVE_int_load_dd_acquire_read) \ || !defined(AO_HAVE_int_load_full) || !defined(AO_HAVE_int_load_read) MISSING(AO_int_load); # endif # if defined(AO_HAVE_int_fetch_and_addXX) TA_assert(AO_int_fetch_and_addXX(&zz, 42) == 13); TA_assert(AO_int_fetch_and_addXX(&zz, (unsigned int)-42) == 55); # else MISSING(AO_int_fetch_and_add); # endif # if defined(AO_HAVE_int_fetch_and_add1XX) TA_assert(AO_int_fetch_and_add1XX(&zz) == 13); # else MISSING(AO_int_fetch_and_add1); ++zz; # endif # if defined(AO_HAVE_int_fetch_and_sub1XX) TA_assert(AO_int_fetch_and_sub1XX(&zz) == 14); # else MISSING(AO_int_fetch_and_sub1); --zz; # endif TA_assert(*(volatile int *)&zz == 13); # if defined(AO_HAVE_compare_and_swapXX) TA_assert(!AO_compare_and_swapXX(&x, 14, 42)); TA_assert(x == 13); TA_assert(AO_compare_and_swapXX(&x, 13, 42)); TA_assert(x == 42); # else MISSING(AO_compare_and_swap); if (*(volatile AO_t *)&x == 13) x = 42; # endif # if defined(AO_HAVE_orXX) AO_orXX(&x, 66); TA_assert(x == 106); # else # if !defined(AO_HAVE_or) || !defined(AO_HAVE_or_acquire) \ || !defined(AO_HAVE_or_acquire_read) || !defined(AO_HAVE_or_full) \ || !defined(AO_HAVE_or_read) || !defined(AO_HAVE_or_release) \ || !defined(AO_HAVE_or_release_write) || 
!defined(AO_HAVE_or_write) MISSING(AO_or); # endif x |= 66; # endif # if defined(AO_HAVE_xorXX) AO_xorXX(&x, 181); TA_assert(x == 223); # else # if !defined(AO_HAVE_xor) || !defined(AO_HAVE_xor_acquire) \ || !defined(AO_HAVE_xor_acquire_read) || !defined(AO_HAVE_xor_full) \ || !defined(AO_HAVE_xor_read) || !defined(AO_HAVE_xor_release) \ || !defined(AO_HAVE_xor_release_write) || !defined(AO_HAVE_xor_write) MISSING(AO_xor); # endif x ^= 181; # endif # if defined(AO_HAVE_andXX) AO_andXX(&x, 57); TA_assert(x == 25); # else # if !defined(AO_HAVE_and) || !defined(AO_HAVE_and_acquire) \ || !defined(AO_HAVE_and_acquire_read) || !defined(AO_HAVE_and_full) \ || !defined(AO_HAVE_and_read) || !defined(AO_HAVE_and_release) \ || !defined(AO_HAVE_and_release_write) || !defined(AO_HAVE_and_write) MISSING(AO_and); # endif x &= 57; # endif # if defined(AO_HAVE_fetch_compare_and_swapXX) TA_assert(AO_fetch_compare_and_swapXX(&x, 14, 117) == 25); TA_assert(x == 25); TA_assert(AO_fetch_compare_and_swapXX(&x, 25, 117) == 25); # else MISSING(AO_fetch_compare_and_swap); if (x == 25) x = 117; # endif TA_assert(x == 117); # if defined(AO_HAVE_short_compare_and_swapXX) TA_assert(!AO_short_compare_and_swapXX(&s, 14, 42)); TA_assert(s == 13); TA_assert(AO_short_compare_and_swapXX(&s, 13, 42)); TA_assert(s == 42); # else MISSING(AO_short_compare_and_swap); if (*(volatile short *)&s == 13) s = 42; # endif # if defined(AO_HAVE_short_orXX) AO_short_orXX(&s, 66); TA_assert(s == 106); # else # if !defined(AO_HAVE_short_or) || !defined(AO_HAVE_short_or_acquire) \ || !defined(AO_HAVE_short_or_acquire_read) \ || !defined(AO_HAVE_short_or_full) || !defined(AO_HAVE_short_or_read) \ || !defined(AO_HAVE_short_or_release) \ || !defined(AO_HAVE_short_or_release_write) \ || !defined(AO_HAVE_short_or_write) MISSING(AO_short_or); # endif s |= 66; # endif # if defined(AO_HAVE_short_xorXX) AO_short_xorXX(&s, 181); TA_assert(s == 223); # else # if !defined(AO_HAVE_short_xor) || !defined(AO_HAVE_short_xor_acquire) 
\ || !defined(AO_HAVE_short_xor_acquire_read) \ || !defined(AO_HAVE_short_xor_full) \ || !defined(AO_HAVE_short_xor_read) \ || !defined(AO_HAVE_short_xor_release) \ || !defined(AO_HAVE_short_xor_release_write) \ || !defined(AO_HAVE_short_xor_write) MISSING(AO_short_xor); # endif s ^= 181; # endif # if defined(AO_HAVE_short_andXX) AO_short_andXX(&s, 57); TA_assert(s == 25); # else # if !defined(AO_HAVE_short_and) || !defined(AO_HAVE_short_and_acquire) \ || !defined(AO_HAVE_short_and_acquire_read) \ || !defined(AO_HAVE_short_and_full) \ || !defined(AO_HAVE_short_and_read) \ || !defined(AO_HAVE_short_and_release) \ || !defined(AO_HAVE_short_and_release_write) \ || !defined(AO_HAVE_short_and_write) MISSING(AO_short_and); # endif s &= 57; # endif # if defined(AO_HAVE_short_fetch_compare_and_swapXX) TA_assert(AO_short_fetch_compare_and_swapXX(&s, 14, 117) == 25); TA_assert(s == 25); TA_assert(AO_short_fetch_compare_and_swapXX(&s, 25, 117) == 25); # else MISSING(AO_short_fetch_compare_and_swap); if (s == 25) s = 117; # endif TA_assert(s == 117); # if defined(AO_HAVE_char_compare_and_swapXX) TA_assert(!AO_char_compare_and_swapXX(&b, 14, 42)); TA_assert(b == 13); TA_assert(AO_char_compare_and_swapXX(&b, 13, 42)); TA_assert(b == 42); # else MISSING(AO_char_compare_and_swap); if (*(volatile char *)&b == 13) b = 42; # endif # if defined(AO_HAVE_char_orXX) AO_char_orXX(&b, 66); TA_assert(b == 106); # else # if !defined(AO_HAVE_char_or) || !defined(AO_HAVE_char_or_acquire) \ || !defined(AO_HAVE_char_or_acquire_read) \ || !defined(AO_HAVE_char_or_full) || !defined(AO_HAVE_char_or_read) \ || !defined(AO_HAVE_char_or_release) \ || !defined(AO_HAVE_char_or_release_write) \ || !defined(AO_HAVE_char_or_write) MISSING(AO_char_or); # endif b |= 66; # endif # if defined(AO_HAVE_char_xorXX) AO_char_xorXX(&b, 181); TA_assert(b == 223); # else # if !defined(AO_HAVE_char_xor) || !defined(AO_HAVE_char_xor_acquire) \ || !defined(AO_HAVE_char_xor_acquire_read) \ || 
!defined(AO_HAVE_char_xor_full) || !defined(AO_HAVE_char_xor_read) \ || !defined(AO_HAVE_char_xor_release) \ || !defined(AO_HAVE_char_xor_release_write) \ || !defined(AO_HAVE_char_xor_write) MISSING(AO_char_xor); # endif b ^= 181; # endif # if defined(AO_HAVE_char_andXX) AO_char_andXX(&b, 57); TA_assert(b == 25); # else # if !defined(AO_HAVE_char_and) || !defined(AO_HAVE_char_and_acquire) \ || !defined(AO_HAVE_char_and_acquire_read) \ || !defined(AO_HAVE_char_and_full) || !defined(AO_HAVE_char_and_read) \ || !defined(AO_HAVE_char_and_release) \ || !defined(AO_HAVE_char_and_release_write) \ || !defined(AO_HAVE_char_and_write) MISSING(AO_char_and); # endif b &= 57; # endif # if defined(AO_HAVE_char_fetch_compare_and_swapXX) TA_assert(AO_char_fetch_compare_and_swapXX(&b, 14, 117) == 25); TA_assert(b == 25); TA_assert(AO_char_fetch_compare_and_swapXX(&b, 25, 117) == 25); # else MISSING(AO_char_fetch_compare_and_swap); if (b == 25) b = 117; # endif TA_assert(b == 117); # if defined(AO_HAVE_int_compare_and_swapXX) TA_assert(!AO_int_compare_and_swapXX(&zz, 14, 42)); TA_assert(zz == 13); TA_assert(AO_int_compare_and_swapXX(&zz, 13, 42)); TA_assert(zz == 42); # else MISSING(AO_int_compare_and_swap); if (*(volatile int *)&zz == 13) zz = 42; # endif # if defined(AO_HAVE_int_orXX) AO_int_orXX(&zz, 66); TA_assert(zz == 106); # else # if !defined(AO_HAVE_int_or) || !defined(AO_HAVE_int_or_acquire) \ || !defined(AO_HAVE_int_or_acquire_read) \ || !defined(AO_HAVE_int_or_full) || !defined(AO_HAVE_int_or_read) \ || !defined(AO_HAVE_int_or_release) \ || !defined(AO_HAVE_int_or_release_write) \ || !defined(AO_HAVE_int_or_write) MISSING(AO_int_or); # endif zz |= 66; # endif # if defined(AO_HAVE_int_xorXX) AO_int_xorXX(&zz, 181); TA_assert(zz == 223); # else # if !defined(AO_HAVE_int_xor) || !defined(AO_HAVE_int_xor_acquire) \ || !defined(AO_HAVE_int_xor_acquire_read) \ || !defined(AO_HAVE_int_xor_full) || !defined(AO_HAVE_int_xor_read) \ || !defined(AO_HAVE_int_xor_release) \ || 
!defined(AO_HAVE_int_xor_release_write) \ || !defined(AO_HAVE_int_xor_write) MISSING(AO_int_xor); # endif zz ^= 181; # endif # if defined(AO_HAVE_int_andXX) AO_int_andXX(&zz, 57); TA_assert(zz == 25); # else # if !defined(AO_HAVE_int_and) || !defined(AO_HAVE_int_and_acquire) \ || !defined(AO_HAVE_int_and_acquire_read) \ || !defined(AO_HAVE_int_and_full) || !defined(AO_HAVE_int_and_read) \ || !defined(AO_HAVE_int_and_release) \ || !defined(AO_HAVE_int_and_release_write) \ || !defined(AO_HAVE_int_and_write) MISSING(AO_int_and); # endif zz &= 57; # endif # if defined(AO_HAVE_int_fetch_compare_and_swapXX) TA_assert(AO_int_fetch_compare_and_swapXX(&zz, 14, 117) == 25); TA_assert(zz == 25); TA_assert(AO_int_fetch_compare_and_swapXX(&zz, 25, 117) == 25); # else MISSING(AO_int_fetch_compare_and_swap); if (zz == 25) zz = 117; # endif TA_assert(zz == 117); # if defined(AO_HAVE_double_loadXX) || defined(AO_HAVE_double_storeXX) /* Initialize old_w even for store to workaround MSan warning. */ old_w.AO_val1 = 3316; old_w.AO_val2 = 2921; # endif # if defined(AO_HAVE_double_loadXX) new_w = AO_double_loadXX(&old_w); TA_assert(new_w.AO_val1 == 3316 && new_w.AO_val2 == 2921); # elif !defined(AO_HAVE_double_load) \ || !defined(AO_HAVE_double_load_acquire) \ || !defined(AO_HAVE_double_load_acquire_read) \ || !defined(AO_HAVE_double_load_dd_acquire_read) \ || !defined(AO_HAVE_double_load_full) \ || !defined(AO_HAVE_double_load_read) MISSING(AO_double_load); # endif # if defined(AO_HAVE_double_storeXX) new_w.AO_val1 = 1375; new_w.AO_val2 = 8243; AO_double_storeXX(&old_w, new_w); TA_assert(old_w.AO_val1 == 1375 && old_w.AO_val2 == 8243); AO_double_storeXX(&old_w, new_w); TA_assert(old_w.AO_val1 == 1375 && old_w.AO_val2 == 8243); new_w.AO_val1 ^= old_w.AO_val1; new_w.AO_val2 ^= old_w.AO_val2; AO_double_storeXX(&old_w, new_w); TA_assert(old_w.AO_val1 == 0 && old_w.AO_val2 == 0); # elif !defined(AO_HAVE_double_store) \ || !defined(AO_HAVE_double_store_full) \ || 
!defined(AO_HAVE_double_store_release) \ || !defined(AO_HAVE_double_store_release_write) \ || !defined(AO_HAVE_double_store_write) MISSING(AO_double_store); # endif # if defined(AO_HAVE_compare_double_and_swap_doubleXX) TA_assert(!AO_compare_double_and_swap_doubleXX(&w, 17, 42, 12, 13)); TA_assert(w.AO_val1 == 0 && w.AO_val2 == 0); TA_assert(AO_compare_double_and_swap_doubleXX(&w, 0, 0, 12, 13)); TA_assert(w.AO_val1 == 12 && w.AO_val2 == 13); TA_assert(!AO_compare_double_and_swap_doubleXX(&w, 12, 14, 64, 33)); TA_assert(w.AO_val1 == 12 && w.AO_val2 == 13); TA_assert(!AO_compare_double_and_swap_doubleXX(&w, 11, 13, 85, 82)); TA_assert(w.AO_val1 == 12 && w.AO_val2 == 13); TA_assert(!AO_compare_double_and_swap_doubleXX(&w, 13, 12, 17, 42)); TA_assert(w.AO_val1 == 12 && w.AO_val2 == 13); TA_assert(AO_compare_double_and_swap_doubleXX(&w, 12, 13, 17, 42)); TA_assert(w.AO_val1 == 17 && w.AO_val2 == 42); TA_assert(AO_compare_double_and_swap_doubleXX(&w, 17, 42, 0, 0)); TA_assert(w.AO_val1 == 0 && w.AO_val2 == 0); # else MISSING(AO_compare_double_and_swap_double); # endif # if defined(AO_HAVE_compare_and_swap_doubleXX) TA_assert(!AO_compare_and_swap_doubleXX(&w, 17, 12, 13)); TA_assert(w.AO_val1 == 0 && w.AO_val2 == 0); TA_assert(AO_compare_and_swap_doubleXX(&w, 0, 12, 13)); TA_assert(w.AO_val1 == 12 && w.AO_val2 == 13); TA_assert(!AO_compare_and_swap_doubleXX(&w, 13, 12, 33)); TA_assert(w.AO_val1 == 12 && w.AO_val2 == 13); TA_assert(!AO_compare_and_swap_doubleXX(&w, 1213, 48, 86)); TA_assert(w.AO_val1 == 12 && w.AO_val2 == 13); TA_assert(AO_compare_and_swap_doubleXX(&w, 12, 17, 42)); TA_assert(w.AO_val1 == 17 && w.AO_val2 == 42); TA_assert(AO_compare_and_swap_doubleXX(&w, 17, 0, 0)); TA_assert(w.AO_val1 == 0 && w.AO_val2 == 0); # else MISSING(AO_compare_and_swap_double); # endif # if defined(AO_HAVE_double_compare_and_swapXX) old_w.AO_val1 = 4116; old_w.AO_val2 = 2121; new_w.AO_val1 = 8537; new_w.AO_val2 = 6410; TA_assert(!AO_double_compare_and_swapXX(&w, old_w, new_w)); 
TA_assert(w.AO_val1 == 0 && w.AO_val2 == 0); TA_assert(AO_double_compare_and_swapXX(&w, w, new_w)); TA_assert(w.AO_val1 == 8537 && w.AO_val2 == 6410); old_w.AO_val1 = new_w.AO_val1; old_w.AO_val2 = 29; new_w.AO_val1 = 820; new_w.AO_val2 = 5917; TA_assert(!AO_double_compare_and_swapXX(&w, old_w, new_w)); TA_assert(w.AO_val1 == 8537 && w.AO_val2 == 6410); old_w.AO_val1 = 11; old_w.AO_val2 = 6410; new_w.AO_val1 = 3552; new_w.AO_val2 = 1746; TA_assert(!AO_double_compare_and_swapXX(&w, old_w, new_w)); TA_assert(w.AO_val1 == 8537 && w.AO_val2 == 6410); old_w.AO_val1 = old_w.AO_val2; old_w.AO_val2 = 8537; new_w.AO_val1 = 4116; new_w.AO_val2 = 2121; TA_assert(!AO_double_compare_and_swapXX(&w, old_w, new_w)); TA_assert(w.AO_val1 == 8537 && w.AO_val2 == 6410); old_w.AO_val1 = old_w.AO_val2; old_w.AO_val2 = 6410; new_w.AO_val1 = 1; TA_assert(AO_double_compare_and_swapXX(&w, old_w, new_w)); TA_assert(w.AO_val1 == 1 && w.AO_val2 == 2121); old_w.AO_val1 = new_w.AO_val1; old_w.AO_val2 = w.AO_val2; new_w.AO_val1--; new_w.AO_val2 = 0; TA_assert(AO_double_compare_and_swapXX(&w, old_w, new_w)); TA_assert(w.AO_val1 == 0 && w.AO_val2 == 0); # else MISSING(AO_double_compare_and_swap); # endif } libatomic_ops-7.6.12/tests/test_malloc.c000066400000000000000000000133101411761111000202360ustar00rootroot00000000000000/* * Copyright (c) 2005 Hewlett-Packard Development Company, L.P. * * This file may be redistributed and/or modified under the * terms of the GNU General Public License as published by the Free Software * Foundation; either version 2, or (at your option) any later version. * * It is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU General Public License in the * file COPYING for more details. 
*/ #if defined(HAVE_CONFIG_H) # include "config.h" #endif #ifdef DONT_USE_MMAP # undef HAVE_MMAP #endif #include "run_parallel.h" #include #include #include "atomic_ops_malloc.h" #ifndef DEFAULT_NTHREADS # ifdef HAVE_MMAP # define DEFAULT_NTHREADS 16 /* must be <= MAX_NTHREADS */ # else # define DEFAULT_NTHREADS 3 # endif #endif #ifndef N_REVERSALS # ifdef AO_USE_PTHREAD_DEFS # define N_REVERSALS 4 # else # define N_REVERSALS 1000 /* must be even */ # endif #endif #ifndef LIST_LENGTH # ifdef HAVE_MMAP # define LIST_LENGTH 1000 # else # define LIST_LENGTH 100 # endif #endif #ifndef LARGE_OBJ_SIZE # ifdef HAVE_MMAP # define LARGE_OBJ_SIZE 200000 # else # define LARGE_OBJ_SIZE 20000 # endif #endif #ifdef USE_STANDARD_MALLOC # define AO_malloc(n) malloc(n) # define AO_free(p) free(p) # define AO_malloc_enable_mmap() #endif typedef struct list_node { struct list_node *next; int data; } ln; ln *cons(int d, ln *tail) { # ifdef AO_HAVE_fetch_and_add1 static volatile AO_t extra = 0; size_t my_extra = (size_t)AO_fetch_and_add1(&extra) % 101; # else static size_t extra = 0; /* data race in extra is OK */ size_t my_extra = (extra++) % 101; # endif ln *result; int * extras; unsigned i; result = (ln *)AO_malloc(sizeof(ln) + sizeof(int)*my_extra); if (result == 0) { fprintf(stderr, "Out of memory\n"); /* Normal for more than about 10 threads without mmap? 
*/ exit(2); } result -> data = d; result -> next = tail; extras = (int *)(result+1); for (i = 0; i < my_extra; ++i) extras[i] = 42; return result; } #ifdef DEBUG_RUN_ONE_TEST void print_list(ln *l) { ln *p; for (p = l; p != 0; p = p -> next) { printf("%d, ", p -> data); } printf("\n"); } #endif /* DEBUG_RUN_ONE_TEST */ /* Check that l contains numbers from m to n inclusive in ascending order */ void check_list(ln *l, int m, int n) { ln *p; int i; for (p = l, i = m; p != 0 && i <= n; p = p -> next, ++i) { if (i != p -> data) { fprintf(stderr, "Found %d, expected %d\n", p -> data, i); abort(); } } if (i <= n) { fprintf(stderr, "Number not found: %d\n", i); abort(); } if (p != 0) { fprintf(stderr, "Found unexpected number: %d\n", i); abort(); } } /* Create a list of integers from m to n */ ln * make_list(int m, int n) { if (m > n) return 0; return cons(m, make_list(m+1, n)); } void free_list(ln *x) { while (x != NULL) { ln *next = x -> next; AO_free(x); x = next; } } /* Reverse list x, and concatenate it to y, deallocating no longer needed */ /* nodes in x. */ ln * reverse(ln *x, ln *y) { ln * result; if (x == 0) return y; result = reverse(x -> next, cons(x -> data, y)); AO_free(x); return result; } int dummy_test(void) { return 1; } void * run_one_test(void * arg) { ln * x = make_list(1, LIST_LENGTH); int i; char *p = (char *)AO_malloc(LARGE_OBJ_SIZE); char *q; char a = 'a' + ((int)((AO_PTRDIFF_T)arg) * 2) % ('z' - 'a' + 1); char b = a + 1; if (0 == p) { # ifdef HAVE_MMAP fprintf(stderr, "AO_malloc(%d) failed\n", LARGE_OBJ_SIZE); abort(); # else fprintf(stderr, "AO_malloc(%d) failed: This is normal without mmap\n", LARGE_OBJ_SIZE); # endif } else { p[0] = p[LARGE_OBJ_SIZE/2] = p[LARGE_OBJ_SIZE-1] = a; q = (char *)AO_malloc(LARGE_OBJ_SIZE); if (q == 0) { fprintf(stderr, "Out of memory\n"); /* Normal for more than about 10 threads without mmap? 
*/ exit(2); } q[0] = q[LARGE_OBJ_SIZE/2] = q[LARGE_OBJ_SIZE-1] = b; if (p[0] != a || p[LARGE_OBJ_SIZE/2] != a || p[LARGE_OBJ_SIZE-1] != a) { fprintf(stderr, "First large allocation smashed\n"); abort(); } AO_free(p); if (q[0] != b || q[LARGE_OBJ_SIZE/2] != b || q[LARGE_OBJ_SIZE-1] != b) { fprintf(stderr, "Second large allocation smashed\n"); abort(); } AO_free(q); } # ifdef DEBUG_RUN_ONE_TEST x = reverse(x, 0); print_list(x); x = reverse(x, 0); print_list(x); # endif for (i = 0; i < N_REVERSALS; ++i) { x = reverse(x, 0); } check_list(x, 1, LIST_LENGTH); free_list(x); return NULL; } #ifndef LOG_MAX_SIZE # define LOG_MAX_SIZE 16 #endif #define CHUNK_SIZE (1 << LOG_MAX_SIZE) int main(int argc, char **argv) { int nthreads; if (1 == argc) { nthreads = DEFAULT_NTHREADS; } else if (2 == argc) { nthreads = atoi(argv[1]); if (nthreads < 1 || nthreads > MAX_NTHREADS) { fprintf(stderr, "Invalid # of threads argument\n"); exit(1); } } else { fprintf(stderr, "Usage: %s [# of threads]\n", argv[0]); exit(1); } printf("Performing %d reversals of %d element lists in %d threads\n", N_REVERSALS, LIST_LENGTH, nthreads); AO_malloc_enable_mmap(); /* Test various corner cases. */ AO_free(NULL); AO_free(AO_malloc(0)); # ifdef HAVE_MMAP AO_free(AO_malloc(CHUNK_SIZE - (sizeof(AO_t)-1))); /* large alloc */ # endif run_parallel(nthreads, run_one_test, dummy_test, "AO_malloc/AO_free"); return 0; } libatomic_ops-7.6.12/tests/test_stack.c000066400000000000000000000205001411761111000200730ustar00rootroot00000000000000/* * Copyright (c) 2005 Hewlett-Packard Development Company, L.P. * * This file may be redistributed and/or modified under the * terms of the GNU General Public License as published by the Free Software * Foundation; either version 2, or (at your option) any later version. * * It is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. 
See the GNU General Public License in the * file COPYING for more details. */ #if defined(HAVE_CONFIG_H) # include "config.h" #endif #include #if defined(__vxworks) int main(void) { printf("test skipped\n"); return 0; } #else #if ((defined(_WIN32) && !defined(__CYGWIN32__) && !defined(__CYGWIN__)) \ || defined(_MSC_VER) || defined(_WIN32_WINCE)) \ && !defined(AO_USE_WIN32_PTHREADS) # define USE_WINTHREADS #endif #ifdef USE_WINTHREADS # include #else # include #endif #include #include "atomic_ops_stack.h" /* includes atomic_ops.h as well */ #if (defined(_WIN32_WCE) || defined(__MINGW32CE__)) && !defined(AO_HAVE_abort) # define abort() _exit(-1) /* there is no abort() in WinCE */ #endif #ifndef MAX_NTHREADS # define MAX_NTHREADS 100 #endif #ifndef DEFAULT_NTHREADS # define DEFAULT_NTHREADS 16 /* must be <= MAX_NTHREADS */ #endif #ifdef NO_TIMES # define get_msecs() 0 #elif (defined(USE_WINTHREADS) || defined(AO_USE_WIN32_PTHREADS)) \ && !defined(CPPCHECK) # include unsigned long get_msecs(void) { struct timeb tb; ftime(&tb); return (unsigned long)tb.time * 1000 + tb.millitm; } #else /* Unix */ # include # include unsigned long get_msecs(void) { struct timeval tv; gettimeofday(&tv, 0); return (unsigned long)tv.tv_sec * 1000 + tv.tv_usec/1000; } #endif /* !NO_TIMES */ typedef struct le { AO_t next; int data; } list_element; AO_stack_t the_list = AO_STACK_INITIALIZER; void add_elements(int n) { list_element * le; if (n == 0) return; add_elements(n-1); le = (list_element *)malloc(sizeof(list_element)); if (le == 0) { fprintf(stderr, "Out of memory\n"); exit(2); } le -> data = n; AO_stack_push(&the_list, (AO_t *)le); } #ifdef VERBOSE void print_list(void) { list_element *p; for (p = (list_element *)AO_REAL_HEAD_PTR(the_list); p != 0; p = (list_element *)AO_REAL_NEXT_PTR(p -> next)) printf("%d\n", p -> data); } #endif /* VERBOSE */ static char marks[MAX_NTHREADS * (MAX_NTHREADS + 1) / 2 + 1]; void check_list(int n) { list_element *p; int i; for (i = 1; i <= n; ++i) 
marks[i] = 0; for (p = (list_element *)AO_REAL_HEAD_PTR(the_list); p != 0; p = (list_element *)AO_REAL_NEXT_PTR(p -> next)) { i = p -> data; if (i > n || i <= 0) { fprintf(stderr, "Found erroneous list element %d\n", i); abort(); } if (marks[i] != 0) { fprintf(stderr, "Found duplicate list element %d\n", i); abort(); } marks[i] = 1; } for (i = 1; i <= n; ++i) if (marks[i] != 1) { fprintf(stderr, "Missing list element %d\n", i); abort(); } } volatile AO_t ops_performed = 0; #ifndef LIMIT /* Total number of push/pop ops in all threads per test. */ # ifdef AO_USE_PTHREAD_DEFS # define LIMIT 20000 # else # define LIMIT 1000000 # endif #endif #ifdef AO_HAVE_fetch_and_add # define fetch_and_add(addr, val) AO_fetch_and_add(addr, val) #else /* Fake it. This is really quite unacceptable for timing */ /* purposes. But as a correctness test, it should be OK. */ AO_INLINE AO_t fetch_and_add(volatile AO_t * addr, AO_t val) { AO_t result = AO_load(addr); AO_store(addr, result + val); return result; } #endif #ifdef USE_WINTHREADS DWORD WINAPI run_one_test(LPVOID arg) #else void * run_one_test(void * arg) #endif { list_element * t[MAX_NTHREADS + 1]; unsigned index = (unsigned)(size_t)arg; unsigned i; # ifdef VERBOSE unsigned j = 0; printf("starting thread %u\n", index); # endif while (fetch_and_add(&ops_performed, index + 1) + index + 1 < LIMIT) { for (i = 0; i <= index; ++i) { t[i] = (list_element *)AO_stack_pop(&the_list); if (0 == t[i]) { fprintf(stderr, "FAILED\n"); abort(); } } for (i = 0; i <= index; ++i) { AO_stack_push(&the_list, (AO_t *)t[i]); } # ifdef VERBOSE j += index + 1; # endif } # ifdef VERBOSE printf("finished thread %u: %u total ops\n", index, j); # endif return 0; } #ifndef N_EXPERIMENTS # define N_EXPERIMENTS 1 #endif unsigned long times[MAX_NTHREADS + 1][N_EXPERIMENTS]; int main(int argc, char **argv) { int nthreads; int max_nthreads; int exper_n; if (1 == argc) { max_nthreads = DEFAULT_NTHREADS; } else if (2 == argc) { max_nthreads = atoi(argv[1]); if 
(max_nthreads < 1 || max_nthreads > MAX_NTHREADS) { fprintf(stderr, "Invalid max # of threads argument\n"); exit(1); } } else { fprintf(stderr, "Usage: %s [max # of threads]\n", argv[0]); exit(1); } for (exper_n = 0; exper_n < N_EXPERIMENTS; ++ exper_n) for (nthreads = 1; nthreads <= max_nthreads; ++nthreads) { unsigned i; # ifdef USE_WINTHREADS DWORD thread_id; HANDLE thread[MAX_NTHREADS]; # else pthread_t thread[MAX_NTHREADS]; # endif int list_length = nthreads*(nthreads+1)/2; unsigned long start_time; list_element * le; # ifdef VERBOSE printf("Before add_elements: exper_n=%d, nthreads=%d," " max_nthreads=%d, list_length=%d\n", exper_n, nthreads, max_nthreads, list_length); # endif add_elements(list_length); # ifdef VERBOSE printf("Initial list (nthreads = %d):\n", nthreads); print_list(); # endif ops_performed = 0; start_time = get_msecs(); for (i = 1; (int)i < nthreads; ++i) { int code; # ifdef USE_WINTHREADS thread[i] = CreateThread(NULL, 0, run_one_test, (LPVOID)(size_t)i, 0, &thread_id); code = thread[i] != NULL ? 0 : (int)GetLastError(); # else code = pthread_create(&thread[i], 0, run_one_test, (void *)(size_t)i); # endif if (code != 0) { fprintf(stderr, "Thread creation failed %u\n", (unsigned)code); exit(3); } } /* We use the main thread to run one test. This allows gprof */ /* profiling to work, for example. */ run_one_test(0); for (i = 1; (int)i < nthreads; ++i) { int code; # ifdef USE_WINTHREADS code = WaitForSingleObject(thread[i], INFINITE) == WAIT_OBJECT_0 ? 
0 : (int)GetLastError(); # else code = pthread_join(thread[i], 0); # endif if (code != 0) { fprintf(stderr, "Thread join failed %u\n", (unsigned)code); abort(); } } times[nthreads][exper_n] = get_msecs() - start_time; # ifdef VERBOSE printf("nthreads=%d, time_ms=%lu\n", nthreads, times[nthreads][exper_n]); printf("final list (should be reordered initial list):\n"); print_list(); # endif check_list(list_length); while ((le = (list_element *)AO_stack_pop(&the_list)) != 0) free(le); } for (nthreads = 1; nthreads <= max_nthreads; ++nthreads) { # ifndef NO_TIMES unsigned long sum = 0; # endif printf("About %d pushes + %d pops in %d threads:", LIMIT, LIMIT, nthreads); # ifndef NO_TIMES for (exper_n = 0; exper_n < N_EXPERIMENTS; ++exper_n) { # ifdef VERBOSE printf(" [%lums]", times[nthreads][exper_n]); # endif sum += times[nthreads][exper_n]; } printf(" %lu msecs\n", (sum + N_EXPERIMENTS/2)/N_EXPERIMENTS); # else printf(" completed\n"); # endif } return 0; } #endif