pax_global_header00006660000000000000000000000064136162676020014523gustar00rootroot0000000000000052 comment=6eec038c11a821b1d2848e8197cbc5b6bca6b3a0 icecream-1.3.1/000077500000000000000000000000001361626760200132755ustar00rootroot00000000000000icecream-1.3.1/.cirrus.yml000066400000000000000000000011601361626760200154030ustar00rootroot00000000000000env: CIRRUS_CLONE_DEPTH: 1 task: env: CFLAGS: -I /usr/local/include CXXFLAGS: -I/usr/local/include LDFLAGS: -L/usr/local/lib freebsd_instance: matrix: image: freebsd-12-0-release-amd64 image: freebsd-11-2-release-amd64 install_script: - sed -i.bak -e 's,pkg+http://pkg.FreeBSD.org/\${ABI}/quarterly,pkg+http://pkg.FreeBSD.org/\${ABI}/latest,' /etc/pkg/FreeBSD.conf - pkg upgrade -y - pkg install -y autoconf automake docbook2X gmake libtool lzo2 zstd script: - ./autogen.sh - ./configure --prefix=/usr/local - gmake -j`sysctl -n kern.smp.cpus` - gmake dist icecream-1.3.1/.gitignore000066400000000000000000000013251361626760200152660ustar00rootroot00000000000000# out-of-tree build compile build/ # autoconf m4/libtool.m4 m4/ltoptions.m4 m4/ltsugar.m4 m4/ltversion.m4 m4/lt~obsolete.m4 Makefile.in aclocal.m4 autom4te.cache config.guess config.h.in config.sub configure depcomp install-sh ltmain.sh missing *.o *.lo .deps .libs Makefile stamp-h1 client/icecc client/icecc-create-env client/icecc-test-env client/libclient.a compilerwrapper/compilerwrapper config.h config.log config.status daemon/iceccd libtool test-driver minilzo/libminilzo.la scheduler/icecc-scheduler services/icecc.pc services/libicecc.la suse/icecream.spec doc/*.1 doc/*.7 doc/index.html tests/results tests/test-suite.log tests/testargs tests/testargs.log tests/testargs.trs tests/test-setup.sh tests/listing.txt icecream-1.3.1/.travis.yml000066400000000000000000000055071361626760200154150ustar00rootroot00000000000000language: cpp script: - | - ./autogen.sh - ./configure --prefix=$PWD/_inst - make -s -j $(getconf _NPROCESSORS_ONLN) - | if test "$TRAVIS_OS_NAME" = 
"linux"; then strict="-strict" if test -n "$VALGRIND"; then # See tests/README. sudo /sbin/setcap cap_sys_chroot+ep /usr/lib/valgrind/memcheck-amd64-linux fi make -s test${strict} VALGRIND=$VALGRIND TESTCC=gcc TESTCXX=g++ if test $? -ne 0; then exit 1 fi make -s test${strict} VALGRIND=$VALGRIND TESTCC=clang TESTCXX=clang++ if test $? -ne 0; then exit 1 fi elif test "$TRAVIS_OS_NAME" = "osx"; then if test -n "$STRICTTESTS"; then strict="-strict" fi make -s test${strict} TESTCC=clang TESTCXX=clang++ if test $? -ne 0; then exit 1 fi fi make -s dist if test $? -ne 0; then exit 1 fi matrix: include: - os: linux sudo: true # for setcap so we can run the tests in chroot. compiler: gcc dist: xenial - os: linux sudo: true # for setcap so we can run the tests in chroot. compiler: clang dist: xenial - os: osx before_install: - brew update - brew install lzo zstd docbook2x gdb ccache libarchive expect telnet - os: linux sudo: true # for setcap so we can run the tests in chroot. compiler: clang env: VALGRIND=1 dist: xenial - os: linux sudo: true # for setcap so we can run the tests in chroot. compiler: clang env: BUILD_TYPE=ubsan dist: xenial allow_failures: before_script: - | if [ "$BUILD_TYPE" == "ubsan" ]; then export SAN_FLAGS="-fsanitize=undefined -fno-omit-frame-pointer" export LDFLAGS="$UBSAN_FLAGS ${LDFLAGS}" export UBSAN_OPTIONS=print_stacktrace=1 fi - | # Without this, sanitizers don't work at all for whatever reason (BFD ld too old?). 
if [ "$TRAVIS_OS_NAME" == "linux" -a -n "$SAN_FLAGS" ]; then export LDFLAGS="${LDFLAGS} -fuse-ld=gold" fi - | if [ "$TRAVIS_OS_NAME" == "osx" ]; then export LDFLAGS="${LDFLAGS} -L/usr/local/opt/libarchive/lib" export CPPFLAGS="${CPPFLAGS} -I/usr/local/opt/libarchive/include" export PKG_CONFIG_PATH="/usr/local/opt/libarchive/lib/pkgconfig" fi - | export CFLAGS="${SAN_FLAGS} ${CFLAGS} -Wall -Wextra" export CXXFLAGS="${SAN_FLAGS} ${CXXFLAGS} -Wall -Wextra" export LDFLAGS="${SAN_FLAGS} ${LDFLAGS}" addons: apt: packages: - gcc - clang - libcap-ng-dev - libcap-ng-utils - liblzo2-dev - libzstd1-dev - docbook2x - realpath - gdb - valgrind - libarchive-dev - expect - telnet icecream-1.3.1/BENCH000066400000000000000000000153241361626760200140440ustar00rootroot00000000000000 There were no structured benchmarks of icecream, so I started some. I'm benchmarking 5 runs each, throwing away the worst and the best run, and then averaging the rest of the 3. There are two machines in the cluster, both single cpu and both about the same speed (1.7GHz Pentium M). Normally the tests are done via otherwise idle WLAN (54MBit). WLAN has a bad latency and very bad throughput, which gives icecream a hard time, and should be compareable to a loaded cabled network environment. For comparison, I repeated some tests via 100MBit cabled LAN. I'm testing linux 2.6.16 (defconfig), which is a set of small C files with sometimes larger C files inbetween. its a tough benchmark because compiling C is rather quick, so remote overhead has to be low. No icecream: make -j1: 315s make -j2: 322s make -j3: 324s make -j10: 334s result: without icecream, starting more compilers than CPUs available is a bad idea. icecream wrapper, no scheduler. make -j1: 323s make -j10: 340s result: Overhead of just using icecream without cluster is neglectible (2%) in all cases. 
dirk's no-latency icecream: remote daemon with -m 1: make -j1: 418s make -j2: 397s make -j3: 263s make -j10: 230s make -j10/100: 203s make -j100: 231s make -j10/100: 202s result: Enabling icecream without parallel compilation is a horrible mistake. icecream must be tuned to detect and compensate this situation better. Maximum performance improvement of icecream is 27% (LAN: 36%) out of the theoretical 50%. ====================================================================== Qt 3.3.6's src/ subdirectory. This is a bunch of medium and large C++ files. It gives icecream good opportunities because compilation time is comparatively low and the preprocessed files are not too large (low STL usage). No icecream: make -j1: 368s make -j3: 363s result: apparently there is a small win by starting more compilers than CPUs in parallel. Perhaps the I/O overhead is not neglectible like in the linux kernel case. dirk's no-latency icecream, remote daemon with -m 2: make -j1: 572s make -j3: 273s make -j10: 269s make -j10/100: 239s make -j100: 282s result: again, not compiling in parallel is a deadly sin. trying to overload the cluster with very high parallelism as well. Maximum performance improvement is again 27% and 36% for LAN. That these numbers compare equally with the Linux kernel case is astonishing and needs explanation. Now, to make sure that the no-latency patch hasn't regressed anything, we're comparing with stock 0.7 (which already has some performance improvements over 0.6): make -j1: 569s make -j10: 349s make -j10/100: 253s make -j100/100: 278s It is remarkable, that 0.7 does not provide much advantage over compiling locally (6% speedup) in a WLAN, while providing the expected 36% speedup for LAN. This proves that no-latency provides significant wins for unstable/bad network connections and does not regress performance for good networking setups. The overall 20% improvement is not bad at all. 
2006-06-16 make-it-cool-branch: make -j10/100: 244s make -j1: 376s result: correcting process accounting for local jobs makes -j1 fast again (just 2% overhead) icecream, always compile remote even though host is not faster: make -j10: 538s make -j10/sched: 389s As we can see, the scheduler improves performance by 38%. ====================================================================== New performance experiments. New baseline: make-it-cool, both with -m 1, make -j5 make -j5 : 382s make -j5 : 354s make -j5 : 336s make -j5 : 355s remote with -m 2 make -j5 : 333s make -j5 : 299s make -j5 : 307s remote with -m 2, preload scheduler: make -j5 : 303s make -j5 : 287s make -j5 : 291s remote with -m 1, preload scheduler: make -j5 : 287s make -j5 : 288s make -j5 : 289s remote with -m 1, preload scheduler, optimized return: make -j5 : 288s make -j5 : 289s make -j5 : 288s remote with -m 2, preload scheduler, optimized return: make -j5 : 279s make -j5 : 281s make -j5 : 278s As we can see, over-booking the remote slave improves performance by 13%. As the CPU hardly gets faster, it means that we're reducing idle wait time this way. One experiment was to pre-load jobs on the remote slave. This means even though all compile slots are filled, it gets one (exactly one) job assigned. The daemon itself will start compilation as soon as it gets a free slot, reducing both client<->CS and CS<->scheduler roundtrip. Overall, it gave an impressive 4% speedup. A lot of time is however spent on writing back the compiled object file to the client, and this is dominated by network saturation and latency and not by CPU usage. The obvious solution is to notify the scheduler about a free slot as soon as compilation (but not write-back) has finished. With remote over-booking this resulted in another 3% speedup, while no improvements could be measured in the -m 1 case. 
Given that it significantly reduced code complexity in the daemon, it should still be an improvement (reduced code size by almost 8%!). ====================================================================== Load detection tunings. The biggest problem with accurate load detection is that it is impossible to find out how much cpu time a iceccd child is currently using. All you can get is how much time it used overall, but only when it exited. Which gives you a lot of cpu-usage peaks sprinkled over time, and you have to somehow average that out in a meaningful way. Actually, the Linux kernel tracks cpu time, and you can read it in /proc//stat for any child. unfortunately it is converted to seconds in there, so resolution isn't much more accurate. Watching the old load detector, it became obvious that it once in a while jumps to 1000 because of own jobs eating 100% cpu, but not finishing within the measure timeframe, which causes the host to be blacklisted by the scheduler, even though nothing is wrong with it. There are two solutions: - trying to get a more accurate usage over time - only track usage whenever it is accurate, e.g. a child exited. As the second possibility has problems with multi-cpu environments (you'd have to wait for all jobs to finish before doing another idleness sample, which probably reduces throughput considerably), first one was chosen. Simplest idea: assume that overall system nice load is at least partially caused by our own childs. remote with -m 2: make -j5 : 272s make -j5 : 274s make -j5 : 271s Hmm.. 2% win. 
============================================================================ New baseline: 0.8.0: remote with -m 1: make -j5 : 257s without compression: make -j5: 442s icecream-1.3.1/COPYING000066400000000000000000000432541361626760200143400ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. 
You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. 
The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. 
(Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. 
If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. 
If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. 
The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. 
If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. 
icecream-1.3.1/Makefile.am000066400000000000000000000011071361626760200153300ustar00rootroot00000000000000EXTRA_DIST = README.md ACLOCAL_AMFLAGS = -I m4 AM_LIBTOOLFLAGS = --silent SUBDIRS = \ services \ daemon \ client \ doc \ suse \ compilerwrapper \ scheduler \ tests \ unittests dist-hook: ( cd $(top_srcdir) && git log --date=short --pretty="format:@%cd %an <%ae> [%H]%n%n%s%n%n%e%b" | sed -e "s|^\([^@]\)|\t\1|" -e "s|^@||" ) >$(distdir)/ChangeLog test: install # 'make test' also depends on 'make check', but using plain 'test: check' dependency breaks with recursive make $(MAKE) check $(MAKE) -C tests $@ test-strict: install $(MAKE) check $(MAKE) -C tests $@ icecream-1.3.1/NEWS000066400000000000000000000545661361626760200140140ustar00rootroot000000000000001.3.1 - fix creating compiler environment on Mac 1.3 - remove hardcoded compiler paths (compiler tarball is created with the same compiler that is used for build) - avoid build overloading by limiting number of local preprocessing runs to local CPUs available - fix Objective C/C++ support - fix job preloading to again allow sending one extra job to a fully busy node - use libarchive to handle archives instead of using tar - support xz and zstd compression for compiler tarballs (improved speed/size) - use zstd compression when sending network data, if possible (improved speed) - improve speed of creating compiler tarballs - more robust handling of receiving compiler tarballs - default cache size for compiler environments has been increased to 256MiB - path handling fixes - platforms improvements in icecc-create-env - fix memory detection on MacOSX - improve local performance when -include-pch is used - simplify PCH handling - fix keeping order of compiler debug arguments, especially -gsplit-dwarf - better support for assembler and preprocessor flags when building the Linux kernel - force local rebuild if local preprocessing fails (works around some GCC -fdirectives-only problems) - limit -fdirectives-only 
workarounds only to cases when it is used - improved handling of network timeouts - avoid a timeout when the scheduler cannot find any suitable host for building - if ICECC_SLOW_NETWORK=1 is set, sending network data is split into smaller chunks - --interface option allows restricting which network interface daemons will use - improved debug logs - release builds are built without assert checks, use --enable-debug for developer builds - added a manual page for icerun 1.2 - Add more compiler flags to the list that mean build locally * -pedantic (preprocessing only) * -pedantic-errors (preprocessing only) * -fsyntax-only - don't force local compile on -include-pch - Make load calculation better - Limit amount of data sent at one time for slow networks/remotes - Many updates to the tests - Better logs of some error conditions - Build locally if it is likely that there will not be more compiles - Support adding gcc and clang to the same environment - Better handling of icerun - Cygwin now works as a client - Don't expose Host endianness to network - General code cleanup 1.1 - revert "Add load control for preprocessing" - better handle clang arugments with spaces - remove "crashme" command from scheduler - better logging around exception 30 1.1rc3 - Fix broken pipe race condition - Better error handling - Crash fixes - Documentation update - Include objcopy in environment if it exists - Add CI builds on travis-ci: OSX, ubuntu trusty - Fixed several memory errors - Extract enviornments with compile priority, not daemon priority - Handle spaces in the parameters following -MT - Handle -target -arch -c-isystem and -cxx-isystem (clang) - Handle NAT situations better - Add load control for preprocessing - Handle scheduler unable to reach remote machine - Make scheduler election algorithm handle multiple netnames 1.1rc2 - -gsplit-dwarf support for debug fission (https://gcc.gnu.org/wiki/DebugFission) - bug fixes since 1.1rc1 1.1rc1 - require capng - use system lzo, drop 
bundled minilzo - allow ICECC_VERSION file to start with a . - work with different executable names for docbook-to-man on different distributions - support color diagnostics if possible - fix several crashes - clean up and improve documentation - move icecc-create-env to bindir. - Try to use only the best available scheduler - make daemon port configurable - USE_SCHEDULER now accepts host:port to change ports - force local compilation when required in more cases - detect some clang out of memory errors. - better support for FreeBSD - bump protocol version to 34 - daemon requires chroot - some code refactoring for better maintainability - find if feature is supported by checking protocol version - better logging on error conditions - Create many tests cases - use path name instead of localhost for unix socket - clean up build system - use getnameinfo() instead of inet_ntoa() - fix some valgrind found issues - drop supplementary groups before setgid() - make signal handlers more robust - work better if user "icecc" does not exist - find compilers outside of PATH when building locally 1.0.1 - use su option for logrotate - require logrotate for suse package 1.0.1rc1 - Remove filenames from md5sum output - Fix off-by-one error - add missing cap-ng.m4 - uninstall also SUSE-specific files when uninstalling - install COPYING, README.md and NEWS as useful user documentation - use m4/ and ship cap-ng.m4 there - if -lcap-ng is needed for icecc.a, add it to .pc file (for icemon) - libicecream-devel may require libcap-ng-devel - fix icecc-create-env with clang 3.3+ - fix /var/run/icecc handling - make sure to not hide e.g. SEGV from a tool with exit code 0 - detect bad_alloc as out-of-memory - add "[Cc]annot allocate memory" to the list of OOM error messages - Detect gcc "out of memory allocating" as EXIT_OUT_OF_MEMORY. 
Closes #49 - Avoid symlinks in the absolute paths - swap arguments for kill() call, since pid is the first argument 1.0.0 - log error message when cleaning up cache fails during startup - if getuid() != 0, then our u/gid will be getu/gid() the whole time - chmod/chown envs dir when preparing this - be more careful when cleaning up envs directory - cleanup envs dir still with root privileges - mkdir -p - Revert "cleanup envs dir still with root privileges" - cleanup envs dir in %post in specfile - fix scheduler binary name in suse specfile - with cap-ng geteuid() is not a sign of being able to do chroot - avoid debug message without endl - avoid unused parameter warnings - install clang/clang++ wrappers symlinks conditionally again - adjust specfile for optional clang symlinks - make clang wrappers package also require clang - fix clang wrappers build - refer to icecream-users@googlegroups.com ML as the contact - mention the Linux3_ hack leading to nodes sometimes not being used - Linux3_ nodes may not compile on other nodes either, actually - prevent icerun-wrapped commands failing if path contains 'clang' - remove unnecessary references to KDE to make icecream look KDE-specific - adjust references to icecream path - sync the ccache section between README.md and the manpage - AC_CONFIG_HEADERS instead of obsolete and removed AM_CONFIG_HEADER - add the README.md to the tar - mild relicensing, the code taken from ksysguardd is really a minor part by now 0.9.98.2 (1.0rc4) - suse: Backports from OBS and create symlinks in /opt/icecream/bin on openSUSE <= 12.2. - fix libexec dir name in opensuse specfile - icecream doesn't use m4 directory - explicit configure option for whether to use clang's include rewriting - use the clang rewrite options in opensuse specfile - fix builddir != srcdir - icecream user on suse is 'icecream', not 'icecc' - do not remove env. 
basedir itself - do not chown() with root uid - do not complain needlessly about missing icecc user - Fix icecc-create-env for relative paths - Use docbook2man to generate manpages rather than KDE calls. 0.9.98.1 (1.0rc3) - install clang++ clang symlinks unconditionally. - move openSUSE's rpm install scripts to suse/Makefile.am. - links installation should be pkglibexec instead of libexec - rename scheduler to icecc-scheduler to avoid name clash - change to an unprivileged user when running the scheduler as root. - move scheduler logs to icecc specific dir - ignore if meinproc is not avilable - version bump minilzo to 2.0.6. 0.9.98 (1.0rc2) - install icecc links in a more convenient dir - fix for assembler usage during kvm builds - fix generation of man pages - some readme generated from en.opensuse.org content - update autoconf and automake macros 0.9.97 (1.0rc1) - support for Clang compiler Clang now should work out of the box just like GCC. Clang with -frewrite-includes option is recommended (3.2+ or patched). 
- support 'icecc ' properly - try to avoid compiling on the local machine if it is busy - do not use old compiler if it was changed while icecream daemon was running - verify if remote hosts can actually be used for compilation (avoids problems with old kernel versions) - support for custom compiler plugins (GCC/Clang) - fix gcc 4.6 support - reduce usage of PATH_MAX for better portability - fix build-native to add default GNU assembler - fix SIGSEGV calling build_native without args - parse @file option - skip .[h,hpp] header files and check precompiled headers from -include opts - move log and socket to an icecc especific folder - rename nobody to user - add support for libcap-ng - moving to https://github.com/icecc/icecream - add clang wrapper symlinks - allow normal users to actually connect to the daemon's unix socket - rebuild environment if the compiler changes - check if the remote node can actually use an environment - setting to avoid using hosts where the environment cannot be checked - add [compiler] to the command line template in --help - support for compiler plugins / multiple native environments - do not create environments containing both gcc and clang - check env cache size also when creating new native environment - keep native envs longer only if there aren't too many - no "basic" native env really anymore, now with env per each compiler - include paths in tarball md5sum - do not use gcc when creating env.tarball for clang - force env. 
tarball regeneration if it doesn't exist - fix for assembler usage during kvm builds 0.9.7 (1232780) - bug fix for -MD and -MF - bug fix for gcc 4.6 plugins 0.9.6 (1158638) - fix installation issues - add a value type of ticks in /proc - and make it long long - fix kFreeBSD build - fix run-failure with glibc 2.11 - allow tgz as extension - support more local args (bnc#625621) 0.9.5 (1083362) - fix: close the file descriptor (novell bug 525799) - log PID even if no prefix is specified - allow get_msg() to fetch data from the kernel even if timeout is 0. - clean up event loop of gcc invocation - Install an "icerun" symlink that will serialize commands through icecream's local daemon 0.9.4 (961598): - fix compilation warnings - don't leak file descriptor to create-env - don't use the shell to call simple commands - assert current_kids is > 0 before decrementing (it's unsigned) - also check for EAGAIN (needed on OS X) - add ICECC_LOGFILE to get trace() output into a file - Fix compilation when using -Wformat -Werror=format-security flags - make a special exception for - in rest args (bnc#495786) - Fix way icecream changes permissions of /var/cache/icecream 0.9.3 (926474): - Fix FTBFS with GCC 4.4 - Explicitly set group ownership and permissions on the base cache dir and target dir in case root has a umask of e.g. 0077. Otherwise nobody user won't be ableto untar. - make sure that a too strict umask doesn't ruin the game fixes https://bugzilla.novell.com/show_bug.cgi?id=446329 - some create-env fixes for fedora 0.9.2 (879112): - On OS X, append the release number to Darwin (Darwin8 for Tiger, Darwin9 for Leopard) as their icecream environments are not compatible - Add accessor for the hostname of the scheduler, so that we can show it in a tooltip in icemon in the future. 
- Only localize job if we find -Wa,.*-a[a-z]*= in an argument - also add accessor for network name - if called as "icecc" (i.e., not through a symlink) and the first argument starts with a slash, use that as the command to run. 0.9.1 (822102): - trying to support gentoo x86_64 - -mcpu=native has to compile locally (gentoo bug 183586#c13) - don't compile C files with C++ compile if ICECC_CXX is set 0.9.0 (807014): - create a working env on OS X - don't hardcode tar path - implement load informations on OS X - make it work again on non-Linux platforms: Jobs still reported their environment platform as i386 etc, even if they came from Darwin or BSD (thus the scheduler never finds a matching compile server, or wrongly directs a non-Linux client to use a Linux compile server) - fixing the init script - patch by Tais M. Hansen to ignore directories - compile on OS X 10.5 - some preparations for rsync support - fix valgrind warning - add configure check for librsync - don't kick daemons that run as user, just don't give them any jobs. this helps the people that just don't trust the daemon to be able to at least send out jobs - remove the now pointless "runasuser" option. it will just not accept any jobs then - add log message when the local daemon hangs again - accept TERM=dumb as well - implement support for scheduling local non-compile jobs. For example: you can add a symlink named "meinproc4" pointing to icecc to your path, and it will automatically schedule the job and then invoke meinproc4 on it locally. the advantage is that you can use cmake -j50 without having to worry that there are going to be 50 meinproc jobs killing your machine. - do an asynchronous connection to the scheduler: * helps when the scheduler is down, as the daemon is otherwise locked up then and can't schedule local jobs * helps when the scheduler is flaky and blocked itself on network (like in the suse network). 
- fix connect if scheduler is known - send jobs to a monitor non-blocking - implement host blocking - switch monitor connections to bulk transfer - fix double deletion when control connection disconnects - add greeting banner that gives a rough statistic (for pretty monitoring graphs :) - protocol cleanup - fix error handling - adding more API for better monitor behaviour - various protections against malicious requests - add a try-restart target for the rpm package - always read the scheduler message in complete before trying to fetch a message. fixes a daemon hangup (surprising noone noticed this before) - various code cleanups - don't flood the scheduler - implement bulk sending - reduce overhead by not sending a full path - implement much shorter monitor messages - reenable cork handling on linux only. helps a lot - fix job statistics in monitor - check if the compile server sent us a message before die'ing another death - avoid killing the daemon on protocol errors - support "quit" on the normal daemon port as well - ext/hash_set is not needed and does not exist anymore - DoS protection for the scheduler - avoid that daemons hang around in environment installation state forever - fix daemons being overrun by wrongly assigned jobs while they're busy installing - add a segfault catcher - don't allow tiny environments - they are very likely broken - always relogin to the scheduler even if environment transfer failed - implement proper handling for PCH files for e.g. Qt compilation - fix compilation with glibc 2.8 0.8.0 (657903): - Find md5 on FreeBSD - apply patch to support cygwin a bit better - write a pid file. patch by Ismail Doenmez - precompiled header support, needed for compiling Qt 4.3 - add support for --no-remote. patch by Matt Gruenke - Make it work on DragonFlyBSD. - provide SuSEfirewall2 information - Make the result of make dist-gzip usable. - update node name each time we login to the scheduler. 
- treat -combine and -fdump* as always local - Merged make-it-cool branch: * implement job preloading * asynchronous install of new environments * uses tcp keepalive for network connection tracking - make it work on OS X * don' hardcode tar path * create environments correctly on OS X * report load correctly on mach kernels (Darwin) * prepend os to the machine type if it's not linux (resulting in e.g. Darwin_PowerMacintosh) 0.7.14 (583722): - fix current_kids getting out of sync if send_scheduler failes in an unfortunate moment - fix reporting the error - add more to the internals dump - trying to make the scheduler kicks a little bit less frequent - if the compiler is supposed to keep temp files, then we have to do it locally 0.7.13 (583347): - use file -L to follow symlinks in create-env - fix an easy valgrind error - make handling of the return values consistent - if the client is in WAITCOMPILE then it's waiting for some other host to compile, not this one. So job == 0 -> crash - fix crash if the scheduler goes away while daemon transfers environment - apparently sometimes gcc can hang forever in some cases. By the time the client disconects, we know we don't have to wait any longer, because there is nobody left caring about the result. - update node name each time we login to the scheduler. 
Fixes tons of "linux" hosts appearing in the icecream monitor 0.7.12 (r581454): - fixing error handling when scheduler restarts - do not waitpid before we're sure we read all of g++'s output otherwise g++ waits for us to read and we're waiting for g++ to finish -> deadlock 0.7.11 (r581076): - fix a crash in the daemon when the scheduler was gone while local jobs were waiting for finishing - separate stat handling from ping handling to avoid excessive stat/ping loops (increases internal version number) - only reset scheduler ping time if the scheduler ping'ed us - even when we can't determine native environment, we can still use the daemon for inter-process locking instead of falling back to file locking. - quicker reap of dead daemons - improved load guessing - fix stupid logic bug in ping tracking 0.7.10 (r580794): - handle errors in installing environments correctly - block daemons that have full discs - add -pipe to the command line to reduce disk usage - fix cancelling of jobs awaiting a remote job (were hanging in scheduler forever) - if ICECC=no is set, don't try to be clever - adding two more flags for local compilation (profile feedback related) - flush debug files before every fork to avoid dups in log output - be stricter in what messages are required to keep the daemon<->scheduler communication intact (network drops again) 0.7.9 (r580498): - scheduler will ping the daemon periodically, daemon will disconnect if not pinged from time to time (to avoid network drops being unnoticed) - removed some debug output - make logging more consistent - increased internal protocol version (still compatible) - try even harder to avoid races in daemon - rework how clients are catched - some random cleanup - remove the "this should be an exception" output to avoid failing configure checks for nothing - make sure the assembler is dead before we cleanup object files 0.7.8 (r579795): - fd leak fix, which caused a deadlock under certain conditions - rework some FreeBSD 
patches that caused problems - fix race between select and SIGCHILD 0.7.7 (r574260): - clear all internal maps - patches by Frerich to support FreeBSD - avoid busy loop in bizarre conditions - found another case of endless hanging jobs - some process fixes 0.7.6 (r561764): - report all daemon error message to the client - fix support for biarch machines that have only one environment installed with another environment with the same name and different architecture being around - work-around a bug in older libstdc++ - ensure nobody uses -u root 0.7.5 (r561480): - don't block while looking for a scheduler over broadcast search. - fix logrotate - immediately kick broken daemon connections 0.7.4 (r559927): - Add -fpreprocessed argument - Fix daemons getting stuck after a while with 100% CPU - fix for kubuntu where libs are deeply nested such as /lib/tls/i686/cmov/libc.so.6 - find the compiler even though icecc wasn't in the path - fix deletion of old environments - a lot more error handling - fix assertion failures in daemon upon sigchild delivery - use -fPIC also for C files - avoid race between local and remote daemon about who has the right exit status - rename the package to icecc 0.7.3 (r552930): - more work on error handling - corrected icecc --help output - handle local icecream load correctly - reduce the number of load updates from daemon to scheduler - early launch the compiler even before the whole preprocessed file has been received - handle low-latency networks better - builtin colorgcc - let the daemon schedule local load even without scheduler (important for multi processor machines) 0.7.2 (r549574): - split out libicecream-devel for the monitor - introducing ICECC_PREFERRED_HOST (=mybigfathost) to let the scheduler ignore all rules - this is meant to be a debugging aid - fix network performance between client and local daemon - replaced create-env with icecc --build-native (respecting the user's $PATH by default) 0.7.1 (r548846): - Monitor is an extra 
package now - don't make non-existant cross compilers fatal, but just use the other environments in ICECC_VERSION - always handle aborted syscalls - update minilzo - never exit() or return from fork()'ed code paths, 0.7.0 (r547196): - WARNING: the protocol is not compatible with earlier versions, old clients will refuse to connect, so update the full farm - protocol reworked to no longer require a client->scheduler connection (the daemon will proxy it all) - all local jobs are now scheduled by the local daemon to make sure multiprocessor machines are handling as many jobs as they have jobs configured (# processors). They used to be "scheduled" by lock file - fixed grave bug that removed all daemon environments when the cache grew over limit - new telnet interface command: internals to output daemon states - stricter error handling to avoid suprises - SIGHUP to daemon and scheduler will reopen the log file to allow logrotate - a restart of the daemon and scheduler won't flush the log file, but append to it - create-env will create .tar.gz now - the client allows environments to be .tar, .tar.bz2 and .tar.gz icecream-1.3.1/README000066400000000000000000000021221361626760200141520ustar00rootroot00000000000000NOTE: Although icecream will compile on some non-Linux systems, it depends critically on Linux' /proc/ structures and will fail at runtime until somebody figures out how to emulate the "ticks" form of process accounting in there. Required Libraries ================== Note: The package name may vary by distro * libcap-ng-devel * libarchive-devel * lzo-devel * libzstd-devel How to install icecream ======================= cd icecream ./autogen.sh ./configure --prefix=/opt/icecream make make install WARNING: Make sure you specify a prefix, otherwise icecream might override your gcc installation! You will need to use this prefix instead of /usr when referring to icecream (for example when extending the $PATH variable). 
The documentation is maintained in README.md for the time being. How to contribute ================= Fork on github.com and send a pull request: https://github.com/icecc/icecream There is a testsuite in the tests/ directory and unit tests in unittests/, see the README files there. Mailing list: icecream-users@googlegroups.com (icecream-users+subscribe@googlegroups.com) icecream-1.3.1/README.md000066400000000000000000000665001361626760200145630ustar00rootroot00000000000000[![Build Status (Linux & macOS)](https://travis-ci.org/icecc/icecream.svg?branch=master)](https://travis-ci.org/icecc/icecream) [![Build Status (FreeBSD)](https://api.cirrus-ci.com/github/icecc/icecream.svg)](https://cirrus-ci.com/github/icecc/icecream) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/d0fd9ba53b424b37964340970392eec2)](https://www.codacy.com/app/icecc/icecream?utm_source=github.com&utm_medium=referral&utm_content=icecc/icecream&utm_campaign=Badge_Grade) [![Code Quality: Cpp](https://img.shields.io/lgtm/grade/cpp/g/icecc/icecream.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/icecc/icecream/context:cpp) [![Total Alerts](https://img.shields.io/lgtm/alerts/g/icecc/icecream.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/icecc/icecream/alerts) [Icecream](Icecream) was created by SUSE based on distcc. Like distcc, [Icecream](Icecream) takes compile jobs from a build and distributes it among remote machines allowing a parallel build. But unlike distcc, [Icecream](Icecream) uses a central server that dynamically schedules the compile jobs to the fastest free server. This advantage pays off mostly for shared computers, if you're the only user on x machines, you have full control over them. 
Table of Contents - [Installation](#installation) - [How to use icecream](#how-to-use-icecream) - [make it persistent](#make-it-persistent) - [TroubleShooting](#troubleshooting) - [Firewall](#firewall) - [C compiler](#c-compiler) - [osc build](#osc-build) - [some compilation node aren't used](#some-compilation-node-arent-used) - [build with -Werror fails only when using icecream ](#build-with--werror-fails-only-when-using-icecream) - [clang 4.0 tries to read /proc/cpuinfo and fails](#clang-tries-to-read-proccpuinfo-and-fails) - [Supported platforms](#supported-platforms) - [Using icecream in heterogeneous environments](#using-icecream-in-heterogeneous-environments) - [Cross-Compiling using icecream](#cross-compiling-using-icecream) - [Creating cross compiler package](#creating-cross-compiler-package) - [Cross-Compiling for embedded targets using icecream](#cross-compiling-for-embedded-targets-using-icecream) - [Cross-Compiling for multiple targets in the same environment using icecream](#cross-compiling-for-multiple-targets-in-the-same-environment-using-icecream) - [How to combine icecream with ccache](#how-to-combine-icecream-with-ccache) - [Debug output](#debug-output) - [Some Numbers](#some-numbers) - [What is the best environment for icecream](#what-is-the-best-environment-for-icecream) - [Some advice on configuration](#some-advice-on-configuration) - [Network setup for Icecream (firewalls)](#network-setup-for-icecream-firewalls) - [I use distcc, why should I change?](#i-use-distcc-why-should-i-change) - [Icecream on gentoo](#icecream-on-gentoo) - [Bug tracker](#bug-tracker) - [Repository](#repository) - [Mailing list](#mailing-list) Installation ------------------------------------------------------------------------- We recommend that you use packages maintained by your distribution if possible. Your distribution should provide customized startup scripts that make icecream fit better into the way your system is configured. 
We highly recommend you install [icemon](https://github.com/icecc/icemon) or [icecream-sundae](https://github.com/JPEWdev/icecream-sundae) with icecream. If you want to install from source see the instructions in the README file provided in the source package. How to use icecream --------------------------------------------------------------------------------------- You need: - At least one machine that runs the scheduler ("./icecc-scheduler -d") - Many machines that run the daemon ("./iceccd -d") It is possible to run the scheduler and the daemon on one machine and only the daemon on another, thus forming a compile cluster with two nodes. If you want to compile using icecream, make sure $prefix/lib/icecc/bin is the first entry in your path, e.g. type export PATH=/usr/lib/icecc/bin:$PATH (Hint: put this in \~/.bashrc or /etc/profile to not have to type it in everytime) Then you just compile with make -j \, where \ is the amount of jobs you want to compile in parallel. As a start, take the number of logical processors multiplied with 2, or a larger number if your compile cluster can serve all the compilation jobs. But note that too large numbers may in fact make the build slower (for example if your local machine gets overloaded with preparing more jobs than it can handle at a time). Here is an example: make -j6 WARNING: Never use icecream in untrusted environments. Run the daemons and the scheduler as unprivileged user in such networks if you have to! But you will have to rely on homogeneous networks then (see below). If you want an overview of your icecream compile cluster, or if you just want funny stats, you might want to run "icemon" (from a separate repository/package). ### make it persistent If you restart a computer, you still want it to be in the icecream cluster after reboot. Consult your distribution's documentation on this. 
If you uses packages provided by your distribution this should be automatic (or a simple configuration change) ### make scheduler persistent: By adding an option --scheduler-host for daemon and --persistent-client-connection for scheduler, the client connections are not disconnected from the scheduler even there is an availability of better scheduler. TroubleShooting ------------------------------------------------------------------------------- Most problems are caused by firewalls and by make using the wrong C compiler (e.g. /usr/bin/gcc instead of /usr/lib/icecc/bin/gcc). ### Firewall For testing purposes, you can stop your firewall like this: rcSuSEfirewall2 stop To open the right ports in your firewall, call yast2 firewall Choose "allowed services" -\> Advanced. Enter for TCP: **10245 8765 8766** and for UDP **8765** If you have scheduler running on another system, you should open broadcasting response : yast2 firewall Choose "Custom Rules" -\> Add. Enter Source Network **0/0** Protocol: **UDP** Source Port **8765** ### C compiler To make sure your compile job uses /usr/lib/icecc/bin/gcc (gcc is used as an example here, depending on your compile job it can also be g++, cc or c++) start your compile using make VERBOSE=1 and wait for a typical compile command to appear, like this one: cd /root/kdepim/kode/libkode && /usr/lib/icecc/bin/c++ -DTest1Area=5121 -D_BSD_SOURCE -D_XOPEN_SOURCE=500 -D_BSD_SOURCE -DQT_NO_STL -DQT_NO_CAST_TO_ASCII -D_REENTRANT -DKDE_DEPRECATED_WARNINGS -DKDE_DEFAULT_DEBUG_AREA=5295 -DMAKE_KODE_LIB -Wnon- virtual-dtor -Wno-long-long -ansi -Wundef -Wcast-align -Wchar-subscripts-Wall -W -Wpointer-arith -Wformat-security -fno-exceptions -fno-check-new in this example, the right c compiler is chosen, /usr/lib/icecc/bin/c++. If the wrong one is chosen, delete CMakeCache.txt (if existing) and start the build process again calling ./configure (if existing). 
### osc build You can tell osc build to use icecream to build packages, by appending --icecream=\ where n is the number of process which should be started in parallel. However, for integration with icecream to work properly, you must install icecream on the host where you will run "osc build" and you must start icecream daemon. ### some compilation node aren't used If, when using icecream monitor (icemon), you notice some nodes not being used at all for compilation, check you have the same icecream version on all nodes, otherwise, nodes running older icecream version might be excluded from available nodes. ### build with -Werror fails only when using icecream This problem should not exist with a recent icecream version. If it does, try using `ICECC_REMOTE_CPP=1` (see `icecc --help`). ### clang tries to read /proc/cpuinfo and fails This is a problem of clang 4.0 and newer: https://bugs.llvm.org/show_bug.cgi?id=33008 The most recent Icecream version works around this problem. Supported platforms --------------------------------------------------------------------------------------- Most of icecream is UNIX specific and can be used on most platforms, but as the scheduler needs to know the load of a machine, there are some tricky parts. Supported are: - Linux - FreeBSD - DragonFlyBSD - OS X Note that all these platforms can be used both as server and as client - meaning you can do full cross compiling between them. The following platforms are known to work at least as a client, meaning that you can run compilation on them that will compile on remote nodes using cross compilation. 
- Cygwin Using icecream in heterogeneous environments ----------------------------------------------------------------------------------------------------------------------------------------- If you are running icecream daemons in the same icecream network but on machines with incompatible compiler versions, icecream needs to send your build environment to remote machines (note: they _all_ must be running as root. In the future icecream might gain the ability to know when machines can't accept a different env, but for now it is all or nothing). Under normal circumstances this is handled transparently by the icecream daemon, which will prepare a tarball with the environment when needed. This is the recommended way, as the daemon will also automatically update the tarball whenever your compiler changes. If you want to handle this manually for some reason, you have to tell icecream which environment you are using. Use icecc --build-native to create an archive file containing all the files necessary to setup the compiler environment. The file will have a random unique name like "ddaea39ca1a7c88522b185eca04da2d8.tar.bz2" per default. Rename it to something more expressive for your convenience, e.g. "i386-3.3.1.tar.bz2". Set ICECC_VERSION= in the shell environment where you start the compile jobs and the file will be transferred to the daemons where your compile jobs run and installed to a chroot environment for executing the compile jobs in the environment fitting to the environment of the client. This requires that the icecream daemon runs as root. Cross-Compiling using icecream ------------------------------------------------------------------------------------------------------------ SUSE got quite some good machines not having a processor from Intel or AMD, so icecream is pretty good in using cross-compiler environments similar to the above way of spreading compilers. 
There the ICECC\_VERSION variable looks like \(,\:\)\*, for example like this: /work/9.1-i386.tar.bz2,ia64:/work/9.1-cross-ia64.tar.bz2,Darwin_PowerPCMac:/work/osx-generate-i386.tar.gz To get this working on openSuse machines there are some packages containing the cross-compiler environments. Here is a sample case showing how to do to get it working. Let's assume that we want to build for x86\_64 but use some i386 machines for the build as well. On the x86\_64 machine, go to [http://software.opensuse.org,](http://software.opensuse.org) search for **icecream x86\_64** and download and install the version for i586. Then add this to the ICECC\_VERSION and build. i386:/usr/share/icecream-envs/cross-x86_64-gcc-icecream-backend_i386.tar.gz Creating cross compiler package --------------------------------------------------------------------------------------------------------------- How to package such a cross compiler is pretty straightforward if you look what's inside the tarballs generated by icecc. You basically need a /usr/bin/gcc, a /usr/bin/g++ and a /usr/bin/as. So if you need a cross compiler that uses your OS X running G5 to compile i586-linux for your laptop, you would: - go to your OS X and download binutils and gcc (of the versions you use on linux) - first compile and install binutils with --prefix /usr/local/cross --target=i586-linux (I have some problems that required setting CC and AR) - configure gcc with the same options, go into the gcc directory and make all install-driver install-common - that worked good enough for me. - now create a new directory where you copy /usr/local/cross/bin/i586-linux-{gcc,g++,as} into as usr/bin/{gcc,g++,as} - now I copy an empty.c (that is empty) into that dir too and call chroot . usr/bin/gcc -c empty.c that will report an error about missing libraries or missing cc1 - copy them until gcc generates an empty.o without error. You can double check with "file empty.o" if it's really a i586-linux object file. 
- now tar that directory and use it on your client as specified above. My cross compiler for the above case is under [http://ktown.kde.org/\~coolo/ppc-osx-create-i586.tar.gz](http://ktown.kde.org/~coolo/ppc-osx-create-i586.tar.gz) Cross-Compiling for embedded targets using icecream ------------------------------------------------------------------------------------------------------------------------------------------------------ When building for embedded targets like ARM often you'll have a toolchain that runs on your host and produces code for the target. In these situations you can exploit the power of icecream as well. Create symbolic links from where icecc is to the name of your cross compilers (e.g. arm-linux-g++ and arm-linux-gcc), make sure that these symbolic links are in the path and before the path of your toolchain, with $ICECC\_CC and $ICECC\_CXX you need to tell icecream which compilers to use for preprocessing and local compiling. e.g. set it to ICECC\_CC=arm-linux-gcc and ICECC\_CXX=arm-linux-g++. As the next step you need to create a .tar.bz2 of your cross compiler, check the result of icecc --build-native to see what needs to be present. Finally one needs to set ICECC\_VERSION and point it to the tar.bz2 you've created. When you start compiling your toolchain will be used. NOTE: with ICECC\_VERSION you point out on which platforms your toolchain runs, you do not indicate for which target code will be generated. Cross-Compiling for multiple targets in the same environment using icecream ------------------------------------------------------------------------------------- When working with toolchains for multiple targets, icecream can be configured to support multiple toolchains in the same environment. Multiple toolchains can be configured by appending =\ to the tarball filename in the ICECC\_VERSION variable. Where the \ is the cross compiler prefix. There the ICECC\_VERSION variable will look like \(,\:\=\)\*. 
Below an example of how to configure icecream to use two toolchains, /work/toolchain1/bin/arm-eabi-\[gcc,g++\] and /work/toolchain2/bin/arm-linux-androideabi-\[gcc,g++\], for the same host architecture: - Create symbolic links with the cross compilers names (e.g. arm-eabi-\[gcc,g++\] and arm-linux-androideabi-\[gcc,g++\]) pointing to where the icecc binary is. Make sure these symbolic links are in the $PATH and before the path of the toolchains. - Create a tarball file for each toolchain that you want to use with icecream. icecc-create-env script can be used to create the tarball file for each toolchain, for example: icecc-create-env /work/toolchain1/bin/arm-eabi-gcc icecc-create-env /work/toolchain2/bin/arm-linux-androideabi-gcc - Set ICECC\_VERSION to point to the native tarball file and for each tarball file created to the toolchains (e.g ICECC\_VERSION=/work/i386-native.tar.gz,/work/arm-eabi-toolchain1.tar.gz=arm-eabi,/work/arm-linux-androideabi-toolchain2.tar.gz=arm-linux-androideabi). With these steps the icecrem will use /work/arm-eabi-toolchain1.tar.gz file to cross compilers with the prefix arm-eabi(e.g arm-eabi-gcc and arm-eabi-g++), use /work/arm-linux-androideabi-toolchain2.tar.gz file to cross compilers with the prefix arm-linux-androideabi(e.g. arm-linux-androideabi-gcc and arm-linux-androideabi-g++) and use /work/i386-native.tar.gz file to compilers without prefix, the native compilers. How to combine icecream with ccache ----------------------------------------------------------------------------------------------------------------------- The easiest way to use ccache with icecream is to set CCACHE\_PREFIX to icecc (the actual icecream client wrapper): export CCACHE_PREFIX=icecc This will make ccache prefix any compilation command it needs to do with icecc, making it use icecream for the compilation (but not for preprocessing alone). To actually use ccache, the mechanism is the same like with using icecream alone. 
Since ccache does not provide any symlinks in /opt/ccache/bin, you can create them manually: mkdir /opt/ccache/bin ln -s /usr/bin/ccache /opt/ccache/bin/gcc ln -s /usr/bin/ccache /opt/ccache/bin/g++ And then compile with export PATH=/opt/ccache/bin:$PATH In this case icecc's symlinks in /usr/lib/icecc/bin should **not** be in your path, as CCACHE_PREFIX is instructing ccache to explicitly delegate to icecc rather than finding it in the path. If both ccache and icecc's symlinks are in the path it is likely the two wrappers will mistake each other for the real compiler and icecc will complain that it has recursively invoked itself. Note however that ccache isn't really worth the trouble if you're not recompiling your project three times a day from scratch (it adds some overhead in comparing the source files and uses quite some disk space). Debug output ------------------------------------------------------------------------- You can use the environment variable ICECC\_DEBUG to control if icecream gives debug output or not. Set it to "debug" to get debug output. The other possible values are error, warning and info (the -v option for daemon and scheduler raise the level per -v on the command line - so use -vvv for full debug). Some Numbers ------------------------------------------------------------------------- Numbers of my test case (some STL C++ genetic algorithm) - g++ on my machine: 1.6s - g++ on fast machine: 1.1s - icecream using my machine as remote machine: 1.9s - icecream using fast machine: 1.8s The icecream overhead is quite huge as you might notice, but the compiler can't interleave preprocessing with compilation and the file needs to be read/written once more and in between the file is transferred. But even if the other computer is faster, using g++ on my local machine is faster. If you're (for whatever reason) alone in your network at some point, you lose all advantages of distributed compiling and only add the overhead. 
So icecream got a special case for local compilations (the same special meaning that localhost got within $DISTCC\_HOSTS). This makes compiling on my machine using icecream down to 1.7s (the overhead is actually less than 0.1s in average). As the scheduler is aware of that meaning, it will prefer your own computer if it's free and got not less than 70% of the fastest available computer. Keep in mind, that this affects only the first compile job, the second one is distributed anyway. So if I had to compile two of my files, I would get - g++ -j1 on my machine: 3.2s - g++ -j1 on the fast machine: 2.2s - using icecream -j2 on my machine: max(1.7,1.8)=1.8s - (using icecream -j2 on the other machine: max(1.1,1.8)=1.8s) The math is a bit tricky and depends a lot on the current state of the compilation network, but make sure you're not blindly assuming make -j2 halves your compilation time. What is the best environment for icecream ----------------------------------------------------------------------------------------------------------------------------------- In most requirements icecream isn't special, e.g. it doesn't matter what distributed compile system you use, you won't have fun if your nodes are connected through than less or equal to 10MBit. Note that icecream compresses input and output files (using lzo), so you can calculate with \~1MBit per compile job - i.e more than make -j10 won't be possible without delays. Remember that more machines are only good if you can use massive parallelism, but you will for sure get the best result if your submitting machine (the one you called g++ on) will be fast enough to feed the others. Especially if your project consists of many easy to compile files, the preprocessing and file IO will be job enough to need a quick machine. 
The scheduler will try to give you the fastest machines available, so even if you add old machines, they will be used only in exceptional situations, but still you can have bad luck - the scheduler doesn't know how long a job will take before it started. So if you have 3 machines and two quick to compile and one long to compile source file, you're not safe from a choice where everyone has to wait on the slow machine. Keep that in mind. Icecream is very sensitive to latency between nodes, and packet loss. While icecream has been successfully used by people who are on opposite sides of the earth, when those users were isolated to their geographic location the speed improved for everyone. In most corporate environments within a single building everything works well, but between two buildings often is troublesome. Some advice on configuration ----------------------------------------------------------------------------------------------------------------------------------- Icecream supports many configurations but you need to understand your network to choose what is right for you. You should ensure that the scheduler up to the latest version. Many new features require the client and scheduler work together to use them. Even though clients should work with old schedulers new features will not work, and may not be disabled correctly. Version 1.1 gained the ability for multiple schedulers on a single network to decide on the best master. However daemons running earlier versions do not understand this, and it is random if they will find the correct one. In all other ways it is believed that mixing old and new versions of the daemon will work: if you use a new feature only new clients will be used. Recommended is to start the scheduler and daemon on every body's machine. The icecream schedulers will choose one to be the master and everyone will connect to it. When the scheduler machine goes down a new master will be selected automatically. 
If you need to run mixed icecream versions, then it is best to designate one machine on each subnet to be a scheduler. Icecream nodes will automatically find the scheduler and connect to it. If someone accidentally starts a second scheduler this will cause problems with clients that are less than 1.1, but they should eventually work. The scheduler should be a reliable machine, but if it fails you use any existing machine as a replacement. You may also designate a scheduler machine, and then for each client specify the scheduler to use (this is a variation of the previous case). You need to ensure that there is no other schedulers on the same network as this scheduler if you do this. The scheduler machine MUST be reliable, any failure will require reconfiguring all client machines. This setup allows you to specify one scheduler per building which is useful if single developers are scattered around. If you do this check with IT to ensure that icecream traffic won't overload routers. You might designate a netname. This is useful if your network is using VPN to make it seem like developers who are physically a long distance apart seem like they are on the same sub-net. While the VPNs are useful, they are typically do not have enough bandwidth for icecream, so by setting a different netname on each side of the VPN you can save bandwidth. Netnames can be used to work around some limitations above: if a netname is set icecream schedulers and daemons will ignore the existence of other schedulers and daemons. 
Network setup for Icecream (firewalls) --------------------------------------------------------------------------------------------------------------------------- A short overview of the ports icecream requires: - TCP/10245 on the daemon computers (required) - TCP/8765 for the the scheduler computer (required) - TCP/8766 for the telnet interface to the scheduler (optional) - UDP/8765 for broadcast to find the scheduler (optional) Note that the [SuSEfirewall2](SuSEfirewall2) on SUSE \< 9.1 got some problems configuring broadcast. So you might need the -s option for the daemon in any case there. If the monitor can't find the scheduler, use USE\_SCHEDULER=\ icemon (or send me a patch :) I use distcc, why should I change? ------------------------------------------------------------------------------------------------------------------- If you're sitting alone home and use your partner's computer to speed up your compilation and both these machines run the same Linux version, you're fine with distcc (as 95% of the users reading this chapter will be, I'm sure). But there are several situations, where distcc isn't the best choice: - you're changing compiler versions often and still want to speed up your compilation (see the ICECC\_VERSION support) - you got some neat PPC laptop and want to use your wife's computer that only runs intel (see the cross compiler section) - you don't know what machines will be on-line at compile time. - **most important**: you're sitting in a office with several co-workers that do not like if you overload their workstations when they play doom (distcc doesn't have a scheduler) - you like nice compile monitors :) Icecream on gentoo ------------------------------------------------------------------------------------- - It is recommended to remove all processor specific optimizations from the CFLAGS line in /etc/portage/make.conf. 
On the aKademy cluster it proved useful to use only "-O2", otherwise there are often internal compiler errors, if not all computers have the same processor type/version **Be aware** that you have to change the CFLAGS during each gcc update too. - Create soft link for CHOST gcc/g++ e.g. ln -s /opt/icecream/bin/icecc /opt/icecream/libexec/icecc/bin/x86_64-pc-linux-gnu-gcc; ln -s /opt/icecream/bin/icecc /opt/icecream/libexec/icecc/bin/x86_64-pc-linux-gnu-g++ - To use icecream with emerge/ebuild use PREROOTPATH="/opt/icecream/libexec/icecc/bin" FEATURES="-network-sandbox" emerge bla - Be aware, because your gcc/glibc/binutils are normally compiled with processor-specific flags, there is a high chance that your compiler won't work on other machines. The best would be to build gcc, glibc and binutils without those flags and copying the needed files into your tarball for distribution, e.g. CFLAGS="-mcpu=i686 -O3 -fomit-frame-pointer -pipe" CXXFLAGS="$CFLAGS" ebuild /usr/portage/sys-devel/gcc-yourver.ebuild install ; cp /var/tmp/portage... 
Bug tracker ----------------------------------------------------------------------- Create a github issue on https://github.com/icecc/icecream Repository --------------------------------------------------------------------- The git repository lives at https://github.com/icecc/icecream Mailing list ----------------------------------------------------------------------- icecream-users@googlegroups.com - Subscribe: icecream-users+subscribe@googlegroups.com - Archive: https://groups.google.com/forum/#!forum/icecream-users icecream-1.3.1/TODO000066400000000000000000000107171361626760200137730ustar00rootroot00000000000000Release Critical: * chmod TMPDIR/native, as otherwise set umask prevent it from being readable by the client I'm wondering if it doesn't make sense to leave the daemon out of the game and let the client package the environment - it could either cache it in /tmp and leave a note in ~/.icecream or it packages it right into ~/.icecream and use e.g. the inode numbers of the compilers to verify it's the right environment - think NFS users changing machines. This would simplify some things and avoid the above bugs (and it would make it more convenient for users of /opt/gcc-cvs/bin too) This would also help users who are running in a chroot enviornment where the local daemon cannot even find the path to the compiler * Improve Documentation (cschum) * make the protocol version an uint32, not a hand-build array. * let the client specify it was compiled with recent compiler (supporting -param). If so, let the daemon compile with the options, otherwise leave them out. 
Akademy must have's: * daemon: only accept connections from a sane net prefix (scheduler can determine this and send it out via confMsg) to avoid being exposed to the internet * benchmark/testing jobs (broken machines with broken ram and broken network card on the icecream network destroy the fun and are not easy to find manually) Random: * Option -iiface to specify the interface[s] to listen on for the scheduler, or to use for the daemon. * Don't explicitly forbid tunnels because it could be useful for things like vmware * If someone calls a amd64 client on a host that runs a ia32 daemon and there are no other amd64 daemons in the farm, he will get no answer, but a timeout from scheduler (quite a corner case, but neat) * use syslog * Log problems found at some scheduler log - or even in the monitor. E.g. if a client can't reach a given daemon, it should be able to tell. Perhaps the scheduler can even disable that very host for some penalty time * Reduce amount of force-waits, especially if they involve network latency (scheduler queries) and daemon context switches: - remove the need for EndMsg - do not ask the daemon about the first job (WIP dirk) * if a compile job SIGSEGV's or SIGABORTs, make sure to recompile locally because it could be just a glibc/kernel incompatibility on the remote site * Add heuristic which optimises for overhead-reduction. Statistics prove ( ;) ), that for e.g. linux kernel the file size varies a lot, and small jobs should be preferably compiled locally and bigger ones preferably remote. * Split number of jobs into number of compile jobs (cheap) and number of non compile jobs (can be expensive, e.g. ld or meinproc). The reason is that multicore chips become more and more common. Today you can get quad cores easily and in some month we've 8 cores and several link jobs at the same time can pull down your whole system. 
(I can life with a hard coded number of one non compile job but others probably prefer a configurable number) * Consider launching a scheduler on-demand if there is none available or if a daemon knows it has a better version than the scheduler that is available (https://github.com/icecc/icecream/issues/84). Suggestions from "Wilson Snyder" sicortex.com: - Add ICECREAM_RUN_ICECCD to make the scheduler machine not run iceccd Christoph Thielecke writes: > 1) Problem von icecream: icecc hat /usr/bin/gcc-4.2 kopiert (ist aber ein > Symlink auf den Hardening-Wrapper, der installiert war, siehe Listing > unten). Kannst Du vieleicht eine Prüfung einbauen, ob gcc-x.y mit .real > endet bzw der Wrapper ist? Andi Kleen writes to protect against misconfiguration: >generate somefile.h on the client with >#if __GNUC__ != expected_major || __GNU_MINOR__ != expected_minor >#warning blabla >#endif >and then run with >-include somefile.h > (this file needs to be in the environment created by create_env) From thedrow (distcc maintainer) >Merging this back might just mean feature parity with what icecream currently >provides or merging some of the code directly to distcc. >I think that the entire Linux community will gain from one tool that is able to >distribute compilation of large C/C++ components that provides more flexibility >and allows you to grow from the easy to setup distcc type of deployment to the 100 >machines build farm that icecc provides. >Are there more features that diverged from distcc except for the server/client >architecture? We think the the only other feature is better support for automatic chroot. icecream-1.3.1/autogen.sh000077500000000000000000000023001361626760200152710ustar00rootroot00000000000000#!/bin/sh TESTLIBTOOLIZE="glibtoolize libtoolize" LIBTOOLIZEFOUND="0" srcdir=$(dirname $0) test -z "$srcdir" && srcdir=. 
cd $srcdir aclocal --version > /dev/null 2> /dev/null || { echo "error: aclocal not found" exit 1 } automake --version > /dev/null 2> /dev/null || { echo "error: automake not found" exit 1 } for i in $TESTLIBTOOLIZE; do if which $i > /dev/null 2>&1; then LIBTOOLIZE=$i LIBTOOLIZEFOUND="1" break fi done if [ "$LIBTOOLIZEFOUND" = "0" ]; then echo "$0: need libtoolize tool to build icecream" >&2 exit 1 fi if automake --version | grep -F 'automake (GNU automake) 1.5' > /dev/null; then # grep -q is non-portable echo "warning: you appear to be using automake 1.5" echo " this version has a bug - GNUmakefile.am dependencies are not generated" fi rm -rf autom4te*.cache $LIBTOOLIZE --force --copy || { echo "error: libtoolize failed" exit 1 } aclocal -I m4 $ACLOCAL_FLAGS || { echo "error: aclocal -I m4 $ACLOCAL_FLAGS failed" exit 1 } autoheader || { echo "error: autoheader failed" exit 1 } automake -a -c --foreign || { echo "warning: automake failed" } autoconf || { echo "error: autoconf failed" exit 1 } icecream-1.3.1/client/000077500000000000000000000000001361626760200145535ustar00rootroot00000000000000icecream-1.3.1/client/Makefile.am000066400000000000000000000024711361626760200166130ustar00rootroot00000000000000bin_PROGRAMS = icecc bin_SCRIPTS = icecc-create-env icecc-test-env noinst_LIBRARIES = libclient.a libclient_a_SOURCES = \ arg.cpp \ argv.c \ cpp.cpp \ local.cpp \ remote.cpp \ util.cpp \ md5.c \ safeguard.cpp icecc_SOURCES = \ main.cpp icecc_LDADD = \ libclient.a \ ../services/libicecc.la noinst_HEADERS = \ argv.h \ client.h \ md5.h \ util.h AM_CPPFLAGS = \ -DPLIBDIR=\"$(pkglibexecdir)\" \ -I$(top_srcdir)/services \ -I$(top_srcdir)/ EXTRA_DIST = icecc-create-env AM_LIBTOOLFLAGS = --silent install-exec-local: $(mkinstalldirs) $(DESTDIR)$(bindir) rm -f $(DESTDIR)$(bindir)/icerun $(LN_S) $(bindir)/icecc $(DESTDIR)$(bindir)/icerun $(mkinstalldirs) $(DESTDIR)$(pkglibexecdir) rm -f $(DESTDIR)$(pkglibexecdir)/icecc-create-env $(LN_S) $(bindir)/icecc-create-env 
$(DESTDIR)$(pkglibexecdir)/icecc-create-env $(mkinstalldirs) $(DESTDIR)$(pkglibexecdir)/bin for link in g++ gcc c++ cc $(CLANG_SYMLINK_WRAPPERS); do \ rm -f $(DESTDIR)$(pkglibexecdir)/bin/$$link ;\ $(LN_S) $(bindir)/icecc $(DESTDIR)$(pkglibexecdir)/bin/$$link ;\ done uninstall-local: rm $(DESTDIR)$(bindir)/icerun rm $(DESTDIR)$(pkglibexecdir)/icecc-create-env for link in g++ gcc c++ cc $(CLANG_SYMLINK_WRAPPERS); do \ rm $(DESTDIR)$(pkglibexecdir)/bin/$$link ;\ done icecream-1.3.1/client/arg.cpp000066400000000000000000001053161361626760200160360ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* * distcc -- A simple distributed compiler system * * Copyright (C) 2002, 2003 by Martin Pool * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "config.h" #include #include #include #include #include #include #include #include #include "client.h" using namespace std; // Whether any option controlling color output has been explicitly given. bool explicit_color_diagnostics; // Whether -fno-diagnostics-show-caret was given. 
bool explicit_no_show_caret; #define CLIENT_DEBUG 0 inline bool str_equal(const char* a, const char* b) { return strcmp(a, b) == 0; } inline int str_startswith(const char *head, const char *worm) { return !strncmp(head, worm, strlen(head)); } /* Some files should always be built locally... */ static bool should_always_build_locally(const string &filepath) { string p = find_basename(filepath); const char *filename = p.c_str(); /* autoconf */ if (str_startswith("conftest.", filename) || str_startswith("tmp.conftest.", filename)) { return true; } static const char* const cmake_checks[] = { "CheckIncludeFile.", "CheckFunctionExists.", "CheckSymbolExists.", "CheckTypeSize.", "CheckPrototypeDefinition.", }; /* cmake */ if (str_startswith("Check", filename)) { for( size_t i = 0; i < sizeof( cmake_checks ) / sizeof( cmake_checks[ 0 ] ); ++i ) { if (str_startswith( cmake_checks[ i ], filename)) { return true; } } } return false; } static bool analyze_program(const char *name, CompileJob &job, bool& icerun) { string compiler_name = find_basename(name); job.setCompilerName(compiler_name); if( icerun ) { job.setLanguage(CompileJob::Lang_Custom); log_info() << "icerun, running locally." << endl; return true; } else if( is_cpp_compiler(compiler_name)) { job.setLanguage(CompileJob::Lang_CXX); } else if( is_c_compiler(compiler_name)) { job.setLanguage(CompileJob::Lang_C); } else { job.setLanguage(CompileJob::Lang_Custom); log_info() << "custom command, running locally." 
<< endl; icerun = true; return true; } return false; } static bool is_argument_with_space(const char* argument) { // List taken from https://clang.llvm.org/docs/genindex.html // TODO: Add support for arguments with two or three values // -sectalign // -sectcreate // -sectobjectsymbols // -sectorder // -segaddr // -segcreate // -segprot // Move some arguments to Arg_Cpp or Arg_Local static const char* const arguments[] = { "-dyld-prefix", "-gcc-toolchain", "--param", "--sysroot", "--system-header-prefix", "-target", "--assert", "--allowable_client", "-arch", "-arch_only", "-arcmt-migrate-report-output", "--prefix", "-bundle_loader", "-dependency-dot", "-dependency-file", "-dylib_file", "-exported_symbols_list", "--bootclasspath", "--CLASSPATH", "--classpath", "--resource", "--encoding", "--extdirs", "-filelist", "-fmodule-implementation-of", "-fmodule-name", "-fmodules-user-build-path", "-fnew-alignment", "-force_load", "--output-class-directory", "-framework", "-frewrite-map-file", "-ftrapv-handler", "-image_base", "-init", "-install_name", "-lazy_framework", "-lazy_library", "-meabi", "-mhwdiv", "-mllvm", "-module-dependency-dir", "-mthread-model", "-multiply_defined", "-multiply_defined_unused", "-rpath", "--rtlib", "-seg_addr_table", "-seg_addr_table_filename", "-segs_read_only_addr", "-segs_read_write_addr", "-serialize-diagnostics", "-std", "--stdlib", "--force-link", "-umbrella", "-unexported_symbols_list", "-weak_library", "-weak_reference_mismatches", "-B", "-D", "-U", "-I", "-i", "--include-directory", "-L", "-l", "--library-directory", "-MF", "-MT", "-MQ", "-cxx-isystem", "-c-isystem", "-idirafter", "--include-directory-after", "-iframework", "-iframeworkwithsysroot", "-imacros", "-imultilib", "-iprefix", "--include-prefix", "-iquote", "-include", "-include-pch", "-isysroot", "-isystem", "-isystem-after", "-ivfsoverlay", "-iwithprefix", "--include-with-prefix", "--include-with-prefix-after", "-iwithprefixbefore", "--include-with-prefix-before", 
"-iwithsysroot" }; for( size_t i = 0; i < sizeof( arguments ) / sizeof( arguments[ 0 ] ); ++i ) { if (str_equal( arguments[ i ], argument)) { return true; } } return false; } static bool analyze_assembler_arg(string &arg, list *extrafiles) { const char *pos = arg.c_str(); static bool second_option; if (second_option) { second_option = false; return false; } if (str_startswith("-a", pos)) { /* -a[a-z]*=output, which directs the listing to the named file * and cannot be remote. */ pos += 2; while ((*pos >= 'a') && (*pos <= 'z')) { pos++; } if (*pos == '=') { return true; } return false; } else if (str_equal("--debug-prefix-map", pos) || str_equal("--defsym", pos)) { second_option = true; return false; } else if (pos[0] == '@') { /* If a build system passes an @FILE argument we'd need to * parse the file for more arguments. Instead, we'll just * run locally. */ return true; } else if (arg[0] == '-') { /* All other option arguments should be safe to run remotely. */ return false; } else { /* Some build systems pass directly additional assembler files. * Example: -Wa,src/code16gcc.s * Thus, if any option doesn't start with a dash we need to * add an extra file to the compile step. */ if (access(arg.c_str(), R_OK) == 0) { arg = get_absfilename(arg); extrafiles->push_back(arg); return false; } else { log_info() << "file for argument missing, building locally" << endl; return true; } return false; } } bool analyse_argv(const char * const *argv, CompileJob &job, bool icerun, list *extrafiles) { ArgumentsList args; string ofile; #if CLIENT_DEBUG > 1 trace() << "scanning arguments" << endl; for (int index = 0; argv[index]; index++) { trace() << " " << argv[index] << endl; } trace() << endl; #endif bool had_cc = (job.compilerName().size() > 0); bool always_local = analyze_program(had_cc ? 
job.compilerName().c_str() : argv[0], job, icerun); bool seen_c = false; bool seen_s = false; bool seen_mf = false; bool seen_md = false; bool seen_split_dwarf = false; bool seen_target = false; bool wunused_macros = false; bool seen_arch = false; bool seen_pedantic = false; const char *standard = NULL; // if rewriting includes and precompiling on remote machine, then cpp args are not local Argument_Type Arg_Cpp = compiler_only_rewrite_includes(job) ? Arg_Rest : Arg_Local; explicit_color_diagnostics = false; explicit_no_show_caret = false; for (int i = had_cc ? 2 : 1; argv[i]; i++) { const char *a = argv[i]; if (icerun) { args.append(a, Arg_Local); } else if (a[0] == '-') { if (!strcmp(a, "-E")) { always_local = true; args.append(a, Arg_Local); log_info() << "preprocessing, building locally" << endl; } else if (!strncmp(a, "-fdump", 6) || !strcmp(a, "-combine") || !strcmp(a, "-fsyntax-only") || !strncmp(a, "-ftime-report", strlen("-ftime-report")) || !strcmp(a, "-ftime-trace")) { always_local = true; args.append(a, Arg_Local); log_info() << "argument " << a << ", building locally" << endl; } else if (!strcmp(a, "-MD") || !strcmp(a, "-MMD") || str_startswith("-Wp,-MD", a) || str_startswith("-Wp,-MMD", a)) { seen_md = true; args.append(a, Arg_Local); /* These two generate dependencies as a side effect. They * should work with the way we call cpp. */ } else if (!strcmp(a, "-MG") || !strcmp(a, "-MP")) { args.append(a, Arg_Local); /* These just modify the behaviour of other -M* options and do * nothing by themselves. 
*/ } else if (!strcmp(a, "-MF") || str_startswith("-Wp,-MF", a)) { seen_mf = true; args.append(a, Arg_Local); args.append(argv[++i], Arg_Local); /* as above but with extra argument */ } else if (!strcmp(a, "-MT") || !strcmp(a, "-MQ") || str_startswith("-Wp,-MT", a) || str_startswith("-Wp,-MQ", a)) { args.append(a, Arg_Local); args.append(argv[++i], Arg_Local); /* as above but with extra argument */ } else if (a[1] == 'M') { /* -M(anything else) causes the preprocessor to produce a list of make-style dependencies on header files, either to stdout or to a local file. It implies -E, so only the preprocessor is run, not the compiler. There would be no point trying to distribute it even if we could. */ always_local = true; args.append(a, Arg_Local); log_info() << "argument " << a << ", building locally" << endl; } else if (str_equal("--param", a)) { args.append(a, Arg_Remote); assert( is_argument_with_space( a )); /* skip next word, being option argument */ if (argv[i + 1]) { args.append(argv[++i], Arg_Remote); } } else if (a[1] == 'B') { /* -B overwrites the path where the compiler finds the assembler. As we don't use that, better force local job. */ always_local = true; args.append(a, Arg_Local); log_info() << "argument " << a << ", building locally" << endl; if (str_equal(a, "-B")) { assert( is_argument_with_space( a )); /* skip next word, being option argument */ if (argv[i + 1]) { args.append(argv[++i], Arg_Local); } } } else if (str_startswith("-Wa,", a)) { /* The -Wa option specifies a list of arguments * that are passed to the assembler. * We split them into individual arguments and * call analyze_assembler_arg() for each one. 
*/ const char *pos = a + 4, *next_comma; bool local = false; string as_arg; string remote_arg = "-Wa"; while (1) { next_comma = strchr(pos, ','); if (next_comma) as_arg.assign(pos, next_comma - pos); else as_arg = pos; local = analyze_assembler_arg(as_arg, extrafiles); remote_arg += "," + as_arg; if (!next_comma) break; pos = next_comma + 1; } if (local) { always_local = true; args.append(a, Arg_Local); log_info() << "argument " << a << ", building locally" << endl; } else { args.append(remote_arg, Arg_Remote); } } else if (!strcmp(a, "-S")) { seen_s = true; } else if (!strcmp(a, "-fprofile-arcs") || !strcmp(a, "-ftest-coverage") || !strcmp(a, "-frepo") || !strcmp(a, "-fprofile-generate") || !strcmp(a, "-fprofile-use") || !strcmp(a, "-save-temps") || !strcmp(a, "--save-temps") || !strcmp(a, "-fbranch-probabilities")) { log_info() << "compiler will emit profile info (argument " << a << "); building locally" << endl; always_local = true; args.append(a, Arg_Local); } else if (!strcmp(a, "-gsplit-dwarf")) { args.append(a, Arg_Rest); seen_split_dwarf = true; } else if (str_equal(a, "-x")) { args.append(a, Arg_Rest); bool unsupported = true; std::string unsupported_opt = "??"; if (const char *opt = argv[i + 1]) { ++i; args.append(opt, Arg_Rest); unsupported_opt = opt; if (str_equal(opt, "c++") || str_equal(opt, "c") || str_equal(opt, "objective-c") || str_equal(opt, "objective-c++")) { CompileJob::Language lang = CompileJob::Lang_Custom; if( str_equal(opt, "c")) { lang = CompileJob::Lang_C; } else if( str_equal(opt, "c++")) { lang = CompileJob::Lang_CXX; } else if( str_equal(opt, "objective-c")) { lang = CompileJob::Lang_OBJC; } else if( str_equal(opt, "objective-c++")) { lang = CompileJob::Lang_OBJCXX; } else { continue; } job.setLanguage(lang); // will cause -x used remotely twice, but shouldn't be a problem unsupported = false; } } if (unsupported) { log_info() << "unsupported -x option: " << unsupported_opt << "; running locally" << endl; always_local = true; } } 
else if (!strcmp(a, "-march=native") || !strcmp(a, "-mcpu=native") || !strcmp(a, "-mtune=native")) { log_info() << "-{march,mpcu,mtune}=native optimizes for local machine, " << "building locally" << endl; always_local = true; args.append(a, Arg_Local); } else if (!strcmp(a, "-fexec-charset") || !strcmp(a, "-fwide-exec-charset") || !strcmp(a, "-finput-charset") ) { #if CLIENT_DEBUG log_info() << "-f*-charset assumes charset conversion in the build environment; must be local" << endl; #endif always_local = true; args.append(a, Arg_Local); } else if (!strcmp(a, "-c")) { seen_c = true; } else if (str_startswith("-o", a)) { if (!strcmp(a, "-o")) { /* Whatever follows must be the output */ if (argv[i + 1]) { ofile = argv[++i]; } } else { a += 2; ofile = a; } if (ofile == "-") { /* Different compilers may treat "-o -" as either "write to * stdout", or "write to a file called '-'". We can't know, * so we just always run it locally. Hopefully this is a * pretty rare case. */ log_info() << "output to stdout? 
running locally" << endl; always_local = true; } } else if (str_equal("-D", a) || str_equal("-U", a)) { args.append(a, Arg_Cpp); assert( is_argument_with_space( a )); /* skip next word, being option argument */ if (argv[i + 1]) { ++i; args.append(argv[i], Arg_Cpp); } } else if (str_equal("-I", a) || str_equal("-i", a) || str_equal("--include-directory", a) || str_equal("-L", a) || str_equal("-l", a) || str_equal("--library-directory", a) || str_equal("-MF", a) || str_equal("-MT", a) || str_equal("-MQ", a) || str_equal("-cxx-isystem", a) || str_equal("-c-isystem", a) || str_equal("-idirafter", a) || str_equal("--include-directory-after", a) || str_equal("-iframework", a) || str_equal("-iframeworkwithsysroot", a) || str_equal("-imacros", a) || str_equal("-imultilib", a) || str_equal("-iprefix", a) || str_equal("--include-prefix", a) || str_equal("-iquote", a) || str_equal("-include", a) || str_equal("-include-pch", a) || str_equal("-isysroot", a) || str_equal("-isystem", a) || str_equal("-isystem-after", a) || str_equal("-ivfsoverlay", a) || str_equal("-iwithprefix", a) || str_equal("--include-with-prefix", a) || str_equal("--include-with-prefix-after", a) || str_equal("-iwithprefixbefore", a) || str_equal("--include-with-prefix-before", a) || str_equal("-iwithsysroot", a)) { args.append(a, Arg_Local); assert( is_argument_with_space( a )); /* skip next word, being option argument */ if (argv[i + 1]) { ++i; if (str_startswith("-O", argv[i])) { always_local = true; log_info() << "argument " << a << " " << argv[i] << ", building locally" << endl; } args.append(argv[i], Arg_Local); } } else if (str_startswith("-Wp,", a) || str_startswith("-D", a) || str_startswith("-U", a)) { args.append(a, Arg_Cpp); } else if (str_startswith("-I", a) || str_startswith("-l", a) || str_startswith("-L", a)) { args.append(a, Arg_Local); } else if (str_equal("-undef", a)) { args.append(a, Arg_Cpp); } else if (str_equal("-nostdinc", a) || str_equal("-nostdinc++", a) || str_equal("-MD", a) || 
str_equal("-MMD", a) || str_equal("-MG", a) || str_equal("-MP", a)) { args.append(a, Arg_Local); } else if (str_equal("-Wmissing-include-dirs", a) || str_equal("-Werror=missing-include-dirs", a)) { args.append(a, Arg_Local); } else if (str_equal("-fno-color-diagnostics", a)) { explicit_color_diagnostics = true; args.append(a, Arg_Rest); } else if (str_equal("-fcolor-diagnostics", a)) { explicit_color_diagnostics = true; args.append(a, Arg_Rest); } else if (str_equal("-fno-diagnostics-color", a) || str_equal("-fdiagnostics-color=never", a)) { explicit_color_diagnostics = true; args.append(a, Arg_Rest); } else if (str_equal("-fdiagnostics-color", a) || str_equal("-fdiagnostics-color=always", a)) { explicit_color_diagnostics = true; args.append(a, Arg_Rest); } else if (str_equal("-fdiagnostics-color=auto", a)) { // Drop the option here and pretend it wasn't given, // the code below will decide whether to enable colors or not. explicit_color_diagnostics = false; } else if (str_equal("-fno-diagnostics-show-caret", a)) { explicit_no_show_caret = true; args.append(a, Arg_Rest); } else if (str_equal("-fdiagnostics-show-caret", a)) { explicit_no_show_caret = false; args.append(a, Arg_Rest); } else if (str_startswith("-fplugin=", a) || str_startswith("-fsanitize-blacklist=", a) || str_startswith("-fprofile-sample-use=", a)) { const char* prefix = NULL; static const char* const prefixes[] = { "-fplugin=", "-fsanitize-blacklist=", "-fprofile-sample-use=" }; for( size_t pref = 0; pref < sizeof(prefixes)/sizeof(prefixes[0]); ++pref) { if( str_startswith(prefixes[pref], a)) { prefix = prefixes[pref]; break; } } assert( prefix != NULL ); string file = a + strlen(prefix); if (access(file.c_str(), R_OK) == 0) { file = get_absfilename(file); extrafiles->push_back(file); } else { always_local = true; log_info() << "file for argument " << a << " missing, building locally" << endl; } args.append(prefix + file, Arg_Rest); } else if (str_equal("-Xclang", a)) { if (argv[i + 1]) { ++i; 
const char *p = argv[i]; if (str_equal("-load", p)) { if (argv[i + 1] && argv[i + 2] && str_equal(argv[i + 1], "-Xclang")) { args.append(a, Arg_Rest); args.append(p, Arg_Rest); string file = argv[i + 2]; if (access(file.c_str(), R_OK) == 0) { file = get_absfilename(file); extrafiles->push_back(file); } else { always_local = true; log_info() << "plugin for argument " << a << " " << p << " " << argv[i + 1] << " " << file << " missing, building locally" << endl; } args.append(argv[i + 1], Arg_Rest); args.append(file, Arg_Rest); i += 2; } } else if( str_equal( "-building-pch-with-obj", p )) { // We strip the arguments loading the PCH when building remotely, // so the object file would not contain anything from the PCH, // leading to link errors later when other objects would assume // this object file would provide some symbols from the PCH. log_info() << "argument " << a << " " << p << ", building locally" << endl; always_local = true; args.append(a, Arg_Rest); args.append(p, Arg_Rest); } else { args.append(a, Arg_Rest); args.append(p, Arg_Rest); } } } else if (str_equal("-target", a)) { seen_target = true; args.append(a, Arg_Rest); if (argv[i + 1]) { args.append(argv[++i], Arg_Rest); } } else if (str_startswith("--target=", a)) { seen_target = true; args.append(a, Arg_Rest); } else if (str_equal("-Wunused-macros", a) || str_equal("-Werror=unused-macros", a)) { wunused_macros = true; args.append(a, Arg_Rest); } else if (str_equal("-Wno-unused-macros", a)) { wunused_macros = false; args.append(a, Arg_Rest); } else if (str_equal("-pedantic", a)) { seen_pedantic = true; args.append(a, Arg_Rest); } else if (str_equal("-pedantic-errors", a)) { seen_pedantic = true; args.append(a, Arg_Rest); } else if (str_equal("-arch", a)) { if( seen_arch ) { log_info() << "multiple -arch options, building locally" << endl; always_local = true; } seen_arch = false; args.append(a, Arg_Rest); if (argv[i + 1]) { args.append(argv[++i], Arg_Rest); } } else { args.append(a, Arg_Rest); if 
(is_argument_with_space(a)) { if (argv[i + 1]) { args.append(argv[++i], Arg_Rest); } } } } else if (a[0] == '@') { args.append(a, Arg_Local); } else { args.append(a, Arg_Rest); } } if (!seen_c && !seen_s) { if (!always_local) { log_info() << "neither -c nor -S argument, building locally" << endl; } always_local = true; } else if (seen_s) { if (seen_c) { log_info() << "can't have both -c and -S, ignoring -c" << endl; } args.append("-S", Arg_Remote); } else { assert( seen_c ); args.append("-c", Arg_Remote); if (seen_split_dwarf) { job.setDwarfFissionEnabled(true); } } if (!always_local) { ArgumentsList backup = args; /* TODO: ccache has the heuristic of ignoring arguments that are not * extant files when looking for the input file; that's possibly * worthwile. Of course we can't do that on the server. */ string ifile; for (ArgumentsList::iterator it = args.begin(); it != args.end();) { if (it->first == "-") { always_local = true; log_info() << "stdin/stdout argument, building locally" << endl; break; } // Skip compiler arguments which are followed by another // argument not starting with -. if (it->first == "-Xclang" || it->first == "-x" || is_argument_with_space(it->first.c_str())) { ++it; ++it; } else if (it->second != Arg_Rest || it->first.at(0) == '-' || it->first.at(0) == '@') { ++it; } else if (ifile.empty()) { #if CLIENT_DEBUG log_info() << "input file: " << it->first << endl; #endif job.setInputFile(it->first); ifile = it->first; it = args.erase(it); if (should_always_build_locally(ifile)) { log_info() << "configure tests are run locally: " << ifile << endl; always_local = true; } } else { log_info() << "found another non option on command line. Two input files? 
" << it->first << endl; always_local = true; args = backup; job.setInputFile(string()); break; } } if (ifile.find('.') != string::npos) { string::size_type dot_index = ifile.rfind('.'); string ext = ifile.substr(dot_index + 1); if (ext == "cc" || ext == "cpp" || ext == "cxx" || ext == "cp" || ext == "c++" || ext == "C" || ext == "ii") { #if CLIENT_DEBUG if (job.language() != CompileJob::Lang_CXX) { log_info() << "switching to C++ for " << ifile << endl; } #endif job.setLanguage(CompileJob::Lang_CXX); } else if (ext == "mi" || ext == "m") { job.setLanguage(CompileJob::Lang_OBJC); } else if (ext == "mii" || ext == "mm" || ext == "M") { job.setLanguage(CompileJob::Lang_OBJCXX); } else if (ext == "s" || ext == "S" // assembler || ext == "ads" || ext == "adb" // ada || ext == "f" || ext == "for" // fortran || ext == "FOR" || ext == "F" || ext == "fpp" || ext == "FPP" || ext == "r") { always_local = true; log_info() << "source file " << ifile << ", building locally" << endl; } else if (ext != "c" && ext != "i") { // C is special, it depends on arg[0] name log_warning() << "unknown extension " << ext << endl; always_local = true; } if (!always_local && ofile.empty()) { ofile = ifile.substr(0, dot_index); if (seen_s) { ofile += ".s"; } else { ofile += ".o"; } ofile = find_basename(ofile); } if (!always_local && seen_md && !seen_mf) { string dfile = ofile.substr(0, ofile.rfind('.')) + ".d"; #if CLIENT_DEBUG log_info() << "dep file: " << dfile << endl; #endif args.append("-MF", Arg_Local); args.append(dfile, Arg_Local); } } } else { // always_local job.setInputFile(string()); } struct stat st; if( !always_local ) { if (ofile.empty() || (!stat(ofile.c_str(), &st) && !S_ISREG(st.st_mode))) { log_info() << "output file empty or not a regular file, building locally" << endl; always_local = true; } } // redirecting compiler's output will turn off its automatic coloring, so force it // when it would be used, unless explicitly set if (!icerun && compiler_has_color_output(job) && 
!explicit_color_diagnostics) { if (compiler_is_clang(job)) args.append("-fcolor-diagnostics", Arg_Rest); else args.append("-fdiagnostics-color", Arg_Rest); // GCC } // -Wunused-macros is tricky with remote preprocessing. GCC's -fdirectives-only outright // refuses to work if -Wunused-macros is given, and Clang's -frewrite-includes is buggy // if -Wunused-macros is used (https://bugs.llvm.org/show_bug.cgi?id=15614). It's a question // if this could possibly even work, given that macros may be used as filenames for #include directives. if( wunused_macros ) { job.setBlockRewriteIncludes(true); } // -pedantic doesn't work with remote preprocessing, if extensions to a named standard // are allowed. GCC allows GNU extensions by default, so let's check if a standard // other than eg gnu11 or gnu++14 was specified. if( seen_pedantic && !compiler_is_clang(job) && (!standard || str_startswith("gnu", standard)) ) { log_info() << "argument -pedantic, forcing local preprocessing" << endl; job.setBlockRewriteIncludes(true); } if( !always_local && compiler_only_rewrite_includes(job) && !compiler_is_clang(job)) { // Inject this, so that remote compilation uses -fpreprocessed -fdirectives-only args.append("-fdirectives-only", Arg_Remote); } if( !always_local && !seen_target && compiler_is_clang(job)) { // With gcc each binary can compile only for one target, so cross-compiling is just // a matter of using the proper cross-compiler remotely and it will automatically // compile for the given platform. However one clang binary can compile for many // platforms and so when cross-compiling it would by default compile for the remote // host platform. Therefore explicitly ask for our platform. 
string default_target = clang_get_default_target(job); if( !default_target.empty()) { args.append("-target", Arg_Remote); args.append(default_target, Arg_Remote); } else { always_local = true; log_error() << "failed to read default clang host platform, building locally" << endl; } } job.setFlags(args); job.setOutputFile(ofile); #if CLIENT_DEBUG trace() << "scanned result: local args=" << concat_args(job.localFlags()) << ", remote args=" << concat_args(job.remoteFlags()) << ", rest=" << concat_args(job.restFlags()) << ", local=" << always_local << ", compiler=" << job.compilerName() << ", lang=" << job.language() << endl; #endif return always_local; } icecream-1.3.1/client/argv.c000066400000000000000000000276701361626760200156720ustar00rootroot00000000000000/* Create and destroy argument vectors (argv's) Copyright (C) 1992-2017 Free Software Foundation, Inc. Written by Fred Fish @ Cygnus Support This file is part of the libiberty library. Libiberty is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Libiberty is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with libiberty; see the file COPYING.LIB. If not, write to the Free Software Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ /* Create and destroy argument vectors. An argument vector is simply an array of string pointers, terminated by a NULL pointer. */ #include "argv.h" #include "config.h" #include /* Routines imported from standard C runtime libraries. 
*/ #include #include #include #include #include #include #include #ifndef NULL #define NULL 0 #endif #ifndef EOS #define EOS '\0' #endif #define INITIAL_MAXARGC 8 /* Number of args + NULL in initial argv */ /* @deftypefn Extension char** dupargv (char * const *@var{vector}) Duplicate an argument vector. Simply scans through @var{vector}, duplicating each argument until the terminating @code{NULL} is found. Returns a pointer to the argument vector if successful. Returns @code{NULL} if there is insufficient memory to complete building the argument vector. @end deftypefn */ static char ** dupargv (char * const *argv) { int argc; char **copy; if (argv == NULL) return NULL; /* the vector */ for (argc = 0; argv[argc] != NULL; argc++); copy = (char **) malloc ((argc + 1) * sizeof (char *)); /* the strings */ for (argc = 0; argv[argc] != NULL; argc++) copy[argc] = strdup (argv[argc]); copy[argc] = NULL; return copy; } /* @deftypefn Extension void freeargv (char **@var{vector}) Free an argument vector that was built using @code{buildargv}. Simply scans through @var{vector}, freeing the memory for each argument until the terminating @code{NULL} is found, and then frees @var{vector} itself. @end deftypefn */ void freeargv (char **vector) { if (vector == NULL) return; char **scan; for (scan = vector; *scan != NULL; scan++) free (*scan); free (vector); } static void consume_whitespace (const char **input) { while (isspace (**input)) { (*input)++; } } static int only_whitespace (const char* input) { while (*input != EOS && isspace (*input)) input++; return (*input == EOS); } /* @deftypefn Extension char** buildargv (char *@var{sp}) Given a pointer to a string, parse the string extracting fields separated by whitespace and optionally enclosed within either single or double quotes (which are stripped off), and build a vector of pointers to copies of the string for each field. The input string remains unchanged. The last element of the vector is followed by a @code{NULL} element. 
All of the memory for the pointer array and copies of the string is obtained from @code{malloc}. All of the memory can be returned to the system with the single function call @code{freeargv}, which takes the returned result of @code{buildargv}, as it's argument. Returns a pointer to the argument vector if successful. Returns @code{NULL} if @var{sp} is @code{NULL} or if there is insufficient memory to complete building the argument vector. If the input is a null string (as opposed to a @code{NULL} pointer), then buildarg returns an argument vector that has one arg, a null string. @end deftypefn The memory for the argv array is dynamically expanded as necessary. In order to provide a working buffer for extracting arguments into, with appropriate stripping of quotes and translation of backslash sequences, we allocate a working buffer at least as long as the input string. This ensures that we always have enough space in which to work, since the extracted arg is never larger than the input string. The argument vector is always kept terminated with a @code{NULL} arg pointer, so it can be passed to @code{freeargv} at any time, or returned, as appropriate. */ static char **buildargv (const char *input) { if (input == NULL) return NULL; char *copybuf; int squote = 0; int dquote = 0; int bsquote = 0; int argc = 0; int maxargc = 0; char **argv = NULL; char **nargv; copybuf = (char *) malloc (strlen (input) + 1); /* Is a do{}while to always execute the loop once. Always return an argv, even for null strings. See NOTES above, test case below. 
*/ do { /* Pick off argv[argc] */ consume_whitespace (&input); if ((maxargc == 0) || (argc >= (maxargc - 1))) { /* argv needs initialization, or expansion */ if (argv == NULL) { maxargc = INITIAL_MAXARGC; nargv = (char **) malloc (maxargc * sizeof (char *)); } else { maxargc *= 2; nargv = (char **) realloc (argv, maxargc * sizeof (char *)); } argv = nargv; argv[argc] = NULL; } /* Begin scanning arg */ char *arg = copybuf; while (*input != EOS) { if (isspace (*input) && !squote && !dquote && !bsquote) { break; } else { if (bsquote) { bsquote = 0; *arg++ = *input; } else if (*input == '\\') { bsquote = 1; } else if (squote) { if (*input == '\'') { squote = 0; } else { *arg++ = *input; } } else if (dquote) { if (*input == '"') { dquote = 0; } else { *arg++ = *input; } } else { if (*input == '\'') { squote = 1; } else if (*input == '"') { dquote = 1; } else { *arg++ = *input; } } input++; } } *arg = EOS; argv[argc] = strdup (copybuf); argc++; argv[argc] = NULL; consume_whitespace (&input); } while (*input != EOS); free (copybuf); return (argv); } /* @deftypefn Extension void expandargv (int *@var{argcp}, char ***@var{argvp}) The @var{argcp} and @code{argvp} arguments are pointers to the usual @code{argc} and @code{argv} arguments to @code{main}. This function looks for arguments that begin with the character @samp{@@}. Any such arguments are interpreted as ``response files''. The contents of the response file are interpreted as additional command line options. In particular, the file is separated into whitespace-separated strings; each such string is taken as a command-line option. The new options are inserted in place of the option naming the response file, and @code{*argcp} and @code{*argvp} will be updated. If the value of @code{*argvp} is modified by this function, then the new value has been dynamically allocated and can be deallocated by the caller with @code{freeargv}. 
However, most callers will simply call @code{expandargv} near the beginning of @code{main} and allow the operating system to free the memory when the program exits. @end deftypefn */ void expandargv (int *argcp, char ***argvp) { /* The argument we are currently processing. */ int i = 0; /* To check if ***argvp has been dynamically allocated. */ char ** const original_argv = *argvp; /* Limit the number of response files that we parse in order to prevent infinite recursion. */ unsigned int iteration_limit = 2000; /* Loop over the arguments, handling response files. We always skip ARGVP[0], as that is the name of the program being run. */ while (++i < *argcp) { /* The name of the response file. */ const char *filename; /* The response file. */ FILE *f; /* An upper bound on the number of characters in the response file. */ long pos; /* The number of characters in the response file, when actually read. */ size_t len; /* A dynamically allocated buffer used to hold options read from a response file. */ char *buffer; /* Dynamically allocated storage for the options read from the response file. */ char **file_argv; /* The number of options read from the response file, if any. */ size_t file_argc; struct stat sb; /* We are only interested in options of the form "@file". */ filename = (*argvp)[i]; if (filename[0] != '@') continue; /* If we have iterated too many times then stop. */ if (-- iteration_limit == 0) { fprintf (stderr, "%s: error: too many @-files encountered\n", (*argvp)[0]); exit (1); } if (stat (filename+1, &sb) < 0) continue; if (S_ISDIR(sb.st_mode)) { fprintf (stderr, "%s: error: @-file refers to a directory\n", (*argvp)[0]); exit (1); } /* Read the contents of the file. 
*/ f = fopen (++filename, "r"); if (!f) continue; if (fseek (f, 0L, SEEK_END) == -1) goto error; pos = ftell (f); if (pos == -1) goto error; if (fseek (f, 0L, SEEK_SET) == -1) goto error; buffer = (char *) malloc (pos * sizeof (char) + 1); len = fread (buffer, sizeof (char), pos, f); if (len != (size_t) pos /* On Windows, fread may return a value smaller than POS, due to CR/LF->CR translation when reading text files. That does not in-and-of itself indicate failure. */ && ferror (f)) goto error; /* Add a NUL terminator. */ buffer[len] = '\0'; /* If the file is empty or contains only whitespace, buildargv would return a single empty argument. In this context we want no arguments, instead. */ if (only_whitespace (buffer)) { file_argv = (char **) malloc (sizeof (char *)); file_argv[0] = NULL; } else /* Parse the string. */ file_argv = buildargv (buffer); /* If *ARGVP is not already dynamically allocated, copy it. */ if (*argvp == original_argv) *argvp = dupargv (*argvp); /* Count the number of arguments. */ file_argc = 0; while (file_argv[file_argc]) ++file_argc; /* Free the original options memory. */ free((*argvp)[i]); /* Now, insert FILE_ARGV into ARGV. The "+1" below handles the NULL terminator at the end of ARGV. */ *argvp = ((char **) realloc (*argvp, (*argcp + file_argc + 1) * sizeof (char *))); memmove (*argvp + i + file_argc, *argvp + i + 1, (*argcp - i) * sizeof (char *)); memcpy (*argvp + i, file_argv, file_argc * sizeof (char *)); /* The original option has been replaced by all the new options. */ *argcp += file_argc - 1; /* Free up memory allocated to process the response file. We do not use freeargv because the individual options in FILE_ARGV are now in the main ARGV. */ free (file_argv); free (buffer); /* Rescan all of the arguments just read to support response files that include other response files. */ --i; error: /* We're all done with the file now. 
*/ fclose (f); } } icecream-1.3.1/client/argv.h000066400000000000000000000023221361626760200156620ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* * icecc -- A simple distributed compiler system * * Copyright (C) 2003, 2004 by the Icecream Authors * * based on distcc * Copyright (C) 2002, 2003 by Martin Pool * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _CLIENT_ARGV_H_ #define _CLIENT_ARGV_H_ #ifdef __cplusplus extern "C" { #endif void expandargv (int *argcp, char ***argvp); void freeargv(char **vector); #ifdef __cplusplus } #endif #endif icecream-1.3.1/client/client.h000066400000000000000000000065611361626760200162120ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of icecc. Copyright (C) 2002, 2003 by Martin Pool 2004 Stephan Kulow This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _CLIENT_H_ #define _CLIENT_H_ #include #include #include #include #include #include #include "exitcode.h" #include "logging.h" #include "util.h" class MsgChannel; extern std::string remote_daemon; /* in remote.cpp */ extern std::string get_absfilename(const std::string &_file); /* In arg.cpp. */ extern bool analyse_argv(const char * const *argv, CompileJob &job, bool icerun, std::list *extrafiles); /* In cpp.cpp. */ extern pid_t call_cpp(CompileJob &job, int fdwrite, int fdread = -1); /* In local.cpp. */ extern int build_local(CompileJob &job, MsgChannel *daemon, struct rusage *usage = 0); extern std::string find_compiler(const CompileJob &job); extern bool compiler_is_clang(const CompileJob &job); extern bool compiler_only_rewrite_includes(const CompileJob &job); extern std::string compiler_path_lookup(const std::string &compiler); extern std::string clang_get_default_target(const CompileJob &job); /* In remote.cpp - permill is the probability it will be compiled three times */ extern int build_remote(CompileJob &job, MsgChannel *scheduler, const Environments &envs, int permill); /* safeguard.cpp */ // We allow several recursions if icerun is involved, just in case icerun is e.g. used to invoke a script // that calls make that invokes compilations. In this case, it is allowed to have icerun->icecc->compiler. // However, icecc->icecc recursion is a problem, so just one recursion exceeds the limit. 
// Also note that if the total number of such recursive invocations exceedds the number of allowed local // jobs, iceccd will not assign another local job and the whole build will get stuck. static const int SafeguardMaxLevel = 2; enum SafeguardStep { SafeguardStepCompiler = SafeguardMaxLevel, SafeguardStepCustom = 1 }; extern void dcc_increment_safeguard(SafeguardStep step); extern int dcc_recursion_safeguard(void); extern Environments parse_icecc_version(const std::string &target, const std::string &prefix); class client_error : public std::runtime_error { public: client_error(int code, const std::string& what) : std::runtime_error(what) , errorCode(code) {} const int errorCode; }; class remote_error : public client_error { public: remote_error(int code, const std::string& what) : client_error(code, what) {} }; #endif icecream-1.3.1/client/cpp.cpp000066400000000000000000000146521361626760200160510ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* * distcc -- A simple distributed compiler system * * Copyright (C) 2002, 2003 by Martin Pool * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ /** * @file * * Run the preprocessor. Client-side only. 
**/ #include "config.h" #include #include #include #include #include #include #include "client.h" using namespace std; bool dcc_is_preprocessed(const string &sfile) { if (sfile.size() < 3) { return false; } int last = sfile.size() - 1; if ((sfile[last - 1] == '.') && (sfile[last] == 'i')) { return true; // .i } if ((sfile[last - 2] == '.') && (sfile[last - 1] == 'i') && (sfile[last] == 'i')) { return true; // .ii } return false; } /** * If the input filename is a plain source file rather than a * preprocessed source file, then preprocess it to a temporary file * and return the name in @p cpp_fname. * * The preprocessor may still be running when we return; you have to * wait for @p cpp_fid to exit before the output is complete. This * allows us to overlap opening the TCP socket, which probably doesn't * use many cycles, with running the preprocessor. **/ pid_t call_cpp(CompileJob &job, int fdwrite, int fdread) { flush_debug(); pid_t pid = fork(); if (pid == -1) { log_perror("failed to fork:"); return -1; /* probably */ } if (pid != 0) { /* Parent. Close the write fd. */ if (fdwrite > -1) { if ((-1 == close(fdwrite)) && (errno != EBADF)){ log_perror("close() failed"); } } return pid; } /* Child. Close the read fd, in case we have one. */ if (fdread > -1) { if ((-1 == close(fdread)) && (errno != EBADF)){ log_perror("close failed"); } } int ret = dcc_ignore_sigpipe(0); if (ret) { /* set handler back to default */ _exit(ret); } char **argv; if (dcc_is_preprocessed(job.inputFile())) { /* already preprocessed, great. write the file to the fdwrite (using cat) */ argv = new char*[2 + 1]; argv[0] = strdup("/bin/cat"); argv[1] = strdup(job.inputFile().c_str()); argv[2] = 0; } else { list flags = job.localFlags(); appendList(flags, job.restFlags()); for (list::iterator it = flags.begin(); it != flags.end();) { /* This has a duplicate meaning. it can either include a file for preprocessing or a precompiled header. decide which one. 
*/ if ((*it) == "-include") { ++it; if (it != flags.end()) { std::string p = (*it); if (access(p.c_str(), R_OK) < 0 && access((p + ".gch").c_str(), R_OK) == 0) { // PCH is useless for preprocessing, ignore the flag. list::iterator o = --it; it++; flags.erase(o); o = it++; flags.erase(o); } } } else if ((*it) == "-include-pch") { list::iterator o = it; ++it; if (it != flags.end()) { std::string p = (*it); if (access(p.c_str(), R_OK) == 0) { // PCH is useless for preprocessing (and probably slows things down), ignore the flag. flags.erase(o); o = it++; flags.erase(o); } } } else if ((*it) == "-fpch-preprocess") { // This would add #pragma GCC pch_preprocess to the preprocessed output, which would make // the remote GCC try to load the PCH directly and fail. Just drop it. This may cause a build // failure if the -include check above failed to detect usage of a PCH file (e.g. because // it needs to be found in one of the -I paths, which we don't check) and the header file // itself doesn't exist. 
flags.erase(it++); } else { ++it; } } int argc = flags.size(); argc++; // the program argc += 2; // -E file.i argc += 1; // -frewrite-includes / -fdirectives-only argv = new char*[argc + 1]; argv[0] = strdup(find_compiler(job).c_str()); int i = 1; for (list::const_iterator it = flags.begin(); it != flags.end(); ++it) { argv[i++] = strdup(it->c_str()); } argv[i++] = strdup("-E"); argv[i++] = strdup(job.inputFile().c_str()); if (compiler_only_rewrite_includes(job)) { if( compiler_is_clang(job)) { argv[i++] = strdup("-frewrite-includes"); } else { // gcc argv[i++] = strdup("-fdirectives-only"); } } argv[i++] = 0; } string argstxt = argv[ 0 ]; for( int i = 1; argv[ i ] != NULL; ++i ) { argstxt += ' '; argstxt += argv[ i ]; } trace() << "preparing source to send: " << argstxt << endl; if (fdwrite != STDOUT_FILENO) { /* Ignore failure */ close(STDOUT_FILENO); dup2(fdwrite, STDOUT_FILENO); close(fdwrite); } dcc_increment_safeguard(SafeguardStepCompiler); execv(argv[0], argv); int exitcode = ( errno == ENOENT ? 127 : 126 ); ostringstream errmsg; errmsg << "execv " << argv[0] << " failed"; log_perror(errmsg.str()); _exit(exitcode); } icecream-1.3.1/client/icecc-create-env.in000077500000000000000000000437201361626760200202110ustar00rootroot00000000000000#! /usr/bin/env bash # icecc -- A simple distributed compiler system # # Copyright (C) 2004 by the Icecream Authors # GPL target_files= add_file_duplicates= # Optional path to strip from all paths if present, e.g. if the compiler is not in /usr. stripprefix= case $(uname) in "Darwin") is_darwin=1;; "FreeBSD") is_freebsd=1;; "Linux") is_linux=1;; esac usage () { echo "Create compiler environment for distributed build." echo "Usage: $0 [extra_options]" echo "For GCC, pass the the gcc binary, the matching g++ will be used automatically." echo "For Clang, pass the clang binary." echo "Use --addfile to add extra files." echo "Use --compression to set tarball type (none,gzip,bzip2,zstd,xz)." 
echo "For backwards compatibility, the following is also supported:" echo "$0 --gcc " echo "$0 --clang " } is_contained () { case " $target_files " in *" $1 "* ) return 0 ;; *"=$1 "* ) return 0;; * ) return 1 ;; esac } is_add_file_duplicate () { case " $add_file_duplicates " in *" $1 "* ) return 0 ;; * ) return 1 ;; esac } # returns abs path to filedir abs_path() { local path=$1 if test -f "$path"; then pushd $(dirname $path) > /dev/null 2>&1 dir_path=$(pwd -P) path=$dir_path/$(basename $path) popd > /dev/null 2>&1 elif test -d "$path"; then pushd $path > /dev/null 2>&1 path=$(pwd -P) popd > /dev/null 2>&1 fi echo $path } # return abs path to filedir with symlinks resolved resolve_path() { local_path=$1 # pwd -P in abs_path will take care of resolving symlinks in the path, # so take care just of the file component itself while test -L "$local_path"; do pushd $(dirname $local_path) >/dev/null 2>&1 local_path=$(abs_path $(readlink $(basename $local_path))) popd > /dev/null 2>&1 done abs_path $local_path } # Avoid /../ components in paths such as /usr/X11/../lib64 . # This could use realpath, but that's reportedly not that widely available. 
convert_path_cdup () { local filename="$1" local directory=$(dirname $filename) local fixed_directory=$(cd "$directory" >/dev/null && pwd) echo ${fixed_directory}/$(basename $filename) } add_file () { local skipldd= if test "$1" = "skipldd"; then skipldd=1 shift fi local name="$1" local path="$1"; if test -n "$2"; then name="$2" fi test -z "$name" && return # it is faster to check as quickly as possible, is_contained checks duplicates too, but this saves time is_add_file_duplicate "$name" && return add_file_duplicates="$add_file_duplicates $name" path=$(resolve_path $path) name=$(convert_path_cdup $name) if test -n "$stripprefix"; then name=$(echo $name | sed "s#$stripprefix#/usr#" ) fi toadd="$name=$path" if test "$name" = "$path"; then toadd=$path fi is_contained "$toadd" && return echo "adding file $toadd" target_files="$target_files $toadd" if test -x "$path" -a -z "$skipldd"; then # Only call ldd when it makes sense if file -L "$path" | grep 'ELF' > /dev/null 2>&1; then if ! file -L "$path" | grep 'static' > /dev/null 2>&1; then # ldd now outputs ld as /lib/ld-linux.so.xx on current nptl based glibc # this regexp parse the outputs like: # ldd /usr/bin/gcc # linux-gate.so.1 => (0xffffe000) # libc.so.6 => /lib/tls/libc.so.6 (0xb7e81000) # /lib/ld-linux.so.2 (0xb7fe8000) # covering both situations ( with => and without ) local lib for lib in $(ldd "$path" | sed -n 's,^[^/]*\(/[^ ]*\).*,\1,p'); do test -f "$lib" || continue # Check whether the same library also exists in the parent directory, # and prefer that on the assumption that it is a more generic one. local baselib=$(echo "$lib" | sed 's,\(/[^/]*\)/.*\(/[^/]*\)$,\1\2,') usebaselib= if test "$baselib" != "$lib" -a -f "$baselib"; then # Make sure the base lib has the same architecture. 
local archlib="$(objdump -f "$lib" | grep architecture)" local archbaselib="$(objdump -f "$baselib" | grep architecture)" if test "$archlib" = "$archbaselib"; then usebaselib=1 fi fi if test -n "$usebaselib"; then lib=$baselib add_file "$lib" else # Optimization: We are adding a library we got from ldd output, so avoid # using ldd on it, as it should not find more than this ldd. add_file "skipldd" "$lib" fi # Add the non-haswell and non-avx512_1 libraries too case "$lib" in */haswell/*|*/avx512_1/*) ;; *) continue ;; esac local lib_non_avx512=$(echo "$lib" | sed s,/avx512_1/,/,) local lib_non_hsw=$(echo "$lib_non_avx512" | sed s,/haswell/,/,) if [ "$lib" != "$lib_non_avx512" ] && [ -f "$lib_non_avx512" ]; then add_file "$lib_non_avx512" fi if [ "$lib" != "$lib_non_hsw" ] && [ -f "$lib_non_hsw" ]; then add_file "$lib_non_hsw" fi done fi elif test "$is_darwin" = 1; then # this regexp parse the outputs like: # $ otool -L /usr/llvm-gcc-4.2/libexec/gcc/i686-apple-darwin11/4.2.1/cc1 # @executable_path/libllvmgcc.dylib # /usr/lib/libiconv.2.dylib # /usr/lib/libSystem.B.dylib # /usr/lib/libstdc++.6.dylib for lib in $(otool -L "$path" | sed -n 's,^[^/@]*\([/@][^ ]*\).*,\1,p'); do local libinstall="" if test "${lib%%/*}" = "@executable_path"; then # Installs libs like @executable_path/libllvmgcc.dylib # that contains @executable_path in its path in $(dirname ${name}) # (the same install path of the executable program) libinstall="${name%/*}${lib#@executable_path}" lib="${path%/*}${lib#@executable_path}" fi test -f "$lib" || continue # Check wether the same library also exists in the parent directory, # and prefer that on the assumption that it is a more generic one. local baselib=$(echo "$lib" | sed 's,\(/[^/]*\)/.*\(/[^/]*\)$,\1\2,') test -f "$baselib" && lib=$baselib add_file "$lib" "$libinstall" done fi fi } # Search and add file to the tarball file. 
search_addfile() { local compiler=$1 local file_name=$2 local file_installdir=$3 local file="" file=$($compiler -print-prog-name=$file_name) if test -z "$file" || test "$file" = "$file_name" || ! test -e "$file"; then file=$($compiler -print-file-name=$file_name) fi if test "$file" = "$file_name"; then file=$(command -v $file_name || echo $file_name) fi if ! test -e "$file"; then return 1 fi if test -z "$file_installdir"; then # The file is going to be added to the tarball # in the same path where the compiler found it, as an absolute path. # If it's not in the /usr prefix, stripprefix handling will take care of that. file_installdir=$(dirname $file) file_installdir=$(abs_path $file_installdir) fi add_file "$file" "$file_installdir/$file_name" return 0 } # backward compat if test "$1" = "--respect-path"; then shift fi if test "$1" = "--gcc"; then shift added_gcc=$1 shift added_gxx=$1 shift gcc=1 if test "$1" = "--clang"; then shift added_clang=$1 shift if test "x$1" != "x--addfile" -a "x$1" != "x--gcc" -a -e "$1"; then # accept 2nd argument being the compilerwrapper binary, for backwards compatibility added_compilerwrapper=$1 shift fi if test -z "$added_compilerwrapper"; then added_compilerwrapper=@PKGLIBEXECDIR@/compilerwrapper fi clang=1 fi elif test "$1" = "--clang"; then shift added_clang=$1 shift if test "x$1" != "x--addfile" -a "x$1" != "x--gcc" -a -e "$1"; then # accept 2nd argument being the compilerwrapper binary, for backwards compatibility added_compilerwrapper=$1 shift fi if test -z "$added_compilerwrapper"; then added_compilerwrapper=@PKGLIBEXECDIR@/compilerwrapper fi clang=1 if test "$1" = "--gcc"; then shift added_gcc=$1 shift added_gxx=$1 shift gcc=1 fi else if test -z "$1"; then usage exit 1 fi # We got just a binary, find out what compiler it is and bypass any possible wrappers. 
# __clang__ expands to 1 if compiler is Clang # __GNUC__ expands to the main version number (and is valid also with Clang) test_output=$(echo "clang __clang__ gcc __GNUC__" | "$1" -E -) if test $? -ne 0; then echo "$1" is not a compiler. exit 1 fi if echo "$test_output" | grep -q '^clang 1 gcc.*'; then clang=1 # With clang, -print-prog-name gives the full path to the actual clang binary, # allowing to bypass any possible wrapper script etc. Note we must pass # just the binary name, not full path. added_clang=$($1 -print-prog-name=$(basename $1)) added_compilerwrapper=@PKGLIBEXECDIR@/compilerwrapper elif echo "$test_output" | grep -q 'clang __clang__ gcc.*'; then gcc=1 # Gcc's -print-prog-name is useless, as it prints simply "gcc", so we have to # get the location of the actual gcc binary from gcc -v output, which prints # (to stderr) gcc's argv[0] as COLLECT_GCC. added_gcc=$($1 -v 2>&1 | grep COLLECT_GCC= | sed 's/^COLLECT_GCC=//') if test -z "$added_gcc"; then echo Failed to find gcc location. exit 1 fi if ! test -x "$added_gcc"; then added_gcc=$(command -v $added_gcc) fi else echo "$1" is not a known compiler. exit 1 fi shift if test -n "$1"; then case "$1" in --*) ;; # an option, ignore *) # (backwards) compatibility, assume the second argument is the C++ compiler added_gxx=$1 shift ;; esac fi if test -n "$gcc" -a -z "$added_gxx"; then # guess g++ from gcc added_gxx=$(echo $added_gcc | sed 's/\(.*\)gcc/\1g++/') fi fi if test -n "$gcc"; then if test -z "$added_gcc" || test -z "$added_gxx"; then usage exit 1 fi if ! test -x "$added_gcc" ; then echo "'$added_gcc' is no executable." exit 1 fi if ! test -x "$added_gxx" ; then echo "'$added_gxx' is no executable." exit 1 fi if ! file --mime-type -L "$added_gcc" | grep -q ': application/'; then echo "$added_gcc is not a binary file." exit 1 fi if ! file --mime-type -L "$added_gxx" | grep -q ': application/'; then echo "$added_gxx is not a binary file." exit 1 fi fi if test -n "$clang"; then if ! 
test -x "$added_clang" ; then echo "'$added_clang' is no executable." exit 1 fi if ! file --mime-type -L "$added_clang" | grep -q ': application/'; then echo "$added_clang is not a binary file." exit 1 fi if ! test -x "$added_compilerwrapper" ; then echo "'$added_compilerwrapper' is no executable." exit 1 fi fi extrafiles= compress_program=gzip compress_ext=.gz compress_args= while test -n "$1"; do if test "x$1" = "x--addfile"; then shift extrafiles="$extrafiles $1" elif test "x$1" = "x--compression"; then shift case "$1" in none) compress_ext= compress_program=cat ;; gzip|gz) compress_ext=.gz compress_program=gzip ;; bzip2|bz2) compress_ext=.bz2 compress_program=bzip2 ;; zstd) compress_ext=.zst compress_program=zstd # threads compress_args=-T0 ;; xz) compress_ext=.xz compress_program=xz # threads compress_args=-T0 if test -n "$ICECC_TESTS"; then compress_args="-T0 -0" fi ;; *) echo "Unknown compression type '$1'." exit 1 ;; esac else echo "Unknown argument '$1'" exit 1 fi shift done if test -n "$compress_program"; then if ! command -v "$compress_program" >/dev/null; then echo "Cannot find compression program '$compress_program'." exit 1 fi fi tempdir=$(mktemp -d /tmp/iceccenvXXXXXX) # for testing the environment is usable at all if test -x /bin/true; then add_file /bin/true elif test -x /usr/bin/true; then add_file /usr/bin/true /bin/true fi if test -n "$gcc"; then # getting compilers resolved path added_gcc=$(resolve_path $added_gcc) added_gxx=$(resolve_path $added_gxx) # In case gcc is installed elsewhere. stripprefix=$(dirname $(dirname $added_gcc)) if test -z "$clang"; then add_file $added_gcc /usr/bin/gcc add_file $added_gxx /usr/bin/g++ else # HACK: The clang case below will add a wrapper in place of gcc, so add the real # gcc under a different name that the wrapper will call. 
add_file $added_gcc /usr/bin/gcc.bin add_file $added_gxx /usr/bin/g++.bin fi search_addfile $added_gcc cc1 /usr/bin search_addfile $added_gxx cc1plus /usr/bin search_addfile $added_gcc as /usr/bin search_addfile $added_gcc specs search_addfile $added_gcc liblto_plugin.so search_addfile $added_gcc objcopy /usr/bin fi if test -n "$clang"; then # getting compilers resolved path orig_clang=$added_clang added_clang=$(resolve_path $added_clang) # In case clang is installed elsewhere. stripprefix=$(dirname $(dirname $added_clang)) add_file $added_clang /usr/bin/clang # HACK: Older icecream remotes have /usr/bin/{gcc|g++} hardcoded and wouldn't # call /usr/bin/clang at all. So include a wrapper binary that will call gcc or clang # depending on an extra argument added by icecream. add_file $added_compilerwrapper /usr/bin/gcc add_file $added_compilerwrapper /usr/bin/g++ search_addfile $orig_clang as /usr/bin search_addfile $orig_clang objcopy /usr/bin # HACK: Clang4.0 and later access /proc/cpuinfo and report an error when they fail # to find it, even if they use a fallback mechanism, making the error useless # (at least in this case). Since the file is not really needed, create a fake one. if test -d /proc; then mkdir $tempdir/fakeproc mkdir $tempdir/fakeproc/proc touch $tempdir/fakeproc/proc/cpuinfo add_file $tempdir/fakeproc/proc/cpuinfo /proc/cpuinfo fi fi # Do not do any prefix stripping on extra files, they (e.g. clang plugins) are usually # referred to using their original path. 
save_stripprefix="$stripprefix" stripprefix= for extrafile in $extrafiles; do add_file $extrafile done stripprefix="$save_stripprefix" if test "$is_darwin" = 1; then # add dynamic linker add_file /usr/lib/dyld add_file /usr/bin/gcc add_file /usr/bin/g++ real_file=$(/usr/bin/as -micha -- < /dev/null 2>&1 | sed -n 's,^[^/]*\(/[^ :]*\).*,\1,p') add_file $(abs_path "$real_file") fi if test "$is_freebsd" = 1; then add_file /libexec/ld-elf.so.1 fi # for ldconfig -r to work, ld.so.conf must not contain relative paths # in include directives. Make them absolute. if test -f /etc/ld.so.conf; then tmp_ld_so_conf=$(mktemp /tmp/icecc_ld_so_confXXXXXX) while read directive path; do if [ "$directive" = "include" -a "${path:0:1}" != "/" ]; then path="/etc/$path" fi echo "$directive $path" done $tmp_ld_so_conf add_file $tmp_ld_so_conf /etc/ld.so.conf fi new_target_files= for i in $target_files; do case $i in *=/*) target=$(echo $i | cut -d= -f1) path=$(echo $i | cut -d= -f2) ;; *) path=$i target=$i ;; esac mkdir -p $tempdir/$(dirname $target) if test -x $path && objcopy -p --strip-unneeded $path $tempdir/$target 2>/dev/null; then true # ok elif test -x $path && objcopy -p -g $path $tempdir/$target 2>/dev/null; then true # ok else cp -p $path $tempdir/$target fi target=$(echo $target | cut -b2-) new_target_files="$new_target_files $target" done if test -x /sbin/ldconfig -a "$is_linux" = 1; then mkdir -p $tempdir/var/cache/ldconfig /sbin/ldconfig -r $tempdir for candidate in etc var/cache/ldconfig; do test -e $tempdir/$candidate/ld.so.cache || continue; new_target_files="$new_target_files $candidate/ld.so.cache" break done fi md5sum=NONE for file in /usr/bin/md5sum /bin/md5 /usr/bin/md5 /sbin/md5; do if test -x $file; then md5sum=$file break fi done # now sort the files in order to make the md5sums independent # of ordering target_files=$(for i in $new_target_files; do echo $i; done | sort) md5=$(for i in $target_files; do $md5sum $tempdir/$i; done | sed -e "s# $tempdir##" | $md5sum 
| sed -e 's/ .*$//') || { echo "Couldn't compute MD5 sum." exit 2 } echo "creating $md5.tar$compress_ext" mydir=$(pwd) cd $tempdir tar -ch --numeric-owner -f - $target_files | "$compress_program" $compress_args > "$md5".tar"$compress_ext" || { echo "Couldn't create archive" exit 3 } mv "$md5".tar"$compress_ext" "$mydir"/ cd .. rm -rf $tempdir rm -f $tmp_ld_so_conf # Print the tarball name to fd 5 (if it's open, created by whatever has invoked this) ( echo $md5.tar"$compress_ext" >&5 ) 2>/dev/null exit 0 icecream-1.3.1/client/icecc-test-env.in000077500000000000000000000076341361626760200177310ustar00rootroot00000000000000#! /bin/bash # # A simple script that can be used to see if an environment was built # successfully. Note that passing these test doesn't guarantee the environment # will work, but failing them means it certainly won't. Note that this script # may need to be executed with sudo if the current user doesn't have chroot # permissions # # This program always exits with an error code of 2 so that it can be # distinguished from a sudo error (with an exit code of 1) # # Copyright (C) 2018 Joshua Watt # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
# Exit on any unexpected failure set -e TEST_DIR= QUIET=false REALPATH=$(which realpath 2> /dev/null || true) # Cleanup the temp directory on exit cleanup() { if [ -n "$TEST_DIR" ]; then rm -rf "$TEST_DIR" fi } trap cleanup EXIT print_info() { if ! $QUIET; then echo "$@" fi } usage() { echo "Usage: $(basename $0) [-h] TOOLCHAIN" echo " -h --help Show Help" echo " -q --quiet Only print errors" echo " TOOLCHAIN Toolchain archive to test" echo "" echo "Tests a toolchain environment to see if it is correctly constructed" } OPTIONS=`getopt -o hqf --long help,quiet -n $(basename $0) -- "$@"` eval set -- "$OPTIONS" while true; do case "$1" in -h|--help) usage exit 0 ;; -q|--quiet) QUIET=true shift ;; --) shift break ;; *) echo "Unknown option '$1'" exit 2 ;; esac done if [ -z "$1" ]; then echo "Toolchain argument is required" usage exit 2 fi TEST_DIR=$(mktemp -d) if [ -z "$REALPATH" ]; then echo "WARNING: realpath not found, symlink tests will be disabled" fi # Extract the toolchain tar -xf "$1" -C "$TEST_DIR" # Determine the compiler if [ -e $TEST_DIR/usr/bin/clang ]; then print_info "Compiler is clang" IS_CLANG=true else print_info "Compiler is gcc" IS_CLANG=false fi check_program() { local prog="$1" shift cd $TEST_DIR print_info "Checking $prog..." if [ ! -x "${TEST_DIR}${prog}" ]; then echo "$prog is missing or not executable" exit 2 fi if [ -n "$REALPATH" ]; then local target="$($REALPATH "${TEST_DIR}${prog}")" case $target in "$($REALPATH "${TEST_DIR}")"/*) ;; *) echo "$prog is a symbolic link that points to '$target' outside the environment" exit 2 ;; esac fi if ! chroot . 
$prog $@ < /dev/null; then echo "$prog failed to execute" exit 2 fi print_info "OK" } check_program /bin/true if $IS_CLANG; then check_program /usr/bin/clang -xc -c -o test.o - check_program /usr/bin/as # NOTE: The compilerwrapper programs /usr/bin/gcc and /usr/bin/g++ are not # tested because they interfer with the automated testing when the # address sanitizer is enabled else ARGS="-fpreprocessed" check_program /usr/bin/gcc $ARGS -xc -c -o test.o - check_program /usr/bin/g++ $ARGS -xc++ -c -o test.o - check_program /usr/bin/cc1 $ARGS -o test.o -quiet check_program /usr/bin/cc1plus $ARGS -o test.o -quiet check_program /usr/bin/as fi icecream-1.3.1/client/local.cpp000066400000000000000000000257221361626760200163610ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. Copyright (c) 2004 Stephan Kulow 2002, 2003 by Martin Pool This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "config.h" #include #include #include #include #include #include #include #include #include #include #include #include "client.h" using namespace std; /* Name of this program, for trace.c */ const char *rs_program_name = "icecc"; #define CLIENT_DEBUG 0 static string compiler_path_lookup_helper(const string &compiler, const string &compiler_path) { if (compiler_path.find('/') != string::npos) { return compiler_path; } string path = ::getenv("PATH"); string::size_type begin = 0; string::size_type end = 0; struct stat s; bool after_selflink = false; string best_match; while (end != string::npos) { end = path.find(':', begin); string part; if (end == string::npos) { part = path.substr(begin); } else { part = path.substr(begin, end - begin); } begin = end + 1; part = part + '/' + compiler; if (!lstat(part.c_str(), &s)) { if (S_ISLNK(s.st_mode)) { std::string buffer; const int ret = resolve_link(part, buffer); if (ret != 0) { log_error() << "resolve_link failed " << strerror(ret) << endl; continue; } string target = find_basename(buffer); if (target == rs_program_name || (after_selflink && (target == "tbcompiler" || target == "distcc" || target == "colorgcc"))) { // this is a link pointing to us, ignore it after_selflink = true; continue; } } else if (!S_ISREG(s.st_mode)) { // It's not a link and not a file, so just ignore it. We don't // want to accidentially attempt to execute directories. continue; } if( best_match.empty()) { best_match = part; } if (after_selflink) { return part; } } } if (best_match.empty()) { log_error() << "couldn't find any " << compiler << endl; } return best_match; } string compiler_path_lookup(const string& compiler) { return compiler_path_lookup_helper(compiler, compiler); } /* * Get the name of the compiler depedant on the * language of the job and the environment * variable set. This is useful for native cross-compilers. 
* (arm-linux-gcc for example) */ string find_compiler(const CompileJob &job) { if (job.language() == CompileJob::Lang_C) { if (const char *env = getenv("ICECC_CC")) { return env; } } if (job.language() == CompileJob::Lang_CXX) { if (const char *env = getenv("ICECC_CXX")) { return env; } } return compiler_path_lookup_helper(job.compilerName(), job.compilerPathname()); } bool compiler_is_clang(const CompileJob &job) { if (job.language() == CompileJob::Lang_Custom) { return false; } assert(job.compilerName().find('/') == string::npos); return job.compilerName().find("clang") != string::npos; } /* Clang works suboptimally when handling an already preprocessed source file, for example error messages quote (already preprocessed) parts of the source. Therefore it is better to only locally merge all #include files into the source file and do the actual preprocessing remotely together with compiling. There exists a Clang patch to implement option -frewrite-includes that does such #include rewritting, and it's been only recently merged upstream. This is similar with newer gcc versions, and gcc has -fdirectives-only, which works similarly to -frewrite-includes (although it's not exactly the same). */ bool compiler_only_rewrite_includes(const CompileJob &job) { if( job.blockRewriteIncludes()) { return false; } if (const char *rewrite_includes = getenv("ICECC_REMOTE_CPP")) { return (*rewrite_includes != '\0') && (*rewrite_includes != '0'); } if (!compiler_is_clang(job)) { #ifdef HAVE_GCC_FDIRECTIVES_ONLY // gcc has had -fdirectives-only for a long time, but clang on macosx poses as gcc // and fails when given the option. Since we right now detect whether a compiler // is gcc merely by checking the binary name, enable usage only if the configure // check found the option working. 
return true; #endif } if (compiler_is_clang(job)) { if (const char *rewrite_includes = getenv("ICECC_CLANG_REMOTE_CPP")) { return (*rewrite_includes != '\0') && (*rewrite_includes != '0'); } #ifdef HAVE_CLANG_REWRITE_INCLUDES // Assume that we use the same clang (as least as far as capabilities go) // as was available when icecream was built. ICECC_CLANG_REMOTE_CPP above // allows override, and the only case when this should realistically break // is if somebody downgrades their clang. return true; #endif } return false; } string clang_get_default_target(const CompileJob &job) { return read_command_output( find_compiler( job ) + " -dumpmachine" ); } static volatile int user_break_signal = 0; static volatile pid_t child_pid; static void handle_user_break(int sig) { dcc_unlock(); user_break_signal = sig; if (child_pid != 0) { kill(child_pid, sig); } signal(sig, handle_user_break); } /** * Invoke a compiler locally. This is, obviously, the alternative to * dcc_compile_remote(). * * The server does basically the same thing, but it doesn't call this * routine because it wants to overlap execution of the compiler with * copying the input from the network. * * This routine used to exec() the compiler in place of distcc. That * is slightly more efficient, because it avoids the need to create, * schedule, etc another process. The problem is that in that case we * can't clean up our temporary files, and (not so important) we can't * log our resource usage. * **/ int build_local(CompileJob &job, MsgChannel *local_daemon, struct rusage *used) { list arguments; string compiler_name = find_compiler(job); if (compiler_name.empty()) { log_error() << "could not find " << job.compilerName() << " in PATH." 
<< endl; return EXIT_NO_SUCH_FILE; } arguments.push_back(compiler_name); appendList(arguments, job.allFlags()); if (!job.inputFile().empty()) { arguments.push_back(job.inputFile()); } if (!job.outputFile().empty()) { arguments.push_back("-o"); arguments.push_back(job.outputFile()); } vector argv; string argstxt; for (list::const_iterator it = arguments.begin(); it != arguments.end(); ++it) { if( *it == "-fdirectives-only" ) continue; // pointless locally, and it can break things argv.push_back(strdup(it->c_str())); argstxt += ' '; argstxt += *it; } argv.push_back(0); trace() << "invoking:" << argstxt << endl; if (!local_daemon) { if (!dcc_lock_host()) { log_error() << "can't lock for local job" << endl; return EXIT_DISTCC_FAILED; } } bool color_output = job.language() != CompileJob::Lang_Custom && colorify_wanted(job); int pf[2]; if (color_output && pipe(pf)) { color_output = false; } if (used || color_output) { flush_debug(); child_pid = fork(); } if (child_pid == -1){ log_perror("fork failed"); } if (!child_pid) { dcc_increment_safeguard(job.language() == CompileJob::Lang_Custom ? SafeguardStepCustom : SafeguardStepCompiler); if (color_output) { if ((-1 == close(pf[0])) && (errno != EBADF)){ log_perror("close failed"); } if ((-1 == close(2)) && (errno != EBADF)){ log_perror("close failed"); } if (-1 == dup2(pf[1], 2)){ log_perror("dup2 failed"); } } execv(argv[0], &argv[0]); int exitcode = ( errno == ENOENT ? 
127 : 126 ); ostringstream errmsg; errmsg << "execv " << argv[0] << " failed"; log_perror(errmsg.str()); dcc_unlock(); { char buf[256]; snprintf(buf, sizeof(buf), "ICECC[%d]: %s:", getpid(), argv[0]); log_perror(buf); } _exit(exitcode); } for(vector::const_iterator i = argv.begin(); i != argv.end(); ++i){ free(*i); } argv.clear(); if (color_output) { if ((-1 == close(pf[1])) && (errno != EBADF)){ log_perror("close failed"); } } // setup interrupt signals, so that the JobLocalBeginMsg will // have a matching JobLocalDoneMsg void (*old_sigint)(int) = signal(SIGINT, handle_user_break); void (*old_sigterm)(int) = signal(SIGTERM, handle_user_break); void (*old_sigquit)(int) = signal(SIGQUIT, handle_user_break); void (*old_sighup)(int) = signal(SIGHUP, handle_user_break); if (color_output) { string s_ccout; char buf[250]; for (;;) { int r; while ((r = read(pf[0], buf, sizeof(buf) - 1)) > 0) { buf[r] = '\0'; s_ccout.append(buf); } if (r == 0) { break; } if (r < 0 && errno != EINTR) { break; } } colorify_output(s_ccout); } int status = 1; while (wait4(child_pid, &status, 0, used) < 0 && errno == EINTR) {} status = shell_exit_status(status); signal(SIGINT, old_sigint); signal(SIGTERM, old_sigterm); signal(SIGQUIT, old_sigquit); signal(SIGHUP, old_sighup); if (user_break_signal) { raise(user_break_signal); } dcc_unlock(); return status; } icecream-1.3.1/client/main.cpp000066400000000000000000000473611361626760200162160ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* * icecc -- A simple distributed compiler system * * Copyright (C) 2003, 2004 by the Icecream Authors * * based on distcc * Copyright (C) 2002, 2003 by Martin Pool * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ /* 4: The noise of a multitude in the * mountains, like as of a great people; a * tumultuous noise of the kingdoms of nations * gathered together: the LORD of hosts * mustereth the host of the battle. * -- Isaiah 13 */ #include "config.h" // Required by strsignal() on some systems. #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "client.h" #include "platform.h" #include "util.h" #include "argv.h" using namespace std; extern const char *rs_program_name; static void dcc_show_usage(void) { printf( "Usage:\n" " icecc [compiler] [compile options] -o OBJECT -c SOURCE\n" " icecc --build-native [compiler] [file...]\n" " icecc --help\n" "\n" "Options:\n" " --help explain usage and exit\n" " --version show version and exit\n" " --build-native create icecc environment\n" "Environment Variables:\n" " ICECC If set to \"no\", just exec the real compiler.\n" " If set to \"disable\", just exec the real compiler, but without\n" " notifying the daemon and only run one job at a time.\n" " ICECC_VERSION use a specific icecc environment, see icecc-create-env\n" " ICECC_DEBUG [info | warning | debug]\n" " sets verboseness of icecream client.\n" " ICECC_LOGFILE if set, additional debug information is logged to the specified file\n" " ICECC_REPEAT_RATE the number of jobs out of 1000 that should be\n" " compiled on multiple hosts to ensure that they're\n" " producing the same output. 
The default is 0.\n" " ICECC_PREFERRED_HOST overrides scheduler decisions if set.\n" " ICECC_CC set C compiler name (default gcc).\n" " ICECC_CXX set C++ compiler name (default g++).\n" " ICECC_REMOTE_CPP set to 1 or 0 to override remote preprocessing\n" " ICECC_IGNORE_UNVERIFIED if set, hosts where environment cannot be verified are not used.\n" " ICECC_EXTRAFILES additional files used in the compilation.\n" " ICECC_COLOR_DIAGNOSTICS set to 1 or 0 to override color diagnostics support.\n" " ICECC_CARET_WORKAROUND set to 1 or 0 to override gcc show caret workaround.\n" " ICECC_COMPRESSION if set, the libzstd compression level (1 to 19, default: 1)\n" " ICECC_ENV_COMPRESSION compression type for icecc environments [none|gzip|bzip2|zstd|xz]\n" " ICECC_SLOW_NETWORK set to 1 to send network data in smaller chunks\n" ); } static void icerun_show_usage(void) { printf( "Usage:\n" " icerun [command]\n" " icerun --help\n" "\n" "Options:\n" " --help explain usage and exit\n" " --version show version and exit\n" "Environment Variables:\n" " ICECC if set to \"no\", just exec the real command\n" " ICECC_DEBUG [info | warning | debug]\n" " sets verboseness of icecream client.\n" " ICECC_LOGFILE if set, additional debug information is logged to the specified file\n" "\n"); } volatile bool local = false; static void dcc_client_signalled(int whichsig) { if (!local) { #ifdef HAVE_STRSIGNAL log_info() << rs_program_name << ": " << strsignal(whichsig) << endl; #else log_info() << "terminated by signal " << whichsig << endl; #endif // dcc_cleanup_tempfiles(); } signal(whichsig, SIG_DFL); raise(whichsig); } static void dcc_client_catch_signals(void) { signal(SIGTERM, &dcc_client_signalled); signal(SIGINT, &dcc_client_signalled); signal(SIGHUP, &dcc_client_signalled); } /* * @param args Are [compiler] [extra files...] * Compiler can be "gcc", "clang" or a binary (possibly including a path). 
*/ static int create_native(char **args) { char **extrafiles = args; string machine_name = determine_platform(); string compiler = "gcc"; if (machine_name.compare(0, 6, "Darwin") == 0) { compiler = "clang"; } if (args[0]) { if( strcmp(args[0], "clang") == 0 || strcmp(args[0], "gcc") == 0 ) { compiler = args[ 0 ]; ++extrafiles; } else if( access( args[0], R_OK ) == 0 && access( args[ 0 ], X_OK ) != 0 ) { // backwards compatibility, the first argument is already an extra file } else { compiler = compiler_path_lookup( get_c_compiler( args[ 0 ] )); if (compiler.empty()) { log_error() << "compiler not found" << endl; return 1; } ++extrafiles; } } vector argv; argv.push_back(strdup(BINDIR "/icecc-create-env")); argv.push_back(strdup(compiler.c_str())); for (int extracount = 0; extrafiles[extracount]; extracount++) { argv.push_back(strdup("--addfile")); argv.push_back(strdup(extrafiles[extracount])); } if( const char* env_compression = getenv( "ICECC_ENV_COMPRESSION" )) { argv.push_back(strdup("--compression")); argv.push_back(strdup(env_compression)); } argv.push_back(NULL); execv(argv[0], argv.data()); ostringstream errmsg; errmsg << "execv " << argv[0] << " failed"; log_perror(errmsg.str()); return 1; } static MsgChannel* get_local_daemon() { MsgChannel* local_daemon; if (getenv("ICECC_TEST_SOCKET") == NULL) { /* try several options to reach the local daemon - 3 sockets, one TCP */ local_daemon = Service::createChannel("/var/run/icecc/iceccd.socket"); if (!local_daemon) { local_daemon = Service::createChannel("/var/run/iceccd.socket"); } if (!local_daemon && getenv("HOME")) { string path = getenv("HOME"); path += "/.iceccd.socket"; local_daemon = Service::createChannel(path); } if (!local_daemon) { local_daemon = Service::createChannel("127.0.0.1", 10245, 0/*timeout*/); } } else { local_daemon = Service::createChannel(getenv("ICECC_TEST_SOCKET")); if (!local_daemon) { log_error() << "test socket error" << endl; exit( EXIT_TEST_SOCKET_ERROR ); } } return local_daemon; } 
static void debug_arguments(int argc, char** argv, bool original) { string argstxt = argv[ 0 ]; for( int i = 1; i < argc; ++i ) { argstxt += ' '; argstxt += argv[ i ]; } if( original ) { trace() << "invoked as: " << argstxt << endl; } else { trace() << "expanded as: " << argstxt << endl; } } class ArgumentExpander { public: ArgumentExpander(int *argcp, char ***argvp) { oldargv = *argvp; oldargc = *argcp; expandargv(argcp, argvp); newargv = *argvp; if (newargv == oldargv) newargv = NULL; } ~ArgumentExpander() { if (newargv != NULL) freeargv(newargv); } bool changed() const { return newargv != NULL; } char** originalArgv() const { return oldargv; } int originalArgc() const { return oldargc; } private: char ** newargv; char ** oldargv; int oldargc; }; int main(int argc, char **argv) { // expand @responsefile contents to arguments in argv array ArgumentExpander expand(&argc, &argv); const char *env = getenv("ICECC_DEBUG"); int debug_level = Error; if (env) { if (!strcasecmp(env, "info")) { debug_level = Info; } else if (!strcasecmp(env, "warning") || !strcasecmp(env, "warnings")) { // "warnings was referred to in the --help output, handle it // backwards compatibility. 
debug_level = Warning; } else { // any other value debug_level = Debug; } } std::string logfile; if (const char *logfileEnv = getenv("ICECC_LOGFILE")) { logfile = logfileEnv; } setup_debug(debug_level, logfile, "ICECC"); debug_arguments(expand.originalArgc(), expand.originalArgv(), true); if( expand.changed()) { debug_arguments(argc, argv, false); } CompileJob job; bool icerun = false; string compiler_name = argv[0]; dcc_client_catch_signals(); std::string cwd = get_cwd(); if(!cwd.empty()) job.setWorkingDirectory( cwd ); if (find_basename(compiler_name) == rs_program_name) { if (argc > 1) { string arg = argv[1]; if (arg == "--help") { dcc_show_usage(); return 0; } if (arg == "--version") { printf("ICECC " VERSION "\n"); return 0; } if (arg == "--build-native") { return create_native(argv + 2); } if (arg.size() > 0) { job.setCompilerName(arg); job.setCompilerPathname(arg); } } } else if (find_basename(compiler_name) == "icerun") { icerun = true; if (argc > 1) { string arg = argv[1]; if (arg == "--help") { icerun_show_usage(); return 0; } if (arg == "--version") { printf("ICERUN " VERSION "\n"); return 0; } if (arg.size() > 0) { job.setCompilerName(arg); job.setCompilerPathname(arg); } } } else { std::string resolved; // check if it's a symlink to icerun if (resolve_link(compiler_name, resolved) == 0 && find_basename(resolved) == "icerun") { icerun = true; } } int sg_level = dcc_recursion_safeguard(); if (sg_level >= SafeguardMaxLevel) { log_error() << "icecream seems to have invoked itself recursively!" << endl; return EXIT_RECURSION; } if (sg_level > 0) { log_info() << "recursive invocation from icerun" << endl; } /* Ignore SIGPIPE; we consistently check error codes and will * see the EPIPE. 
*/ dcc_ignore_sigpipe(1); // Connect to the daemon as early as possible, so that in parallel builds there // the daemon has as many connections as possible when we start asking for a remote // node to build, allowing the daemon/scheduler to do load balancing based on the number // of expected build jobs. MsgChannel *local_daemon = NULL; const char *icecc = getenv("ICECC"); if (icecc == NULL || strcasecmp(icecc, "disable") != 0) { local_daemon = get_local_daemon(); } list extrafiles; local |= analyse_argv(argv, job, icerun, &extrafiles); /* If ICECC is set to disable, then run job locally, without contacting the daemon at all. File-based locking will still ensure that all calls are serialized up to the number of local cpus available. If ICECC is set to no, the job is run locally as well, but it is serialized using the daemon. */ if (icecc && !strcasecmp(icecc, "disable")) { assert( local_daemon == NULL ); return build_local(job, 0); } if (icecc && !strcasecmp(icecc, "no")) { local = true; } if (!local_daemon) { log_warning() << "no local daemon found" << endl; return build_local(job, 0); } if (const char *extrafilesenv = getenv("ICECC_EXTRAFILES")) { for (;;) { const char *colon = strchr(extrafilesenv, ':'); string file; if (colon == NULL) { file = extrafilesenv; } else { file = string(extrafilesenv, colon - extrafilesenv); } file = get_absfilename(file); struct stat st; if (stat(file.c_str(), &st) == 0) { extrafiles.push_back(file); } else { log_warning() << "File in ICECC_EXTRAFILES not found: " << file << endl; local = true; break; } if (colon == NULL) { break; } extrafilesenv = colon + 1; } } Environments envs; if (!local) { if (getenv("ICECC_VERSION")) { // if set, use it, otherwise take default try { envs = parse_icecc_version(job.targetPlatform(), find_prefix(job.compilerName())); } catch (std::exception& e) { // we just build locally log_error() << "An exception was handled parsing the icecc version. " "Will build locally. 
Exception text was:\n" << e.what() << "\n"; } } else if (!extrafiles.empty() && !IS_PROTOCOL_32(local_daemon)) { log_warning() << "Local daemon is too old to handle extra files." << endl; local = true; } else { Msg *umsg = NULL; string compiler; if( IS_PROTOCOL_41(local_daemon)) compiler = get_absfilename( find_compiler( job )); else // Older daemons understood only two hardcoded compilers. compiler = compiler_is_clang(job) ? "clang" : "gcc"; string env_compression; // empty = default if( const char* icecc_env_compression = getenv( "ICECC_ENV_COMPRESSION" )) env_compression = icecc_env_compression; trace() << "asking for native environment for " << compiler << endl; if (!local_daemon->send_msg(GetNativeEnvMsg(compiler, extrafiles, env_compression))) { log_warning() << "failed to write get native environment" << endl; local = true; } else { // the timeout is high because it creates the native version umsg = local_daemon->get_msg(4 * 60); } string native; if (umsg && umsg->type == M_NATIVE_ENV) { native = static_cast(umsg)->nativeVersion; } if (native.empty() || ::access(native.c_str(), R_OK) < 0) { log_warning() << "daemon can't determine native environment. " "Set $ICECC_VERSION to an icecc environment.\n"; } else { envs.push_back(make_pair(job.targetPlatform(), native)); log_info() << "native " << native << endl; } delete umsg; } // we set it to local so we tell the local daemon about it - avoiding file locking if (envs.size() == 0) { local = true; } for (Environments::const_iterator it = envs.begin(); it != envs.end(); ++it) { trace() << "env: " << it->first << " '" << it->second << "'" << endl; if (::access(it->second.c_str(), R_OK) < 0) { log_error() << "can't read environment " << it->second << endl; local = true; } } } int ret; if (!local) { try { // How many times out of 1000 should we recompile a job on // multiple hosts to confirm that the results are the same? const char *s = getenv("ICECC_REPEAT_RATE"); int rate = s ? 
atoi(s) : 0; ret = build_remote(job, local_daemon, envs, rate); /* We have to tell the local daemon that everything is fine and that the remote daemon will send the scheduler our done msg. If we don't, the local daemon will have to assume the job failed and tell the scheduler - and that fail message may arrive earlier than the remote daemon's success msg. */ if (ret == 0) { local_daemon->send_msg(EndMsg()); } } catch (remote_error& error) { // log the 'local cpp invocation failed' message by default, so that it's more // obvious why the cpp output is there (possibly) twice if( error.errorCode == 103 ) log_error() << "local build forced by remote exception: " << error.what() << endl; else log_info() << "local build forced by remote exception: " << error.what() << endl; local = true; } catch (client_error& error) { if (remote_daemon.size()) { log_error() << "got exception " << error.what() << " (" << remote_daemon.c_str() << ") " << endl; } else { log_error() << "got exception " << error.what() << " (this should be an exception!)" << endl; } #if 0 /* currently debugging a client? throw an error then */ if (debug_level > Error) { return error.errorCode; } #endif local = true; } if (local) { // TODO It'd be better to reuse the connection, but the daemon // internal state gets confused for some reason, so work that around // for now by using a new connection. delete local_daemon; local_daemon = get_local_daemon(); if (!local_daemon) { log_warning() << "no local daemon found" << endl; return build_local(job, 0); } } } if (local) { log_block b("building_local"); struct rusage ru; Msg *startme = 0L; /* Inform the daemon that we like to start a job. */ if (local_daemon->send_msg(JobLocalBeginMsg(0, get_absfilename(job.outputFile())))) { /* Now wait until the daemon gives us the start signal. 40 minutes should be enough for all normal compile or link jobs. 
*/ startme = local_daemon->get_msg(40 * 60); } /* If we can't talk to the daemon anymore we need to fall back to lock file locking. */ if (!startme || startme->type != M_JOB_LOCAL_BEGIN) { delete startme; delete local_daemon; return build_local(job, 0); } ret = build_local(job, local_daemon, &ru); delete startme; } delete local_daemon; return ret; } icecream-1.3.1/client/md5.c000066400000000000000000000265261361626760200154170ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* Copyright (C) 1999 Aladdin Enterprises. All rights reserved. This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. L. Peter Deutsch ghost@aladdin.com */ /* Independent implementation of MD5 (RFC 1321). This code implements the MD5 Algorithm defined in RFC 1321. It is derived directly from the text of the RFC and not from the reference implementation. The original and principal author of md5.c is L. Peter Deutsch . Other authors are noted in the change history that follows (in reverse chronological order): 1999-11-04 lpd Edited comments slightly for automatic TOC extraction. 1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5). 
1999-05-03 lpd Original version. */ #include "md5.h" #include #ifdef TEST /* * Compile with -DTEST to create a self-contained executable test program. * The test program should print out the same values as given in section * A.5 of RFC 1321, reproduced below. */ #include main() { static const char *const test[7] = { "", /*d41d8cd98f00b204e9800998ecf8427e*/ "945399884.61923487334tuvga", /*0cc175b9c0f1b6a831c399e269772661*/ "abc", /*900150983cd24fb0d6963f7d28e17f72*/ "message digest", /*f96b697d7cb7938d525a2f31aaf161d0*/ "abcdefghijklmnopqrstuvwxyz", /*c3fcd3d76192e4007dfb496cca67e13b*/ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", /*d174ab98d277d9f5a5611c2c9f419d9f*/ "12345678901234567890123456789012345678901234567890123456789012345678901234567890" /*57edf4a22be3c955ac49da2e2107b67a*/ }; int i; for (i = 0; i < 7; ++i) { md5_state_t state; md5_byte_t digest[16]; int di; md5_init(&state); md5_append(&state, (const md5_byte_t *)test[i], strlen(test[i])); md5_finish(&state, digest); printf("MD5 (\"%s\") = ", test[i]); for (di = 0; di < 16; ++di) { printf("%02x", digest[di]); } printf("\n"); } return 0; } #endif /* TEST */ /* * For reference, here is the program that computed the T values. */ #if 0 #include main() { int i; for (i = 1; i <= 64; ++i) { unsigned long v = (unsigned long)(4294967296.0 * fabs(sin((double)i))); printf("#define T%d 0x%08lx\n", i, v); } return 0; } #endif /* * End of T computation program. 
*/ #define T1 0xd76aa478 #define T2 0xe8c7b756 #define T3 0x242070db #define T4 0xc1bdceee #define T5 0xf57c0faf #define T6 0x4787c62a #define T7 0xa8304613 #define T8 0xfd469501 #define T9 0x698098d8 #define T10 0x8b44f7af #define T11 0xffff5bb1 #define T12 0x895cd7be #define T13 0x6b901122 #define T14 0xfd987193 #define T15 0xa679438e #define T16 0x49b40821 #define T17 0xf61e2562 #define T18 0xc040b340 #define T19 0x265e5a51 #define T20 0xe9b6c7aa #define T21 0xd62f105d #define T22 0x02441453 #define T23 0xd8a1e681 #define T24 0xe7d3fbc8 #define T25 0x21e1cde6 #define T26 0xc33707d6 #define T27 0xf4d50d87 #define T28 0x455a14ed #define T29 0xa9e3e905 #define T30 0xfcefa3f8 #define T31 0x676f02d9 #define T32 0x8d2a4c8a #define T33 0xfffa3942 #define T34 0x8771f681 #define T35 0x6d9d6122 #define T36 0xfde5380c #define T37 0xa4beea44 #define T38 0x4bdecfa9 #define T39 0xf6bb4b60 #define T40 0xbebfbc70 #define T41 0x289b7ec6 #define T42 0xeaa127fa #define T43 0xd4ef3085 #define T44 0x04881d05 #define T45 0xd9d4d039 #define T46 0xe6db99e5 #define T47 0x1fa27cf8 #define T48 0xc4ac5665 #define T49 0xf4292244 #define T50 0x432aff97 #define T51 0xab9423a7 #define T52 0xfc93a039 #define T53 0x655b59c3 #define T54 0x8f0ccc92 #define T55 0xffeff47d #define T56 0x85845dd1 #define T57 0x6fa87e4f #define T58 0xfe2ce6e0 #define T59 0xa3014314 #define T60 0x4e0811a1 #define T61 0xf7537e82 #define T62 0xbd3af235 #define T63 0x2ad7d2bb #define T64 0xeb86d391 static void md5_process(md5_state_t *pms, const md5_byte_t *data /*[64]*/) { md5_word_t a = pms->abcd[0], b = pms->abcd[1], c = pms->abcd[2], d = pms->abcd[3]; md5_word_t t; #ifndef ARCH_IS_BIG_ENDIAN # define ARCH_IS_BIG_ENDIAN 1 /* slower, default implementation */ #endif #if ARCH_IS_BIG_ENDIAN /* * On big-endian machines, we must arrange the bytes in the right * order. (This also works on machines of unknown byte order.) 
*/ md5_word_t X[16]; const md5_byte_t *xp = data; int i; for (i = 0; i < 16; ++i, xp += 4) { X[i] = xp[0] + (xp[1] << 8) + (xp[2] << 16) + (xp[3] << 24); } #else /* !ARCH_IS_BIG_ENDIAN */ /* * On little-endian machines, we can process properly aligned data * without copying it. */ md5_word_t xbuf[16]; const md5_word_t *X; if (!((data - (const md5_byte_t *)0) & 3)) { /* data are properly aligned */ X = (const md5_word_t *)data; } else { /* not aligned */ memcpy(xbuf, data, 64); X = xbuf; } #endif #define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32 - (n)))) /* Round 1. */ /* Let [abcd k s i] denote the operation a = b + ((a + F(b,c,d) + X[k] + T[i]) <<< s). */ #define F(x, y, z) (((x) & (y)) | (~(x) & (z))) #define SET(a, b, c, d, k, s, Ti)\ t = a + F(b,c,d) + X[k] + Ti;\ a = ROTATE_LEFT(t, s) + b /* Do the following 16 operations. */ SET(a, b, c, d, 0, 7, T1); SET(d, a, b, c, 1, 12, T2); SET(c, d, a, b, 2, 17, T3); SET(b, c, d, a, 3, 22, T4); SET(a, b, c, d, 4, 7, T5); SET(d, a, b, c, 5, 12, T6); SET(c, d, a, b, 6, 17, T7); SET(b, c, d, a, 7, 22, T8); SET(a, b, c, d, 8, 7, T9); SET(d, a, b, c, 9, 12, T10); SET(c, d, a, b, 10, 17, T11); SET(b, c, d, a, 11, 22, T12); SET(a, b, c, d, 12, 7, T13); SET(d, a, b, c, 13, 12, T14); SET(c, d, a, b, 14, 17, T15); SET(b, c, d, a, 15, 22, T16); #undef SET /* Round 2. */ /* Let [abcd k s i] denote the operation a = b + ((a + G(b,c,d) + X[k] + T[i]) <<< s). */ #define G(x, y, z) (((x) & (z)) | ((y) & ~(z))) #define SET(a, b, c, d, k, s, Ti)\ t = a + G(b,c,d) + X[k] + Ti;\ a = ROTATE_LEFT(t, s) + b /* Do the following 16 operations. 
*/ SET(a, b, c, d, 1, 5, T17); SET(d, a, b, c, 6, 9, T18); SET(c, d, a, b, 11, 14, T19); SET(b, c, d, a, 0, 20, T20); SET(a, b, c, d, 5, 5, T21); SET(d, a, b, c, 10, 9, T22); SET(c, d, a, b, 15, 14, T23); SET(b, c, d, a, 4, 20, T24); SET(a, b, c, d, 9, 5, T25); SET(d, a, b, c, 14, 9, T26); SET(c, d, a, b, 3, 14, T27); SET(b, c, d, a, 8, 20, T28); SET(a, b, c, d, 13, 5, T29); SET(d, a, b, c, 2, 9, T30); SET(c, d, a, b, 7, 14, T31); SET(b, c, d, a, 12, 20, T32); #undef SET /* Round 3. */ /* Let [abcd k s t] denote the operation a = b + ((a + H(b,c,d) + X[k] + T[i]) <<< s). */ #define H(x, y, z) ((x) ^ (y) ^ (z)) #define SET(a, b, c, d, k, s, Ti)\ t = a + H(b,c,d) + X[k] + Ti;\ a = ROTATE_LEFT(t, s) + b /* Do the following 16 operations. */ SET(a, b, c, d, 5, 4, T33); SET(d, a, b, c, 8, 11, T34); SET(c, d, a, b, 11, 16, T35); SET(b, c, d, a, 14, 23, T36); SET(a, b, c, d, 1, 4, T37); SET(d, a, b, c, 4, 11, T38); SET(c, d, a, b, 7, 16, T39); SET(b, c, d, a, 10, 23, T40); SET(a, b, c, d, 13, 4, T41); SET(d, a, b, c, 0, 11, T42); SET(c, d, a, b, 3, 16, T43); SET(b, c, d, a, 6, 23, T44); SET(a, b, c, d, 9, 4, T45); SET(d, a, b, c, 12, 11, T46); SET(c, d, a, b, 15, 16, T47); SET(b, c, d, a, 2, 23, T48); #undef SET /* Round 4. */ /* Let [abcd k s t] denote the operation a = b + ((a + I(b,c,d) + X[k] + T[i]) <<< s). */ #define I(x, y, z) ((y) ^ ((x) | ~(z))) #define SET(a, b, c, d, k, s, Ti)\ t = a + I(b,c,d) + X[k] + Ti;\ a = ROTATE_LEFT(t, s) + b /* Do the following 16 operations. 
*/ SET(a, b, c, d, 0, 6, T49); SET(d, a, b, c, 7, 10, T50); SET(c, d, a, b, 14, 15, T51); SET(b, c, d, a, 5, 21, T52); SET(a, b, c, d, 12, 6, T53); SET(d, a, b, c, 3, 10, T54); SET(c, d, a, b, 10, 15, T55); SET(b, c, d, a, 1, 21, T56); SET(a, b, c, d, 8, 6, T57); SET(d, a, b, c, 15, 10, T58); SET(c, d, a, b, 6, 15, T59); SET(b, c, d, a, 13, 21, T60); SET(a, b, c, d, 4, 6, T61); SET(d, a, b, c, 11, 10, T62); SET(c, d, a, b, 2, 15, T63); SET(b, c, d, a, 9, 21, T64); #undef SET /* Then perform the following additions. (That is increment each of the four registers by the value it had before this block was started.) */ pms->abcd[0] += a; pms->abcd[1] += b; pms->abcd[2] += c; pms->abcd[3] += d; } void md5_init(md5_state_t *pms) { pms->count[0] = pms->count[1] = 0; pms->abcd[0] = 0x67452301; pms->abcd[1] = 0xefcdab89; pms->abcd[2] = 0x98badcfe; pms->abcd[3] = 0x10325476; } void md5_append(md5_state_t *pms, const md5_byte_t *data, int nbytes) { const md5_byte_t *p = data; int left = nbytes; int offset = (pms->count[0] >> 3) & 63; md5_word_t nbits = (md5_word_t)(nbytes << 3); if (nbytes <= 0) { return; } /* Update the message length. */ pms->count[1] += nbytes >> 29; pms->count[0] += nbits; if (pms->count[0] < nbits) { pms->count[1]++; } /* Process an initial partial block. */ if (offset) { int copy = (offset + nbytes > 64 ? 64 - offset : nbytes); memcpy(pms->buf + offset, p, copy); if (offset + copy < 64) { return; } p += copy; left -= copy; md5_process(pms, pms->buf); } /* Process full blocks. */ for (; left >= 64; p += 64, left -= 64) { md5_process(pms, p); } /* Process a final partial block. 
*/ if (left) { memcpy(pms->buf, p, left); } } void md5_finish(md5_state_t *pms, md5_byte_t digest[16]) { static const md5_byte_t pad[64] = { 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; md5_byte_t data[8]; int i; /* Save the length before padding. */ for (i = 0; i < 8; ++i) { data[i] = (md5_byte_t)(pms->count[i >> 2] >> ((i & 3) << 3)); } /* Pad to 56 bytes mod 64. */ md5_append(pms, pad, ((55 - (pms->count[0] >> 3)) & 63) + 1); /* Append the length. */ md5_append(pms, data, 8); for (i = 0; i < 16; ++i) { digest[i] = (md5_byte_t)(pms->abcd[i >> 2] >> ((i & 3) << 3)); } } icecream-1.3.1/client/md5.h000066400000000000000000000047761361626760200154270ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* Copyright (C) 1999 Aladdin Enterprises. All rights reserved. This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. L. Peter Deutsch ghost@aladdin.com */ /* Independent implementation of MD5 (RFC 1321). This code implements the MD5 Algorithm defined in RFC 1321. 
It is derived directly from the text of the RFC and not from the reference implementation. The original and principal author of md5.h is L. Peter Deutsch . Other authors are noted in the change history that follows (in reverse chronological order): 1999-11-04 lpd Edited comments slightly for automatic TOC extraction. 1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5); added conditionalization for C++ compilation from Martin Purschke . 1999-05-03 lpd Original version. */ #ifndef md5_INCLUDED # define md5_INCLUDED typedef unsigned char md5_byte_t; /* 8-bit byte */ typedef unsigned int md5_word_t; /* 32-bit word */ /* Define the state of the MD5 Algorithm. */ typedef struct md5_state_s { md5_word_t count[2]; /* message length in bits, lsw first */ md5_word_t abcd[4]; /* digest buffer */ md5_byte_t buf[64]; /* accumulate block */ } md5_state_t; #ifdef __cplusplus extern "C" { #endif /* Initialize the algorithm. */ void md5_init(md5_state_t *pms); /* Append a string to the message. */ void md5_append(md5_state_t *pms, const md5_byte_t *data, int nbytes); /* Finish the message and return the digest. */ void md5_finish(md5_state_t *pms, md5_byte_t digest[16]); #ifdef __cplusplus } /* end extern "C" */ #endif #endif /* md5_INCLUDED */ icecream-1.3.1/client/remote.cpp000066400000000000000000001077531361626760200165670ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. Copyright (c) 2004 Stephan Kulow 2002, 2003 by Martin Pool This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "config.h" #include #include #include #include #ifdef __FreeBSD__ // Grmbl Why is this needed? We don't use readv/writev #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include "client.h" #include "tempfile.h" #include "md5.h" #include "util.h" #include "services/util.h" #ifndef O_LARGEFILE #define O_LARGEFILE 0 #endif namespace { struct CharBufferDeleter { char *buf; explicit CharBufferDeleter(char *b) : buf(b) {} ~CharBufferDeleter() { free(buf); } }; } using namespace std; std::string remote_daemon; Environments parse_icecc_version(const string &target_platform, const string &prefix) { Environments envs; string icecc_version = getenv("ICECC_VERSION"); assert(!icecc_version.empty()); // free after the C++-Programming-HOWTO string::size_type lastPos = icecc_version.find_first_not_of(',', 0); string::size_type pos = icecc_version.find(',', lastPos); bool def_targets = icecc_version.find('=') != string::npos; list platforms; while (pos != string::npos || lastPos != string::npos) { string couple = icecc_version.substr(lastPos, pos - lastPos); string platform = target_platform; string version = couple; string::size_type colon = couple.find(':'); if (colon != string::npos) { platform = couple.substr(0, colon); version = couple.substr(colon + 1, couple.length()); } // Skip delimiters. 
Note the "not_of" lastPos = icecc_version.find_first_not_of(',', pos); // Find next "non-delimiter" pos = icecc_version.find(',', lastPos); if (def_targets) { colon = version.find('='); if (colon != string::npos) { if (prefix != version.substr(colon + 1, version.length())) { continue; } version = version.substr(0, colon); } else if (!prefix.empty()) { continue; } } if (find(platforms.begin(), platforms.end(), platform) != platforms.end()) { log_error() << "there are two environments for platform " << platform << " - ignoring " << version << endl; continue; } if (::access(version.c_str(), R_OK) < 0) { log_error() << "$ICECC_VERSION has to point to an existing file to be installed " << version << endl; continue; } struct stat st; if (lstat(version.c_str(), &st) || !S_ISREG(st.st_mode) || st.st_size < 500) { log_error() << "$ICECC_VERSION has to point to an existing file to be installed " << version << endl; continue; } envs.push_back(make_pair(platform, version)); platforms.push_back(platform); } return envs; } static bool endswith(const string &orig, const char *suff, string &ret) { size_t len = strlen(suff); if (orig.size() > len && orig.substr(orig.size() - len) == suff) { ret = orig.substr(0, orig.size() - len); return true; } return false; } static Environments rip_out_paths(const Environments &envs, map &version_map, map &versionfile_map) { version_map.clear(); Environments env2; static const char *suffs[] = { ".tar.xz", ".tar.zst", ".tar.bz2", ".tar.gz", ".tar", ".tgz", NULL }; string versfile; for (Environments::const_iterator it = envs.begin(); it != envs.end(); ++it) { for (int i = 0; suffs[i] != NULL; i++) if (endswith(it->second, suffs[i], versfile)) { versionfile_map[it->first] = it->second; versfile = find_basename(versfile); version_map[it->first] = versfile; env2.push_back(make_pair(it->first, versfile)); } } return env2; } string get_absfilename(const string &_file) { string file; if (_file.empty()) { return _file; } if (_file.at(0) != '/') { file = 
get_cwd() + '/' + _file; } else { file = _file; } string dots = "/../"; string::size_type idx = file.find(dots); while (idx != string::npos) { if (idx == 0) { file.replace(0, dots.length(), "/"); } else { string::size_type slash = file.rfind('/', idx - 1); file.replace(slash, idx-slash+dots.length(), "/"); } idx = file.find(dots); } idx = file.find("/./"); while (idx != string::npos) { file.replace(idx, 3, "/"); idx = file.find("/./"); } idx = file.find("//"); while (idx != string::npos) { file.replace(idx, 2, "/"); idx = file.find("//"); } return file; } static UseCSMsg *get_server(MsgChannel *local_daemon) { Msg *umsg = local_daemon->get_msg(4 * 60); if (!umsg || umsg->type != M_USE_CS) { log_warning() << "reply was not expected use_cs " << (umsg ? (char)umsg->type : '0') << endl; ostringstream unexpected_msg; unexpected_msg << "Error 1 - expected use_cs reply, but got " << (umsg ? (char)umsg->type : '0') << " instead"; delete umsg; throw client_error(1, unexpected_msg.str()); } UseCSMsg *usecs = dynamic_cast(umsg); return usecs; } static void check_for_failure(Msg *msg, MsgChannel *cserver) { if (msg && msg->type == M_STATUS_TEXT) { log_error() << "Remote status (compiled on " << cserver->name << "): " << static_cast(msg)->text << endl; throw client_error(23, "Error 23 - Remote status (compiled on " + cserver->name + ")\n" + static_cast(msg)->text ); } } static void write_fd_to_server(int fd, MsgChannel *cserver) { unsigned char buffer[100000]; // some random but huge number off_t offset = 0; size_t uncompressed = 0; size_t compressed = 0; do { ssize_t bytes; do { bytes = read(fd, buffer + offset, sizeof(buffer) - offset); if (bytes < 0 && (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK)) { continue; } if (bytes < 0) { log_perror("write_fd_to_server() reading from fd"); close(fd); throw client_error(16, "Error 16 - error reading local file"); } break; } while (1); offset += bytes; if (!bytes || offset == sizeof(buffer)) { if (offset) { FileChunkMsg 
fcmsg(buffer, offset); if (!cserver->send_msg(fcmsg)) { Msg *m = cserver->get_msg(2); check_for_failure(m, cserver); log_error() << "write of source chunk to host " << cserver->name.c_str() << endl; log_perror("failed "); close(fd); throw client_error(15, "Error 15 - write to host failed"); } uncompressed += fcmsg.len; compressed += fcmsg.compressed; offset = 0; } if (!bytes) { break; } } } while (1); if (compressed) trace() << "sent " << compressed << " bytes (" << (compressed * 100 / uncompressed) << "%)" << endl; if ((-1 == close(fd)) && (errno != EBADF)){ log_perror("close failed"); } } static void receive_file(const string& output_file, MsgChannel* cserver) { string tmp_file = output_file + "_icetmp"; int obj_fd = open(tmp_file.c_str(), O_CREAT | O_TRUNC | O_WRONLY | O_LARGEFILE, 0666); if (obj_fd == -1) { std::string errmsg("can't create "); errmsg += tmp_file + ":"; log_perror(errmsg.c_str()); throw client_error(31, "Error 31 - " + errmsg); } Msg* msg = 0; size_t uncompressed = 0; size_t compressed = 0; while (1) { delete msg; msg = cserver->get_msg(40); if (!msg) { // the network went down? 
unlink(tmp_file.c_str()); throw client_error(19, "Error 19 - (network failure?)"); } check_for_failure(msg, cserver); if (msg->type == M_END) { break; } if (msg->type != M_FILE_CHUNK) { unlink(tmp_file.c_str()); delete msg; throw client_error(20, "Error 20 - unexpected message"); } FileChunkMsg *fcmsg = dynamic_cast(msg); compressed += fcmsg->compressed; uncompressed += fcmsg->len; if (write(obj_fd, fcmsg->buffer, fcmsg->len) != (ssize_t)fcmsg->len) { log_perror("Error writing file: "); unlink(tmp_file.c_str()); delete msg; throw client_error(21, "Error 21 - error writing file"); } } if (uncompressed) trace() << "got " << compressed << " bytes (" << (compressed * 100 / uncompressed) << "%)" << endl; delete msg; if (close(obj_fd) != 0) { log_perror("Failed to close temporary file: "); if(unlink(tmp_file.c_str()) != 0) { log_perror("delete temporary file - might be related to close failure above"); } throw client_error(30, "Error 30 - error closing temp file"); } if(rename(tmp_file.c_str(), output_file.c_str()) != 0) { log_perror("Failed to rename temporary file: "); if(unlink(tmp_file.c_str()) != 0) { log_perror("delete temporary file - might be related to rename failure above"); } throw client_error(30, "Error 30 - error closing temp file"); } } static int build_remote_int(CompileJob &job, UseCSMsg *usecs, MsgChannel *local_daemon, const string &environment, const string &version_file, const char *preproc_file, bool output) { string hostname = usecs->hostname; unsigned int port = usecs->port; int job_id = usecs->job_id; bool got_env = usecs->got_env; job.setJobID(job_id); job.setEnvironmentVersion(environment); // hoping on the scheduler's wisdom trace() << "Have to use host " << hostname << ":" << port << " - Job ID: " << job.jobID() << " - env: " << usecs->host_platform << " - has env: " << (got_env ? 
"true" : "false") << " - match j: " << usecs->matched_job_id << "\n"; int status = 255; MsgChannel *cserver = 0; try { cserver = Service::createChannel(hostname, port, 10); if (!cserver) { log_error() << "no server found behind given hostname " << hostname << ":" << port << endl; throw client_error(2, "Error 2 - no server found at " + hostname); } if (!got_env) { log_block b("Transfer Environment"); // transfer env struct stat buf; if (stat(version_file.c_str(), &buf)) { log_perror("error stat'ing file") << "\t" << version_file << endl; throw client_error(4, "Error 4 - unable to stat version file"); } EnvTransferMsg msg(job.targetPlatform(), job.environmentVersion()); if (!cserver->send_msg(msg)) { throw client_error(6, "Error 6 - send environment to remote failed"); } int env_fd = open(version_file.c_str(), O_RDONLY); if (env_fd < 0) { throw client_error(5, "Error 5 - unable to open version file:\n\t" + version_file); } write_fd_to_server(env_fd, cserver); if (!cserver->send_msg(EndMsg())) { log_error() << "write of environment failed" << endl; throw client_error(8, "Error 8 - write environment to remote failed"); } if (IS_PROTOCOL_31(cserver)) { VerifyEnvMsg verifymsg(job.targetPlatform(), job.environmentVersion()); if (!cserver->send_msg(verifymsg)) { throw client_error(22, "Error 22 - error sending environment"); } Msg *verify_msg = cserver->get_msg(60); if (verify_msg && verify_msg->type == M_VERIFY_ENV_RESULT) { if (!static_cast(verify_msg)->ok) { // The remote can't handle the environment at all (e.g. kernel too old), // mark it as never to be used again for this environment. log_info() << "Host " << hostname << " did not successfully verify environment." 
<< endl; BlacklistHostEnvMsg blacklist(job.targetPlatform(), job.environmentVersion(), hostname); local_daemon->send_msg(blacklist); delete verify_msg; throw client_error(24, "Error 24 - remote " + hostname + " unable to handle environment"); } else trace() << "Verified host " << hostname << " for environment " << job.environmentVersion() << " (" << job.targetPlatform() << ")" << endl; delete verify_msg; } else { delete verify_msg; throw client_error(25, "Error 25 - other error verifying environment on remote"); } } } if (!IS_PROTOCOL_31(cserver) && ignore_unverified()) { log_warning() << "Host " << hostname << " cannot be verified." << endl; throw client_error(26, "Error 26 - environment on " + hostname + " cannot be verified"); } // Older remotes don't set properly -x argument. if(( job.language() == CompileJob::Lang_OBJC || job.language() == CompileJob::Lang_OBJCXX ) && !IS_PROTOCOL_38(cserver)) { job.appendFlag( "-x", Arg_Remote ); job.appendFlag( job.language() == CompileJob::Lang_OBJC ? "objective-c" : "objective-c++", Arg_Remote ); } CompileFileMsg compile_file(&job); { log_block b("send compile_file"); if (!cserver->send_msg(compile_file)) { log_info() << "write of job failed" << endl; throw client_error(9, "Error 9 - error sending file to remote"); } } if (!preproc_file) { int sockets[2]; if (pipe(sockets) != 0) { log_perror("build_remote_in pipe"); /* for all possible cases, this is something severe */ throw client_error(32, "Error 18 - (fork error?)"); } if (!dcc_lock_host()) { log_error() << "can't lock for local cpp" << endl; return EXIT_DISTCC_FAILED; } HostUnlock hostUnlock; // automatic dcc_unlock() /* This will fork, and return the pid of the child. It will not return for the child itself. If it returns normally it will have closed the write fd, i.e. sockets[1]. 
*/ pid_t cpp_pid = call_cpp(job, sockets[1], sockets[0]); if (cpp_pid == -1) { throw client_error(18, "Error 18 - (fork error?)"); } try { log_block bl2("write_fd_to_server from cpp"); write_fd_to_server(sockets[0], cserver); } catch (...) { kill(cpp_pid, SIGTERM); throw; } log_block wait_cpp("wait for cpp"); while (waitpid(cpp_pid, &status, 0) < 0 && errno == EINTR) {} if (shell_exit_status(status) != 0) { // failure delete cserver; cserver = 0; log_warning() << "call_cpp process failed with exit status " << shell_exit_status(status) << endl; // GCC's -fdirectives-only has a number of cases that it doesn't handle properly, // so if in such mode preparing the source fails, try again recompiling locally. // This will cause double error in case it is a real error, but it'll build successfully if // it was just -fdirectives-only being broken. In other cases fail directly, Clang's // -frewrite-includes is much more reliable than -fdirectives-only, so is GCC's plain -E. if( !compiler_is_clang(job) && compiler_only_rewrite_includes(job)) throw remote_error(103, "Error 103 - local cpp invocation failed, trying to recompile locally"); else return shell_exit_status(status); } } else { int cpp_fd = open(preproc_file, O_RDONLY); if (cpp_fd < 0) { throw client_error(11, "Error 11 - unable to open preprocessed file"); } log_block cpp_block("write_fd_to_server preprocessed"); write_fd_to_server(cpp_fd, cserver); } if (!cserver->send_msg(EndMsg())) { log_info() << "write of end failed" << endl; throw client_error(12, "Error 12 - failed to send file to remote"); } Msg *msg; { log_block wait_cs("wait for cs"); msg = cserver->get_msg(12 * 60); if (!msg) { throw client_error(14, "Error 14 - error reading message from remote"); } } check_for_failure(msg, cserver); if (msg->type != M_COMPILE_RESULT) { log_warning() << "waited for compile result, but got " << (char)msg->type << endl; delete msg; throw client_error(13, "Error 13 - did not get compile response message"); } 
CompileResultMsg *crmsg = dynamic_cast(msg); assert(crmsg); status = crmsg->status; if (status && crmsg->was_out_of_memory) { delete crmsg; log_info() << "the server ran out of memory, recompiling locally" << endl; throw remote_error(101, "Error 101 - the server ran out of memory, recompiling locally"); } if (output) { if ((!crmsg->out.empty() || !crmsg->err.empty()) && output_needs_workaround(job)) { delete crmsg; log_info() << "command needs stdout/stderr workaround, recompiling locally" << endl; log_info() << "(set ICECC_CARET_WORKAROUND=0 to override)" << endl; throw remote_error(102, "Error 102 - command needs stdout/stderr workaround, recompiling locally"); } if (crmsg->err.find("file not found") != string::npos) { delete crmsg; log_info() << "remote is missing file, recompiling locally" << endl; throw remote_error(104, "Error 104 - remote is missing file, recompiling locally"); } ignore_result(write(STDOUT_FILENO, crmsg->out.c_str(), crmsg->out.size())); if (colorify_wanted(job)) { colorify_output(crmsg->err); } else { ignore_result(write(STDERR_FILENO, crmsg->err.c_str(), crmsg->err.size())); } if (status && (crmsg->err.length() || crmsg->out.length())) { log_info() << "Compiled on " << hostname << endl; } } bool have_dwo_file = crmsg->have_dwo_file; delete crmsg; assert(!job.outputFile().empty()); if (status == 0) { receive_file(job.outputFile(), cserver); if (have_dwo_file) { string dwo_output = job.outputFile().substr(0, job.outputFile().rfind('.')) + ".dwo"; receive_file(dwo_output, cserver); } } } catch (...) { // Handle pending status messages, if any. 
if(cserver) { while(Msg* msg = cserver->get_msg(0, true)) { if(msg->type == M_STATUS_TEXT) log_error() << "Remote status (compiled on " << cserver->name << "): " << static_cast(msg)->text << endl; delete msg; } delete cserver; cserver = 0; } throw; } delete cserver; return status; } static string md5_for_file(const string & file) { md5_state_t state; string result; md5_init(&state); FILE *f = fopen(file.c_str(), "rb"); if (!f) { return result; } md5_byte_t buffer[40000]; while (true) { size_t size = fread(buffer, 1, 40000, f); if (!size) { break; } md5_append(&state, buffer, size); } fclose(f); md5_byte_t digest[16]; md5_finish(&state, digest); char digest_cache[33]; for (int di = 0; di < 16; ++di) { sprintf(digest_cache + di * 2, "%02x", digest[di]); } digest_cache[32] = 0; result = digest_cache; return result; } static bool maybe_build_local(MsgChannel *local_daemon, UseCSMsg *usecs, CompileJob &job, int &ret) { remote_daemon = usecs->hostname; if (usecs->hostname == "127.0.0.1") { // If this is a test build, do local builds on the local daemon // that has --no-remote, use remote building for the remaining ones. 
if (getenv("ICECC_TEST_REMOTEBUILD") && usecs->port != 0 ) return false; trace() << "building myself, but telling localhost\n"; int job_id = usecs->job_id; job.setJobID(job_id); job.setEnvironmentVersion("__client"); CompileFileMsg compile_file(&job); if (!local_daemon->send_msg(compile_file)) { log_info() << "write of job failed" << endl; throw client_error(29, "Error 29 - write of job failed"); } struct timeval begintv, endtv; struct rusage ru; gettimeofday(&begintv, 0); ret = build_local(job, local_daemon, &ru); gettimeofday(&endtv, 0); // filling the stats, so the daemon can play proxy for us JobDoneMsg msg(job_id, ret, JobDoneMsg::FROM_SUBMITTER); msg.real_msec = (endtv.tv_sec - begintv.tv_sec) * 1000 + (endtv.tv_usec - begintv.tv_usec) / 1000; struct stat st; msg.out_uncompressed = 0; if (!stat(job.outputFile().c_str(), &st)) { msg.out_uncompressed += st.st_size; } if (!stat((job.outputFile().substr(0, job.outputFile().rfind('.')) + ".dwo").c_str(), &st)) { msg.out_uncompressed += st.st_size; } msg.user_msec = ru.ru_utime.tv_sec * 1000 + ru.ru_utime.tv_usec / 1000; msg.sys_msec = ru.ru_stime.tv_sec * 1000 + ru.ru_stime.tv_usec / 1000; msg.pfaults = ru.ru_majflt + ru.ru_minflt + ru.ru_nswap; msg.exitcode = ret; if (msg.user_msec > 50 && msg.out_uncompressed > 1024) { trace() << "speed=" << float(msg.out_uncompressed / msg.user_msec) << endl; } return local_daemon->send_msg(msg); } return false; } // Minimal version of remote host that we want to use for the job. 
static int minimalRemoteVersion( const CompileJob& job) { int version = MIN_PROTOCOL_VERSION; if (ignore_unverified()) { version = max(version, 31); } if (job.dwarfFissionEnabled()) { version = max(version, 35); } return version; } static unsigned int requiredRemoteFeatures() { unsigned int features = 0; if (const char* icecc_env_compression = getenv( "ICECC_ENV_COMPRESSION" )) { if( strcmp( icecc_env_compression, "xz" ) == 0 ) features = features | NODE_FEATURE_ENV_XZ; if( strcmp( icecc_env_compression, "zstd" ) == 0 ) features = features | NODE_FEATURE_ENV_ZSTD; } return features; } int build_remote(CompileJob &job, MsgChannel *local_daemon, const Environments &_envs, int permill) { srand(time(0) + getpid()); int torepeat = 1; bool has_split_dwarf = job.dwarfFissionEnabled(); if (!compiler_is_clang(job)) { if (rand() % 1000 < permill) { torepeat = 3; } } if( torepeat == 1 ) { trace() << "preparing " << job.inputFile() << " to be compiled for " << job.targetPlatform() << "\n"; } else { trace() << "preparing " << job.inputFile() << " to be compiled " << torepeat << " times for " << job.targetPlatform() << "\n"; } map versionfile_map, version_map; Environments envs = rip_out_paths(_envs, version_map, versionfile_map); if (!envs.size()) { log_error() << "$ICECC_VERSION needs to point to .tar files" << endl; throw client_error(22, "Error 22 - $ICECC_VERSION needs to point to .tar files"); } const char *preferred_host = getenv("ICECC_PREFERRED_HOST"); if (torepeat == 1) { string fake_filename; list args = job.remoteFlags(); for (list::const_iterator it = args.begin(); it != args.end(); ++it) { fake_filename += "/" + *it; } args = job.restFlags(); for (list::const_iterator it = args.begin(); it != args.end(); ++it) { fake_filename += "/" + *it; } fake_filename += get_absfilename(job.inputFile()); GetCSMsg getcs(envs, fake_filename, job.language(), torepeat, job.targetPlatform(), job.argumentFlags(), preferred_host ? 
preferred_host : string(), minimalRemoteVersion(job), requiredRemoteFeatures()); trace() << "asking for host to use" << endl; if (!local_daemon->send_msg(getcs)) { log_warning() << "asked for CS" << endl; throw client_error(24, "Error 24 - asked for CS"); } UseCSMsg *usecs = get_server(local_daemon); int ret; try { if (!maybe_build_local(local_daemon, usecs, job, ret)) ret = build_remote_int(job, usecs, local_daemon, version_map[usecs->host_platform], versionfile_map[usecs->host_platform], 0, true); } catch(...) { delete usecs; throw; } delete usecs; return ret; } else { char *preproc = 0; dcc_make_tmpnam("icecc", ".ix", &preproc, 0); const CharBufferDeleter preproc_holder(preproc); int cpp_fd = open(preproc, O_WRONLY); if (!dcc_lock_host()) { log_error() << "can't lock for local cpp" << endl; return EXIT_DISTCC_FAILED; } HostUnlock hostUnlock; // automatic dcc_unlock() /* When call_cpp returns normally (for the parent) it will have closed the write fd, i.e. cpp_fd. */ pid_t cpp_pid = call_cpp(job, cpp_fd); if (cpp_pid == -1) { ::unlink(preproc); throw client_error(10, "Error 10 - (unable to fork process?)"); } int status = 255; waitpid(cpp_pid, &status, 0); if (shell_exit_status(status)) { // failure log_warning() << "call_cpp process failed with exit status " << shell_exit_status(status) << endl; ::unlink(preproc); return shell_exit_status(status); } dcc_unlock(); char rand_seed[400]; // "designed to be oversized" (Levi's) sprintf(rand_seed, "-frandom-seed=%d", rand()); job.appendFlag(rand_seed, Arg_Remote); GetCSMsg getcs(envs, get_absfilename(job.inputFile()), job.language(), torepeat, job.targetPlatform(), job.argumentFlags(), preferred_host ? 
preferred_host : string(), minimalRemoteVersion(job), 0); if (!local_daemon->send_msg(getcs)) { log_warning() << "asked for CS" << endl; throw client_error(0, "Error 0 - asked for CS"); } map jobmap; CompileJob *jobs = new CompileJob[torepeat]; UseCSMsg **umsgs = new UseCSMsg*[torepeat]; bool misc_error = false; int *exit_codes = new int[torepeat]; for (int i = 0; i < torepeat; i++) { // init exit_codes[i] = 42; } for (int i = 0; i < torepeat; i++) { jobs[i] = job; char *buffer = 0; if (i) { dcc_make_tmpnam("icecc", ".o", &buffer, 0); jobs[i].setOutputFile(buffer); } else { buffer = strdup(job.outputFile().c_str()); } const CharBufferDeleter buffer_holder(buffer); umsgs[i] = get_server(local_daemon); remote_daemon = umsgs[i]->hostname; trace() << "got_server_for_job " << umsgs[i]->hostname << endl; flush_debug(); pid_t pid = fork(); if (pid == -1) { log_perror("failure of fork"); status = -1; } if (!pid) { int ret = 42; try { if (!maybe_build_local(local_daemon, umsgs[i], jobs[i], ret)) ret = build_remote_int( jobs[i], umsgs[i], local_daemon, version_map[umsgs[i]->host_platform], versionfile_map[umsgs[i]->host_platform], preproc, i == 0); } catch (std::exception& error) { log_info() << "build_remote_int failed and has thrown " << error.what() << endl; kill(getpid(), SIGTERM); return 0; // shouldn't matter } _exit(ret); return 0; // doesn't matter } jobmap[pid] = i; } for (int i = 0; i < torepeat; i++) { pid_t pid = wait(&status); if (pid < 0) { log_perror("wait failed"); status = -1; } else { if (WIFSIGNALED(status)) { // there was some misc error in processing misc_error = true; break; } exit_codes[jobmap[pid]] = shell_exit_status(status); } } if (!misc_error) { string first_md5 = md5_for_file(jobs[0].outputFile()); for (int i = 1; i < torepeat; i++) { if (!exit_codes[0]) { // if the first failed, we fail anyway if (exit_codes[i] == 42) { // they are free to fail for misc reasons continue; } if (exit_codes[i]) { log_error() << umsgs[i]->hostname << " compiled with 
exit code " << exit_codes[i] << " and " << umsgs[0]->hostname << " compiled with exit code " << exit_codes[0] << " - aborting!\n"; if (-1 == ::unlink(jobs[0].outputFile().c_str())){ log_perror("unlink outputFile failed") << "\t" << jobs[0].outputFile() << endl; } if (has_split_dwarf) { string dwo_file = jobs[0].outputFile().substr(0, jobs[0].outputFile().rfind('.')) + ".dwo"; if (-1 == ::unlink(dwo_file.c_str())){ log_perror("unlink failed") << "\t" << dwo_file << endl; } } exit_codes[0] = -1; // overwrite break; } string other_md5 = md5_for_file(jobs[i].outputFile()); if (other_md5 != first_md5) { log_error() << umsgs[i]->hostname << " compiled " << jobs[0].outputFile() << " with md5 sum " << other_md5 << "(" << jobs[i].outputFile() << ")" << " and " << umsgs[0]->hostname << " compiled with md5 sum " << first_md5 << " - aborting!\n"; rename(jobs[0].outputFile().c_str(), (jobs[0].outputFile() + ".caught").c_str()); rename(preproc, (string(preproc) + ".caught").c_str()); if (has_split_dwarf) { string dwo_file = jobs[0].outputFile().substr(0, jobs[0].outputFile().rfind('.')) + ".dwo"; rename(dwo_file.c_str(), (dwo_file + ".caught").c_str()); } exit_codes[0] = -1; // overwrite break; } } if (-1 == ::unlink(jobs[i].outputFile().c_str())){ log_perror("unlink failed") << "\t" << jobs[i].outputFile() << endl; } if (has_split_dwarf) { string dwo_file = jobs[i].outputFile().substr(0, jobs[i].outputFile().rfind('.')) + ".dwo"; if (-1 == ::unlink(dwo_file.c_str())){ log_perror("unlink failed") << "\t" << dwo_file << endl; } } delete umsgs[i]; } } else { if (-1 == ::unlink(jobs[0].outputFile().c_str())){ log_perror("unlink failed") << "\t" << jobs[0].outputFile() << endl; } if (has_split_dwarf) { string dwo_file = jobs[0].outputFile().substr(0, jobs[0].outputFile().rfind('.')) + ".dwo"; if (-1 == ::unlink(dwo_file.c_str())){ log_perror("unlink failed") << "\t" << dwo_file << endl; } } for (int i = 1; i < torepeat; i++) { if (-1 == ::unlink(jobs[i].outputFile().c_str())){ 
log_perror("unlink failed") << "\t" << jobs[i].outputFile() << endl; } if (has_split_dwarf) { string dwo_file = jobs[i].outputFile().substr(0, jobs[i].outputFile().rfind('.')) + ".dwo"; if (-1 == ::unlink(dwo_file.c_str())){ log_perror("unlink failed") << "\t" << dwo_file << endl; } } delete umsgs[i]; } } delete umsgs[0]; if (-1 == ::unlink(preproc)){ log_perror("unlink failed") << "\t" << preproc << endl; } int ret = exit_codes[0]; delete [] umsgs; delete [] jobs; delete [] exit_codes; if (misc_error) { throw client_error(27, "Error 27 - misc error"); } return ret; } return 0; } icecream-1.3.1/client/safeguard.cpp000066400000000000000000000043151361626760200172230ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* * distcc -- A simple distributed compiler system * * Copyright (C) 2002, 2003 by Martin Pool * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "client.h" #include "logging.h" using namespace std; /** * @file * @brief Protect against unbounded recursion. * * It would be fairly easy for somebody to get confused in masquerade mode and * try to get distcc to invoke itself in a loop. We can't always work out the * right thing to do but we can at least flag an error. 
* * This environment variable is set to guard against distcc accidentally * recursively invoking itself, thinking it's the real compiler. **/ static const char dcc_safeguard_name[] = "_ICECC_SAFEGUARD"; static int dcc_safeguard_level; int dcc_recursion_safeguard(void) { const char *env = getenv(dcc_safeguard_name); if (env) { //trace() << "safeguard: " << env << endl; if (!(dcc_safeguard_level = atoi(env))) { dcc_safeguard_level = 1; } } else { dcc_safeguard_level = 0; } //trace() << "safeguard level=" << dcc_safeguard_level << endl; return dcc_safeguard_level; } void dcc_increment_safeguard(SafeguardStep step) { char value[2] = { (char)(dcc_safeguard_level + step + '0'), '\0' }; //trace() << "setting safeguard: " << dcc_safeguard_set << endl; if (setenv(dcc_safeguard_name, value, 1) == -1) { log_error() << "putenv failed" << endl; } } icecream-1.3.1/client/util.cpp000066400000000000000000000247321361626760200162440ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* * distcc -- A simple distributed compiler system * * Copyright (C) 2002, 2003 by Martin Pool * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "config.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "client.h" #include "exitcode.h" #include "job.h" #include "logging.h" #include "ncpus.h" #include "util.h" using namespace std; extern bool explicit_color_diagnostics; extern bool explicit_no_show_caret; /** * Set the `FD_CLOEXEC' flag of DESC if VALUE is nonzero, * or clear the flag if VALUE is 0. * * From the GNU C Library examples. * * @returns 0 on success, or -1 on error with `errno' set. **/ int set_cloexec_flag(int desc, int value) { int oldflags = fcntl(desc, F_GETFD, 0); /* If reading the flags failed, return error indication now. */ if (oldflags < 0) { return oldflags; } /* Set just the flag we want to set. */ if (value != 0) { oldflags |= FD_CLOEXEC; } else { oldflags &= ~FD_CLOEXEC; } /* Store modified flag word in the descriptor. */ return fcntl(desc, F_SETFD, oldflags); } /** * Ignore or unignore SIGPIPE. * * The server and child ignore it, because distcc code wants to see * EPIPE errors if something goes wrong. However, for invoked * children it is set back to the default value, because they may not * handle the error properly. **/ int dcc_ignore_sigpipe(int val) { if (signal(SIGPIPE, val ? SIG_IGN : SIG_DFL) == SIG_ERR) { log_warning() << "signal(SIGPIPE, " << (val ? "ignore" : "default") << ") failed: " << strerror(errno) << endl; return EXIT_DISTCC_FAILED; } return 0; } /** * Get an exclusive, non-blocking lock on a file using whatever method * is available on this system. * * @retval 0 if we got the lock * @retval -1 with errno set if the file is already locked. **/ static int sys_lock(int fd, bool block) { #if defined(F_SETLK) struct flock lockparam; lockparam.l_type = F_WRLCK; lockparam.l_whence = SEEK_SET; lockparam.l_start = 0; lockparam.l_len = 0; /* whole file */ return fcntl(fd, block ? F_SETLKW : F_SETLK, &lockparam); #elif defined(HAVE_FLOCK) return flock(fd, LOCK_EX | (block ? 
0 : LOCK_NB)); #elif defined(HAVE_LOCKF) return lockf(fd, block ? F_LOCK : F_TLOCK, 0); #else # error "No supported lock method. Please port this code." #endif } static volatile int lock_fd = -1; void dcc_unlock() { // This must be safe to use from a signal handler. if (lock_fd != -1) close(lock_fd); // All our current locks can just be closed. lock_fd = -1; } /** * Open a lockfile, creating if it does not exist. **/ static bool dcc_open_lockfile(const string &fname, int &plockfd) { /* Create if it doesn't exist. We don't actually do anything with * the file except lock it. * * The file is created with the loosest permissions allowed by the user's * umask, to give the best chance of avoiding problems if they should * happen to use a shared lock dir. */ plockfd = open(fname.c_str(), O_WRONLY | O_CREAT, 0666); if (plockfd == -1 && errno != EEXIST) { log_error() << "failed to create " << fname << ": " << strerror(errno) << endl; return false; } set_cloexec_flag(plockfd, true); return true; } static bool dcc_lock_host_slot(string fname, int lock, bool block); bool dcc_lock_host() { assert(lock_fd == -1); string fname = "/tmp/.icecream-"; struct passwd *pwd = getpwuid(getuid()); if (pwd) { fname += pwd->pw_name; } else { char buffer[12]; sprintf(buffer, "%ld", (long)getuid()); fname += buffer; } if (mkdir(fname.c_str(), 0700) && errno != EEXIST) { log_perror("mkdir") << "\t" << fname << endl; return false; } fname += "/local_lock"; lock_fd = 0; int max_cpu = 1; dcc_ncpus(&max_cpu); // To ensure better distribution, select a "random" starting slot. int lock_offset = getpid(); // First try if any slot is free. for( int lock = 0; lock < max_cpu; ++lock ) { if( dcc_lock_host_slot( fname, ( lock + lock_offset ) % max_cpu, false )) return true; } // If not, block on the first selected one. 
return dcc_lock_host_slot( fname, lock_offset % max_cpu, true ); } bool dcc_lock_host_slot(string fname, int lock, bool block) { if( lock > 0 ) { // 1st keep without the 0 for backwards compatibility char num[ 20 ]; sprintf( num, "%d", lock ); fname += num; } int fd = 0; if (!dcc_open_lockfile(fname, fd)) { return false; } if (sys_lock(fd, block) == 0) { lock_fd = fd; return true; } switch (errno) { #if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN case EWOULDBLOCK: #endif case EAGAIN: case EACCES: /* HP-UX and Cygwin give this for exclusion */ if( block ) trace() << fname << " is busy" << endl; break; default: log_error() << "lock " << fname << " failed: " << strerror(errno) << endl; break; } if ((-1 == ::close(fd)) && (errno != EBADF)){ log_perror("close failed"); } return false; } bool color_output_possible() { const char* term_env = getenv("TERM"); return isatty(2) && term_env && strcasecmp(term_env, "DUMB"); } bool compiler_has_color_output(const CompileJob &job) { if (!color_output_possible()) return false; // Clang has coloring. if (compiler_is_clang(job)) { return true; } if (const char* icecc_color_diagnostics = getenv("ICECC_COLOR_DIAGNOSTICS")) { return *icecc_color_diagnostics == '1'; } #ifdef HAVE_GCC_COLOR_DIAGNOSTICS return true; #endif // GCC has it since 4.9, but that'd require detecting what GCC // version is used for the actual compile. However it requires // also GCC_COLORS to be set (and not empty), so use that // for detecting if GCC would use colors. if (const char *gcc_colors = getenv("GCC_COLORS")) { return (*gcc_colors != '\0'); } return false; } // Whether icecream should add colors to the compiler output. 
bool colorify_wanted(const CompileJob &job) { if (compiler_has_color_output(job)) { return false; // -fcolor-diagnostics handling lets the compiler do it itself } if (explicit_color_diagnostics) { // colors explicitly enabled/disabled by an option return false; } if (getenv("ICECC_COLOR_DIAGNOSTICS") != NULL) return false; // if set explicitly, assume icecream's colorify is not wanted if (getenv("EMACS")) { return false; } return color_output_possible(); } void colorify_output(const string &_s_ccout) { string s_ccout(_s_ccout); string::size_type end; while ((end = s_ccout.find('\n')) != string::npos) { string cline = s_ccout.substr(string::size_type(0), end); s_ccout = s_ccout.substr(end + 1); if (cline.find(": error:") != string::npos) { fprintf(stderr, "\x1b[1;31m%s\x1b[0m\n", cline.c_str()); } else if (cline.find(": warning:") != string::npos) { fprintf(stderr, "\x1b[36m%s\x1b[0m\n", cline.c_str()); } else { fprintf(stderr, "%s\n", cline.c_str()); } } fprintf(stderr, "%s", s_ccout.c_str()); } bool ignore_unverified() { return getenv("ICECC_IGNORE_UNVERIFIED"); } // GCC4.8+ has -fdiagnostics-show-caret, but when it prints the source code, // it tries to find the source file on the disk, rather than printing the input // it got like Clang does. This means that when compiling remotely, it of course // won't find the source file in the remote chroot, and will disable the caret // silently. As a workaround, make it possible to recompile locally if there's // any stdout/stderr. // Another way of handling this might be to send all the headers to the remote // host, but this has been already tried in the sendheaders branch (for // preprocessing remotely too) and performance-wise it just doesn't seem to // be worth it. 
bool output_needs_workaround(const CompileJob &job) { if (compiler_is_clang(job)) return false; if (explicit_no_show_caret) return false; if (const char* caret_workaround = getenv("ICECC_CARET_WORKAROUND")) return *caret_workaround == '1'; #ifdef HAVE_GCC_SHOW_CARET return true; #endif return false; } int resolve_link(const std::string &file, std::string &resolved) { char buf[PATH_MAX]; buf[PATH_MAX - 1] = '\0'; const int ret = readlink(file.c_str(), buf, sizeof(buf) - 1); const int errno_save = errno; if (ret <= 0) { return errno_save; } buf[ret] = 0; resolved = std::string(buf); return 0; } std::string get_cwd() { static std::vector buffer(1024); errno = 0; while (getcwd(&buffer[0], buffer.size() - 1) == 0 && errno == ERANGE) { buffer.resize(buffer.size() + 1024); errno = 0; } if (errno != 0) return std::string(); return string(&buffer[0]); } std::string read_command_output(const std::string& command) { FILE *f = popen(command.c_str(), "r"); string output; if (!f) { log_error() << "no pipe " << strerror(errno) << endl; return output; } char buffer[1024]; while (!feof(f)) { size_t bytes = fread(buffer, 1, sizeof(buffer) - 1, f); buffer[bytes] = 0; output += buffer; } pclose(f); // get rid of the endline return output.substr(0, output.length() - 1); } icecream-1.3.1/client/util.h000066400000000000000000000033551361626760200157070ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* * distcc -- A simple distributed compiler system * * Copyright (C) 2002, 2003 by Martin Pool * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _CLIENT_UTIL_H_ #define _CLIENT_UTIL_H_ #include #include "services/util.h" class CompileJob; /* util.c */ extern int set_cloexec_flag(int desc, int value); extern int dcc_ignore_sigpipe(int val); extern void colorify_output(const std::string &s_ccout); extern bool colorify_wanted(const CompileJob &job); extern bool compiler_has_color_output(const CompileJob &job); extern bool output_needs_workaround(const CompileJob &job); extern bool ignore_unverified(); extern int resolve_link(const std::string &file, std::string &resolved); extern std::string get_cwd(); extern std::string read_command_output(const std::string& command); extern bool dcc_lock_host(); extern void dcc_unlock(); extern int dcc_locked_fd(); class HostUnlock { public: ~HostUnlock() { dcc_unlock(); } }; #endif icecream-1.3.1/compilerwrapper/000077500000000000000000000000001361626760200165105ustar00rootroot00000000000000icecream-1.3.1/compilerwrapper/Makefile.am000066400000000000000000000001241361626760200205410ustar00rootroot00000000000000pkglibexec_PROGRAMS = compilerwrapper compilerwrapper_SOURCES = compilerwrapper.cpp icecream-1.3.1/compilerwrapper/compilerwrapper.cpp000066400000000000000000000117531361626760200224360ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* Copyright (C) 2012 Lubos Lunak Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Older icecream versions assume the compiler is always GCC. This can be fixed on the local side, but remote nodes would need icecream upgrade. As a workaround icecc-create-env includes this wrapper binary in the environment if clang is to be used as well, that will either call clang or the real gcc. Which one depends on an extra argument added by icecream. 
*/ #include #include #include #include #include #include //#define DEBUG int main(int argc, char *argv[]) { bool iscxx = false; int argv0len = strlen(argv[0]); if (argv0len > 2 && argv[0][argv0len - 1] == '+' && argv[0][argv0len - 2] == '+') { iscxx = true; } #ifdef DEBUG fprintf(stderr, "Args1:\n"); for (int i = 0; i < argc; ++i) { fprintf(stderr, "%s\n", argv[i]); } fprintf(stderr, "\n"); #endif bool isclang = argc >= 2 && strcmp(argv[1], "clang") == 0; // the extra argument from icecream // 1 extra for -no-canonical-prefixes char **args = new char*[argc + 2]; args[0] = new char[strlen(argv[0]) + 20]; strcpy(args[0], argv[0]); char *separator = strrchr(args[0], '/'); if (separator == NULL) { args[0][0] = '\0'; } else { separator[1] = '\0'; // after the separator } if (isclang) { strcat(args[0], "clang"); } else if (iscxx) { strcat(args[0], "g++.bin"); } else { strcat(args[0], "gcc.bin"); } int pos = 1; if (isclang) { args[pos++] = strdup("-no-canonical-prefixes"); // otherwise clang tries to access /proc/self/exe // clang wants the -x argument early, otherwise it seems to ignore it // (and treats the file as already preprocessed) int x_arg_pos = -1; for (int i = 2; // 2 - skip the extra "clang" argument i < argc; ++i) { if (strcmp(argv[i], "-x") == 0 && i + 1 < argc && (strcmp(argv[i + 1], "c") == 0 || strcmp(argv[i + 1], "c++") == 0)) { x_arg_pos = i; args[pos++] = strdup("-x"); args[pos++] = strdup(argv[i + 1]); break; } } for (int i = 2; // 2 - skip the extra "clang" argument i < argc; ++i) { // strip options that icecream adds but clang doesn't know or need if (strcmp(argv[i], "-fpreprocessed") == 0) { continue; // clang doesn't know this (it presumably needs to always preprocess anyway) } if (strcmp(argv[i], "--param") == 0 && i + 1 < argc) { if (strncmp(argv[i + 1], "ggc-min-expand=", strlen("ggc-min-expand=")) == 0 || strncmp(argv[i + 1], "ggc-min-heapsize=", strlen("ggc-min-heapsize=")) == 0) { // drop --param and the parameter itself ++i; continue; } } 
if (i == x_arg_pos) { ++i; // skip following continue; // and skip this one } args[pos++] = strdup(argv[i]); } } else { // !isclang , just copy the arguments for (int i = 1; i < argc; ++i) { args[pos++] = strdup(argv[i]); } } args[pos++] = NULL; assert(pos <= argc + 2); #ifdef DEBUG fprintf(stderr, "Args2:\n"); for (int i = 0; i < pos; ++i) { fprintf(stderr, "%s\n", args[i]); } fprintf(stderr, "\n"); #endif execv(args[0], args); std::ostringstream errmsg; errmsg << "execv " << args[0] << " failed"; perror(errmsg.str().c_str()); exit(1); } icecream-1.3.1/configure.ac000066400000000000000000000310141361626760200155620ustar00rootroot00000000000000# quite some macros are taken from distcc AC_PREREQ([2.63]) # ==================== # Version informations # ==================== # Stable versions: x.y.z , where z < 50 # Development versions: x.y.90 # Pre-release versions: x.y.z, where z = 90 + X in rcX (1.1rc1 = 1.1.91) m4_define([icecream_version_major],[1]) m4_define([icecream_version_minor],[3]) m4_define([icecream_version_micro],[1]) m4_ifnblank(icecream_version_micro, [m4_define([icecream_version],[icecream_version_major.icecream_version_minor.icecream_version_micro])], [m4_define([icecream_version],[icecream_version_major.icecream_version_minor])]) # ============= # Automake init # ============= AC_INIT([icecc], [icecream_version]) AC_CONFIG_MACRO_DIR([m4]) AC_CONFIG_HEADERS([config.h]) AM_INIT_AUTOMAKE([1.11 foreign dist-xz]) AM_SILENT_RULES([yes]) AC_LANG([C++]) # =========================== # Find required base packages # =========================== AC_PROG_CC AC_PROG_CXX if test "$GCC" = yes; then cast_align=-Wcast-align wshadow= AC_MSG_CHECKING([if GCC is actually Clang]) AC_TRY_COMPILE( [], [ #ifdef __clang__ return 0; #else fail #endif ], [ AC_MSG_RESULT(yes) # The code has numerous cast alignment warnings that only clang warns about, # and it probably(?) doesn't matter in practice. 
cast_align= # Clang is not so overzealous like GCC and doesn't warn about cases which # are very unlikely to be problems (e.g. function argument shadowing # a function elsewhere), so it's not so annoying. wshadow=-Wshadow ], [ AC_MSG_RESULT(no) ]) CFLAGS="-g -W -Wall \ -Wshadow -Wpointer-arith $cast_align -Wwrite-strings \ -Waggregate-return -Wstrict-prototypes -Wmissing-prototypes \ -Wnested-externs -Wundef $CFLAGS" CXXFLAGS=" -g -W -Wall -Wpointer-arith $cast_align $wshadow -Wwrite-strings -Wundef $CXXFLAGS" AC_MSG_NOTICE([Adding gcc options: $CFLAGS]) fi AC_ARG_ENABLE(debug, AS_HELP_STRING([--enable-debug], [Enable debug mode (enable asserts, disable optimizations).])) AC_MSG_CHECKING([whether to enable debug module]) if test "$enable_debug" = "yes"; then CFLAGS="$CFLAGS -g -O0" CXXFLAGS="$CXXFLAGS -g -O0" AC_MSG_RESULT(yes) else CFLAGS="$CFLAGS -DNDEBUG" CXXFLAGS="$CXXFLAGS -DNDEBUG" AC_MSG_RESULT(no) fi AC_PROG_MAKE_SET AC_PROG_INSTALL AC_C_INLINE AC_DISABLE_SHARED AC_ENABLE_STATIC AC_PROG_LIBTOOL ICECC_LIBCAP_NG_PATH # ============= # Documentation # ============= AC_ARG_WITH(man, [AS_HELP_STRING([--without-man], [Do not build manpage])], [with_man="$withval"], [with_man=yes] ) AS_IF([test "x$with_man" != "xno"], [ build_man=yes AC_PATH_PROGS(DOCBOOK2X, [docbook2x-man db2x_docbook2man docbook-to-man docbook2man.pl docbook2man]) AS_IF([test -z "$DOCBOOK2X"], [ AC_MSG_WARN([docbook2x is missing. Install docbook2x package.]) DOCBOOK2X='echo docbook2x is missing. Install docbook2x package.']) ], [build_man=no]) AC_SUBST(DOCBOOK2X) AM_CONDITIONAL([WITH_ICECREAM_MAN], [test "x$build_man" != "xno"]) ######################################################################## ### Checks for header files # Some of these are needed by popt (or other libraries included in the future). 
AC_CHECK_HEADERS([sys/signal.h ifaddrs.h kinfo.h sys/param.h devstat.h]) AC_CHECK_HEADERS([sys/socketvar.h sys/vfs.h]) AC_CHECK_HEADERS([mach/host_info.h]) AC_CHECK_HEADERS([arpa/nameser.h], [], [], [#include ]) AC_CHECK_HEADERS([resolv.h], [], [], [#include #include #if HAVE_ARPA_NAMESER_H # include #endif ]) AC_CHECK_HEADERS([netinet/tcp_var.h], [], [], [#include #if HAVE_SYS_SOCKETVAR_H # include #endif #include #include ]) AC_CHECK_HEADERS([sys/user.h]) ###################################################################### dnl Checks for types AC_CHECK_TYPES([sa_family_t, socklen_t, in_port_t, in_addr_t], , , [ #include #include #include #if HAVE_ARPA_NAMESER_H # include #endif ]) AC_CHECK_MEMBER([struct ifreq.ifr_dstaddr], [AC_DEFINE(HAVE_IFR_DSTADDR, 1, [Set to 1 if struct ifr_ifru has member ifr_dstaddr] )], [AC_DEFINE(HAVE_IFR_DSTADDR, 0, [Set to 0 if struct ifr_ifru has no ifr_dstaddr] )], [ #include ]) ######################################################################## ### Checks for libraries. # The following test taken from the cvs sources via Samba: # If we can't find connect, try looking in -lsocket, -lnsl, and -linet. # The Irix 5 libc.so has connect and gethostbyname, but Irix 5 also has # libsocket.so which has a bad implementation of gethostbyname (it # only looks in /etc/hosts), so we only look for -lsocket if we need # it. AC_CHECK_FUNCS(connect) if test x"$ac_cv_func_connect" = x"no"; then case "$LIBS" in *-lnsl*) ;; *) AC_CHECK_LIB(nsl_s, printf) ;; esac case "$LIBS" in *-lnsl*) ;; *) AC_CHECK_LIB(nsl, printf) ;; esac case "$LIBS" in *-lsocket*) ;; *) AC_CHECK_LIB(socket, connect) ;; esac case "$LIBS" in *-linet*) ;; *) AC_CHECK_LIB(inet, connect) ;; esac dnl We can't just call AC_CHECK_FUNCS(connect) here, because the value dnl has been cached. if test x"$ac_cv_lib_socket_connect" = x"yes" || test x"$ac_cv_lib_inet_connect" = x"yes"; then # ac_cv_func_connect=yes # don't! 
it would cause AC_CHECK_FUNC to succeed next time configure is run AC_DEFINE(HAVE_CONNECT,1,[Whether the system has connect()]) fi fi AC_CHECK_FUNCS([flock lockf]) AC_CHECK_FUNCS([strsignal]) AC_CHECK_FUNCS([getloadavg]) AC_CHECK_LIB(lzo2, lzo1x_1_compress, LZO_LDADD=-llzo2, AC_MSG_ERROR([Could not find lzo2 library - please install lzo-devel])) AC_SUBST(LZO_LDADD) AC_CHECK_LIB(zstd, ZSTD_compress, ZSTD_LDADD=-lzstd, AC_MSG_ERROR([Could not find zstd library - please install libzstd-devel])) AC_SUBST(ZSTD_LDADD) AC_CHECK_LIB([dl], [dlsym], [DL_LDADD=-ldl]) AC_SUBST([DL_LDADD]) AC_CHECK_HEADERS([archive.h, archive_entry.h]) AC_CHECK_LIB(archive, archive_read_data_block, ARCHIVE_LDADD=-larchive, AC_MSG_ERROR([Could not find libarchive library - please install libarchive-devel])) AC_SUBST(ARCHIVE_LDADD) AC_MSG_CHECKING([whether libarchive has archive_read_support_filter_xz()]) AC_TRY_COMPILE( [ #include ], [ struct archive *a; archive_read_support_filter_xz(a); ], [ AC_MSG_RESULT(yes) AC_DEFINE(HAVE_LIBARCHIVE_XZ, 1, [Whether libarchive has archive_read_support_filter_xz()]) ], [ AC_MSG_RESULT(no) ]) AC_MSG_CHECKING([whether libarchive has archive_read_support_filter_zstd()]) AC_TRY_COMPILE( [ #include ], [ struct archive *a; archive_read_support_filter_zstd(a); ], [ AC_MSG_RESULT(yes) AC_DEFINE(HAVE_LIBARCHIVE_ZSTD, 1, [Whether libarchive has archive_read_support_filter_zstd()]) ], [ AC_MSG_RESULT(no) ]) # In DragonFlyBSD daemon needs to be linked against libkinfo. 
case $host_os in dragonfly*) LIB_KINFO="-lkinfo" ;; *) LIB_KINFO="" ;; esac AC_SUBST(LIB_KINFO) AC_CHECK_PROG(CLANG,clang,clang) AC_ARG_ENABLE(clang-rewrite-includes, AS_HELP_STRING([--enable-clang-rewrite-includes], [Use by default Clang's -frewrite-includes option.])) if test "$enable_clang_rewrite_includes" = "yes"; then AC_DEFINE(HAVE_CLANG_REWRITE_INCLUDES, 1, [Define to 1 if clang supports -frewrite-includes]) elif test "$enable_clang_rewrite_includes" = "no"; then true # do not enable else if test -n "$CLANG"; then AC_MSG_CHECKING([whether clang -Werror works for unknown options]) $CLANG -Werror -totallybogusoption -E - >/dev/null 2>/dev/null if test $? -eq 0; then AC_MSG_RESULT(no) # can't detect if the option is supported, but that's too old clang anyway else AC_MSG_RESULT(yes) AC_MSG_CHECKING([for clang -E -frewrite-includes]) $CLANG -Werror -E -frewrite-includes - >/dev/null 2>/dev/null if test $? -eq 0; then AC_MSG_RESULT(yes) AC_DEFINE(HAVE_CLANG_REWRITE_INCLUDES, 1, [Define to 1 if clang supports -frewrite-includes]) else AC_MSG_RESULT(no) fi fi fi fi AC_ARG_ENABLE(clang-wrappers, AS_HELP_STRING([--enable-clang-wrappers], [Use symlink wrappers for clang/clang++.])) CLANG_SYMLINK_WRAPPERS= if test "$enable_clang_wrappers" = "yes"; then CLANG_SYMLINK_WRAPPERS='clang clang++' elif test "$enable_clang_wrappers" = "no"; then true # do not enable else if test -n "$CLANG"; then CLANG_SYMLINK_WRAPPERS='clang clang++' fi fi AC_SUBST(CLANG_SYMLINK_WRAPPERS) AC_ARG_ENABLE(gcc-color-diagnostics, AS_HELP_STRING([--enable-gcc-color-diagnostics], [Assume by default GCC has -fdiagnostics-color=auto option.])) if test "$enable_gcc_color_diagnostics" = "yes"; then AC_DEFINE(HAVE_GCC_COLOR_DIAGNOSTICS, 1, [Define to 1 if gcc supports -fdiagnostics-color=auto]) else true # do not enable fi AC_ARG_ENABLE(gcc-show-caret, AS_HELP_STRING([--enable-gcc-show-caret], [Assume by default GCC has -fdiagnostics-show-caret option.])) if test "$enable_gcc_show_caret" = "yes"; then 
AC_DEFINE(HAVE_GCC_SHOW_CARET, 1, [Define to 1 if gcc supports -fdiagnostics-show-caret]) elif test "$enable_gcc_show_caret" = "no"; then true # do not enable else AC_CHECK_PROG(GCC_BIN,gcc,gcc) if test -n "$GCC_BIN"; then AC_MSG_CHECKING([for gcc -fdiagnostics-show-caret]) $GCC_BIN -Werror -E -fdiagnostics-show-caret - >/dev/null 2>/dev/null if test $? -eq 0; then AC_MSG_RESULT(yes) AC_DEFINE(HAVE_GCC_SHOW_CARET, 1, [Define to 1 if gcc supports -fdiagnostics-show-caret]) else AC_MSG_RESULT(no) fi fi fi AC_ARG_ENABLE(gcc-fdirectives-only, AS_HELP_STRING([--enable-gcc-fdirectives-only], [Use by default GCC's -fdirectives-only option.])) if test "$enable_gcc_fdirectives_only" = "yes"; then AC_DEFINE(HAVE_GCC_FDIRECTIVES_ONLY, 1, [Define to 1 if gcc supports -fdirectives-only]) elif test "$enable_gcc_fdirectives_only" = "no"; then true # do not enable else AC_CHECK_PROG(GCC_BIN,gcc,gcc) AC_MSG_CHECKING([whether $GCC_BIN -Werror works for unknown options]) $GCC_BIN -Werror -totallybogusoption -E - >/dev/null 2>/dev/null if test $? -eq 0; then AC_MSG_RESULT(no) # can't detect if the option is supported, but that's too old clang anyway else AC_MSG_RESULT(yes) AC_MSG_CHECKING([for $GCC_BIN -E -fdirectives-only]) $GCC_BIN -Werror -E -fdirectives-only - >/dev/null 2>/dev/null if test $? 
-eq 0; then AC_MSG_RESULT(yes) AC_DEFINE(HAVE_GCC_FDIRECTIVES_ONLY, 1, [Define to 1 if gcc supports -fdirectives-only]) else AC_MSG_RESULT(no) fi fi fi AC_MSG_CHECKING([for -fsanitize= usage]) if echo "$CXXFLAGS" | grep -q -- -fsanitize; then AC_DEFINE(SANITIZER_USED, 1, [Define to 1 if compiled with -fsanitize option(s)]) AC_MSG_RESULT(yes) else AC_MSG_RESULT(no) fi AC_CONFIG_FILES([ Makefile ]) AC_CONFIG_FILES([ client/Makefile ]) AC_CONFIG_FILES([ daemon/Makefile ]) AC_CONFIG_FILES([ doc/Makefile ]) AC_CONFIG_FILES([ services/Makefile ]) AC_CONFIG_FILES([ services/icecc.pc ]) AC_CONFIG_FILES([ suse/Makefile ]) AC_CONFIG_FILES([ compilerwrapper/Makefile ]) AC_CONFIG_FILES([ scheduler/Makefile ]) AC_CONFIG_FILES([ tests/Makefile ]) AC_CONFIG_FILES([ unittests/Makefile ]) AC_CONFIG_FILES([ client/icecc-create-env ]) AC_CONFIG_FILES([ client/icecc-test-env ]) AC_CONFIG_FILES([ tests/test-setup.sh ]) AC_OUTPUT([ suse/icecream.spec ]) if test "$prefix" = NONE; then prefix=$ac_default_prefix fi AC_DEFUN([KDE_EXPAND_MAKEVAR], [ savex=$exec_prefix test "x$exec_prefix" = xNONE && exec_prefix=$prefix tmp=$$2 while $1=`eval echo "$tmp"`; test "x$$1" != "x$tmp"; do tmp=$$1; done exec_prefix=$savex ]) KDE_EXPAND_MAKEVAR(mybindir, bindir) AC_DEFINE_UNQUOTED(BINDIR, "$mybindir", [Where to look for icecc]) myorundir='${localstatedir}/run' KDE_EXPAND_MAKEVAR(myrundir, myorundir) AC_DEFINE_UNQUOTED(RUNDIR, "$myrundir", [Where to place pid files]) KDE_EXPAND_MAKEVAR(mylibexecdir, libexecdir) PKGLIBEXECDIR="$mylibexecdir/$PACKAGE" AC_SUBST(PKGLIBEXECDIR) AC_OUTPUT icecream-1.3.1/daemon/000077500000000000000000000000001361626760200145405ustar00rootroot00000000000000icecream-1.3.1/daemon/Makefile.am000066400000000000000000000005731361626760200166010ustar00rootroot00000000000000sbin_PROGRAMS = iceccd iceccd_SOURCES = \ main.cpp \ serve.cpp \ workit.cpp \ environment.cpp \ load.cpp \ file_util.cpp iceccd_LDADD = \ ../services/libicecc.la \ $(LIB_KINFO) \ $(CAPNG_LDADD) \ 
$(ARCHIVE_LDADD) AM_CPPFLAGS = \ -I$(top_srcdir)/services AM_LIBTOOLFLAGS = --silent noinst_HEADERS = \ environment.h \ load.h \ serve.h \ workit.h \ file_util.h icecream-1.3.1/daemon/environment.cpp000066400000000000000000000517061361626760200176210ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. Copyright (c) 2004 Stephan Kulow This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include #include "environment.h" #include #include #include #include #include #include #include #include #include #include #include #include "comm.h" #include "exitcode.h" #include "util.h" #include #include using namespace std; size_t sumup_dir(const string &dir) { size_t res = 0; DIR *envdir = opendir(dir.c_str()); if (!envdir) { return res; } struct stat st; string tdir = dir + "/"; for (struct dirent *ent = readdir(envdir); ent; ent = readdir(envdir)) { if (!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, "..")) { continue; } if (lstat((tdir + ent->d_name).c_str(), &st)) { perror("stat"); continue; } if (S_ISDIR(st.st_mode)) { res += sumup_dir(tdir + ent->d_name); } else if (S_ISREG(st.st_mode)) { res += st.st_size; } // else ignore } closedir(envdir); return res; } static void list_target_dirs(const string ¤t_target, const string &targetdir, Environments &envs) { DIR *envdir = opendir(targetdir.c_str()); if (!envdir) { return; } for (struct dirent *ent = readdir(envdir); ent; ent = readdir(envdir)) { string dirname = ent->d_name; if (access(string(targetdir + "/" + dirname + "/usr/bin/as").c_str(), X_OK) == 0) { envs.push_back(make_pair(current_target, dirname)); } } closedir(envdir); } /* Returns true if the child exited with success */ static bool exec_and_wait(const char *const argv[]) { pid_t pid = fork(); if (pid == -1) { log_perror("failed to fork"); return false; } if (pid) { // parent int status; while (waitpid(pid, &status, 0) < 0 && errno == EINTR) {} return shell_exit_status(status) == 0; } // child execv(argv[0], const_cast(argv)); ostringstream errmsg; errmsg << "execv " << argv[0] << " failed"; log_perror(errmsg.str()); _exit(-1); } // Removes everything in the directory recursively, but not the directory itself. 
static bool cleanup_directory(const string &directory) { DIR *dir = opendir(directory.c_str()); if (dir == NULL) { return false; } while (dirent *f = readdir(dir)) { if (strcmp(f->d_name, ".") == 0 || strcmp(f->d_name, "..") == 0) { continue; } string fullpath = directory + '/' + f->d_name; struct stat st; if (lstat(fullpath.c_str(), &st)) { perror("stat"); return false; } if (S_ISDIR(st.st_mode)) { if (!cleanup_directory(fullpath) || rmdir(fullpath.c_str()) != 0) { return false; } } else { if (unlink(fullpath.c_str()) != 0) { return false; } } } closedir(dir); return true; } bool cleanup_cache(const string &basedir, uid_t user_uid, gid_t user_gid) { flush_debug(); if (access(basedir.c_str(), R_OK) == 0 && !cleanup_directory(basedir)) { log_error() << "failed to clean up envs dir" << endl; return false; } if (mkdir(basedir.c_str(), 0755) && errno != EEXIST) { if (errno == EPERM) { log_error() << "permission denied on mkdir " << basedir << endl; } else { log_perror("mkdir in cleanup_cache() failed") << "\t" << basedir << endl; } return false; } if (chown(basedir.c_str(), user_uid, user_gid) || chmod(basedir.c_str(), 0775)) { log_perror("chown/chmod in cleanup_cache() failed") << "\t" << basedir << endl;; return false; } return true; } Environments available_environmnents(const string &basedir) { Environments envs; DIR *envdir = opendir(basedir.c_str()); if (!envdir) { log_info() << "can't open envs dir " << strerror(errno) << endl; } else { for (struct dirent *target_ent = readdir(envdir); target_ent; target_ent = readdir(envdir)) { string dirname = target_ent->d_name; if (dirname.at(0) == '.') { continue; } if (dirname.substr(0, 7) == "target=") { string current_target = dirname.substr(7, dirname.length() - 7); list_target_dirs(current_target, basedir + "/" + dirname, envs); } } closedir(envdir); } return envs; } // Returns fd for icecc-create-env output int start_create_env(const string &basedir, uid_t user_uid, gid_t user_gid, const std::string &compiler, const 
list &extrafiles, const std::string &compression) { string nativedir = basedir + "/native/"; if (mkdir(nativedir.c_str(), 0775) && errno != EEXIST) { return 0; } if (chown(nativedir.c_str(), user_uid, user_gid) || chmod(nativedir.c_str(), 0775)) { log_perror("chown/chmod failed"); if (-1 == rmdir(nativedir.c_str())){ log_perror("rmdir failed"); } return 0; } flush_debug(); int pipes[2]; if (pipe(pipes) == -1) { log_error() << "failed to create pipe: " << strerror(errno) << endl; _exit(147); } pid_t pid = fork(); if (pid == -1) { log_perror("failed to fork"); _exit(147); } if (pid) { if ((-1 == close(pipes[1])) && (errno != EBADF)){ log_perror("close failed"); } fcntl(pipes[0], F_SETFD, FD_CLOEXEC); return pipes[0]; } // else #ifndef HAVE_LIBCAP_NG if (getuid() != user_uid || geteuid() != user_uid || getgid() != user_gid || getegid() != user_gid) { if (setgroups(0, NULL) < 0) { log_perror("setgroups failed"); _exit(143); } if (setgid(user_gid) < 0) { log_perror("setgid failed"); _exit(143); } if (!geteuid() && setuid(user_uid) < 0) { log_perror("setuid failed"); _exit(142); } } #endif if (chdir(nativedir.c_str())) { log_perror("chdir") << "\t" << nativedir << endl; _exit(1); } if ((-1 == close(pipes[0])) && (errno != EBADF)){ log_perror("close failed"); } if (-1 == dup2(pipes[1], 5)){ // icecc-create-env will write the hash there log_perror("dup2 failed"); } if ((-1 == close(pipes[1])) && (errno != EBADF)){ log_perror("close failed"); } if ((-1 == close(STDOUT_FILENO)) && (errno != EBADF)){ // hide output from icecc-create-env log_perror("close failed"); } const char **argv; argv = new const char*[4 + extrafiles.size()]; int pos = 0; argv[pos++] = BINDIR "/icecc"; argv[pos++] = "--build-native"; const int first_to_free = pos; argv[pos++] = strdup(compiler.c_str()); for (list::const_iterator it = extrafiles.begin(); it != extrafiles.end(); ++it) { argv[pos++] = strdup(it->c_str()); } argv[pos++] = NULL; if (!compression.empty()) { // icecc will read it from 
ICECC_ENV_COMPRESSION, we are in a forked process, so simply set it setenv( "ICECC_ENV_COMPRESSION", compression.c_str(), 1 ); } if (!exec_and_wait(argv)) { log_error() << BINDIR "/icecc --build-native failed" << endl; _exit(1); } for( int i = first_to_free; i < pos; ++i ) free( (void*) argv[ i ] ); delete[] argv; _exit(0); } size_t finish_create_env(int pipe, const string &basedir, string &native_environment) { // We don't care about waitpid() , icecc-create-env prints the name of the tarball as the very last // action before exit, so if there's something in the pipe, just block on it until it closes. char buf[1024]; buf[0] = '\0'; while (read(pipe, buf, 1023) < 0 && errno == EINTR) {} if (char *nl = strchr(buf, '\n')) { *nl = '\0'; } if( buf[0] == '\0') { trace() << "native_environment creation failed" << endl; return 0; } string nativedir = basedir + "/native/"; native_environment = nativedir + buf; if ((-1 == close(pipe)) && (errno != EBADF)){ log_perror("close failed"); } trace() << "native_environment " << native_environment << endl; struct stat st; if (!native_environment.empty() && (stat(native_environment.c_str(), &st) == 0)) { return st.st_size; } if (-1 == rmdir(nativedir.c_str())){ log_perror("rmdir failed"); } return 0; } static int copy_data(struct archive *ar, struct archive *aw) { int r; const void *buff; size_t size; #if ARCHIVE_VERSION_NUMBER >= 3000000 int64_t offset; #else off_t offset; #endif for(;;){ r = archive_read_data_block(ar, &buff, &size, &offset); if (r == ARCHIVE_EOF){ return (ARCHIVE_OK); } r= archive_write_data_block(aw, buff, size, offset); if(r != ARCHIVE_OK){ trace() << "copy_data(): Error after write: "<< archive_error_string(aw)<(msg); if (mkdir(dirname.c_str(), 0770) && errno != EEXIST) { log_perror("mkdir target") << "\t" << dirname << endl; return 0; } if (chown(dirname.c_str(), user_uid, user_gid) || chmod(dirname.c_str(), 0770)) { log_perror("chown,chmod target") << "\t" << dirname << endl; return 0; } dirname = dirname + 
"/" + name; if (mkdir(dirname.c_str(), 0770)) { log_perror("mkdir name") << "\t" << dirname << endl; return 0; } if (chown(dirname.c_str(), user_uid, user_gid) || chmod(dirname.c_str(), 0770)) { log_perror("chown,chmod name") << "\t" << dirname << endl; return 0; } int fds_in[2]; // for receiving data int fds_out[2]; // for sending out final status if (pipe(fds_in) == -1 || pipe(fds_out) == -1) { log_perror("start_install_environment: pipe creation failed for receiving environment"); return 0; } flush_debug(); pid_t pid = fork(); if (pid == -1) { log_perror("start_install_environment - fork()"); return 0; } if (pid) { //Runs only on parent(PID value is 0 in child and PID id on parent) trace() << "Created fork for receiving environment on pid " << pid << endl; if ((-1 == close(fds_in[0])) && (errno != EBADF)){ log_perror("Failed to close read end of pipe"); } if ((-1 == close(fds_out[1])) && (errno != EBADF)){ log_perror("Failed to close write end of pipe"); } pipe_to_child = fds_in[1]; //Set write end of pipe to pass to parent thread pipe_from_child = fds_out[0]; //Set write end of pipe to pass to parent thread fcntl(pipe_to_child, F_SETFD, FD_CLOEXEC); fcntl(pipe_from_child, F_SETFD, FD_CLOEXEC); return pid; } // else #ifndef HAVE_LIBCAP_NG if (setgroups(0, NULL) < 0) { log_perror("setgroups fails"); _exit(143); } if (setgid(user_gid) < 0) { log_perror("setgid fails"); _exit(143); } if (!geteuid() && setuid(user_uid) < 0) { log_perror("setuid fails"); _exit(142); } #endif // reset SIGPIPE and SIGCHILD handler so that tar // isn't confused when gzip/bzip2 aborts signal(SIGCHLD, SIG_DFL); signal(SIGPIPE, SIG_DFL); if ((-1 == close(fds_in[1])) && (errno != EBADF)){ log_perror("Failed to close write end of pipe"); } if ((-1 == close(fds_out[0])) && (errno != EBADF)){ log_perror("Failed to close write end of pipe"); } int niceval = nice(extract_priority); if (-1 == niceval){ log_warning() << "failed to set nice value: " << strerror(errno) << endl; } /* libarchive 
stream reader */ struct archive *a; struct archive *ext; struct archive_entry *entry; int flags; flags = ARCHIVE_EXTRACT_TIME; flags |= ARCHIVE_EXTRACT_PERM; flags |= ARCHIVE_EXTRACT_ACL; flags |= ARCHIVE_EXTRACT_FFLAGS; a=archive_read_new(); archive_read_support_format_all(a); archive_read_support_filter_all(a); ext = archive_write_disk_new(); archive_write_disk_set_options(ext, flags); archive_write_disk_set_standard_lookup(ext); if(archive_read_open_fd(a, fds_in[0], fmsg->len) != ARCHIVE_OK){ log_error() << "start_install_environment: archive_read_open_fd() failed"<< endl; _exit(1); } for(;;){ int r = archive_read_next_header(a, &entry); if (r == ARCHIVE_EOF) { trace() << "start_install_environment: reached end of archive, done"<< endl; break; } if (r < ARCHIVE_WARN){ log_error() << "start_install_environment: r < ARCHIVE_WARN " < 0){ r= copy_data(a, ext); if(r < ARCHIVE_WARN){ log_error()<< "start_install_environment: " << archive_error_string(ext)<send_msg(StatusTextMsg(error)); } } void chdir_to_environment(MsgChannel *client, const string &dirname, uid_t user_uid, gid_t user_gid) { #ifdef HAVE_LIBCAP_NG if (chdir(dirname.c_str()) < 0) { error_client(client, string("chdir to ") + dirname + "failed"); log_perror("chdir() failed") << "\t" << dirname << endl; _exit(145); } if (chroot(dirname.c_str()) < 0) { error_client(client, string("chroot ") + dirname + "failed"); log_perror("chroot() failed") << "\t" << dirname << endl; _exit(144); } (void) user_uid; (void) user_gid; #else if (getuid() == 0) { // without the chdir, the chroot will escape the // jail right away if (chdir(dirname.c_str()) < 0) { error_client(client, string("chdir to ") + dirname + "failed"); log_perror("chdir() failed") << "\t" << dirname << endl; _exit(145); } if (chroot(dirname.c_str()) < 0) { error_client(client, string("chroot ") + dirname + "failed"); log_perror("chroot() failed") << "\t" << dirname << endl; _exit(144); } if (setgroups(0, NULL) < 0) { error_client(client, 
string("setgroups failed")); log_perror("setgroups() failed"); _exit(143); } if (setgid(user_gid) < 0) { error_client(client, string("setgid failed")); log_perror("setgid() failed"); _exit(143); } if (setuid(user_uid) < 0) { error_client(client, string("setuid failed")); log_perror("setuid() failed"); _exit(142); } } else { error_client(client, "cannot chroot to environment"); _exit(146); } #endif } // Verify that the environment works by simply running the bundled bin/true. bool verify_env(MsgChannel *client, const string &basedir, const string &target, const string &env, uid_t user_uid, gid_t user_gid) { if (target.empty() || env.empty()) { error_client(client, "verify_env: target or env empty"); log_error() << "verify_env target or env empty\n\t" << target << "\n\t" << env << endl; return false; } string dirname = basedir + "/target=" + target + "/" + env; if (::access(string(dirname + "/bin/true").c_str(), X_OK) < 0) { error_client(client, dirname + "/bin/true is not executable, installed environment removed?"); log_error() << "I don't have environment " << env << "(" << target << ") to verify." << endl; return false; } flush_debug(); pid_t pid = fork(); assert(pid >= 0); if (pid > 0) { // parent int status; while (waitpid(pid, &status, 0) < 0 && errno == EINTR) {} return shell_exit_status(status) == 0; } else if (pid < 0) { log_perror("Failed to fork for verifying environment"); return false; } // child reset_debug(); chdir_to_environment(client, dirname, user_uid, user_gid); execl("bin/true", "bin/true", (void*)NULL); log_perror("execl bin/true failed"); _exit(-1); } icecream-1.3.1/daemon/environment.h000066400000000000000000000052051361626760200172570ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. 
Copyright (c) 2004 Stephan Kulow This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef ICECREAM_ENVIRONMENT_H #define ICECREAM_ENVIRONMENT_H #include #include #include #include class MsgChannel; extern bool cleanup_cache(const std::string &basedir, uid_t user_uid, gid_t user_gid); extern int start_create_env(const std::string &basedir, uid_t user_uid, gid_t user_gid, const std::string &compiler, const std::list &extrafiles, const std::string &compression); extern size_t finish_create_env(int pipe, const std::string &basedir, std::string &native_environment); Environments available_environmnents(const std::string &basename); extern pid_t start_install_environment(const std::string &basename, const std::string &target, const std::string &name, MsgChannel *c, int& pipe_to_child, int& pipe_from_child, FileChunkMsg*& fmsg, uid_t user_uid, gid_t user_gid, int extract_priority); extern size_t finalize_install_environment(const std::string &basename, const std::string &target, uid_t user_uid, gid_t user_gid); extern size_t remove_environment(const std::string &basedir, const std::string &env); extern size_t remove_native_environment(const std::string &env); extern void chdir_to_environment(MsgChannel *c, const std::string &dirname, uid_t user_uid, gid_t user_gid); extern bool verify_env(MsgChannel *c, const std::string &basedir, const std::string 
&target, const std::string &env, uid_t user_uid, gid_t user_gid); #endif icecream-1.3.1/daemon/file_util.cpp000066400000000000000000000116611361626760200172250ustar00rootroot00000000000000#include #include #include #include #include #include #include #include #include #include #include "file_util.h" using namespace std; /** * Adapted from an answer by "Evan Teran" from this stack overflow question: * http://stackoverflow.com/questions/236129/split-a-string-in-c */ vector split(const string &s, char delim) { vector elems; stringstream ss(s); string item; while (getline(ss, item, delim)) { if (!item.empty()) { elems.push_back(item); } } return elems; } /** * Adapted from an answer by "dash-tom-bang" from this stack overflow question: * http://stackoverflow.com/questions/5772992/get-relative-path-from-two-absolute-paths */ string get_relative_path(const string &to, const string &from) { vector to_dirs = split(to, '/'); vector from_dirs = split(from, '/'); string output; output.reserve(to.size()); vector::const_iterator to_it = to_dirs.begin(), to_end = to_dirs.end(), from_it = from_dirs.begin(), from_end = from_dirs.end(); while ((to_it != to_end) && (from_it != from_end) && *to_it == *from_it) { ++to_it; ++from_it; } while (from_it != from_end) { output += "../"; ++from_it; } while (to_it != to_end) { output += *to_it; ++to_it; if (to_it != to_end) { output += "/"; } } return output; } /** * Returns a string without '..' and '.' * * Preconditions: path must be an absolute path * Postconditions: if path is empty or not an absolute path, return original * path, otherwise, return path after resolving '..' and '.' */ string get_canonicalized_path(const string &path) { if (path.empty() || path[0] != '/') { return path; } vector parts = split(path, '/'); vector canonicalized_path; vector::const_iterator parts_it = parts.begin(), parts_end = parts.end(); while (parts_it != parts_end) { if (*parts_it == ".." 
&& !canonicalized_path.empty()) { canonicalized_path.pop_back(); } else if (*parts_it != "." && *parts_it != "..") { canonicalized_path.push_back(*parts_it); } ++parts_it; } vector::const_iterator path_it = canonicalized_path.begin(), path_end = canonicalized_path.end(); string output; output.reserve(path.size()); output += "/"; while (path_it != path_end) { output += *path_it; ++path_it; if (path_it != path_end) { output += "/"; } } return output; } /** * Adapted from an answer by "Mark" from this stack overflow question: * http://stackoverflow.com/questions/675039/how-can-i-create-directory-tree-in-c-linux */ bool mkpath(const string &path) { bool success = false; int ret = mkdir(path.c_str(), 0775); if(ret == -1) { switch(errno) { case ENOENT: if(mkpath(path.substr(0, path.rfind('/')))) success = 0 == mkdir(path.c_str(), 0775); else success = false; break; case EEXIST: success = true; break; default: success = false; break; } } else { success = true; } return success; } /** * Adapted from an answer by "asveikau" from this stack overflow question: * http://stackoverflow.com/questions/2256945/removing-a-non-empty-directory-programmatically-in-c-or-c */ bool rmpath(const char* path) { DIR *d = opendir(path); size_t path_len = strlen(path); int r = -1; if (d) { struct dirent *p; r = 0; while (!r && (p=readdir(d))) { int r2 = -1; char *buf; size_t len; /* Skip the names "." and ".." as we don't want to recurse on them. 
*/ if (!strcmp(p->d_name, ".") || !strcmp(p->d_name, "..")) { continue; } len = path_len + strlen(p->d_name) + 2; buf = (char*)malloc(len); if (buf) { struct stat statbuf; snprintf(buf, len, "%s/%s", path, p->d_name); if (!stat(buf, &statbuf)) { if (S_ISDIR(statbuf.st_mode)) { r2 = rmpath(buf); } else { r2 = unlink(buf); } } free(buf); } r = r2; } closedir(d); } if (!r) { r = rmdir(path); } return r; } icecream-1.3.1/daemon/file_util.h000066400000000000000000000023631361626760200166710ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef ICECREAM_FILE_UTIL_H #define ICECREAM_FILE_UTIL_H #include #include std::vector split(const std::string &s, char delim); std::string get_relative_path(const std::string &to, const std::string &from); std::string get_canonicalized_path(const std::string &path); bool mkpath(const std::string &path); bool rmpath(const char* path); #endif icecream-1.3.1/daemon/load.cpp000066400000000000000000000302501361626760200161630ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* Copyright (c) 1999, 2000 Chris Schlaeger Copyright (c) 2003 Stephan Kulow This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "config.h" #include "load.h" #include #include #include #include #include #include #include #ifdef HAVE_SYS_PARAM_H #include #endif #ifdef HAVE_MACH_HOST_INFO_H #define USE_MACH 1 #elif !defined( __linux__ ) && !defined(__CYGWIN__) #define USE_SYSCTL #endif #ifdef USE_MACH #include #include #include #endif #ifdef HAVE_KINFO_H #include #endif #ifdef HAVE_DEVSTAT_H #include #include #include #endif #ifdef __APPLE__ #include #endif using namespace std; // what the kernel puts as ticks in /proc/stat typedef unsigned long long load_t; struct CPULoadInfo { /* A CPU can be loaded with user processes, reniced processes and * system processes. 
Unused processing time is called idle load. * These variable store the percentage of each load type. */ int userLoad; int niceLoad; int sysLoad; int idleLoad; /* To calculate the loads we need to remember the tick values for each * load type. */ load_t userTicks; load_t niceTicks; load_t sysTicks; load_t idleTicks; load_t waitTicks; CPULoadInfo() { userLoad = 0; niceLoad = 0; sysLoad = 0; idleLoad = 0; userTicks = 0; niceTicks = 0; sysTicks = 0; idleTicks = 0; waitTicks = 0; } }; static void updateCPULoad(CPULoadInfo *load) { load_t totalTicks; load_t currUserTicks, currSysTicks, currNiceTicks, currIdleTicks, currWaitTicks; #if defined(USE_SYSCTL) && defined(__DragonFly__) static struct kinfo_cputime cp_time; kinfo_get_sched_cputime(&cp_time); /* There is one more load type exported via this interface in DragonFlyBSD - * interrupt load. But I think that we can do without it for our needs. */ currUserTicks = cp_time.cp_user; currNiceTicks = cp_time.cp_nice; currSysTicks = cp_time.cp_sys; currIdleTicks = cp_time.cp_idle; /* It doesn't exist in DragonFlyBSD. 
*/ currWaitTicks = 0; #elif defined (USE_SYSCTL) static int mibs[4] = { 0, 0, 0, 0 }; static size_t mibsize = 4; unsigned long ticks[CPUSTATES]; size_t mibdatasize = sizeof(ticks); if (mibs[0] == 0) { if (sysctlnametomib("kern.cp_time", mibs, &mibsize) < 0) { load->userTicks = load->sysTicks = load->niceTicks = load->idleTicks = 0; load->userLoad = load->sysLoad = load->niceLoad = load->idleLoad = 0; mibs[0] = 0; return; } } if (sysctl(mibs, mibsize, &ticks, &mibdatasize, NULL, 0) < 0) { load->userTicks = load->sysTicks = load->niceTicks = load->idleTicks = 0; load->userLoad = load->sysLoad = load->niceLoad = load->idleLoad = 0; return; } currUserTicks = ticks[CP_USER]; currNiceTicks = ticks[CP_NICE]; currSysTicks = ticks[CP_SYS]; currIdleTicks = ticks[CP_IDLE]; currWaitTicks = 0; #elif defined( USE_MACH ) host_cpu_load_info r_load; kern_return_t error; mach_msg_type_number_t count; count = HOST_CPU_LOAD_INFO_COUNT; mach_port_t port = mach_host_self(); error = host_statistics(port, HOST_CPU_LOAD_INFO, (host_info_t)&r_load, &count); if (error != KERN_SUCCESS) { return; } currUserTicks = r_load.cpu_ticks[CPU_STATE_USER]; currNiceTicks = r_load.cpu_ticks[CPU_STATE_NICE]; currSysTicks = r_load.cpu_ticks[CPU_STATE_SYSTEM]; currIdleTicks = r_load.cpu_ticks[CPU_STATE_IDLE]; currWaitTicks = 0; #else char buf[256]; static int fd = -1; if (fd < 0) { if ((fd = open("/proc/stat", O_RDONLY)) < 0) { log_error() << "Cannot open file \'/proc/stat\'!\n" "The kernel needs to be compiled with support\n" "for /proc filesystem enabled!" << endl; return; } fcntl(fd, F_SETFD, FD_CLOEXEC); } if (lseek(fd, 0, SEEK_SET) == -1){ log_perror("lseek failed"); return; } ssize_t n; while ((n = read(fd, buf, sizeof(buf) - 1)) < 0 && errno == EINTR) {} if (n < 20) { log_error() << "no enough data in /proc/stat?" << endl; return; } buf[n] = 0; /* wait ticks only exist with Linux >= 2.6.0. 
treat as 0 otherwise */ currWaitTicks = 0; // sscanf( buf, "%*s %lu %lu %lu %lu %lu", &currUserTicks, &currNiceTicks, sscanf(buf, "%*s %llu %llu %llu %llu %llu", &currUserTicks, &currNiceTicks, // RL modif &currSysTicks, &currIdleTicks, &currWaitTicks); #endif totalTicks = (currUserTicks - load->userTicks) + (currSysTicks - load->sysTicks) + (currNiceTicks - load->niceTicks) + (currIdleTicks - load->idleTicks) + (currWaitTicks - load->waitTicks); if (totalTicks > 10) { load->userLoad = (1000 * (currUserTicks - load->userTicks)) / totalTicks; load->sysLoad = (1000 * (currSysTicks - load->sysTicks)) / totalTicks; load->niceLoad = (1000 * (currNiceTicks - load->niceTicks)) / totalTicks; load->idleLoad = (1000 * (currIdleTicks - load->idleTicks)) / totalTicks; } else { load->userLoad = load->sysLoad = load->niceLoad = 0; load->idleLoad = 1000; } load->userTicks = currUserTicks; load->sysTicks = currSysTicks; load->niceTicks = currNiceTicks; load->idleTicks = currIdleTicks; load->waitTicks = currWaitTicks; } #if !defined(USE_SYSCTL) && !defined(USE_MACH) static unsigned long int scan_one(const char *buff, const char *key) { const char *b = strstr(buff, key); if (!b) { return 0; } unsigned long int val = 0; if (sscanf(b + strlen(key), ": %lu", &val) != 1) { return 0; } return val; } #endif static unsigned int calculateMemLoad(unsigned long int &NetMemFree) { unsigned long long MemTotal = 0, MemFree = 0, Buffers = 0, Cached = 0; #ifdef USE_MACH /* Get VM statistics. 
*/ vm_statistics_data_t vm_stat; mach_msg_type_number_t count = sizeof(vm_stat) / sizeof(natural_t); kern_return_t error = host_statistics(mach_host_self(), HOST_VM_INFO, (host_info_t)&vm_stat, &count); if (error != KERN_SUCCESS) { return 0; } vm_size_t pagesize; host_page_size(mach_host_self(), &pagesize); unsigned long long MemInactive = (unsigned long long) vm_stat.inactive_count * pagesize; MemFree = (unsigned long long) vm_stat.free_count * pagesize; // blunt lie - but when's sche macht Buffers = MemInactive; #ifdef __APPLE__ { size_t len = sizeof(MemTotal); if ((sysctlbyname("hw.memsize", &MemTotal, &len, NULL, 0) == -1) || !len) { MemTotal = 0; } } #endif #elif defined( USE_SYSCTL ) size_t len = sizeof(MemFree); if ((sysctlbyname("hw.physmem", &MemTotal, &len, NULL, 0) == -1) || !len) { MemTotal = 0; /* Doesn't work under FreeBSD v2.2.x */ } if ((sysctlbyname("vm.stats.vm.v_free_count", &MemFree, &len, NULL, 0) == -1) || !len) { MemFree = 0; /* Doesn't work under FreeBSD v2.2.x */ } len = sizeof(Buffers); if ((sysctlbyname("vfs.bufspace", &Buffers, &len, NULL, 0) == -1) || !len) { Buffers = 0; /* Doesn't work under FreeBSD v2.2.x */ } len = sizeof(Cached); if ((sysctlbyname("vm.stats.vm.v_cache_count", &Cached, &len, NULL, 0) == -1) || !len) { Cached = 0; /* Doesn't work under FreeBSD v2.2.x */ } #else /* The interesting information is definitely within the first 256 bytes */ char buf[256]; static int fd = -1; if (fd < 0) { if ((fd = open("/proc/meminfo", O_RDONLY)) < 0) { log_error() << "Cannot open file \'/proc/meminfo\'!\n" "The kernel needs to be compiled with support\n" "for /proc filesystem enabled!" 
<< endl; return 0; } fcntl(fd, F_SETFD, FD_CLOEXEC); } if (lseek(fd, 0, SEEK_SET) == -1){ log_perror("lseek failed"); return 0; } ssize_t n; while ((n = read(fd, buf, sizeof(buf) - 1)) < 0 && errno == EINTR) {} if (n < 20) { return 0; } buf[n] = '\0'; MemTotal = scan_one(buf, "MemTotal"); MemFree = scan_one(buf, "MemFree"); Buffers = scan_one(buf, "Buffers"); Cached = scan_one(buf, "Cached"); #endif /* Can't calculate a memory load if we don't know how much memory we have */ if (!MemTotal) return 0; if (Buffers > MemTotal / 100) { Buffers -= MemTotal / 100; } else { Buffers /= 2; } if (Cached > MemTotal / 100) { Cached -= MemTotal / 100; } else { Cached /= 2; } NetMemFree = MemFree + Cached + Buffers; return 1000 - (NetMemFree * 1000 / MemTotal); } // Load average calculation based on CALC_LOAD(), in the 2.6 Linux kernel // oldVal - previous load avg. // numJobs - current number of active jobs // rate - update rate, in seconds (usually 60, 300, or 900) // delta_t - time since last update, in seconds double compute_load(double oldVal, unsigned int currentJobs, unsigned int rate, double delta_t) { double weight = 1.0 / exp(delta_t / rate); return oldVal * weight + currentJobs * (1.0 - weight); } double getEpocTime() { timeval tv; gettimeofday(&tv, NULL); return (double) tv.tv_sec + (double) tv.tv_usec / 1000000.0; } // Simulates getloadavg(), but only for specified number of jobs // Note: this is stateful and not thread-safe! // Also, it differs from getloadavg() in that its notion of load // is only updated as often as it's called. 
int fakeloadavg(double *p_result, int resultEntries, unsigned int currentJobs) { // internal state static const int numLoads = 3; static double loads[numLoads] = { 0.0, 0.0, 0.0 }; static unsigned int rates[numLoads] = { 60, 300, 900 }; static double lastUpdate = getEpocTime(); // First, update all state double now = getEpocTime(); double delta_t = std::max(now - lastUpdate, 0.0); // guard against user changing system time backwards lastUpdate = now; for (int l = 0; l < numLoads; l++) { loads[l] = compute_load(loads[0], currentJobs, rates[l], delta_t); } // Then, return requested values int numFilled = std::min(std::max(resultEntries, 0), numLoads); for (int n = 0; n < numFilled; n++) { p_result[n] = loads[n]; } return numFilled; } void fill_stats(unsigned long &myidleload, unsigned long &myniceload, unsigned int &memory_fillgrade, StatsMsg *msg, unsigned int hint) { static CPULoadInfo load; updateCPULoad(&load); myidleload = load.idleLoad; myniceload = load.niceLoad; if (msg) { unsigned long int MemFree = 0; memory_fillgrade = calculateMemLoad(MemFree); double avg[3]; #if HAVE_GETLOADAVG getloadavg(avg, 3); (void) hint; #else fakeloadavg(avg, 3, hint); #endif msg->loadAvg1 = (load_t)(avg[0] * 1000); msg->loadAvg5 = (load_t)(avg[1] * 1000); msg->loadAvg10 = (load_t)(avg[2] * 1000); msg->freeMem = (load_t)(MemFree / 1024.0 + 0.5); } } icecream-1.3.1/daemon/load.h000066400000000000000000000023231361626760200156300ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. Copyright (c) 2004 Stephan Kulow This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef ICECREAM_LOAD_H #define ICECREAM_LOAD_H #include // 'hint' is used to approximate the load, whenever getloadavg() is unavailable. void fill_stats(unsigned long &myidleload, unsigned long &myniceload, unsigned int &memory_fillgrade, StatsMsg *msg, unsigned int hint); #endif icecream-1.3.1/daemon/main.cpp000066400000000000000000002365561361626760200162110ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. Copyright (c) 2004 Stephan Kulow 2002, 2003 by Martin Pool This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ //#define ICECC_DEBUG 1 #ifndef _GNU_SOURCE // getopt_long #define _GNU_SOURCE 1 #endif #include "config.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_ARPA_NAMESER_H # include #endif #ifdef HAVE_SYS_VFS_H #include #endif #include #ifdef HAVE_RESOLV_H # include #endif #include #ifndef RUSAGE_SELF # define RUSAGE_SELF (0) #endif #ifndef RUSAGE_CHILDREN # define RUSAGE_CHILDREN (-1) #endif #ifdef HAVE_LIBCAP_NG # include #endif #include #include #include #include #include #include #include #include "ncpus.h" #include "exitcode.h" #include "serve.h" #include "workit.h" #include "logging.h" #include #include "load.h" #include "environment.h" #include "platform.h" #include "util.h" #include "getifaddrs.h" static std::string pidFilePath; static volatile sig_atomic_t exit_main_loop = 0; #ifndef __attribute_warn_unused_result__ #define __attribute_warn_unused_result__ #endif using namespace std; struct Client { public: /* * UNKNOWN: Client was just created - not supposed to be long term * GOTNATIVE: Client asked us for the native env - this is the first step * PENDING_USE_CS: We have a CS from scheduler and need to tell the client * as soon as there is a spot available on the local machine * JOBDONE: This was compiled by a local client and we got a jobdone - awaiting END * LINKJOB: This is a local job (aka link job) by a local client we told the scheduler about * and await the finish of it * TOINSTALL: We're receiving an environment transfer and wait for it to complete. * WAITINSTALL: Client is waiting for the environment transfer unpacking child to finish. 
* TOCOMPILE: We're supposed to compile it ourselves * WAITFORCS: Client asked for a CS and we asked the scheduler - waiting for its answer * WAITCOMPILE: Client got a CS and will ask him now (it's not me) * CLIENTWORK: Client is busy working and we reserve the spot (job_id is set if it's a scheduler job) * WAITFORCHILD: Client is waiting for the compile job to finish. * WAITCREATEENV: We're waiting for icecc-create-env to finish. */ enum Status { UNKNOWN, GOTNATIVE, PENDING_USE_CS, JOBDONE, LINKJOB, TOINSTALL, WAITINSTALL, TOCOMPILE, WAITFORCS, WAITCOMPILE, CLIENTWORK, WAITFORCHILD, WAITCREATEENV, LASTSTATE = WAITCREATEENV } status; Client() { job_id = 0; channel = 0; job = 0; usecsmsg = 0; client_id = 0; status = UNKNOWN; pipe_from_child = -1; pipe_to_child = -1; child_pid = -1; } static string status_str(Status status) { switch (status) { case UNKNOWN: return "unknown"; case GOTNATIVE: return "gotnative"; case PENDING_USE_CS: return "pending_use_cs"; case JOBDONE: return "jobdone"; case LINKJOB: return "linkjob"; case TOINSTALL: return "toinstall"; case WAITINSTALL: return "waitinstall"; case TOCOMPILE: return "tocompile"; case WAITFORCS: return "waitforcs"; case CLIENTWORK: return "clientwork"; case WAITCOMPILE: return "waitcompile"; case WAITFORCHILD: return "waitforchild"; case WAITCREATEENV: return "waitcreateenv"; } assert(false); return string(); // shutup gcc } ~Client() { status = (Status) - 1; delete channel; channel = 0; delete usecsmsg; usecsmsg = 0; delete job; job = 0; if (pipe_from_child >= 0) { if (-1 == close(pipe_from_child) && (errno != EBADF)){ log_perror("Failed to close pipe from child process"); } } if (pipe_to_child >= 0) { if (-1 == close(pipe_to_child) && (errno != EBADF)){ log_perror("Failed to close pipe to child process"); } } } uint32_t job_id; string outfile; // only useful for LINKJOB or TOINSTALL/WAITINSTALL MsgChannel *channel; UseCSMsg *usecsmsg; CompileJob *job; int client_id; // pipe from child process with end status, only 
valid if WAITFORCHILD or TOINSTALL/WAITINSTALL int pipe_from_child; // pipe to child process, only valid if TOINSTALL/WAITINSTALL int pipe_to_child; pid_t child_pid; string pending_create_env; // only for WAITCREATEENV string dump() const { string ret = status_str(status) + " " + channel->dump(); switch (status) { case LINKJOB: return ret + " ClientID: " + toString(client_id) + " " + outfile + " PID: " + toString(child_pid); case TOINSTALL: case WAITINSTALL: return ret + " ClientID: " + toString(client_id) + " " + outfile + " PID: " + toString(child_pid); case WAITFORCHILD: return ret + " ClientID: " + toString(client_id) + " PID: " + toString(child_pid) + " PFD: " + toString(pipe_from_child); case WAITCREATEENV: return ret + " " + toString(client_id) + " " + pending_create_env; default: if (job_id) { string jobs; if (usecsmsg) { jobs = " CompileServer: " + usecsmsg->hostname; } return ret + " ClientID: " + toString(client_id) + " Job ID: " + toString(job_id) + jobs; } else { return ret + " ClientID: " + toString(client_id); } } return ret; } }; class Clients : public map { public: Clients() { active_processes = 0; } unsigned int active_processes; Client *find_by_client_id(int id) const { for (const_iterator it = begin(); it != end(); ++it) if (it->second->client_id == id) { return it->second; } return 0; } Client *find_by_channel(MsgChannel *c) const { const_iterator it = find(c); if (it == end()) { return 0; } return it->second; } Client *find_by_pid(pid_t pid) const { for (const_iterator it = begin(); it != end(); ++it) if (it->second->child_pid == pid) { return it->second; } return 0; } Client *first() { iterator it = begin(); if (it == end()) { return 0; } Client *cl = it->second; return cl; } string dump_status(Client::Status s) const { int count = 0; for (const_iterator it = begin(); it != end(); ++it) { if (it->second->status == s) { count++; } } if (count) { return toString(count) + " " + Client::status_str(s) + ", "; } return string(); } string 
dump_per_status() const { string s; for (Client::Status i = Client::UNKNOWN; i <= Client::LASTSTATE; i = Client::Status(int(i) + 1)) { s += dump_status(i); } return s; } Client *get_earliest_client(Client::Status s) const { // TODO: possibly speed this up in adding some sorted lists Client *client = 0; int min_client_id = 0; for (const_iterator it = begin(); it != end(); ++it) { if (it->second->status == s && (!min_client_id || min_client_id > it->second->client_id)) { client = it->second; min_client_id = client->client_id; } } return client; } }; static int set_new_pgrp(void) { /* If we're a session group leader, then we are not able to call * setpgid(). However, setsid will implicitly have put us into a new * process group, so we don't have to do anything. */ /* Does everyone have getpgrp()? It's in POSIX.1. We used to call * getpgid(0), but that is not available on BSD/OS. */ int pgrp_id = getpgrp(); if (-1 == pgrp_id){ log_perror("Failed to get process group ID"); return EXIT_DISTCC_FAILED; } if (pgrp_id == getpid()) { trace() << "already a process group leader\n"; return 0; } if (setpgid(0, 0) == 0) { trace() << "entered process group\n"; return 0; } trace() << "setpgid(0, 0) failed: " << strerror(errno) << endl; return EXIT_DISTCC_FAILED; } static void dcc_daemon_terminate(int); /** * Catch all relevant termination signals. Set up in parent and also * applies to children. **/ void dcc_daemon_catch_signals(void) { /* SIGALRM is caught to allow for built-in timeouts when running test * cases. */ signal(SIGTERM, &dcc_daemon_terminate); signal(SIGINT, &dcc_daemon_terminate); signal(SIGALRM, &dcc_daemon_terminate); } pid_t dcc_master_pid; /** * Called when a daemon gets a fatal signal. * * Some cleanup is done only if we're the master/parent daemon. **/ static void dcc_daemon_terminate(int whichsig) { /** * This is a signal handler. don't do stupid stuff. * Don't call printf. and especially don't call the log_*() functions. 
*/ if (exit_main_loop > 1) { // The > 1 is because we get one more signal from the kill(0,...) below. // hmm, we got killed already twice. try better static const char msg[] = "forced exit.\n"; ignore_result(write(STDERR_FILENO, msg, strlen( msg ))); _exit(1); } // make BSD happy signal(whichsig, dcc_daemon_terminate); bool am_parent = (getpid() == dcc_master_pid); if (am_parent && exit_main_loop == 0) { /* kill whole group */ kill(0, whichsig); /* Remove pid file */ unlink(pidFilePath.c_str()); } ++exit_main_loop; } void usage(const char *reason = 0) { if (reason) { cerr << reason << endl; } cerr << "usage: iceccd [-n ] [-m ] [--no-remote] [-d|--daemonize] [-l logfile] [-s ]" " [-v[v[v]]] [-u|--user-uid ] [-b ] [--cache-limit ] [-N ] [-i|--interface ] [-p|--port ]" << endl; exit(1); } struct timeval last_stat; // Initial rlimit for a compile job, measured in megabytes. Will vary with // the amount of available memory. int mem_limit = 100; // Minimum rlimit for a compile job, measured in megabytes. const int min_mem_limit = 100; unsigned int max_kids = 0; size_t cache_size_limit = 256 * 1024 * 1024; struct NativeEnvironment { string name; // the hash // Timestamps for files including compiler binaries, if they have changed since the time // the native env was built, it needs to be rebuilt. map filetimes; int create_env_pipe; // if in progress of creating the environment NativeEnvironment() { create_env_pipe = 0; } }; struct Daemon { Clients clients; map envs_last_use; // Map of native environments, the basic one(s) containing just the compiler // and possibly more containing additional files (such as compiler plugins). // The key is the compiler name and a concatenated list of the additional files // (or just the compiler name for the basic ones). 
    // NOTE(review): template arguments stripped by extraction (upstream:
    // map<string, NativeEnvironment>).
    map native_environments;
    string envbasedir;          // base directory for unpacked environments
    uid_t user_uid;             // uid/gid the daemon works as ("icecc" when started as root)
    gid_t user_gid;
    int warn_icecc_user_errno;  // saved errno when the "icecc" user lookup failed, else 0
    int tcp_listen_fd;
    int tcp_listen_local_fd; // if tcp_listen is bound to a specific network interface, this one is bound to lo interface
    int unix_listen_fd;
    string machine_name;        // platform string from determine_platform()
    string nodename;            // host name reported to the scheduler
    bool noremote;              // true: serve only local clients, no TCP listen
    bool custom_nodename;       // nodename was set explicitly, don't follow uname()
    size_t cache_size;          // current size of the environment cache in bytes
    // NOTE(review): template arguments stripped by extraction (upstream:
    // map<int, MsgChannel*>); maps poll fds to their channels.
    map fd2chan;
    int new_client_id;
    string remote_name;         // our address as seen by the scheduler
    time_t next_scheduler_connect; // earliest time for the next reconnect attempt
    unsigned long icecream_load; // milliseconds spent on icecream work since last stats
    struct timeval icecream_usage;
    int current_load;           // last load value reported to the scheduler
    int num_cpus;
    MsgChannel *scheduler;      // connection to the scheduler, NULL when down
    DiscoverSched *discover;
    string netname;
    string schedname;
    int scheduler_port;
    string daemon_interface;    // interface to bind to; empty = all interfaces
    int daemon_port;
    unsigned int supported_features; // NODE_FEATURE_* bitmask (see determine_supported_features())
    int max_scheduler_pong;
    int max_scheduler_ping;
    unsigned int current_kids;  // number of currently running child processes

    // Initialize defaults; when running as root, drop to the "icecc" user if it
    // exists, otherwise fall back to the classic nobody/nogroup ids.
    Daemon() {
        warn_icecc_user_errno = 0;

        if (getuid() == 0) {
            struct passwd *pw = getpwnam("icecc");

            if (pw) {
                user_uid = pw->pw_uid;
                user_gid = pw->pw_gid;
            } else {
                warn_icecc_user_errno = errno ? errno : ENOENT; // apparently errno can be 0 on error here
                user_uid = 65534;
                user_gid = 65533;
            }
        } else {
            user_uid = getuid();
            user_gid = getgid();
        }

        envbasedir = "/var/tmp/icecc-envs";
        tcp_listen_fd = -1;
        tcp_listen_local_fd = -1;
        unix_listen_fd = -1;
        new_client_id = 0;
        next_scheduler_connect = 0;
        cache_size = 0;
        noremote = false;
        custom_nodename = false;
        icecream_load = 0;
        icecream_usage.tv_sec = icecream_usage.tv_usec = 0;
        current_load = - 1000;
        num_cpus = 0;
        scheduler = 0;
        discover = 0;
        scheduler_port = 8765;
        daemon_interface = "";
        daemon_port = 10245;
        max_scheduler_pong = MAX_SCHEDULER_PONG;
        max_scheduler_ping = MAX_SCHEDULER_PING;
        current_kids = 0;
    }

    ~Daemon() {
        delete discover;
    }

    bool reannounce_environments() __attribute_warn_unused_result__;
    void answer_client_requests();
    bool handle_transfer_env(Client *client, EnvTransferMsg *msg) __attribute_warn_unused_result__;
    bool handle_env_install_child_done(Client *client);
    bool finish_transfer_env(Client *client, bool cancel = false);
    bool handle_get_native_env(Client *client, GetNativeEnvMsg *msg) __attribute_warn_unused_result__;
    bool finish_get_native_env(Client *client, string env_key);
    void handle_old_request();
    bool handle_compile_file(Client *client, Msg *msg) __attribute_warn_unused_result__;
    bool handle_activity(Client *client) __attribute_warn_unused_result__;
    bool handle_file_chunk_env(Client *client, Msg *msg) __attribute_warn_unused_result__;
    void handle_end(Client *client, int exitcode);
    int scheduler_get_internals() __attribute_warn_unused_result__;
    void clear_children();
    int scheduler_use_cs(UseCSMsg *msg) __attribute_warn_unused_result__;
    int scheduler_no_cs(NoCSMsg *msg) __attribute_warn_unused_result__;
    bool handle_get_cs(Client *client, Msg *msg) __attribute_warn_unused_result__;
    bool handle_local_job(Client *client, Msg *msg) __attribute_warn_unused_result__;
    bool handle_job_done(Client *cl, JobDoneMsg *m) __attribute_warn_unused_result__;
    bool handle_compile_done(Client *client) __attribute_warn_unused_result__;
    bool handle_verify_env(Client *client, VerifyEnvMsg *msg) __attribute_warn_unused_result__;
    bool handle_blacklist_host_env(Client *client, Msg *msg) __attribute_warn_unused_result__;
    int handle_cs_conf(ConfCSMsg *msg);
    string dump_internals() const;
    string determine_nodename();
    void determine_system();
    void determine_supported_features();
    bool maybe_stats(bool force_check = false);
    bool send_scheduler(const Msg &msg) __attribute_warn_unused_result__;
    void close_scheduler();
    bool reconnect();
    int working_loop();
    bool setup_listen_fds();
    bool setup_listen_tcp_fd( int& fd, const string& interface );
    bool setup_listen_unix_fd();
    void check_cache_size(const string &new_env);
    bool create_env_finished(string env_key);
};

// Create all listening sockets: the TCP socket(s) (skipped with --no-remote)
// and the local unix-domain socket. Returns false on any failure.
bool Daemon::setup_listen_fds()
{
    tcp_listen_fd = -1;
    tcp_listen_local_fd = -1;
    unix_listen_fd = -1;

    if (!noremote) { // if we only listen to local clients, there is no point in going TCP
        if( !setup_listen_tcp_fd( tcp_listen_fd, daemon_interface ))
            return
                false;

        // We should always listen on the loopback interface, so if we're binding only
        // to a specific interface, bind also to the loopback.
        if( !daemon_interface.empty()) {
            if( !setup_listen_tcp_fd( tcp_listen_local_fd, "lo" ))
                return false;
        }
    }

    if( !setup_listen_unix_fd())
        return false;

    return true;
}

// Create, bind and listen on a TCP socket for 'interface' (all interfaces when
// empty), storing the fd in 'fd'. Retries bind a few times with a 2s sleep so
// a restart can survive a lingering old socket. Returns false on failure.
bool Daemon::setup_listen_tcp_fd( int& fd, const string& interface )
{
    if( !interface.empty())
        trace() << "starting to listen on interface " << interface << endl;
    else
        trace() << "starting to listen on all interfaces" << endl;

    if ((fd = socket(PF_INET, SOCK_STREAM, 0)) < 0) {
        log_perror("Failed to create TCP listen socket.");
        return false;
    }

    int optval = 1;

    if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval)) < 0) {
        log_perror("Failed to set 'Reuse Address(SO_REUSEADDR)' option on TCP Listen Socket");
        return false;
    }

    struct sockaddr_in myaddr;

    if (!build_address_for_interface(myaddr, interface, daemon_port)) {
        return false;
    }

    // Retry bind up to 5 times, 2 seconds apart.
    int count = 5;

    while (count) {
        if (::bind(fd, (struct sockaddr *)&myaddr, sizeof(myaddr)) < 0) {
            log_perror("Failed to bind address to TCP listen socket");
            sleep(2);

            if (!--count) {
                return false;
            }

            continue;
        } else {
            break;
        }
    }

    if (listen(fd, 1024) < 0) {
        log_perror("Failed to set TCP socket for listening to incoming connections");
        return false;
    }

    fcntl(fd, F_SETFD, FD_CLOEXEC);
    return true;
}

// Create the local unix-domain listen socket. The path depends on how we run:
// ICECC_TEST_SOCKET overrides everything (tests); a system daemon uses
// /var/run/icecc/iceccd.socket (created with umask 0 so all users can connect);
// a user-started daemon uses $HOME/.iceccd.socket. A stale socket file is
// unlinked before bind. Returns false on failure.
// NOTE(review): the "Unix scoket" typo below is in the runtime log string and
// is deliberately left untouched.
bool Daemon::setup_listen_unix_fd()
{
    if ((unix_listen_fd = socket(AF_UNIX, SOCK_STREAM, 0)) < 0) {
        log_perror("Failed to create a Unix scoket for listening");
        return false;
    }

    struct sockaddr_un myaddr;

    memset(&myaddr, 0, sizeof(myaddr));
    myaddr.sun_family = AF_UNIX;

    bool reset_umask = false;
    mode_t old_umask = 0;

    if (getenv("ICECC_TEST_SOCKET") == NULL) {
#ifdef HAVE_LIBCAP_NG
        // We run as system daemon (UID has been already changed).
        if (capng_have_capability( CAPNG_EFFECTIVE, CAP_SYS_CHROOT )) {
#else
        if (getuid() == 0) {
#endif
            string default_socket = "/var/run/icecc/iceccd.socket";
            strncpy(myaddr.sun_path, default_socket.c_str() , sizeof(myaddr.sun_path) - 1);
            myaddr.sun_path[sizeof(myaddr.sun_path) - 1] = '\0';
            if(default_socket.length() > sizeof(myaddr.sun_path) - 1) {
                log_error() << "default socket path too long for sun_path" << endl;
            }

            if (-1 == unlink(myaddr.sun_path) && errno != ENOENT){
                log_perror("unlink failed") << "\t" << myaddr.sun_path << endl;
            }

            // Allow everyone to connect to the system-wide socket.
            old_umask = umask(0);
            reset_umask = true;
        } else { // Started by user.
            if( getenv( "HOME" )) {
                string socket_path = getenv("HOME");
                socket_path.append("/.iceccd.socket");
                strncpy(myaddr.sun_path, socket_path.c_str(), sizeof(myaddr.sun_path) - 1);
                myaddr.sun_path[sizeof(myaddr.sun_path) - 1] = '\0';
                if(socket_path.length() > sizeof(myaddr.sun_path) - 1) {
                    log_error() << "$HOME/.iceccd.socket path too long for sun_path" << endl;
                }

                if (-1 == unlink(myaddr.sun_path) && errno != ENOENT){
                    log_perror("unlink failed") << "\t" << myaddr.sun_path << endl;
                }
            } else {
                log_error() << "launched by user, but $HOME not set" << endl;
                return false;
            }
        }
    } else {
        string test_socket = getenv("ICECC_TEST_SOCKET");
        strncpy(myaddr.sun_path, test_socket.c_str(), sizeof(myaddr.sun_path) - 1);
        myaddr.sun_path[sizeof(myaddr.sun_path) - 1] = '\0';
        if(test_socket.length() > sizeof(myaddr.sun_path) - 1) {
            log_error() << "$ICECC_TEST_SOCKET path too long for sun_path" << endl;
        }

        if (-1 == unlink(myaddr.sun_path) && errno != ENOENT){
            log_perror("unlink failed") << "\t" << myaddr.sun_path << endl;
        }
    }

    if (::bind(unix_listen_fd, (struct sockaddr*)&myaddr, sizeof(myaddr)) < 0) {
        log_perror("Failed to bind address to unix listen socket");

        if (reset_umask) {
            umask(old_umask);
        }

        return false;
    }

    if (reset_umask) {
        umask(old_umask);
    }

    if (listen(unix_listen_fd, 1024) < 0) {
        log_perror("Failed to set unix socket for listening");
        return false;
    }

    fcntl(unix_listen_fd, F_SETFD, FD_CLOEXEC);
    return true;
}

// Fill machine_name from determine_platform() and nodename from uname(),
// unless the node name was set explicitly (custom_nodename).
void Daemon::determine_system()
{
    struct utsname uname_buf;

    if (uname(&uname_buf)) {
        log_perror("uname call failed. Unable to determine system node name and platform");
        return;
    }

    if (nodename.length() && (nodename != uname_buf.nodename)) {
        custom_nodename  = true;
    }

    if (!custom_nodename) {
        nodename = uname_buf.nodename;
    }

    machine_name = determine_platform();
}

// Return the node name, refreshing it from uname() when it is not custom
// (the host name can change, e.g. after a network change).
string Daemon::determine_nodename()
{
    if (custom_nodename && !nodename.empty()) {
        return nodename;
    }

    // perhaps our host name changed due to network change?
    struct utsname uname_buf;

    if (!uname(&uname_buf)) {
        nodename = uname_buf.nodename;
    }

    return nodename;
}

// Probe libarchive for xz/zstd decompression support and set the
// NODE_FEATURE_* bits accordingly. gzip/tar support is mandatory and only
// sanity-checked. Under ICECC_TESTS, "remoteice2" pretends to lack xz/zstd.
void Daemon::determine_supported_features()
{
    supported_features = 0;
    struct archive* a = archive_read_new();
    static bool test_disable = false;
    // Make one of the two remotes in tests say it doesn't support xz/zstd tarballs.
    if( getenv( "ICECC_TESTS" ) != NULL && nodename == "remoteice2" )
        test_disable = true;
    (void)test_disable;
#ifdef HAVE_LIBARCHIVE_XZ
    if( !test_disable && archive_read_support_filter_xz(a) >= ARCHIVE_WARN ) // includes ARCHIVE_OK
        supported_features = supported_features | NODE_FEATURE_ENV_XZ;
#endif
#ifdef HAVE_LIBARCHIVE_ZSTD
    if( !test_disable && archive_read_support_filter_zstd(a) >= ARCHIVE_WARN ) // includes ARCHIVE_OK
        supported_features = supported_features | NODE_FEATURE_ENV_ZSTD;
#endif
    // sanity checks
    if( archive_read_support_filter_gzip(a) < ARCHIVE_WARN ) // error
        log_error() << "No support for uncompressing gzip available." << endl;
    if( archive_read_support_format_tar(a) < ARCHIVE_WARN ) // error
        log_error() << "No support for unpacking tar available." << endl;
    archive_read_free(a);
}

// Send 'msg' to the scheduler; on failure drop the connection (a later
// reconnect is scheduled by close_scheduler()). Returns false when the
// scheduler is down or the send failed.
bool Daemon::send_scheduler(const Msg& msg)
{
    if (!scheduler) {
        log_error() << "scheduler dead ?!" << endl;
        return false;
    }

    if (!scheduler->send_msg(msg)) {
        log_error() << "sending message to scheduler failed.."
                    << endl;
        close_scheduler();
        return false;
    }

    return true;
}

// Re-send our login (node name, features, available environments) to the
// scheduler, e.g. after the set of installed environments changed.
// NOTE(review): "available_environmnents" is the upstream helper's actual
// (misspelled) name, not an extraction artifact.
bool Daemon::reannounce_environments()
{
    log_info() << "reannounce_environments " << endl;
    LoginMsg lmsg(0, nodename, "", supported_features);
    lmsg.envs = available_environmnents(envbasedir);
    return send_scheduler(lmsg);
}

// Tear down the scheduler connection and schedule the next reconnect attempt
// 20-51s from now (3s when running under ICECC_TESTS).
void Daemon::close_scheduler()
{
    if (!scheduler) {
        return;
    }

    delete scheduler;
    scheduler = 0;
    delete discover;
    discover = 0;
    next_scheduler_connect = time(0) + 20 + (rand() & 31);
    static bool fast_reconnect = getenv( "ICECC_TESTS" ) != NULL;
    if( fast_reconnect )
        next_scheduler_connect = time(0) + 3;
}

// Periodically (every max_scheduler_pong seconds, or when forced) compute our
// load/memory/disk statistics and send them to the scheduler if they changed
// significantly. Also recomputes mem_limit. Returns false when sending to the
// scheduler failed.
bool Daemon::maybe_stats(bool force_check)
{
    struct timeval now;
    gettimeofday(&now, 0);

    time_t diff_sent = (now.tv_sec - last_stat.tv_sec) * 1000 + (now.tv_usec - last_stat.tv_usec) / 1000;

    if (diff_sent >= max_scheduler_pong * 1000 || force_check) {
        StatsMsg msg;
        unsigned int memory_fillgrade;
        unsigned long idleLoad = 0;
        unsigned long niceLoad = 0;

        fill_stats(idleLoad, niceLoad, memory_fillgrade, &msg, clients.active_processes);

        time_t diff_stat = (now.tv_sec - last_stat.tv_sec) * 1000 + (now.tv_usec - last_stat.tv_usec) / 1000;
        last_stat = now;

        /* icecream_load contains time in milliseconds we have used for icecream */
        /* idle time could have been used for icecream, so claim it */
        icecream_load += idleLoad * diff_stat / 1000;

        /* add the time of our childrens, but only the time since the last run */
        struct rusage ru;

        if (!getrusage(RUSAGE_CHILDREN, &ru)) {
            uint32_t ice_msec = ((ru.ru_utime.tv_sec - icecream_usage.tv_sec) * 1000
                                 + (ru.ru_utime.tv_usec - icecream_usage.tv_usec) / 1000) / num_cpus;

            /* heuristics when no child terminated yet: account 25% of total nice as our clients */
            if (!ice_msec && current_kids) {
                ice_msec = (niceLoad * diff_stat) / (4 * 1000);
            }

            icecream_load += ice_msec * diff_stat / 1000;

            icecream_usage.tv_sec = ru.ru_utime.tv_sec;
            icecream_usage.tv_usec = ru.ru_utime.tv_usec;
        }

        unsigned int idle_average = icecream_load;

        if (diff_sent) {
            idle_average = icecream_load * 1000 / diff_sent;
        }

        if (idle_average > 1000)
            idle_average = 1000;

        msg.load = std::max((1000 - idle_average), memory_fillgrade);
#ifdef HAVE_SYS_VFS_H
        struct statfs buf;
        int ret = statfs(envbasedir.c_str(), &buf);

        // Require at least 25MiB of free disk space per build.
        if (!ret && long(buf.f_bavail) < ((long(max_kids + 1 - current_kids) * 25 * 1024 * 1024) / buf.f_bsize)) {
            msg.load = 1000;
        }
#endif

        mem_limit = std::max(int(msg.freeMem / std::min(std::max(max_kids, 1U), 4U)), min_mem_limit);

        // Only bother the scheduler when the load changed by >= 10% or when we
        // cross the fully-loaded (1000) boundary in either direction.
        if (abs(int(msg.load) - current_load) >= 100 || (msg.load == 1000 && current_load != 1000)
                || (msg.load != 1000 && current_load == 1000)) {
            if (!send_scheduler(msg)) {
                return false;
            }
        }

        icecream_load = 0;
        current_load = msg.load;
    }

    return true;
}

// Render a human-readable snapshot of the daemon state (clients, channels,
// cache, environments, load) for the scheduler's "internals" query and debug
// logging.
// NOTE(review): several iterator declarations below read "map::const_iterator"
// -- the template arguments were stripped by extraction; tokens left as found.
string Daemon::dump_internals() const
{
    string result;

    result += "Node Name: " + nodename + "\n";
    result += "  Remote name: " + remote_name + "\n";

    for (map::const_iterator it = fd2chan.begin(); it != fd2chan.end(); ++it)  {
        result += "  fd2chan[" + toString(it->first) + "] = " + it->second->dump() + "\n";
    }

    for (Clients::const_iterator it = clients.begin(); it != clients.end(); ++it)  {
        result += "  client " + toString(it->second->client_id) + ": " + it->second->dump() + "\n";
    }

    if (cache_size) {
        result += "  Cache Size: " + toString(cache_size) + "\n";
    }

    result += "  Architecture: " + machine_name + "\n";

    for (map::const_iterator it = native_environments.begin(); it != native_environments.end(); ++it) {
        result += "  NativeEnv (" + it->first + "): " + it->second.name
                  + (it->second.create_env_pipe ? " (creating)" : "" ) + "\n";
    }

    if (!envs_last_use.empty()) {
        result += "  Now: " + toString(time(0)) + "\n";
    }

    for (map::const_iterator it = envs_last_use.begin(); it != envs_last_use.end(); ++it)  {
        result += "  envs_last_use[" + it->first  + "] = " + toString(it->second) + "\n";
    }

    result += "  Current kids: " + toString(current_kids) + " (max: " + toString(max_kids) + ")\n";
    result += "  Supported features: " + supported_features_to_string(supported_features) + "\n";

    if (scheduler) {
        result += "  Scheduler protocol: " + toString(scheduler->protocol) + "\n";
    }

    StatsMsg msg;
    unsigned int memory_fillgrade = 0;
    unsigned long idleLoad = 0;
    unsigned long niceLoad = 0;

    fill_stats(idleLoad, niceLoad, memory_fillgrade, &msg, clients.active_processes);

    result += "  cpu: " + toString(idleLoad) + " idle, " + toString(niceLoad) + " nice\n";
    result += "  load: " + toString(msg.loadAvg1 / 1000.) + ", icecream_load: " + toString(icecream_load) + "\n";
    result += "  memory: " + toString(memory_fillgrade) + " (free: " + toString(msg.freeMem) + ")\n";

    return result;
}

// Reply to the scheduler's request for our internal state. Returns 0 on
// success, 1 when the send failed.
int Daemon::scheduler_get_internals()
{
    trace() << "handle_get_internals " << dump_internals() << endl;
    return send_scheduler(StatusTextMsg(dump_internals())) ? 0 : 1;
}

// The scheduler assigned compile server 'msg->hostname' to one of our waiting
// clients. If the assigned host is ourselves, mark the client
// PENDING_USE_CS (it will be handled locally); otherwise forward the UseCS
// message to the client and mark it WAITCOMPILE. Returns 0 on success, 1 when
// the client vanished and telling the scheduler failed.
int Daemon::scheduler_use_cs(UseCSMsg *msg)
{
    Client *c = clients.find_by_client_id(msg->client_id);
    trace() << "scheduler_use_cs " << msg->job_id << " " << msg->client_id
            << " " << c << " " << msg->hostname << " " << remote_name <<  endl;

    if (!c) {
        // Client disconnected meanwhile; report the job as failed (107).
        if (send_scheduler(JobDoneMsg(msg->job_id, 107, JobDoneMsg::FROM_SUBMITTER, clients.size()))) {
            return 0;
        }

        return 1;
    }

    if (msg->hostname == remote_name && int(msg->port) == daemon_port) {
        c->usecsmsg = new UseCSMsg(msg->host_platform, "127.0.0.1", daemon_port,
                                   msg->job_id, true, 1, msg->matched_job_id);
        c->status = Client::PENDING_USE_CS;
    } else {
        c->usecsmsg = new UseCSMsg(msg->host_platform, msg->hostname, msg->port,
                                   msg->job_id, true, 1, msg->matched_job_id);

        if (!c->channel->send_msg(*msg)) {
            handle_end(c, 143);
            return 0;
        }

        c->status = Client::WAITCOMPILE;
    }

    c->job_id = msg->job_id;
    return 0;
}

// The scheduler could not find a compile server; tell the client to build
// locally (loopback UseCS). Returns 0 on success, 1 when the client vanished
// and telling the scheduler failed.
int Daemon::scheduler_no_cs(NoCSMsg *msg)
{
    Client *c = clients.find_by_client_id(msg->client_id);
    trace() << "scheduler_no_cs " << msg->job_id << " " << msg->client_id
            << " " << c << " " << endl;

    if (!c) {
        if (send_scheduler(JobDoneMsg(msg->job_id, 107, JobDoneMsg::FROM_SUBMITTER, clients.size()))) {
            return 0;
        }

        return 1;
    }

    c->usecsmsg = new UseCSMsg(string(), "127.0.0.1", daemon_port, msg->job_id, true, 1, 0);
    c->status = Client::PENDING_USE_CS;

    c->job_id = msg->job_id;
    return 0;
}

// A submitting daemon starts uploading a build environment tarball. Fork the
// unpacking child (start_install_environment), mark the client TOINSTALL and
// feed it the first file chunk. Returns false when setup or the first chunk
// fails (the client is ended with an error code).
bool Daemon::handle_transfer_env(Client *client, EnvTransferMsg *emsg)
{
    log_info() << "handle_transfer_env, client status " << Client::status_str(client->status) << endl;

    assert(client->status != Client::TOINSTALL && client->status != Client::WAITINSTALL
           && client->status != Client::TOCOMPILE && client->status != Client::WAITCOMPILE);
    assert(client->pipe_from_child < 0);
    assert(client->pipe_to_child < 0);

    string target = emsg->target;

    if (target.empty()) {
        target =  machine_name;
    }

    int pipe_from_child = -1;
    int pipe_to_child = -1;
    FileChunkMsg *fmsg = 0;

    pid_t pid = start_install_environment(envbasedir, target, emsg->name,
                                          client->channel, pipe_to_child, pipe_from_child,
                                          fmsg, user_uid, user_gid, nice_level);

    if( pid <= 0 ) {
        // Fork/setup failed: drop any partially created env and end the client.
        delete fmsg;
        remove_environment(envbasedir, target + "/" + emsg->name);
        handle_end(client, 144);
        return false;
    }

    client->status = Client::TOINSTALL;
    client->outfile = target + "/" + emsg->name;
    current_kids++;

    trace() << "PID of child thread running untaring environment: " << pid << endl;
    client->pipe_to_child = pipe_to_child;
    client->pipe_from_child = pipe_from_child;
    client->child_pid = pid;

    if (!handle_file_chunk_env(client, fmsg)) {
        delete fmsg;
        return false;
    }

    delete fmsg;
    return true;
}

// Forward one message of an environment transfer to the unpacking child:
// M_FILE_CHUNK data is written to the child's pipe, M_END closes the pipe and
// either waits for the child (WAITINSTALL) or finishes the transfer. Any other
// message type is a protocol error. Returns false on fatal errors (client is
// ended).
// NOTE(review): the static_cast below reads "static_cast(msg)" -- the
// template argument (<FileChunkMsg*> upstream) was stripped by extraction;
// tokens left as found.
bool Daemon::handle_file_chunk_env(Client *client, Msg *msg)
{
    /* this sucks, we can block when we're writing
       the file chunk to the child, but we can't let the child
       handle MsgChannel itself due to MsgChannel's stupid
       caching layer inbetween, which causes us to lose partial
       data after the M_END msg of the env transfer.  */

    assert(client);
    assert(client->status == Client::TOINSTALL || client->status == Client::WAITINSTALL);
    assert(client->pipe_to_child >= 0);

    if (msg->type == M_FILE_CHUNK) {
        FileChunkMsg *fcmsg = static_cast(msg);
        ssize_t len = fcmsg->len;
        off_t off = 0;

        while (len) {
            ssize_t bytes = write(client->pipe_to_child, fcmsg->buffer + off, len);

            if (bytes < 0 && errno == EINTR) {
                continue;
            }

            if (bytes < 0 && errno == EPIPE) {
                // Broken pipe may mean the unpacking has failed, but it also may
                // mean the child has already finished successfully (it seems to happen,
                // maybe some tar implementations add needless trailing bytes?).
                // Wait for the child to finish to find out whether it was ok.
                return true;
            }

            if (bytes == -1) {
                log_perror("write to transfer env pipe failed.");
                handle_end(client, 137);
                return false;
            }

            len -= bytes;
            off += bytes;
        }

        return true;
    }

    if (msg->type == M_END) {
        trace() << "received end of environment, waiting for child" << endl;
        close(client->pipe_to_child);
        client->pipe_to_child = -1;

        if( client->child_pid >= 0 ) {
            // Transfer done, wait for handle_transfer_env_child_done() to finish the handling.
            client->status = Client::WAITINSTALL; // Ignore further messages until child finishes.
            return true;
        }

        // Transfer done, child done, finish.
        return finish_transfer_env( client );
    }

    // unexpected message type
    log_error() << "protocol error while receiving environment (" << msg->type << ")" << endl;
    handle_end(client, 138);
    return false;
}

// The environment-unpacking child exited: read its single status byte, close
// the status pipe and either cancel (on failure), keep waiting for the
// client's M_END (pipe_to_child still open), or finish the transfer.
bool Daemon::handle_env_install_child_done(Client *client)
{
    assert(client->status == Client::TOINSTALL || client->status == Client::WAITINSTALL);
    assert(client->child_pid >= 0);
    assert(client->pipe_from_child >= 0);

    bool success = false;

    for (;;) {
        char resultByte;
        ssize_t n = ::read(client->pipe_from_child, &resultByte, 1);

        if (n == -1 && errno == EINTR)
            continue;

        // The child at the end of start_install_environment() writes status on success.
        if (n == 1 && resultByte == 0 )
            success = true;

        break;
    }

    log_info() << "handle_env_install_child_done PID " << client->child_pid
               << " for " << client->outfile
               << " status: " << ( success ? "success" : "failed" ) << endl;
    client->child_pid = -1;
    assert(current_kids > 0);
    current_kids--;

    if (client->pipe_from_child >= 0) {
        close(client->pipe_from_child);
        client->pipe_from_child = -1;
    }

    if( !success )
        return finish_transfer_env( client, true ); // cancel

    if( client->pipe_to_child >= 0 ) {
        // we still haven't received M_END message, wait for that
        assert( client->status == Client::TOINSTALL );
        return true;
    }

    // Child done, transfer done, finish.
    return finish_transfer_env( client );
}

// Complete (or cancel) an environment installation: clean up pipes/child if
// cancelling, finalize the unpacked env on disk, update cache accounting,
// re-announce environments to the scheduler and refresh stats. Returns false
// when the scheduler could not be reached.
// NOTE(review): the trace statement below is garbled -- extraction swallowed
// the text between '<' and '>' (upstream continues "<< endl; while
// (waitpid(client->child_pid, &status, 0) < 0 && errno == EINTR);").
// Tokens left exactly as found.
bool Daemon::finish_transfer_env(Client *client, bool cancel)
{
    log_info() << "finish_transfer_env for " << client->outfile
               << ( cancel ? " (cancel)" : "" ) << endl;
    assert(client->outfile.size());
    assert(client->status == Client::TOINSTALL || client->status == Client::WAITINSTALL);

    if (client->pipe_from_child >= 0) {
        assert( cancel ); // If not cancelled, this is closed by handle_env_install_child_done().
        close(client->pipe_from_child);
        client->pipe_from_child = -1;
    }

    if (client->pipe_to_child >= 0) {
        assert( cancel ); // If not cancelled, this is closed by handle_file_chunk_env().
        close(client->pipe_to_child);
        client->pipe_to_child = -1;
    }

    if (client->child_pid >= 0 ) {
        assert( cancel ); // If not cancelled, this is handled by handle_env_install_child_done().
        kill( client->child_pid, SIGTERM );
        int status;
        trace() << "finish_transfer_env kill and waiting for child PID " << client->child_pid <child_pid, &status, 0) < 0 && errno == EINTR) ;
        client->child_pid = -1;
        assert(current_kids > 0);
        current_kids--;
    }

    size_t installed_size = 0;
    if( !cancel ) {
        installed_size = finalize_install_environment(envbasedir, client->outfile, user_uid, user_gid);
        log_info() << "installed_size: " << installed_size << endl;
    }
    if( installed_size == 0 )
        remove_environment(envbasedir, client->outfile);

    client->status = Client::UNKNOWN;
    string current = client->outfile;
    client->outfile.clear();

    if (installed_size) {
        cache_size += installed_size;
        envs_last_use[current] = time(NULL);
        log_info() << "installed " << current << " size: " << installed_size
                   << " all: " << cache_size << endl;
    }

    check_cache_size(current);

    bool r = reannounce_environments(); // do that before the file compiles

    if (!maybe_stats(true)) { // update stats in case our disk is too full to accept more jobs
        r = false;
    }

    return r;
}

// Evict least-recently-used environments until the cache fits under
// cache_size_limit. Environments in use by a client, recently used ones,
// native envs (kept longer unless there are many), envs still being created,
// and 'new_env' itself are protected from eviction.
// NOTE(review): the "map::const_iterator" declarations below lost their
// template arguments to extraction; tokens left as found.
void Daemon::check_cache_size(const string &new_env)
{
    time_t now = time(NULL);

    while (cache_size > cache_size_limit) {
        string oldest;
        // I don't dare to use (time_t)-1
        time_t oldest_time = time(NULL) + 90000;
        string oldest_native_env_key;

        for (map::const_iterator it = envs_last_use.begin();
                it != envs_last_use.end(); ++it) {
            trace() << "considering cached environment: " << it->first << " " << it->second << " " << oldest_time << endl;
            // ignore recently used envs (they might be in use _right_ now)
            int keep_timeout = 200;
            string native_env_key;

            // If it is a native environment, allow removing it only after a longer period,
            // unless there are many native environments.
            for (map::const_iterator it2 = native_environments.begin();
                    it2 != native_environments.end(); ++it2) {
                if (it2->second.name == it->first) {
                    native_env_key = it2->first;

                    if (native_environments.size() < 5) {
                        keep_timeout = 24 * 60 * 60; // 1 day
                    }

                    if (it2->second.create_env_pipe) {
                        keep_timeout = 365 * 24 * 60 * 60;    // do not remove if it's still being created
                    }

                    break;
                }
            }

            if (it->second < oldest_time && now - it->second > keep_timeout) {
                bool env_currently_in_use = false;

                for (Clients::const_iterator it2 = clients.begin(); it2 != clients.end(); ++it2) {
                    if (it2->second->status == Client::TOCOMPILE
                            || it2->second->status == Client::TOINSTALL
                            || it2->second->status == Client::WAITINSTALL
                            || it2->second->status == Client::WAITFORCHILD) {

                        assert(it2->second->job);
                        string envforjob = it2->second->job->targetPlatform() + "/"
                                           + it2->second->job->environmentVersion();

                        if (envforjob == it->first) {
                            env_currently_in_use = true;
                        }
                    }
                }

                if (!env_currently_in_use) {
                    oldest_time = it->second;
                    oldest = it->first;
                    oldest_native_env_key = native_env_key;
                }
            }
        }

        if (oldest.empty() || oldest == new_env) {
            break;
        }

        size_t removed;

        if (!oldest_native_env_key.empty()) {
            removed = remove_native_environment(oldest);
            native_environments.erase(oldest_native_env_key);
            trace() << "removing " << oldest << " " << oldest_time << " " << removed << endl;
        } else {
            removed = remove_environment(envbasedir, oldest);
            trace() << "removing " << envbasedir << "/" << oldest << " " << oldest_time << " " << removed << endl;
        }

        cache_size -= min(removed, cache_size);
        envs_last_use.erase(oldest);
    }
}

// A local client asks for the "native" environment (a tarball of the local
// compiler plus optional extra files). Build the cache key from compression,
// compiler path and extra files; invalidate a cached env whose input files
// changed; then either answer immediately or kick off (or join) an
// icecc-create-env run, putting the client into WAITCREATEENV.
// NOTE(review): "map filetimes;" lost its template arguments to extraction
// (upstream: map<string, time_t>); tokens left as found.
bool Daemon::handle_get_native_env(Client *client, GetNativeEnvMsg *msg)
{
    string env_key;
    map filetimes;
    struct stat st;
    string compiler = msg->compiler;
    // Older clients passed simply "gcc" or "clang" and not a binary.
    if( !IS_PROTOCOL_41(client->channel) && compiler.find('/') == string::npos)
        compiler = "/usr/bin/" + compiler;
    string ccompiler = get_c_compiler(compiler);
    string cppcompiler = get_cpp_compiler(compiler);
    trace() << "get_native_env for " << msg->compiler
            << " (" << ccompiler << "," << cppcompiler << ")" << endl;

    if (stat(ccompiler.c_str(), &st) != 0) {
        log_error() << "Compiler binary " << ccompiler << " for environment not found." << endl;
        client->channel->send_msg(EndMsg());
        handle_end(client, 122);
        return false;
    }
    filetimes[ccompiler] = st.st_mtime;
    if (stat(cppcompiler.c_str(), &st) == 0) {
        // C++ compiler is optional.
        filetimes[cppcompiler] = st.st_mtime;
    }

    env_key = msg->compression + ":" + ccompiler;

    for (list::const_iterator it = msg->extrafiles.begin();
            it != msg->extrafiles.end(); ++it) {
        env_key += ':';
        env_key += *it;

        if (stat(it->c_str(), &st) != 0) {
            log_error() << "Extra file " << *it << " for environment not found." << endl;
            client->channel->send_msg(EndMsg());
            handle_end(client, 122);
            return false;
        }

        filetimes[*it] = st.st_mtime;
    }

    if (native_environments[env_key].name.length()) {
        const NativeEnvironment &env = native_environments[env_key];

        // Rebuild when any input file changed or the tarball vanished.
        if (env.filetimes != filetimes || access(env.name.c_str(), R_OK) != 0) {
            trace() << "native_env needs rebuild" << endl;
            cache_size -= remove_native_environment(env.name);
            envs_last_use.erase(env.name);
            if (env.create_env_pipe) {
                if ((-1 == close(env.create_env_pipe)) && (errno != EBADF)){
                    log_perror("close failed");
                }
                // TODO kill the still running icecc-create-env process?
            }

            native_environments.erase(env_key);   // invalidates 'env'
        }
    }

    trace() << "get_native_env " << native_environments[env_key].name
            << " (" << env_key << ")" << endl;

    client->status = Client::WAITCREATEENV;
    client->pending_create_env = env_key;

    if (native_environments[env_key].name.length()) { // already available
        return finish_get_native_env(client, env_key);
    } else {
        NativeEnvironment &env = native_environments[env_key]; // also inserts it

        if (!env.create_env_pipe) { // start creating it only if not already in progress
            env.filetimes = filetimes;
            trace() << "start_create_env " << env_key << endl;
            env.create_env_pipe = start_create_env(envbasedir, user_uid, user_gid, ccompiler, msg->extrafiles, msg->compression);
        } else {
            trace() << "waiting for already running create_env " << env_key << endl;
        }
    }

    return true;
}

// Tell a WAITCREATEENV client which native env tarball to use and mark it
// GOTNATIVE. Returns false when the send fails (client is ended).
bool Daemon::finish_get_native_env(Client *client, string env_key)
{
    assert(client->status == Client::WAITCREATEENV);
    assert(client->pending_create_env == env_key);
    UseNativeEnvMsg m(native_environments[env_key].name);

    if (!client->channel->send_msg(m)) {
        handle_end(client, 138);
        return false;
    }

    envs_last_use[native_environments[env_key].name] = time(NULL);
    client->status = Client::GOTNATIVE;
    client->pending_create_env.clear();
    return true;
}

// The icecc-create-env child finished: collect its result. On failure, end
// every client that was waiting for this env (restarting the loop after each
// handle_end, which invalidates iterators); on success, record the env and
// answer all waiting clients. Returns whether the env was created.
bool Daemon::create_env_finished(string env_key)
{
    assert(native_environments.count(env_key));
    NativeEnvironment &env = native_environments[env_key];

    trace() << "create_env_finished " << env_key << endl;
    assert(env.create_env_pipe);
    size_t installed_size = finish_create_env(env.create_env_pipe, envbasedir, env.name);
    env.create_env_pipe = 0;

    // we only clean out cache on next target install
    cache_size += installed_size;
    trace() << "cache_size = " << cache_size << endl;

    if (!installed_size) {
        bool repeat = true;
        while(repeat) {
            repeat = false;
            for (Clients::const_iterator it = clients.begin(); it != clients.end(); ++it) {
                if (it->second->pending_create_env == env_key) {
                    it->second->channel->send_msg(EndMsg());
                    handle_end(it->second, 121);
                    // The handle_end call invalidates our iterator, so break out of the loop,
                    // but try again just in case, until there's no match.
                    repeat = true;
                    break;
                }
            }
        }
        return false;
    }

    envs_last_use[env.name] = time(NULL);
    check_cache_size(env.name);

    for (Clients::const_iterator it = clients.begin(); it != clients.end(); ++it) {
        if (it->second->pending_create_env == env_key)
            finish_get_native_env(it->second, env_key);
    }
    return true;
}

// A client reports its job finished: update bookkeeping (active process count,
// local load accounting for jobs not run on this server), then forward the
// JobDoneMsg to the scheduler with the current client count.
// NOTE(review): "static_cast(m)" lost its template argument to extraction
// (upstream: static_cast<JobDoneMsg*>(m)); tokens left as found.
bool Daemon::handle_job_done(Client *cl, JobDoneMsg *m)
{
    if (cl->status == Client::CLIENTWORK) {
        clients.active_processes--;
    }

    cl->status = Client::JOBDONE;
    JobDoneMsg *msg = static_cast(m);
    trace() << "handle_job_done " << msg->job_id << " " << msg->exitcode << endl;

    if (!m->is_from_server()
            && (m->user_msec + m->sys_msec) <= m->real_msec) {
        icecream_load += (m->user_msec + m->sys_msec) / num_cpus;
    }

    assert(msg->job_id == cl->job_id);
    cl->job_id = 0; // the scheduler doesn't have it anymore
    msg->client_count = clients.size();
    return send_scheduler(*msg);
}

// Drain queued client work while we have capacity: start pending link jobs,
// deliver pending UseCS answers, and (unless fully loaded) fork compile
// children for TOCOMPILE jobs.
void Daemon::handle_old_request()
{
    while ((current_kids + clients.active_processes) < std::max((unsigned int)1, max_kids)) {

        Client *client = clients.get_earliest_client(Client::LINKJOB);

        if (client) {
            trace() << "send JobLocalBeginMsg to client" << endl;

            if (!client->channel->send_msg(JobLocalBeginMsg())) {
                log_warning() << "can't send start message to client" << endl;
                handle_end(client, 112);
            } else {
                client->status = Client::CLIENTWORK;
                clients.active_processes++;
                trace() << "pushed local job " << client->client_id << endl;

                if (!send_scheduler(JobLocalBeginMsg(client->client_id, client->outfile))) {
                    return;
                }
            }

            continue;
        }

        client = clients.get_earliest_client(Client::PENDING_USE_CS);

        if (client) {
            trace() << "pending " << client->dump() << endl;

            if (client->channel->send_msg(*client->usecsmsg)) {
                client->status = Client::CLIENTWORK;
                /* we make sure we reserve a spot and the rest is done if the
                 * client contacts as back with a Compile request */
                clients.active_processes++;
            } else {
                handle_end(client, 129);
            }

            continue;
        }

        /* we don't want to handle TOCOMPILE jobs as long as our load
           is too high */
        if (current_load >= 1000) {
            break;
        }

        client = clients.get_earliest_client(Client::TOCOMPILE);

        if (client) {
            CompileJob *job = client->job;
            assert(job);
            int sock = -1;
            pid_t pid = -1;

            trace() << "request for job " << job->jobID() << endl;

            string envforjob = job->targetPlatform() + "/" + job->environmentVersion();
            envs_last_use[envforjob] = time(NULL);
            pid = handle_connection(envbasedir, job, client->channel, sock, mem_limit, user_uid, user_gid);
            trace() << "handle connection returned " << pid << endl;

            if (pid > 0) {
                current_kids++;
                client->status = Client::WAITFORCHILD;
                client->pipe_from_child = sock;
                client->child_pid = pid;

                if (!send_scheduler(JobBeginMsg(job->jobID(), clients.size()))) {
                    log_info() << "failed sending scheduler about " << job->jobID() << endl;
                }
            } else {
                handle_end(client, 117);
            }

            continue;
        }

        break;
    }
}

// A compile child exited: read its statistics record from the status pipe,
// build the JobDoneMsg for the scheduler, refresh the env's last-use time and
// end the client with the child's exit status. Always returns false.
bool Daemon::handle_compile_done(Client *client)
{
    assert(client->status == Client::WAITFORCHILD);
    assert(client->child_pid > 0);
    assert(client->pipe_from_child >= 0);

    JobDoneMsg *msg = new JobDoneMsg(client->job->jobID(), -1, JobDoneMsg::FROM_SERVER, clients.size());
    assert(msg);
    assert(current_kids > 0);
    current_kids--;

    unsigned int job_stat[8];
    int end_status = 151;

    if (read(client->pipe_from_child, job_stat, sizeof(job_stat)) == sizeof(job_stat)) {
        msg->in_uncompressed = job_stat[JobStatistics::in_uncompressed];
        msg->in_compressed = job_stat[JobStatistics::in_compressed];
        msg->out_compressed = msg->out_uncompressed = job_stat[JobStatistics::out_uncompressed];
        end_status = msg->exitcode = job_stat[JobStatistics::exit_code];
        msg->real_msec = job_stat[JobStatistics::real_msec];
        msg->user_msec = job_stat[JobStatistics::user_msec];
        msg->sys_msec = job_stat[JobStatistics::sys_msec];
        msg->pfaults = job_stat[JobStatistics::sys_pfaults];
    }

    close(client->pipe_from_child);
    client->pipe_from_child = -1;
    string envforjob = client->job->targetPlatform() + "/" + client->job->environmentVersion();
    envs_last_use[envforjob] = time(NULL);

    if(!send_scheduler(*msg))
        log_warning() << "failed sending scheduler about compile done " << client->job->jobID() << endl;
    handle_end(client, end_status);
    delete msg;
    return false;
}

// A client submits the job to compile. A CLIENTWORK client compiles locally
// (job announced to the scheduler); anyone else is queued as TOCOMPILE for
// handle_old_request() to pick up.
// NOTE(review): "dynamic_cast(msg)" lost its template argument to extraction
// (upstream: dynamic_cast<CompileFileMsg*>(msg)); tokens left as found.
bool Daemon::handle_compile_file(Client *client, Msg *msg)
{
    CompileJob *job = dynamic_cast(msg)->takeJob();
    assert(client);
    assert(job);
    client->job = job;

    if (client->status == Client::CLIENTWORK) {
        assert(job->environmentVersion() == "__client");

        if (!send_scheduler(JobBeginMsg(job->jobID(), clients.size()))) {
            trace() << "can't reach scheduler to tell him about compile file job "
                    << job->jobID() << endl;
            return false;
        }

        // no scheduler is not an error case!
    } else {
        client->status = Client::TOCOMPILE;
    }

    return true;
}

// Run verify_env() for the requested environment and report the result back
// to the client. Returns false when sending the result fails.
bool Daemon::handle_verify_env(Client *client, VerifyEnvMsg *msg)
{
    assert(msg);
    bool ok = verify_env(client->channel, envbasedir, msg->target, msg->environment, user_uid, user_gid);
    trace() << "Verify environment done, " << (ok ? "success" : "failure") << ", environment " << msg->environment
            << " (" << msg->target << ")" << endl;
    VerifyEnvResultMsg resultmsg(ok);

    if (!client->channel->send_msg(resultmsg)) {
        log_error() << "sending verify end result failed.." << endl;
        return false;
    }

    return true;
}

// Forward a blacklist-host-for-env request straight to the scheduler.
// NOTE(review): "dynamic_cast(msg)" lost its template argument to extraction
// (upstream asserts a BlacklistHostEnvMsg*); tokens left as found.
bool Daemon::handle_blacklist_host_env(Client *client, Msg *msg)
{
    // just forward
    assert(dynamic_cast(msg));
    assert(client);
    (void)client;

    if (!scheduler) {
        return false;
    }

    return send_scheduler(*msg);
}

// Tear down a client: cancel any in-flight env install, fix the bookkeeping
// for its state, remove it from the client map and tell the scheduler the job
// is done (or locally done).
// NOTE(review): this function continues beyond the end of this chunk; the
// text below is the visible prefix only.
void Daemon::handle_end(Client *client, int exitcode)
{
    trace() << "handle_end " << client->client_id << " " << client->channel->name << endl;
#ifdef ICECC_DEBUG
    trace() << "handle_end " << client->dump() << endl;
    trace() << dump_internals() << endl;
#endif
    fd2chan.erase(client->channel->fd);

    if (client->status == Client::TOINSTALL || client->status == Client::WAITINSTALL) {
        finish_transfer_env(client, true);
    }

    if (client->status == Client::CLIENTWORK) {
        clients.active_processes--;
    }

    if (client->status == Client::WAITCOMPILE && exitcode == 119) {
        /* the client sent us a real good bye, so forget about the scheduler */
        client->job_id = 0;
    }

    /* Delete from the clients map before send_scheduler, which causes a
       double deletion. */
    if (!clients.erase(client->channel)) {
        log_error() << "client can't be erased: " << client->channel << endl;
        flush_debug();
        log_error() << dump_internals() << endl;
        flush_debug();
        assert(false);
    }

    if (scheduler && client->status != Client::WAITFORCHILD) {
        int job_id = client->job_id;
        bool use_client_id = false;

        if (client->status == Client::TOCOMPILE) {
            job_id = client->job->jobID();
        }

        if (client->status == Client::WAITFORCS) {
            // We don't know the job id, because we haven't received a reply
            // from the scheduler yet. Use client_id to identify the job,
            // the scheduler will use it for matching.
            use_client_id = true;
            assert( client->client_id > 0 );
        }

        if (job_id > 0 || use_client_id) {
            JobDoneMsg::from_type flag = JobDoneMsg::FROM_SUBMITTER;

            switch (client->status) {
            case Client::TOCOMPILE:
                flag = JobDoneMsg::FROM_SERVER;
                break;
            case Client::UNKNOWN:
            case Client::GOTNATIVE:
            case Client::JOBDONE:
            case Client::WAITFORCHILD:
            case Client::LINKJOB:
            case Client::TOINSTALL:
            case Client::WAITINSTALL:
            case Client::WAITCREATEENV:
                assert(false);   // should not have a job_id
                break;
            case Client::WAITCOMPILE:
            case Client::PENDING_USE_CS:
            case Client::CLIENTWORK:
            case Client::WAITFORCS:
                flag = JobDoneMsg::FROM_SUBMITTER;
                break;
            }

            trace() << "scheduler->send_msg( JobDoneMsg( " << client->dump() << ", " << exitcode << "))\n";

            JobDoneMsg msg(job_id, exitcode, flag, clients.size());
            if( use_client_id )
                msg.set_unknown_job_client_id( client->client_id );
            if (!send_scheduler(msg)) {
                trace() << "failed to reach scheduler for remote job done msg!" << endl;
            }
        } else if (client->status == Client::CLIENTWORK) {
            // Clientwork && !job_id == LINK
            trace() << "scheduler->send_msg( JobLocalDoneMsg( " << client->client_id << ") );\n";

            if (!send_scheduler(JobLocalDoneMsg(client->client_id))) {
                trace() << "failed to reach scheduler for local job done msg!"
<< endl; } } } delete client; } void Daemon::clear_children() { while (!clients.empty()) { Client *cl = clients.first(); handle_end(cl, 116); } while (current_kids > 0) { int status; pid_t child; while ((child = waitpid(-1, &status, 0)) < 0 && errno == EINTR) {} current_kids--; } // they should be all in clients too assert(fd2chan.empty()); fd2chan.clear(); new_client_id = 0; trace() << "cleared children\n"; } bool Daemon::handle_get_cs(Client *client, Msg *msg) { GetCSMsg *umsg = dynamic_cast(msg); assert(client); client->status = Client::WAITFORCS; umsg->client_id = client->client_id; trace() << "handle_get_cs " << umsg->client_id << endl; if (!scheduler) { /* now the thing is this: if there is no scheduler there is no point in trying to ask him. So we just redefine this as local job */ client->usecsmsg = new UseCSMsg(umsg->target, "127.0.0.1", daemon_port, umsg->client_id, true, 1, 0); client->status = Client::PENDING_USE_CS; client->job_id = umsg->client_id; return true; } umsg->client_count = clients.size(); return send_scheduler(*umsg); } int Daemon::handle_cs_conf(ConfCSMsg *msg) { max_scheduler_pong = msg->max_scheduler_pong; max_scheduler_ping = msg->max_scheduler_ping; return 0; } bool Daemon::handle_local_job(Client *client, Msg *msg) { client->status = Client::LINKJOB; client->outfile = dynamic_cast(msg)->outfile; return true; } bool Daemon::handle_activity(Client *client) { assert(client->status != Client::TOCOMPILE && client->status != Client::WAITINSTALL); Msg *msg = client->channel->get_msg(0, true); if (!msg) { handle_end(client, 118); return false; } bool ret = false; if (client->status == Client::TOINSTALL) { ret = handle_file_chunk_env(client, msg); delete msg; return ret; } switch (msg->type) { case M_GET_NATIVE_ENV: ret = handle_get_native_env(client, dynamic_cast(msg)); break; case M_COMPILE_FILE: ret = handle_compile_file(client, msg); break; case M_TRANFER_ENV: ret = handle_transfer_env(client, dynamic_cast(msg)); break; case M_GET_CS: ret 
= handle_get_cs(client, msg); break; case M_END: handle_end(client, 119); ret = false; break; case M_JOB_LOCAL_BEGIN: ret = handle_local_job(client, msg); break; case M_JOB_DONE: ret = handle_job_done(client, dynamic_cast(msg)); break; case M_VERIFY_ENV: ret = handle_verify_env(client, dynamic_cast(msg)); break; case M_BLACKLIST_HOST_ENV: ret = handle_blacklist_host_env(client, msg); break; default: log_error() << "protocol error " << msg->type << " on client " << client->dump() << endl; client->channel->send_msg(EndMsg()); handle_end(client, 120); ret = false; } delete msg; return ret; } void Daemon::answer_client_requests() { #ifdef ICECC_DEBUG if (clients.size() + current_kids) { log_info() << dump_internals() << endl; } log_info() << "clients " << clients.dump_per_status() << " " << current_kids << " (" << max_kids << ")" << endl; #endif /* reap zombis */ int status; while (waitpid(-1, &status, WNOHANG) < 0 && errno == EINTR) {} handle_old_request(); /* collect the stats after the children exited icecream_load */ if (scheduler) { maybe_stats(); } vector< pollfd > pollfds; pollfds.reserve( fd2chan.size() + 6 ); pollfd pfd; // tmp varible if (tcp_listen_fd != -1) { pfd.fd = tcp_listen_fd; pfd.events = POLLIN; pollfds.push_back(pfd); } if (tcp_listen_local_fd != -1) { pfd.fd = tcp_listen_local_fd; pfd.events = POLLIN; pollfds.push_back(pfd); } pfd.fd = unix_listen_fd; pfd.events = POLLIN; pollfds.push_back(pfd); for (map::const_iterator it = fd2chan.begin(); it != fd2chan.end();) { int i = it->first; MsgChannel *c = it->second; ++it; /* don't select on a fd that we're currently not interested in. 
Avoids that we wake up on an event we're not handling anyway */ Client *client = clients.find_by_channel(c); assert(client); int current_status = client->status; bool ignore_channel = current_status == Client::TOCOMPILE || current_status == Client::WAITFORCHILD || current_status == Client::WAITINSTALL; if (!ignore_channel && (!c->has_msg() || handle_activity(client))) { pfd.fd = i; pfd.events = POLLIN; pollfds.push_back(pfd); } if ((current_status == Client::WAITFORCHILD || current_status == Client::TOINSTALL || current_status == Client::WAITINSTALL) && client->pipe_from_child != -1) { pfd.fd = client->pipe_from_child; pfd.events = POLLIN; pollfds.push_back(pfd); } } if (scheduler) { pfd.fd = scheduler->fd; pfd.events = POLLIN; pollfds.push_back(pfd); } else if (discover && discover->listen_fd() >= 0) { /* We don't explicitely check for discover->get_fd() being in the selected set below. If it's set, we simply will return and our call will make sure we try to get the scheduler. */ pfd.fd = discover->listen_fd(); pfd.events = POLLIN; pollfds.push_back(pfd); } for (map::const_iterator it = native_environments.begin(); it != native_environments.end(); ++it) { if (it->second.create_env_pipe) { pfd.fd = it->second.create_env_pipe; pfd.events = POLLIN; pollfds.push_back(pfd); } } int ret = poll(pollfds.data(), pollfds.size(), max_scheduler_pong * 1000); if (ret < 0 && errno != EINTR) { log_perror("poll"); close_scheduler(); return; } // Reset debug if needed, but only if we aren't waiting for any child processes to finish, // otherwise their debug output could end up reset in the middle (and flush log marks used // by tests could be written out before debug output from children). 
if( current_kids == 0 ) { reset_debug_if_needed(); } if (ret > 0) { bool had_scheduler = scheduler; if (scheduler && pollfd_is_set(pollfds, scheduler->fd, POLLIN)) { while (!scheduler->read_a_bit() || scheduler->has_msg()) { Msg *msg = scheduler->get_msg(0, true); if (!msg) { log_warning() << "scheduler closed connection" << endl; close_scheduler(); clear_children(); return; } ret = 0; switch (msg->type) { case M_PING: if (!IS_PROTOCOL_27(scheduler)) { ret = !send_scheduler(PingMsg()); } break; case M_USE_CS: ret = scheduler_use_cs(static_cast(msg)); break; case M_NO_CS: ret = scheduler_no_cs(static_cast(msg)); break; case M_GET_INTERNALS: ret = scheduler_get_internals(); break; case M_CS_CONF: ret = handle_cs_conf(static_cast(msg)); break; default: log_error() << "unknown scheduler type " << (char)msg->type << endl; ret = 1; } delete msg; if (ret) { close_scheduler(); return; } } } int listen_fd = -1; if (tcp_listen_fd != -1 && pollfd_is_set(pollfds, tcp_listen_fd, POLLIN)) { listen_fd = tcp_listen_fd; } if (tcp_listen_local_fd != -1 && pollfd_is_set(pollfds, tcp_listen_local_fd, POLLIN)) { listen_fd = tcp_listen_local_fd; } if (pollfd_is_set(pollfds, unix_listen_fd, POLLIN)) { listen_fd = unix_listen_fd; } if (listen_fd != -1) { struct sockaddr cli_addr; socklen_t cli_len = sizeof cli_addr; int acc_fd = accept(listen_fd, &cli_addr, &cli_len); if (acc_fd < 0) { log_perror("accept error"); } if (acc_fd == -1 && errno != EINTR) { log_perror("accept failed:"); return; } MsgChannel *c = Service::createChannel(acc_fd, &cli_addr, cli_len); if (!c) { return; } Client *client = new Client; client->client_id = ++new_client_id; client->channel = c; clients[c] = client; fd2chan[c->fd] = c; trace() << "accepted " << c->fd << " " << c->name << " as " << client->client_id << endl; while (!c->read_a_bit() || c->has_msg()) { if (!handle_activity(client)) { break; } if (client->status == Client::TOCOMPILE || client->status == Client::WAITFORCHILD || client->status == 
Client::WAITINSTALL) { break; } } } else { for (map::const_iterator it = fd2chan.begin(); it != fd2chan.end();) { int i = it->first; MsgChannel *c = it->second; Client *client = clients.find_by_channel(c); assert(client); ++it; if (client->status == Client::WAITFORCHILD && client->pipe_from_child >= 0 && pollfd_is_set(pollfds, client->pipe_from_child, POLLIN)) { if (!handle_compile_done(client)) { return; } } if ((client->status == Client::TOINSTALL || client->status == Client::WAITINSTALL) && client->pipe_from_child >= 0 && pollfd_is_set(pollfds, client->pipe_from_child, POLLIN)) { if (!handle_env_install_child_done(client)) { return; } } if (pollfd_is_set(pollfds, i, POLLIN)) { assert(client->status != Client::TOCOMPILE && client->status != Client::WAITINSTALL); while (!c->read_a_bit() || c->has_msg()) { if (!handle_activity(client)) { break; } if (client->status == Client::TOCOMPILE || client->status == Client::WAITFORCHILD || client->status == Client::WAITINSTALL) { break; } } } } for (map::iterator it = native_environments.begin(); it != native_environments.end(); ) { if (it->second.create_env_pipe && pollfd_is_set(pollfds, it->second.create_env_pipe, POLLIN)) { if(!create_env_finished(it->first)) { native_environments.erase(it++); continue; } } ++it; } } if (had_scheduler && !scheduler) { clear_children(); return; } } } bool Daemon::reconnect() { if (scheduler) { return true; } if (!discover && next_scheduler_connect > time(0)) { trace() << "Delaying reconnect." << endl; return false; } #ifdef ICECC_DEBUG trace() << "reconn " << dump_internals() << endl; #endif if (!discover || (NULL == (scheduler = discover->try_get_scheduler()) && discover->timed_out())) { delete discover; discover = new DiscoverSched(netname, max_scheduler_pong, schedname, scheduler_port); } if (!scheduler) { log_warning() << "scheduler not yet found/selected." 
<< endl; return false; } delete discover; discover = 0; sockaddr_in name; socklen_t len = sizeof(name); int error = getsockname(scheduler->fd, (struct sockaddr*)&name, &len); if (!error) { remote_name = inet_ntoa(name.sin_addr); } else { remote_name = string(); } log_info() << "Connected to scheduler (I am known as " << remote_name << ")" << endl; current_load = -1000; gettimeofday(&last_stat, 0); icecream_load = 0; LoginMsg lmsg(daemon_port, determine_nodename(), machine_name, supported_features); lmsg.envs = available_environmnents(envbasedir); lmsg.max_kids = max_kids; lmsg.noremote = noremote; return send_scheduler(lmsg); } int Daemon::working_loop() { for (;;) { reconnect(); answer_client_requests(); if (exit_main_loop) { close_scheduler(); clear_children(); break; } } return 0; } int main(int argc, char **argv) { int max_processes = -1; srand(time(0) + getpid()); Daemon d; int debug_level = Error; string logfile; bool detach = false; nice_level = 5; // defined in serve.h while (true) { int option_index = 0; static const struct option long_options[] = { { "netname", 1, NULL, 'n' }, { "max-processes", 1, NULL, 'm' }, { "help", 0, NULL, 'h' }, { "daemonize", 0, NULL, 'd'}, { "log-file", 1, NULL, 'l'}, { "nice", 1, NULL, 0}, { "name", 1, NULL, 'N'}, { "scheduler-host", 1, NULL, 's' }, { "env-basedir", 1, NULL, 'b' }, { "user-uid", 1, NULL, 'u'}, { "cache-limit", 1, NULL, 0}, { "no-remote", 0, NULL, 0}, { "interface", 1, NULL, 'i'}, { "port", 1, NULL, 'p'}, { 0, 0, 0, 0 } }; const int c = getopt_long(argc, argv, "N:n:m:l:s:hvdb:u:i:p:", long_options, &option_index); if (c == -1) { break; // eoo } switch (c) { case 0: { string optname = long_options[option_index].name; if (optname == "nice") { if (optarg && *optarg) { errno = 0; int tnice = atoi(optarg); if (!errno) { nice_level = tnice; } } else { usage("Error: --nice requires argument"); } } else if (optname == "name") { if (optarg && *optarg) { d.nodename = optarg; } else { usage("Error: --name requires 
argument"); } } else if (optname == "cache-limit") { if (optarg && *optarg) { errno = 0; int mb = atoi(optarg); if (!errno) { cache_size_limit = mb * 1024 * 1024; } } else { usage("Error: --cache-limit requires argument"); } } else if (optname == "no-remote") { d.noremote = true; } } break; case 'd': detach = true; break; case 'N': if (optarg && *optarg) { d.nodename = optarg; } else { usage("Error: -N requires argument"); } break; case 'l': if (optarg && *optarg) { logfile = optarg; } else { usage("Error: -l requires argument"); } break; case 'v': if (debug_level < MaxVerboseLevel) { debug_level++; } break; case 'n': if (optarg && *optarg) { d.netname = optarg; } else { usage("Error: -n requires argument"); } break; case 'm': if (optarg && *optarg) { max_processes = atoi(optarg); } else { usage("Error: -m requires argument"); } break; case 's': if (optarg && *optarg) { string scheduler = optarg; size_t colon = scheduler.rfind( ':' ); if( colon == string::npos ) { d.schedname = scheduler; } else { d.schedname = scheduler.substr(0, colon); d.scheduler_port = atoi( scheduler.substr( colon + 1 ).c_str()); if( d.scheduler_port == 0 ) { usage("Error: -s requires valid port if hostname includes colon"); } } } else { usage("Error: -s requires hostname argument"); } break; case 'b': if (optarg && *optarg) { d.envbasedir = optarg; } break; case 'u': if (optarg && *optarg) { struct passwd *pw = getpwnam(optarg); if (!pw) { usage("Error: -u requires a valid username"); } else { d.user_uid = pw->pw_uid; d.user_gid = pw->pw_gid; d.warn_icecc_user_errno = 0; if (!d.user_gid || !d.user_uid) { usage("Error: -u must not be root"); } } } else { usage("Error: -u requires a valid username"); } break; case 'i': if (optarg && *optarg) { string daemon_interface = optarg; if (daemon_interface.empty()) { usage("Error: Invalid network interface specified"); } d.daemon_interface = daemon_interface; } else { usage("Error: -i requires argument"); } break; case 'p': if (optarg && *optarg) { 
d.daemon_port = atoi(optarg); if (0 == d.daemon_port) { usage("Error: Invalid port specified"); } } else { usage("Error: -p requires argument"); } break; default: usage(); } } if (d.warn_icecc_user_errno != 0) { log_errno("No icecc user on system. Falling back to nobody.", d.warn_icecc_user_errno); } umask(022); bool remote_disabled = false; if (getuid() == 0) { if (!logfile.length() && detach) { mkdir("/var/log/icecc", S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); chmod("/var/log/icecc", S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); ignore_result(chown("/var/log/icecc", d.user_uid, d.user_gid)); logfile = "/var/log/icecc/iceccd.log"; } mkdir("/var/run/icecc", S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); chmod("/var/run/icecc", S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); ignore_result(chown("/var/run/icecc", d.user_uid, d.user_gid)); #ifdef HAVE_LIBCAP_NG capng_clear(CAPNG_SELECT_BOTH); capng_update(CAPNG_ADD, (capng_type_t)(CAPNG_EFFECTIVE | CAPNG_PERMITTED), CAP_SYS_CHROOT); int r = capng_change_id(d.user_uid, d.user_gid, (capng_flags_t)(CAPNG_DROP_SUPP_GRP | CAPNG_CLEAR_BOUNDING)); if (r) { log_error() << "Error: capng_change_id failed: " << r << endl; exit(EXIT_SETUID_FAILED); } #endif } else { #ifdef HAVE_LIBCAP_NG // It's possible to have the capability even without being root. if (!capng_have_capability( CAPNG_EFFECTIVE, CAP_SYS_CHROOT )) { #else { #endif d.noremote = true; remote_disabled = true; } } setup_debug(debug_level, logfile); log_info() << "ICECREAM daemon " VERSION " starting up (nice level " << nice_level << ") " << endl; if (remote_disabled) log_warning() << "Cannot use chroot, no remote jobs accepted." 
<< endl; if (d.noremote) d.daemon_port = 0; d.determine_system(); if (chdir("/") != 0) { log_error() << "failed to switch to root directory: " << strerror(errno) << endl; exit(EXIT_DISTCC_FAILED); } if (detach) if (daemon(0, 0)) { log_perror("Failed to run as a daemon."); exit(EXIT_DISTCC_FAILED); } if (dcc_ncpus(&d.num_cpus) == 0) { log_info() << d.num_cpus << " CPU(s) online on this server" << endl; } if (max_processes < 0) { max_kids = d.num_cpus; } else { max_kids = max_processes; } log_info() << "allowing up to " << max_kids << " active jobs" << endl; d.determine_supported_features(); log_info() << "supported features: " << supported_features_to_string(d.supported_features) << endl; int ret; /* Still create a new process group, even if not detached */ trace() << "not detaching\n"; if ((ret = set_new_pgrp()) != 0) { return ret; } /* Don't catch signals until we've detached or created a process group. */ dcc_daemon_catch_signals(); if (signal(SIGPIPE, SIG_IGN) == SIG_ERR) { log_warning() << "signal(SIGPIPE, ignore) failed: " << strerror(errno) << endl; exit(EXIT_DISTCC_FAILED); } if (signal(SIGCHLD, SIG_DFL) == SIG_ERR) { log_warning() << "signal(SIGCHLD) failed: " << strerror(errno) << endl; exit(EXIT_DISTCC_FAILED); } /* This is called in the master daemon, whether that is detached or * not. 
*/ dcc_master_pid = getpid(); ofstream pidFile; string progName = argv[0]; progName = find_basename(progName); pidFilePath = string(RUNDIR) + string("/") + progName + string(".pid"); pidFile.open(pidFilePath.c_str()); pidFile << dcc_master_pid << endl; pidFile.close(); if (!cleanup_cache(d.envbasedir, d.user_uid, d.user_gid)) { return 1; } list nl = get_netnames(200, d.scheduler_port); trace() << "Netnames:" << endl; for (list::const_iterator it = nl.begin(); it != nl.end(); ++it) { trace() << *it << endl; } if (!d.setup_listen_fds()) { // error return 1; } return d.working_loop(); } icecream-1.3.1/daemon/serve.cpp000066400000000000000000000266261361626760200164040ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. Copyright (c) 2004 Stephan Kulow 2002, 2003 by Martin Pool This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "config.h" #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_SYS_SIGNAL_H # include #endif /* HAVE_SYS_SIGNAL_H */ #include #include #include #include #include "environment.h" #include "exitcode.h" #include "tempfile.h" #include "workit.h" #include "logging.h" #include "serve.h" #include "util.h" #include "file_util.h" #include #ifdef __FreeBSD__ #include #include #endif #ifndef O_LARGEFILE #define O_LARGEFILE 0 #endif #ifndef _PATH_TMP #define _PATH_TMP "/tmp" #endif using namespace std; int nice_level = 5; static void error_client(MsgChannel *client, string error) { if (IS_PROTOCOL_22(client)) { client->send_msg(StatusTextMsg(error)); } } static void write_output_file( const string& file, MsgChannel* client ) { int obj_fd = -1; try { obj_fd = open(file.c_str(), O_RDONLY | O_LARGEFILE); if (obj_fd == -1) { log_error() << "open failed" << endl; error_client(client, "open of object file failed"); throw myexception(EXIT_DISTCC_FAILED); } unsigned char buffer[100000]; do { ssize_t bytes = read(obj_fd, buffer, sizeof(buffer)); if (bytes < 0) { if (errno == EINTR) { continue; } throw myexception(EXIT_DISTCC_FAILED); } if (!bytes) { if( !client->send_msg(EndMsg())) { log_info() << "write of obj end failed " << endl; throw myexception(EXIT_DISTCC_FAILED); } break; } FileChunkMsg fcmsg(buffer, bytes); if (!client->send_msg(fcmsg)) { log_info() << "write of obj chunk failed " << bytes << endl; throw myexception(EXIT_DISTCC_FAILED); } } while (1); } catch(...) { if( obj_fd != -1 ) if ((-1 == close( obj_fd )) && (errno != EBADF)){ log_perror("close failed"); } throw; } } /** * Read a request, run the compiler, and send a response. 
**/ int handle_connection(const string &basedir, CompileJob *job, MsgChannel *client, int &out_fd, unsigned int mem_limit, uid_t user_uid, gid_t user_gid) { int socket[2]; if (pipe(socket) == -1) { log_perror("pipe failed"); return -1; } flush_debug(); pid_t pid = fork(); assert(pid >= 0); if (pid > 0) { // parent if ((-1 == close(socket[1])) && (errno != EBADF)){ log_perror("close failure"); } out_fd = socket[0]; fcntl(out_fd, F_SETFD, FD_CLOEXEC); return pid; } reset_debug(); if ((-1 == close(socket[0])) && (errno != EBADF)){ log_perror("close failed"); } out_fd = socket[1]; /* internal communication channel, don't inherit to gcc */ fcntl(out_fd, F_SETFD, FD_CLOEXEC); int niceval = nice(nice_level); if (niceval == -1) { log_warning() << "failed to set nice value: " << strerror(errno) << endl; } string tmp_path, obj_file, dwo_file; int exit_code = 0; try { if (job->environmentVersion().size()) { string dirname = basedir + "/target=" + job->targetPlatform() + "/" + job->environmentVersion(); if (::access(string(dirname + "/usr/bin/as").c_str(), X_OK) < 0) { error_client(client, dirname + "/usr/bin/as is not executable, installed environment removed?"); log_error() << "I don't have environment " << job->environmentVersion() << "(" << job->targetPlatform() << ") " << job->jobID() << endl; // The scheduler didn't listen to us, or maybe something has removed the files. 
throw myexception(EXIT_DISTCC_FAILED); } chdir_to_environment(client, dirname, user_uid, user_gid); } else { error_client(client, "empty environment"); log_error() << "Empty environment (" << job->targetPlatform() << ") " << job->jobID() << endl; throw myexception(EXIT_DISTCC_FAILED); } if (::access(_PATH_TMP + 1, W_OK) < 0) { error_client(client, "can't write to " _PATH_TMP); log_error() << "can't write into " << _PATH_TMP << " " << strerror(errno) << endl; throw myexception(-1); } int ret; unsigned int job_stat[8]; CompileResultMsg rmsg; unsigned int job_id = job->jobID(); memset(job_stat, 0, sizeof(job_stat)); char *tmp_output = 0; char prefix_output[32]; // 20 for 2^64 + 6 for "icecc-" + 1 for trailing NULL sprintf(prefix_output, "icecc-%u", job_id); if (job->dwarfFissionEnabled() && (ret = dcc_make_tmpdir(&tmp_output)) == 0) { tmp_path = tmp_output; free(tmp_output); // dwo information is embedded in the final object file, but the compiler // hard codes the path to the dwo file based on the given path to the // object output file. In every case, we must recreate the directory structure of // the client system inside our tmp directory, including both the working // directory the compiler will be run from as well as the relative path from // that directory to the specified output file. 
// // the work_it() function will rewrite the tmp build directory as root, effectively // letting us set up a "chroot"ed environment inside the build folder and letting // us set up the paths to mimic the client system string job_output_file = job->outputFile(); string job_working_dir = job->workingDirectory(); size_t slash_index = job_output_file.rfind('/'); string file_dir, file_name; if (slash_index != string::npos) { file_dir = job_output_file.substr(0, slash_index); file_name = job_output_file.substr(slash_index+1); } else { file_name = job_output_file; } string output_dir, relative_file_path; if (!file_dir.empty() && file_dir[0] == '/') { // output dir is absolute, convert to relative relative_file_path = get_relative_path(get_canonicalized_path(job_output_file), get_canonicalized_path(job_working_dir)); output_dir = tmp_path + get_canonicalized_path(file_dir); } else { // output file is already relative, canonicalize in relation to working dir string canonicalized_dir = get_canonicalized_path(job_working_dir + '/' + file_dir); relative_file_path = get_relative_path(canonicalized_dir + '/' + file_name, get_canonicalized_path(job_working_dir)); output_dir = tmp_path + canonicalized_dir; } if (!mkpath(output_dir)) { error_client(client, "could not create object file location in tmp directory"); throw myexception(EXIT_IO_ERROR); } if (!mkpath(tmp_path + job_working_dir)) { error_client(client, "could not create compiler working directory in tmp directory"); throw myexception(EXIT_IO_ERROR); } obj_file = output_dir + '/' + file_name; dwo_file = obj_file.substr(0, obj_file.rfind('.')) + ".dwo"; ret = work_it(*job, job_stat, client, rmsg, tmp_path, job_working_dir, relative_file_path, mem_limit, client->fd); } else if (!job->dwarfFissionEnabled() && (ret = dcc_make_tmpnam(prefix_output, ".o", &tmp_output, 0)) == 0) { obj_file = tmp_output; free(tmp_output); string build_path = obj_file.substr(0, obj_file.rfind('/')); string file_name = 
obj_file.substr(obj_file.rfind('/')+1); ret = work_it(*job, job_stat, client, rmsg, build_path, "", file_name, mem_limit, client->fd); } if (ret) { if (ret == EXIT_OUT_OF_MEMORY) { // we catch that as special case rmsg.was_out_of_memory = true; } else if (ret == EXIT_IO_ERROR) { // This was probably running out of disk space. // Fake that as running out of memory, since it's in practice // a very similar problem. rmsg.was_out_of_memory = true; } else { throw myexception(ret); } } struct stat st; if (stat(obj_file.c_str(), &st) == 0) { job_stat[JobStatistics::out_uncompressed] += st.st_size; } if (stat(dwo_file.c_str(), &st) == 0) { job_stat[JobStatistics::out_uncompressed] += st.st_size; rmsg.have_dwo_file = true; } else rmsg.have_dwo_file = false; if (!client->send_msg(rmsg)) { log_info() << "write of result failed" << endl; throw myexception(EXIT_DISTCC_FAILED); } /* wake up parent and tell him that compile finished */ /* if the write failed, well, doesn't matter */ ignore_result(write(out_fd, job_stat, sizeof(job_stat))); if ((-1 == close(out_fd)) && (errno != EBADF)){ log_perror("close failed"); } if (rmsg.status == 0) { write_output_file(obj_file, client); if (rmsg.have_dwo_file) { write_output_file(dwo_file, client); } } throw myexception(rmsg.status); } catch (const myexception& e) { delete client; client = 0; if (!obj_file.empty()) { if (-1 == unlink(obj_file.c_str()) && errno != ENOENT){ log_perror("unlink failure") << "\t" << obj_file << endl; } } if (!dwo_file.empty()) { if (-1 == unlink(dwo_file.c_str()) && errno != ENOENT){ log_perror("unlink failure") << "\t" << dwo_file << endl; } } if (!tmp_path.empty()) { rmpath(tmp_path.c_str()); } delete job; exit_code = e.exitcode(); } _exit(exit_code); } icecream-1.3.1/daemon/serve.h000066400000000000000000000024741361626760200160440ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. 
Copyright (c) 2004 Stephan Kulow 2002, 2003 by Martin Pool This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef ICECREAM_SERVE_H #define ICECREAM_SERVE_H #include class CompileJob; class MsgChannel; extern int nice_level; int handle_connection(const std::string &basedir, CompileJob *job, MsgChannel *serv, int & out_fd, unsigned int mem_limit, uid_t user_uid, gid_t user_gid); #endif icecream-1.3.1/daemon/workit.cpp000066400000000000000000000576001361626760200165730ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. Copyright (c) 2004 Stephan Kulow 2002, 2003 by Martin Pool This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "config.h" #include "workit.h" #include "tempfile.h" #include "assert.h" #include "exitcode.h" #include "logging.h" #include #include #ifdef __FreeBSD__ #include #endif /* According to earlier standards */ #include #include #include #include #include #include #include #if HAVE_SYS_USER_H && !defined(__DragonFly__) # include #endif #include #ifdef HAVE_SYS_VFS_H #include #endif #if defined(__FreeBSD__) || defined(__DragonFly__) || defined(__APPLE__) #ifndef RUSAGE_SELF #define RUSAGE_SELF (0) #endif #ifndef RUSAGE_CHILDREN #define RUSAGE_CHILDREN (-1) #endif #endif #include #include #include #include "comm.h" #include "platform.h" #include "util.h" using namespace std; static int death_pipe[2]; extern "C" { static void theSigCHLDHandler(int) { char foo = 0; ignore_result(write(death_pipe[1], &foo, 1)); } } static void error_client(MsgChannel *client, string error) { if (IS_PROTOCOL_23(client)) { client->send_msg(StatusTextMsg(error)); } } /* * This is all happening in a forked child. * That means that we can block and be lazy about closing fds * (in the error cases which exit quickly). 
*/ int work_it(CompileJob &j, unsigned int job_stat[], MsgChannel *client, CompileResultMsg &rmsg, const std::string &tmp_root, const std::string &build_path, const std::string &file_name, unsigned long int mem_limit, int client_fd) { rmsg.out.erase(rmsg.out.begin(), rmsg.out.end()); rmsg.out.erase(rmsg.out.begin(), rmsg.out.end()); std::list list = j.nonLocalFlags(); if (!IS_PROTOCOL_41(client) && j.dwarfFissionEnabled()) { list.push_back("-gsplit-dwarf"); } trace() << "remote compile for file " << j.inputFile() << endl; string argstxt; for (std::list::const_iterator it = list.begin(); it != list.end(); ++it) { argstxt += ' '; argstxt += *it; } trace() << "remote compile arguments:" << argstxt << endl; int sock_err[2]; int sock_out[2]; int sock_in[2]; int main_sock[2]; char buffer[4096]; if (pipe(sock_err)) { return EXIT_DISTCC_FAILED; } if (pipe(sock_out)) { return EXIT_DISTCC_FAILED; } if (pipe(main_sock)) { return EXIT_DISTCC_FAILED; } if (pipe(death_pipe)) { return EXIT_DISTCC_FAILED; } // We use a socket pair instead of a pipe to get a "slightly" bigger // output buffer. This saves context switches and latencies. if (socketpair(AF_UNIX, SOCK_STREAM, 0, sock_in) < 0) { return EXIT_DISTCC_FAILED; } int maxsize = 2 * 1024 * 2024; #ifdef SO_SNDBUFFORCE if (setsockopt(sock_in[1], SOL_SOCKET, SO_SNDBUFFORCE, &maxsize, sizeof(maxsize)) < 0) #endif { setsockopt(sock_in[1], SOL_SOCKET, SO_SNDBUF, &maxsize, sizeof(maxsize)); } if (fcntl(sock_in[1], F_SETFL, O_NONBLOCK)) { return EXIT_DISTCC_FAILED; } /* Testing */ struct sigaction act; sigemptyset(&act.sa_mask); act.sa_handler = SIG_IGN; act.sa_flags = 0; sigaction(SIGPIPE, &act, 0L); act.sa_handler = theSigCHLDHandler; act.sa_flags = SA_NOCLDSTOP; sigaction(SIGCHLD, &act, 0); sigaddset(&act.sa_mask, SIGCHLD); // Make sure we don't block this signal. 
gdb tends to do that :-( sigprocmask(SIG_UNBLOCK, &act.sa_mask, 0); flush_debug(); pid_t pid = fork(); if (pid == -1) { return EXIT_OUT_OF_MEMORY; } else if (pid == 0) { setenv("PATH", "/usr/bin", 1); // Safety check if (getuid() == 0 || getgid() == 0) { error_client(client, "UID is 0 - aborting."); _exit(142); } #ifdef RLIMIT_AS // Sanitizers use huge amounts of virtual memory and the setrlimit() call below // may lead to the process getting killed at any moment without any warning // or message. Both gcc's and clang's macros are unreliable (no way to detect -fsanitize=leak, // for example), but hopefully with the configure check this is good enough. #ifndef SANITIZER_USED #ifdef __SANITIZE_ADDRESS__ #define SANITIZER_USED #endif #if defined(__has_feature) #if __has_feature(address_sanitizer) #define SANITIZER_USED #endif #endif #endif #ifndef SANITIZER_USED struct rlimit rlim; rlim_t lim = mem_limit * 1024 * 1024; rlim.rlim_cur = lim; rlim.rlim_max = lim; if (setrlimit(RLIMIT_AS, &rlim)) { error_client(client, "setrlimit failed."); log_perror("setrlimit"); } else { log_info() << "Compile job memory limit set to " << mem_limit << " megabytes" << endl; } #endif #endif int argc = list.size(); argc++; // the program argc += 6; // -x c - -o file.o -fpreprocessed argc += 4; // gpc parameters argc += 9; // clang extra flags char **argv = new char*[argc + 1]; int i = 0; bool clang = false; if (IS_PROTOCOL_30(client)) { assert(!j.compilerName().empty()); clang = (j.compilerName().find("clang") != string::npos); argv[i++] = strdup(("/usr/bin/" + j.compilerName()).c_str()); } else { if (j.language() == CompileJob::Lang_C) { argv[i++] = strdup("/usr/bin/gcc"); } else if (j.language() == CompileJob::Lang_CXX) { argv[i++] = strdup("/usr/bin/g++"); } else { assert(0); } } argv[i++] = strdup("-x"); if (j.language() == CompileJob::Lang_C) { argv[i++] = strdup("c"); } else if (j.language() == CompileJob::Lang_CXX) { argv[i++] = strdup("c++"); } else if (j.language() == 
CompileJob::Lang_OBJC) { argv[i++] = strdup("objective-c"); } else if (j.language() == CompileJob::Lang_OBJCXX) { argv[i++] = strdup("objective-c++"); } else { error_client(client, "language not supported"); log_perror("language not supported"); } if( clang ) { // gcc seems to handle setting main file name and working directory fine // (it gets it from the preprocessed info), but clang needs help if( !j.inputFile().empty()) { argv[i++] = strdup("-Xclang"); argv[i++] = strdup("-main-file-name"); argv[i++] = strdup("-Xclang"); argv[i++] = strdup(j.inputFile().c_str()); } if( !j.workingDirectory().empty()) { argv[i++] = strdup("-Xclang"); argv[i++] = strdup("-fdebug-compilation-dir"); argv[i++] = strdup("-Xclang"); argv[i++] = strdup(j.workingDirectory().c_str()); } } // HACK: If in / , Clang records DW_AT_name with / prepended . if (chdir((tmp_root + build_path).c_str()) != 0) { error_client(client, "/tmp dir missing?"); } for (std::list::const_iterator it = list.begin(); it != list.end(); ++it) { argv[i++] = strdup(it->c_str()); } if (!clang) { argv[i++] = strdup("-fpreprocessed"); } argv[i++] = strdup("-"); argv[i++] = strdup("-o"); argv[i++] = strdup(file_name.c_str()); if (!clang) { argv[i++] = strdup("--param"); sprintf(buffer, "ggc-min-expand=%d", ggc_min_expand_heuristic(mem_limit)); argv[i++] = strdup(buffer); argv[i++] = strdup("--param"); sprintf(buffer, "ggc-min-heapsize=%d", ggc_min_heapsize_heuristic(mem_limit)); argv[i++] = strdup(buffer); } if (clang) { argv[i++] = strdup("-no-canonical-prefixes"); // otherwise clang tries to access /proc/self/exe } if (!clang && j.dwarfFissionEnabled()) { sprintf(buffer, "-fdebug-prefix-map=%s/=/", tmp_root.c_str()); argv[i++] = strdup(buffer); } // before you add new args, check above for argc argv[i] = 0; assert(i <= argc); argstxt.clear(); for (int pos = 1; pos < i; ++pos ) { argstxt += ' '; argstxt += argv[pos]; } trace() << "final arguments:" << argstxt << endl; close_debug(); if ((-1 == close(sock_out[0])) && 
(errno != EBADF)){ log_perror("close failed"); } if (-1 == dup2(sock_out[1], STDOUT_FILENO)){ log_perror("dup2 failed"); } if ((-1 == close(sock_out[1])) && (errno != EBADF)){ log_perror("close failed"); } if ((-1 == close(sock_err[0])) && (errno != EBADF)){ log_perror("close failed"); } if (-1 == dup2(sock_err[1], STDERR_FILENO)){ log_perror("dup2 failed"); } if ((-1 == close(sock_err[1])) && (errno != EBADF)){ log_perror("close failed"); } if ((-1 == close(sock_in[1])) && (errno != EBADF)){ log_perror("close failed"); } if (-1 == dup2(sock_in[0], STDIN_FILENO)){ log_perror("dup2 failed"); } if ((-1 == close(sock_in[0])) && (errno != EBADF)){ log_perror("close failed"); } if ((-1 == close(main_sock[0])) && (errno != EBADF)){ log_perror("close failed"); } fcntl(main_sock[1], F_SETFD, FD_CLOEXEC); if ((-1 == close(death_pipe[0])) && (errno != EBADF)){ log_perror("close failed"); } if ((-1 == close(death_pipe[1])) && (errno != EBADF)){ log_perror("close failed"); } #ifdef ICECC_DEBUG for (int f = STDERR_FILENO + 1; f < 4096; ++f) { long flags = fcntl(f, F_GETFD, 0); assert(flags < 0 || (flags & FD_CLOEXEC)); } #endif execv(argv[0], const_cast(argv)); // no return perror("ICECC: execv"); char resultByte = 1; ignore_result(write(main_sock[1], &resultByte, 1)); _exit(-1); } if ((-1 == close(sock_in[0])) && (errno != EBADF)){ log_perror("close failed"); } if ((-1 == close(sock_out[1])) && (errno != EBADF)){ log_perror("close failed"); } if ((-1 == close(sock_err[1])) && (errno != EBADF)){ log_perror("close failed"); } // idea borrowed from kprocess. // check whether the compiler could be run at all. 
if ((-1 == close(main_sock[1])) && (errno != EBADF)){ log_perror("close failed"); } for (;;) { char resultByte; ssize_t n = ::read(main_sock[0], &resultByte, 1); if (n == -1 && errno == EINTR) { continue; // Ignore } if (n == 1) { rmsg.status = resultByte; log_error() << "compiler did not start" << endl; error_client(client, "compiler did not start"); return EXIT_COMPILER_MISSING; } break; // != EINTR } if ((-1 == close(main_sock[0])) && (errno != EBADF)){ log_perror("close failed"); } struct timeval starttv; gettimeofday(&starttv, 0); int return_value = 0; // Got EOF for preprocessed input. stdout send may be still pending. bool input_complete = false; // Pending data to send to stdin FileChunkMsg *fcmsg = 0; size_t off = 0; log_block parent_wait("parent, waiting"); for (;;) { if (client_fd >= 0 && !fcmsg) { if (Msg *msg = client->get_msg(0, true)) { if (input_complete) { rmsg.err.append("client cancelled\n"); return_value = EXIT_CLIENT_KILLED; client_fd = -1; kill(pid, SIGTERM); delete fcmsg; fcmsg = 0; delete msg; } else { if (msg->type == M_END) { input_complete = true; if (!fcmsg && sock_in[1] != -1) { if (-1 == close(sock_in[1])){ log_perror("close failed"); } sock_in[1] = -1; } delete msg; } else if (msg->type == M_FILE_CHUNK) { fcmsg = static_cast(msg); off = 0; job_stat[JobStatistics::in_uncompressed] += fcmsg->len; job_stat[JobStatistics::in_compressed] += fcmsg->compressed; } else { log_error() << "protocol error while reading preprocessed file" << endl; input_complete = true; return_value = EXIT_IO_ERROR; client_fd = -1; kill(pid, SIGTERM); delete fcmsg; fcmsg = 0; delete msg; } } } else if (client->at_eof()) { log_warning() << "unexpected EOF while reading preprocessed file" << endl; input_complete = true; return_value = EXIT_IO_ERROR; client_fd = -1; kill(pid, SIGTERM); delete fcmsg; fcmsg = 0; } } vector< pollfd > pollfds; pollfd pfd; // tmp variable if (sock_out[0] >= 0) { pfd.fd = sock_out[0]; pfd.events = POLLIN; pollfds.push_back(pfd); } if 
(sock_err[0] >= 0) { pfd.fd = sock_err[0]; pfd.events = POLLIN; pollfds.push_back(pfd); } if (sock_in[1] == -1 && fcmsg) { // This state can occur when the compiler has terminated before // all file input is received from the client. The daemon must continue // reading all file input from the client because the client expects it to. // Deleting the file chunk message here tricks the poll() below to continue // listening for more file data from the client even though it is being // thrown away. delete fcmsg; fcmsg = 0; } if (client_fd >= 0 && !fcmsg) { pfd.fd = client_fd; pfd.events = POLLIN; pollfds.push_back(pfd); // Note that we don't actually query the status of this fd - // we poll it in every iteration. } // If all file data has been received from the client then start // listening on the death_pipe to know when the compiler has // terminated. The daemon can't start listening for the death of // the compiler sooner or else it might close the client socket before the // client had time to write all of the file data and wait for a response. // The client isn't coded to properly handle the closing of the socket while // sending all file data to the daemon. if (input_complete) { pfd.fd = death_pipe[0]; pfd.events = POLLIN; pollfds.push_back(pfd); } // Don't try to write to sock_in it if was already closed because // the compile terminated before reading all of the file data. if (fcmsg && sock_in[1] != -1) { pfd.fd = sock_in[1]; pfd.events = POLLOUT; pollfds.push_back(pfd); } int timeout = input_complete ? -1 : 60 * 1000; switch (poll(pollfds.data(), pollfds.size(), timeout)) { case 0: if (!input_complete) { log_warning() << "timeout while reading preprocessed file" << endl; kill(pid, SIGTERM); // Won't need it any more ... 
return_value = EXIT_IO_ERROR; client_fd = -1; input_complete = true; delete fcmsg; fcmsg = 0; continue; } // this should never happen assert(false); return EXIT_DISTCC_FAILED; case -1: if (errno == EINTR) { continue; } // this should never happen assert(false); return EXIT_DISTCC_FAILED; default: if (fcmsg && pollfd_is_set(pollfds, sock_in[1], POLLOUT)) { ssize_t bytes = write(sock_in[1], fcmsg->buffer + off, fcmsg->len - off); if (bytes < 0) { if (errno == EINTR) { continue; } kill(pid, SIGTERM); // Most likely crashed anyway ... if (input_complete) { return_value = EXIT_COMPILER_CRASHED; } delete fcmsg; fcmsg = 0; if (-1 == close(sock_in[1])){ log_perror("close failed"); } sock_in[1] = -1; continue; } off += bytes; if (off == fcmsg->len) { delete fcmsg; fcmsg = 0; if (input_complete) { if (-1 == close(sock_in[1])){ log_perror("close failed"); } sock_in[1] = -1; } } } if (sock_out[0] >= 0 && pollfd_is_set(pollfds, sock_out[0], POLLIN)) { ssize_t bytes = read(sock_out[0], buffer, sizeof(buffer) - 1); if (bytes > 0) { buffer[bytes] = 0; rmsg.out.append(buffer); } else if (bytes == 0) { if (-1 == close(sock_out[0])){ log_perror("close failed"); } sock_out[0] = -1; } } if (sock_err[0] >= 0 && pollfd_is_set(pollfds, sock_err[0], POLLIN)) { ssize_t bytes = read(sock_err[0], buffer, sizeof(buffer) - 1); if (bytes > 0) { buffer[bytes] = 0; rmsg.err.append(buffer); } else if (bytes == 0) { if (-1 == close(sock_err[0])){ log_perror("close failed"); } sock_err[0] = -1; } } if (pollfd_is_set(pollfds, death_pipe[0], POLLIN)) { // Note that we have already read any remaining stdout/stderr: // the sigpipe is delivered after everything was written, // and the notification is multiplexed into the select above. 
struct rusage ru; int status; if (wait4(pid, &status, 0, &ru) != pid) { // this should never happen assert(false); return EXIT_DISTCC_FAILED; } if (shell_exit_status(status) != 0) { if( !rmsg.out.empty()) trace() << "compiler produced stdout output:\n" << rmsg.out; if( !rmsg.err.empty()) trace() << "compiler produced stderr output:\n" << rmsg.err; unsigned long int mem_used = ((ru.ru_minflt + ru.ru_majflt) * getpagesize()) / 1024; rmsg.status = EXIT_OUT_OF_MEMORY; if (((mem_used * 100) > (85 * mem_limit * 1024)) || (rmsg.err.find("memory exhausted") != string::npos) || (rmsg.err.find("out of memory") != string::npos) || (rmsg.err.find("annot allocate memory") != string::npos) || (rmsg.err.find("failed to map segment from shared object") != string::npos) || (rmsg.err.find("Assertion `NewElts && \"Out of memory\"' failed") != string::npos) || (rmsg.err.find("terminate called after throwing an instance of 'std::bad_alloc'") != string::npos) || (rmsg.err.find("llvm::MallocSlabAllocator::Allocate") != string::npos)) { // the relation between ulimit and memory used is pretty thin ;( log_warning() << "Remote compilation failed, presumably because of running out of memory (exit code " << shell_exit_status(status) << ")" << endl; return EXIT_OUT_OF_MEMORY; } #ifdef HAVE_SYS_VFS_H struct statfs buf; int ret = statfs( "/", &buf); // If there's less than 10MiB of disk space free, we're probably running out of disk space. 
if ((ret == 0 && long(buf.f_bavail) < ((10 * 1024 * 1024) / buf.f_bsize)) || rmsg.err.find("o space left on device") != string::npos) { log_warning() << "Remote compilation failed, presumably because of running out of disk space (exit code " << shell_exit_status(status) << ")" << endl; return EXIT_IO_ERROR; } #endif } if (WIFEXITED(status)) { struct timeval endtv; gettimeofday(&endtv, 0); rmsg.status = shell_exit_status(status); job_stat[JobStatistics::exit_code] = shell_exit_status(status); job_stat[JobStatistics::real_msec] = ((endtv.tv_sec - starttv.tv_sec) * 1000) + ((long(endtv.tv_usec) - long(starttv.tv_usec)) / 1000); job_stat[JobStatistics::user_msec] = (ru.ru_utime.tv_sec * 1000) + (ru.ru_utime.tv_usec / 1000); job_stat[JobStatistics::sys_msec] = (ru.ru_stime.tv_sec * 1000) + (ru.ru_stime.tv_usec / 1000); job_stat[JobStatistics::sys_pfaults] = ru.ru_majflt + ru.ru_nswap + ru.ru_minflt; if(rmsg.status != 0) { log_warning() << "Remote compilation exited with exit code " << shell_exit_status(status) << endl; } else { log_info() << "Remote compilation completed with exit code " << shell_exit_status(status) << endl; } } else { log_warning() << "Remote compilation aborted with exit code " << shell_exit_status(status) << endl; } return return_value; } } } } icecream-1.3.1/daemon/workit.h000066400000000000000000000035211361626760200162310ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. Copyright (c) 2004 Stephan Kulow 2002, 2003 by Martin Pool This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef ICECREAM_WORKIT_H #define ICECREAM_WORKIT_H #include #include #include #include class MsgChannel; class CompileResultMsg; // No icecream ;( class myexception : public std::exception { int code; public: myexception(int _exitcode) : exception(), code(_exitcode) {} int exitcode() const { return code; } }; namespace JobStatistics { enum job_stat_fields { in_compressed, in_uncompressed, out_uncompressed, exit_code, real_msec, user_msec, sys_msec, sys_pfaults }; } extern int work_it(CompileJob &j, unsigned int job_stats[], MsgChannel *client, CompileResultMsg &msg, const std::string &tmp_root, const std::string &build_path, const std::string &file_name, unsigned long int mem_limit, int client_fd); #endif icecream-1.3.1/doc/000077500000000000000000000000001361626760200140425ustar00rootroot00000000000000icecream-1.3.1/doc/Makefile.am000066400000000000000000000012251361626760200160760ustar00rootroot00000000000000if WITH_ICECREAM_MAN icecc.1: $(srcdir)/man-icecc.1.xml $(DOCBOOK2X) $< icerun.1: $(srcdir)/man-icerun.1.xml $(DOCBOOK2X) $< icecc-create-env.1: $(srcdir)/man-icecc-create-env.1.xml $(DOCBOOK2X) $< icecc-scheduler.1: $(srcdir)/man-icecc-scheduler.1.xml $(DOCBOOK2X) $< iceccd.1: $(srcdir)/man-iceccd.1.xml $(DOCBOOK2X) $< icecream.7: $(srcdir)/man-icecream.7.xml $(DOCBOOK2X) $< dist_man_MANS = \ icecc.1 \ icerun.1 \ iceccd.1 \ icecc-scheduler.1 \ icecc-create-env.1 \ icecream.7 endif EXTRA_DIST = \ man-icecc.1.xml \ man-icerun.1.xml \ man-iceccd.1.xml \ man-icecc-scheduler.1.xml \ man-icecc-create-env.1.xml \ man-icecream.7.xml 
icecream-1.3.1/doc/man-icecc-create-env.1.xml000066400000000000000000000043761361626760200206030ustar00rootroot00000000000000 Clang"> GCC"> icecc-create-env"> ]> Icecream User's Manual Pino Toscano June 5rd, 2013 Icecream icecc-create-env 1 icecc-create-env Create an Icrecream environment tarball icecc-create-env compiler-binary --addfile file Description &icecc-create-env; is an Icecream helper that creates a new .tar.gz archive with all the files (compiler, tools and libraries) needed to setup a build environment. The resulting archive has a random file name like ddaea39ca1a7c88522b185eca04da2d8.tar.gz, which can then be renamed. See icecream(7) for more information on using the environment tarballs. Note that in the usual case it is not necessary to invoke icecc-create-env manually, as it will be called automatically for the native compiler used whenever necessary. Options file Add file to the environment archive; can be specified multiple times. See Also icecream(7), icecc-scheduler(1), iceccd(1) Author Pino Toscano icecream-1.3.1/doc/man-icecc-scheduler.1.xml000066400000000000000000000064371361626760200205300ustar00rootroot00000000000000 icecc-scheduler"> ]> Icecream User's Manual Cornelius Schumacher April 21th, 2005 Icecream icecc-scheduler 1 icecc-scheduler Icecream scheduler icecc-scheduler -d -r -l log-file -n net-name -p port -u user -vvv Description The Icecream scheduler is the central instance of an Icecream compile network. It distributes the compile jobs and provides the data for the monitors. Options , Detach daemon from shell. , Client connections are not disconnected from the scheduler even if there is a better scheduler available. , Print help message and exit. , log-file Name of file where log output is written to. , net-name The name of the Icecream network the scheduler controls. , port IP port the scheduler uses. , user Specify the system user used by the daemon, which must be different than root. 
If not specified, the daemon defaults to the icecc system user if available, or nobody if not. , , Control verbosity of daemon. The more v the more verbose. See Also icecream(7), iceccd(1), icemon(1) Author Cornelius Schumacher icecream-1.3.1/doc/man-icecc.1.xml000066400000000000000000000031271361626760200165450ustar00rootroot00000000000000 icecc"> ]> Icecream User's Manual Cornelius Schumacher April 21th, 2005 Icecream icecc 1 icecc Icecream compiler stub icecc compiler compile options -o OBJECT -c SOURCE Description &icecc; is the Icecream compiler stub. It gets called in place of the actual compiler and transparently routes the compile requests to the Icecream network. You should not call &icecc; directly, but place the specific compiler stubs in your path: export PATH=/usr/lib/icecc/bin:$PATH. See Also icecream(7), icecc-scheduler(1), iceccd(1), icemon(1) Author Cornelius Schumacher icecream-1.3.1/doc/man-iceccd.1.xml000066400000000000000000000131521361626760200167100ustar00rootroot00000000000000 iceccd"> ]> Icecream User's Manual Cornelius Schumacher April 21th, 2005 Icecream iceccd 1 iceccd Icecream daemon iceccd -b env-basedir --cache-limit MB -d -l log-file -m max-processes -N hostname -n node-name --nice level --no-remote -s scheduler-host -u user -vvv Description The Icecream daemon has to run on all nodes being part of the Icecream compile cluster. It receives compile jobs and executes them in a chroot environment. The compile clients send their compile environment the first time they send a job to a particular daemon, so that the environment of the daemon does not have to match the one of the client. The daemon also has to run on clients sending compile jobs to the Icecream network. If a node should be able to send compile jobs, but never receive any, start the daemon with the option . All Icecream daemons need to have contact to the Icecream scheduler which controls the distribution of data between compile nodes. 
Normally the daemon will automatically find the right scheduler. If this is not the case you can explicitly specify the name of the Icecream network and the host running the scheduler. Options , env-basedir Base directory for storing compile environments sent to the daemon by the compile clients. MB Maximum size in Mega Bytes of cache used to store compile environments of compile clients. , Detach daemon from shell. , Print help message and exit. , log-file Name of file where log output is written to. , max-processes Maximum number of compile jobs started in parallel on machine running the daemon. hostname The name of the icecream host on the network. , , net-name The name of the icecream network the daemon should connect to. There has to be a scheduler running for the network under the same network name. level The level of niceness to use. Default is 5. Prevents jobs from other nodes being scheduled on this one. , scheduler-host Name of host running the scheduler for the network the daemon should connect to. This option might help if the scheduler cannot broadcast its presence to the clients due to firewall settings or similar reasons, when this is enabled scheduler should use --persistent-client-connection. , user Specify the system user used by the daemon, which must be different than root. If not specified, the daemon defaults to the icecc system user if available, or nobody if not. , , Control verbosity of daemon. The more v the more verbose. See Also icecream(7), icecc-scheduler(1), icemon(1) Author Cornelius Schumacher icecream-1.3.1/doc/man-icecream.7.xml000066400000000000000000000377121361626760200172640ustar00rootroot00000000000000 ccache"> distcc"> icecc"> icemon"> ]> Icecream User's Manual Cornelius Schumacher April 21th, 2005 Icecream icecream 7 icecream A distributed compile system Description Icecream is a distributed compile system for C and C++. Icecream is created by SUSE and is based on ideas and code by &distcc;. 
Like &distcc; it takes compile jobs from your build and distributes it to remote machines allowing a parallel build on several machines you have got. But unlike &distcc; Icecream uses a central server that schedules the compile jobs to the fastest free server and is as this dynamic. This advantage pays off mostly for shared computers, if you are the only user on X machines, you have full control over them anyway. How to use icecream You need: One machine that runs the scheduler (icecc-scheduler ) Many machines that run the daemon (iceccd ) If you want to compile using icecream, make sure $prefix/lib/icecc/bin is the first entry in your path, e.g. type export PATH=/usr/lib/icecc/bin:$PATH (Hint: put this in ~/.bashrc or /etc/profile to not have to type it in everytime) Then you just compile with make , where num is the amount of jobs you want to compile in parallel. Do not exaggerate. Too large numbers can overload your machine or the compile cluster and make the build in fact slower. Never use icecream in untrusted environments. Run the daemons and the scheduler as unpriviliged user in such networks if you have to! But you will have to rely on homogeneous networks then (see below). If you want an overview of your icecream compile cluster, or if you just want funny stats, you might want to run &icemon;. Using icecream in heterogeneous environments If you are running icecream daemons in the same icecream network but on machines with incompatible compiler versions, icecream needs to send your build environment to remote machines (note: they all must be running as root if compiled without libcap-ng support. In the future icecream might gain the ability to know when machines cannot accept a different environment, but for now it is all or nothing). Under normal circumstances this is handled transparently by the icecream daemon, which will prepare a tarball with the environment when needed. 
This is the recommended way, as the daemon will also automatically update the tarball whenever your compiler changes. If you want to handle this manually for some reason, you have to tell icecream which environment you are using. Use icecc to create an archive file containing all the files necessary to setup the compiler environment. The file will have a random unique name like ddaea39ca1a7c88522b185eca04da2d8.tar.bz2 per default. Rename it to something more expressive for your convenience, e.g. i386-3.3.1.tar.bz2. Set ICECC_VERSION=filename_of_archive_containing_your_environment in the shell environment where you start the compile jobs and the file will be transferred to the daemons where your compile jobs run and installed to a chroot environment for executing the compile jobs in the environment fitting to the environment of the client. This requires that the icecream daemon runs as root. Cross-Compiling using icecream SUSE got quite some good machines not having a processor from Intel or AMD, so icecream is pretty good in using cross-compiler environments similar to the above way of spreading compilers. There the ICECC_VERSION variable looks like <native_filename>(,<platform>:<cross_compiler_filename>)*, for example like this: /work/9.1-i386.tar.bz2,ia64:/work/9.1-cross-ia64.tar.bz2 How to package such a cross compiler is pretty straightforward if you look what is inside the tarballs generated by icecc . Cross-Compiling for embedded targets using icecream When building for embedded targets like ARM often you will have a toolchain that runs on your host and produces code for the target. In these situations you can exploit the power of icecream as well. Create symlinks from where &icecc; is to the name of your cross compilers (e.g. arm-linux-g++ and arm-linux-gcc), make sure that these symlinks are in the path and before the path of your toolchain, with $ICECC_CC and $ICECC_CXX you need to tell icecream which compilers to use for preprocessing and local compiling. 
e.g. set it to ICECC_CC=arm-linux-gcc and ICECC_CXX=arm-linux-g++. As the next step you need to create a .tar.bz2 of your cross compiler, check the result of build-native to see what needs to be present. Finally one needs to set ICECC_VERSION and point it to the .tar.bz2 you have created. When you start compiling your toolchain will be used. With ICECC_VERSION you point out on which platforms your toolchain runs, you do not indicate for which target code will be generated. Cross-Compiling for multiple targets in the same environment using icecream When working with toolchains for multiple targets, icecream can be configured to support multiple toolchains in the same environment. Multiple toolchains can be configured by appending =<target> to the tarball filename in the ICECC_VERSION variable. Where the <target> is the cross compiler prefix. There the ICECC_VERSION variable will look like <native_filename>(,<platform>:<cross_compiler_filename>=<target>)*. Below an example of how to configure icecream to use two toolchains, /work/toolchain1/bin/arm-eabi-[gcc,g++] and /work/toolchain2/bin/arm-linux-androideabi-[gcc,g++], for the same host architecture: Create symbolic links with the cross compilers names (e.g. arm-eabi-[gcc,g++] and arm-linux-androideabi-[gcc,g++]) pointing to where the icecc binary is. Make sure these symbolic links are in the $PATH and before the path of the toolchains. Create a tarball file for each toolchain that you want to use with icecream. The icecc-create-env script can be used to create the tarball file for each toolchain, for example: icecc-create-env /work/toolchain1/bin/arm-eabi-gcc icecc-create-env /work/toolchain2/bin/arm-linux-androideabi-gcc. Set ICECC_VERSION to point to the native tarball file and for each tarball file created to the toolchains (e.g ICECC_VERSION=/work/i386-native.tar.gz,/work/arm-eabi-toolchain1.tar.gz=arm-eabi,/work/arm-linux-androideabi-toolchain2.tar.gz=arm-linux-androideabi). 
With these steps the icecrem will use /work/arm-eabi-toolchain1.tar.gz file to cross compilers with the prefix arm-eabi (e.g. arm-eabi-gcc and arm-eabi-g++), use /work/arm-linux-androideabi-toolchain2.tar.gz file to cross compilers with the prefix arm-linux-androideabi (e.g. arm-linux-androideabi-gcc and arm-linux-androideabi-g++) and use /work/i386-native.tar.gz file to compilers without prefix, the native compilers. How to combine icecream with &ccache; The easiest way to use &ccache; with icecream is to set CCACHE_PREFIX to &icecc; (the actual icecream client wrapper) export CCACHE_PREFIX=icecc This will make &ccache; prefix any compilation command it needs to do with icecc, making it use icecream for the compilation (but not for preprocessing alone). To actually use &ccache;, the mechanism is the same like with using icecream alone. Since &ccache; does not provide any symlinks in /opt/ccache/bin, you can create them manually: mkdir /opt/ccache/bin ln -s /usr/bin/ccache /opt/ccache/bin/gcc ln -s /usr/bin/ccache /opt/ccache/bin/g++ And then compile with export PATH=/opt/ccache/bin:$PATH Note however that &ccache; is not really worth the trouble if you are not recompiling your project three times a day from scratch (it adds quite some overhead in comparing the preprocessor output and uses quite some disc space and I found a cache hit of 18% a bit too few, so I disabled it again). Debug output You can use the environment variable ICECC_DEBUG to control if icecream gives debug output or not. Set it to debug to get debug output. The other possible values are error, warning and info (the option for daemon and scheduler raise the level per on the command line - so use for full debug). Avoiding old hosts It is possible that compilation on some hosts fails because they are too old (typically the kernel on the remote host is too old for the glibc from the local host). 
Recent icecream versions should automatically detect this and avoid such hosts when compilation would fail. If some hosts are running old icecream versions and it is not possible to upgrade them for some reason, use export ICECC_IGNORE_UNVERIFIED=1 Some Numbers Numbers of my test case (some STL C++ genetic algorithm) g++ on my machine: 1.6s g++ on fast machine: 1.1s icecream using my machine as remote machine: 1.9s icecream using fast machine: 1.8s The icecream overhead is quite huge as you might notice, but the compiler cannot interleave preprocessing with compilation and the file needs to be read/written once more and in between the file is transferred. But even if the other computer is faster, using g++ on my local machine is faster. If you are (for whatever reason) alone in your network at some point, you lose all advantages of distributed compiling and only add the overhead. So icecream got a special case for local compilations (the same special meaning that localhost got within $DISTCC_HOSTS). This makes compiling on my machine using icecream down to 1.7s (the overhead is actually less than 0.1s in average). As the scheduler is aware of that meaning, it will prefer your own computer if it is free and got not less than 70% of the fastest available computer. Keep in mind, that this affects only the first compile job, the second one is distributed anyway. So if I had to compile two of my files, I would get g++ on my machine: 3.2s g++ on the fast machine: 2.2s using icecream on my machine: max(1.7,1.8)=1.8s (using icecream on the other machine: max(1.1,1.8)=1.8s) The math is a bit tricky and depends a lot on the current state of the compilation network, but make sure you are not blindly assuming make halves your compilation time. What is the best environment for icecream In most requirements icecream is not special, e.g. 
it does not matter what distributed compile system you use, you will not have fun if your nodes are connected through than less or equal to 10MBit. Note that icecream compresses input and output files (using lzo), so you can calc with ~1MBit per compile job - i.e. more than make will not be possible without delays. Remember that more machines are only good if you can use massive parallelization, but you will for sure get the best result if your submitting machine (the one you called g++ on) will be fast enough to feed the others. Especially if your project consists of many easy to compile files, the preprocessing and file I/O will be job enough to need a quick machine. The scheduler will try to give you the fastest machines available, so even if you add old machines, they will be used only in exceptional situations, but still you can have bad luck - the scheduler does not know how long a job will take before it started. So if you have 3 machines and two quick to compile and one long to compile source file, you are not safe from a choice where everyone has to wait on the slow machine. Keep that in mind. Network setup for Icecream (firewalls) A short overview of the ports icecream requires: TCP/10245 on the daemon computers (required) TCP/8765 for the the scheduler computer (required) TCP/8766 for the telnet interface to the scheduler (optional) UDP/8765 for broadcast to find the scheduler (optional) If the monitor cannot find the scheduler, use ICECC_SCHEDULER=host icemon. See Also icecc-scheduler(1), iceccd(1), icemon(1) Icecream Authors Stephan Kulow <coolo@suse.de> Michael Matz <matz@suse.de> Cornelius Schumacher <cschum@suse.de> ...and various other contributors. 
icecream-1.3.1/doc/man-icerun.1.xml000066400000000000000000000030611361626760200167610ustar00rootroot00000000000000 icerun"> ]> Icecream User's Manual Cornelius Schumacher April 21th, 2005 Icecream icerun 1 icerun Icecream custom command wrapper icerun command command options Description &icerun; is the Icecream custom command wrapper. It simply executes the passed command, but limits the number of parallel invocations the same way local compilations are limited. Since builds using Icecream usually require increasing the parallelism of a build, wrapping custom resource-demanding commands this way prevents these commnads from overloading the system. See Also icecream(7), icecc(1), icecc-scheduler(1), iceccd(1), icemon(1) Author Cornelius Schumacher icecream-1.3.1/m4/000077500000000000000000000000001361626760200136155ustar00rootroot00000000000000icecream-1.3.1/m4/cap-ng.m4000066400000000000000000000025431361626760200152300ustar00rootroot00000000000000# libcap-ng.m4 - Checks for the libcap-ng support # Copyright (c) 2009 Steve Grubb sgrubb@redhat.com # AC_DEFUN([ICECC_LIBCAP_NG_PATH], [ case $host in *linux*) capng_auto=no ;; *) capng_auto=yes ;; esac AC_ARG_WITH(libcap-ng, [ --with-libcap-ng=[auto/yes/no] Add Libcap-ng support],, with_libcap_ng=auto) # Check for Libcap-ng API # # libcap-ng detection if test x$with_libcap_ng = xno ; then have_libcap_ng=no; else # Start by checking for header file AC_CHECK_HEADER(cap-ng.h, capng_headers=yes, capng_headers=no) # See if we have libcap-ng library AC_CHECK_LIB(cap-ng, capng_clear, CAPNG_LDADD=-lcap-ng,) # Check results are usable if test x$with_libcap_ng = xyes -a x$CAPNG_LDADD = x ; then AC_MSG_ERROR(libcap-ng support was requested and the library was not found) fi if test x$capng_auto = xno -a x$CAPNG_LDADD = x ; then AC_MSG_ERROR(libcap-ng library was not found) fi if test x$CAPNG_LDADD != x -a $capng_headers = no ; then AC_MSG_ERROR(libcap-ng libraries found but headers are missing) fi fi AC_SUBST(CAPNG_LDADD) 
AC_MSG_CHECKING(whether to use libcap-ng) if test x$CAPNG_LDADD != x ; then AC_DEFINE(HAVE_LIBCAP_NG,1,[libcap-ng support]) AC_MSG_RESULT(yes) else AC_MSG_RESULT(no) fi ]) icecream-1.3.1/scheduler/000077500000000000000000000000001361626760200152535ustar00rootroot00000000000000icecream-1.3.1/scheduler/Makefile.am000066400000000000000000000004211361626760200173040ustar00rootroot00000000000000 sbin_PROGRAMS = icecc-scheduler icecc_scheduler_SOURCES = compileserver.cpp job.cpp jobstat.cpp scheduler.cpp icecc_scheduler_LDADD = ../services/libicecc.la AM_LIBTOOLFLAGS = --silent noinst_HEADERS = \ compileserver.h \ job.h \ jobstat.h \ scheduler.h icecream-1.3.1/scheduler/compileserver.cpp000066400000000000000000000364431361626760200206500ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. Copyright (c) 2004 Michael Matz 2004 Stephan Kulow This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "compileserver.h" #include #include #include #include #include #include "../services/logging.h" #include "../services/job.h" #include "job.h" #include "scheduler.h" unsigned int CompileServer::s_hostIdCounter = 0; CompileServer::CompileServer(const int fd, struct sockaddr *_addr, const socklen_t _len, const bool text_based) : MsgChannel(fd, _addr, _len, text_based) , m_remotePort(0) , m_hostId(0) , m_nodeName() , m_busyInstalling(0) , m_hostPlatform() , m_load(1000) , m_maxJobs(0) , m_noRemote(false) , m_jobList() , m_state(CONNECTED) , m_type(UNKNOWN) , m_chrootPossible(false) , m_featuresSupported(0) , m_clientCount(0) , m_submittedJobsCount(0) , m_lastPickId(0) , m_compilerVersions() , m_lastCompiledJobs() , m_lastRequestedJobs() , m_cumCompiled() , m_cumRequested() , m_clientMap() , m_blacklist() , m_inFd(-1) , m_inConnAttempt(0) , m_nextConnTime(0) , m_lastConnStartTime(0) , m_acceptingInConnection(true) { } void CompileServer::pick_new_id() { assert(!m_hostId); m_hostId = ++s_hostIdCounter; } bool CompileServer::check_remote(const Job *job) const { bool local = (job->submitter() == this); return local || !m_noRemote; } bool CompileServer::platforms_compatible(const string &target) const { if (target == hostPlatform()) { return true; } // the below doesn't work as the unmapped platform is transferred back to the // client and that asks the daemon for a platform he can't install (see TODO) static multimap platform_map; if (platform_map.empty()) { platform_map.insert(make_pair(string("i386"), string("i486"))); platform_map.insert(make_pair(string("i386"), string("i586"))); platform_map.insert(make_pair(string("i386"), string("i686"))); platform_map.insert(make_pair(string("i386"), string("x86_64"))); platform_map.insert(make_pair(string("i486"), string("i586"))); platform_map.insert(make_pair(string("i486"), string("i686"))); platform_map.insert(make_pair(string("i486"), string("x86_64"))); platform_map.insert(make_pair(string("i586"), 
string("i686"))); platform_map.insert(make_pair(string("i586"), string("x86_64"))); platform_map.insert(make_pair(string("i686"), string("x86_64"))); platform_map.insert(make_pair(string("ppc"), string("ppc64"))); platform_map.insert(make_pair(string("s390"), string("s390x"))); } multimap::const_iterator end = platform_map.upper_bound(target); for (multimap::const_iterator it = platform_map.lower_bound(target); it != end; ++it) { if (it->second == hostPlatform()) { return true; } } return false; } /* Given a candidate CS and a JOB, check if any of the requested environments could be installed on the CS. This is the case if that env can be run there, i.e. if the host platforms of the CS and of the environment are compatible. Return an empty string if none can be installed, otherwise return the platform of the first found environments which can be installed. */ string CompileServer::can_install(const Job *job, bool ignore_installing) const { // trace() << "can_install host: '" << cs->host_platform << "' target: '" // << job->target_platform << "'" << endl; if (!ignore_installing && busyInstalling()) { #if DEBUG_SCHEDULER > 0 trace() << nodeName() << " is busy installing since " << time(0) - busyInstalling() << " seconds." << endl; #endif return string(); } Environments environments = job->environments(); for (Environments::const_iterator it = environments.begin(); it != environments.end(); ++it) { if (platforms_compatible(it->first) && !blacklisted(job, *it)) { return it->first; } } return string(); } bool CompileServer::is_eligible_ever(const Job *job) const { bool jobs_okay = m_maxJobs > 0; // We cannot use just 'protocol', because if the scheduler's protocol // is lower than the daemon's then this is set to the minimum. // But here we are asked about the daemon's protocol version, so check that. 
bool version_okay = job->minimalHostVersion() <= maximum_remote_protocol; bool features_okay = featuresSupported(job->requiredFeatures()); bool eligible = jobs_okay && (m_chrootPossible || job->submitter() == this) && version_okay && features_okay && m_acceptingInConnection && can_install(job, true).size() && check_remote(job); #if DEBUG_SCHEDULER > 2 trace() << nodeName() << " is_eligible_ever: " << eligible << " (jobs_okay " << jobs_okay << ", version_okay " << version_okay << ", features_okay " << features_okay << ", chroot_or_local " << (m_chrootPossible || job->submitter() == this) << ", accepting " << m_acceptingInConnection << ", can_install " << (can_install(job).size() != 0) << ", check_remote " << check_remote(job) << ")" << endl; #endif return eligible; } bool CompileServer::is_eligible_now(const Job *job) const { if(!is_eligible_ever(job)) return false; bool jobs_okay = int(m_jobList.size()) < m_maxJobs; if( m_maxJobs > 0 && int(m_jobList.size()) == m_maxJobs ) jobs_okay = true; // allow one job for preloading bool load_okay = m_load < 1000; bool eligible = jobs_okay && load_okay && can_install(job, false).size(); #if DEBUG_SCHEDULER > 2 trace() << nodeName() << " is_eligible_now: " << eligible << " (jobs_okay " << jobs_okay << ", load_okay " << load_okay << ")" << endl; #endif return eligible; } unsigned int CompileServer::remotePort() const { return m_remotePort; } void CompileServer::setRemotePort(unsigned int port) { m_remotePort = port; } unsigned int CompileServer::hostId() const { return m_hostId; } void CompileServer::setHostId(unsigned int id) { m_hostId = id; } string CompileServer::nodeName() const { return m_nodeName; } void CompileServer::setNodeName(const string &name) { m_nodeName = name; } bool CompileServer::matches(const string& nm) const { return m_nodeName == nm || name == nm; } time_t CompileServer::busyInstalling() const { return m_busyInstalling; } void CompileServer::setBusyInstalling(time_t time) { m_busyInstalling = time; } 
string CompileServer::hostPlatform() const { return m_hostPlatform; } void CompileServer::setHostPlatform(const string &platform) { m_hostPlatform = platform; } unsigned int CompileServer::load() const { return m_load; } void CompileServer::setLoad(unsigned int load) { m_load = load; } int CompileServer::maxJobs() const { return m_maxJobs; } void CompileServer::setMaxJobs(int jobs) { m_maxJobs = jobs; } bool CompileServer::noRemote() const { return m_noRemote; } void CompileServer::setNoRemote(bool value) { m_noRemote = value; } list CompileServer::jobList() const { return m_jobList; } void CompileServer::appendJob(Job *job) { m_lastPickId = job->id(); m_jobList.push_back(job); } void CompileServer::removeJob(Job *job) { m_jobList.remove(job); } unsigned int CompileServer::lastPickedId() { return m_lastPickId; } CompileServer::State CompileServer::state() const { return m_state; } void CompileServer::setState(const CompileServer::State state) { m_state = state; } CompileServer::Type CompileServer::type() const { return m_type; } void CompileServer::setType(const CompileServer::Type type) { m_type = type; } bool CompileServer::chrootPossible() const { return m_chrootPossible; } void CompileServer::setChrootPossible(const bool possible) { m_chrootPossible = possible; } bool CompileServer::featuresSupported(unsigned int features) const { return ( m_featuresSupported & features ) == features; } unsigned int CompileServer::supportedFeatures() const { return m_featuresSupported; } void CompileServer::setSupportedFeatures(unsigned int features) { m_featuresSupported = features; } int CompileServer::clientCount() const { return m_clientCount; } void CompileServer::setClientCount( int clientCount ) { m_clientCount = clientCount; } int CompileServer::submittedJobsCount() const { return m_submittedJobsCount; } void CompileServer::submittedJobsIncrement() { m_submittedJobsCount++; } void CompileServer::submittedJobsDecrement() { m_submittedJobsCount--; } Environments 
CompileServer::compilerVersions() const { return m_compilerVersions; } void CompileServer::setCompilerVersions(const Environments &environments) { m_compilerVersions = environments; } list CompileServer::lastCompiledJobs() const { return m_lastCompiledJobs; } void CompileServer::appendCompiledJob(const JobStat &stats) { m_lastCompiledJobs.push_back(stats); } void CompileServer::popCompiledJob() { m_lastCompiledJobs.pop_front(); } list CompileServer::lastRequestedJobs() const { return m_lastRequestedJobs; } void CompileServer::appendRequestedJobs(const JobStat &stats) { m_lastRequestedJobs.push_back(stats); } void CompileServer::popRequestedJobs() { m_lastRequestedJobs.pop_front(); } JobStat CompileServer::cumCompiled() const { return m_cumCompiled; } void CompileServer::setCumCompiled(const JobStat &stats) { m_cumCompiled = stats; } JobStat CompileServer::cumRequested() const { return m_cumRequested; } void CompileServer::setCumRequested(const JobStat &stats) { m_cumRequested = stats; } int CompileServer::getClientJobId(const int localJobId) { return m_clientMap[localJobId]; } void CompileServer::insertClientJobId(const int localJobId, const int newJobId) { m_clientMap[localJobId] = newJobId; } void CompileServer::eraseClientJobId(const int localJobId) { m_clientMap.erase(localJobId); } map CompileServer::blacklist() const { return m_blacklist; } Environments CompileServer::getEnvsForBlacklistedCS(const CompileServer *cs) { return m_blacklist[cs]; } void CompileServer::blacklistCompileServer(CompileServer *cs, const std::pair &env) { m_blacklist[cs].push_back(env); } void CompileServer::eraseCSFromBlacklist(CompileServer *cs) { m_blacklist.erase(cs); } bool CompileServer::blacklisted(const Job *job, const pair &environment) const { Environments blacklist = job->submitter()->getEnvsForBlacklistedCS(this); return find(blacklist.begin(), blacklist.end(), environment) != blacklist.end(); } int CompileServer::getInFd() const { return m_inFd; } void 
CompileServer::startInConnectionTest() { if (m_noRemote || getConnectionInProgress() || (m_nextConnTime > time(0))) { return; } m_inFd = socket(PF_INET, SOCK_STREAM, 0); fcntl(m_inFd, F_SETFL, O_NONBLOCK); struct hostent *host = gethostbyname(name.c_str()); struct sockaddr_in remote_addr; remote_addr.sin_family = AF_INET; remote_addr.sin_port = htons(remotePort()); memcpy(&remote_addr.sin_addr.s_addr, host->h_addr_list[0], host->h_length); memset(remote_addr.sin_zero, '\0', sizeof(remote_addr.sin_zero)); int status = connect(m_inFd, (struct sockaddr *)&remote_addr, sizeof(remote_addr)); if(status == 0) { updateInConnectivity(isConnected()); } else if (!(errno == EINPROGRESS || errno == EAGAIN)) { updateInConnectivity(false); } m_lastConnStartTime=time(0); } void CompileServer::updateInConnectivity(bool acceptingIn) { static const time_t time_offset_table[] = { 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096 }; static const size_t table_size = sizeof(time_offset_table); //On a successful connection, we should still check back every 1min static const time_t check_back_time = 60; if(acceptingIn) { if(!m_acceptingInConnection) { m_acceptingInConnection = true; m_inConnAttempt = 0; trace() << "Client (" << m_nodeName << " " << name << ":" << m_remotePort << ") is accepting incoming connections." << endl; } m_nextConnTime = time(0) + check_back_time; close(m_inFd); m_inFd = -1; } else { if(m_acceptingInConnection) { m_acceptingInConnection = false; trace() << "Client (" << m_nodeName << " " << name << ":" << m_remotePort << ") connected but is not able to accept incoming connections." 
<< endl; } m_nextConnTime = time(0) + time_offset_table[m_inConnAttempt]; if(m_inConnAttempt < (table_size - 1)) m_inConnAttempt++; trace() << nodeName() << " failed to accept an incoming connection on " << name << ":" << m_remotePort << " attempting again in " << m_nextConnTime - time(0) << " seconds" << endl; close(m_inFd); m_inFd = -1; } } bool CompileServer::isConnected() { if (getConnectionTimeout() == 0) { return false; } struct hostent *host = gethostbyname(name.c_str()); struct sockaddr_in remote_addr; remote_addr.sin_family = AF_INET; remote_addr.sin_port = htons(remotePort()); memcpy(&remote_addr.sin_addr.s_addr, host->h_addr_list[0], host->h_length); memset(remote_addr.sin_zero, '\0', sizeof(remote_addr.sin_zero)); int error = 0; socklen_t err_len= sizeof(error); return (getsockopt(m_inFd, SOL_SOCKET, SO_ERROR, &error, &err_len) == 0 && error == 0); } time_t CompileServer::getConnectionTimeout() { time_t now = time(0); time_t elapsed_time = now - m_lastConnStartTime; time_t max_timeout = 5; return (elapsed_time < max_timeout) ? max_timeout - elapsed_time : 0; } bool CompileServer::getConnectionInProgress() { return (m_inFd != -1); } time_t CompileServer::getNextTimeout() { if (m_noRemote) { return -1; } if (m_inFd != -1) { return getConnectionTimeout(); } time_t until_connect = m_nextConnTime - time(0); return (until_connect > 0) ? until_connect : 0; } icecream-1.3.1/scheduler/compileserver.h000066400000000000000000000127211361626760200203060ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. Copyright (c) 2004 Michael Matz 2004 Stephan Kulow This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef COMPILESERVER_H #define COMPILESERVER_H #include #include #include #include "../services/comm.h" #include "jobstat.h" class Job; using namespace std; /* One compile server (receiver, compile daemon) */ class CompileServer : public MsgChannel { public: enum State { CONNECTED, LOGGEDIN }; enum Type { UNKNOWN, DAEMON, MONITOR, LINE }; CompileServer(const int fd, struct sockaddr *_addr, const socklen_t _len, const bool text_based); void pick_new_id(); bool check_remote(const Job *job) const; bool platforms_compatible(const string &target) const; string can_install(const Job *job, bool ignore_installing = false) const; bool is_eligible_ever(const Job *job) const; bool is_eligible_now(const Job *job) const; unsigned int remotePort() const; void setRemotePort(const unsigned int port); unsigned int hostId() const; void setHostId(const unsigned int id); string nodeName() const; void setNodeName(const string &name); bool matches(const string& nm) const; time_t busyInstalling() const; void setBusyInstalling(const time_t time); string hostPlatform() const; void setHostPlatform(const string &platform); unsigned int load() const; void setLoad(const unsigned int load); int maxJobs() const; void setMaxJobs(const int jobs); bool noRemote() const; void setNoRemote(const bool value); list jobList() const; void appendJob(Job *job); void removeJob(Job *job); unsigned int lastPickedId(); State state() const; void setState(const State state); Type type() const; void setType(const Type type); bool chrootPossible() const; void setChrootPossible(const 
bool possible); bool featuresSupported(unsigned int features) const; unsigned int supportedFeatures() const; void setSupportedFeatures(unsigned int features); int clientCount() const; void setClientCount( int clientCount ); int submittedJobsCount() const; void submittedJobsIncrement(); void submittedJobsDecrement(); Environments compilerVersions() const; void setCompilerVersions(const Environments &environments); list lastCompiledJobs() const; void appendCompiledJob(const JobStat &stats); void popCompiledJob(); list lastRequestedJobs() const; void appendRequestedJobs(const JobStat &stats); void popRequestedJobs(); JobStat cumCompiled() const; void setCumCompiled(const JobStat &stats); JobStat cumRequested() const; void setCumRequested(const JobStat &stats); unsigned int hostidCounter() const; int getClientJobId(const int localJobId); void insertClientJobId(const int localJobId, const int newJobId); void eraseClientJobId(const int localJobId); map blacklist() const; Environments getEnvsForBlacklistedCS(const CompileServer *cs); void blacklistCompileServer(CompileServer *cs, const std::pair &env); void eraseCSFromBlacklist(CompileServer *cs); int getInFd() const; void startInConnectionTest(); time_t getConnectionTimeout(); time_t getNextTimeout(); bool getConnectionInProgress(); bool isConnected(); void updateInConnectivity(bool acceptingIn); private: bool blacklisted(const Job *job, const pair &environment) const; /* The listener port, on which it takes compile requests. 
*/ unsigned int m_remotePort; unsigned int m_hostId; string m_nodeName; time_t m_busyInstalling; string m_hostPlatform; // LOAD is load * 1000 unsigned int m_load; int m_maxJobs; bool m_noRemote; list m_jobList; State m_state; Type m_type; bool m_chrootPossible; unsigned int m_featuresSupported; int m_clientCount; // number of client connections the daemon has int m_submittedJobsCount; unsigned int m_lastPickId; Environments m_compilerVersions; // Available compilers list m_lastCompiledJobs; list m_lastRequestedJobs; JobStat m_cumCompiled; // cumulated JobStat m_cumRequested; static unsigned int s_hostIdCounter; map m_clientMap; // map client ID for daemon to our IDs map m_blacklist; int m_inFd; unsigned int m_inConnAttempt; time_t m_nextConnTime; time_t m_lastConnStartTime; bool m_acceptingInConnection; }; #endif icecream-1.3.1/scheduler/job.cpp000066400000000000000000000103071361626760200165320ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. Copyright (c) 2004 Michael Matz 2004 Stephan Kulow This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "job.h" #include "compileserver.h" Job::Job(const unsigned int _id, CompileServer *subm) : m_id(_id) , m_localClientId(0) , m_state(PENDING) , m_server(0) , m_submitter(subm) , m_startTime(0) , m_startOnScheduler(0) , m_doneTime(0) , m_targetPlatform() , m_fileName() , m_masterJobFor() , m_argFlags(0) , m_language() , m_preferredHost() , m_minimalHostVersion(0) , m_requiredFeatures(0) { m_submitter->submittedJobsIncrement(); } Job::~Job() { // XXX is this really deleted on all other paths? /* fd2chan.erase (channel->fd); delete channel;*/ m_submitter->submittedJobsDecrement(); } unsigned int Job::id() const { return m_id; } unsigned int Job::localClientId() const { return m_localClientId; } void Job::setLocalClientId(const unsigned int id) { m_localClientId = id; } Job::State Job::state() const { return m_state; } void Job::setState(const Job::State state) { m_state = state; } CompileServer *Job::server() const { return m_server; } void Job::setServer(CompileServer *server) { m_server = server; } CompileServer *Job::submitter() const { return m_submitter; } void Job::setSubmitter(CompileServer *submitter) { m_submitter = submitter; } Environments Job::environments() const { return m_environments; } void Job::setEnvironments(const Environments &environments) { m_environments = environments; } void Job::appendEnvironment(const std::pair &env) { m_environments.push_back(env); } void Job::clearEnvironments() { m_environments.clear(); } time_t Job::startTime() const { return m_startTime; } void Job::setStartTime(const time_t time) { m_startTime = time; } time_t Job::startOnScheduler() const { return m_startOnScheduler; } void Job::setStartOnScheduler(const time_t time) { m_startOnScheduler = time; } time_t Job::doneTime() const { return m_doneTime; } void Job::setDoneTime(const time_t time) { m_doneTime = time; } std::string Job::targetPlatform() const { return m_targetPlatform; } void Job::setTargetPlatform(const std::string &platform) { m_targetPlatform = 
platform; } std::string Job::fileName() const { return m_fileName; } void Job::setFileName(const std::string &fileName) { m_fileName = fileName; } std::list Job::masterJobFor() const { return m_masterJobFor; } void Job::appendJob(Job *job) { m_masterJobFor.push_back(job); } unsigned int Job::argFlags() const { return m_argFlags; } void Job::setArgFlags(const unsigned int argFlags) { m_argFlags = argFlags; } std::string Job::language() const { return m_language; } void Job::setLanguage(const std::string &language) { m_language = language; } std::string Job::preferredHost() const { return m_preferredHost; } void Job::setPreferredHost(const std::string &host) { m_preferredHost = host; } int Job::minimalHostVersion() const { return m_minimalHostVersion; } void Job::setMinimalHostVersion(int version) { m_minimalHostVersion = version; } unsigned int Job::requiredFeatures() const { return m_requiredFeatures; } void Job::setRequiredFeatures(unsigned int features) { m_requiredFeatures = features; } icecream-1.3.1/scheduler/job.h000066400000000000000000000075751361626760200162140ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. Copyright (c) 2004 Michael Matz 2004 Stephan Kulow This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef JOB_H #define JOB_H #include #include #include #include "../services/comm.h" class CompileServer; class Job { public: enum State { PENDING, WAITINGFORCS, COMPILING }; Job(const unsigned int _id, CompileServer *subm); ~Job(); unsigned int id() const; unsigned int localClientId() const; void setLocalClientId(const unsigned int id); State state() const; void setState(const State state); CompileServer *server() const; void setServer(CompileServer *server); CompileServer *submitter() const; void setSubmitter(CompileServer *submitter); Environments environments() const; void setEnvironments(const Environments &environments); void appendEnvironment(const std::pair &env); void clearEnvironments(); time_t startTime() const; void setStartTime(const time_t time); time_t startOnScheduler() const; void setStartOnScheduler(const time_t time); time_t doneTime() const; void setDoneTime(const time_t time); std::string targetPlatform() const; void setTargetPlatform(const std::string &platform); std::string fileName() const; void setFileName(const std::string &fileName); std::list masterJobFor() const; void appendJob(Job *job); unsigned int argFlags() const; void setArgFlags(const unsigned int argFlags); std::string language() const; void setLanguage(const std::string &language); std::string preferredHost() const; void setPreferredHost(const std::string &host); int minimalHostVersion() const; void setMinimalHostVersion( int version ); unsigned int requiredFeatures() const; void setRequiredFeatures(unsigned int features); private: const unsigned int m_id; unsigned int m_localClientId; State m_state; CompileServer *m_server; // on which server we build CompileServer *m_submitter; // who submitted us Environments m_environments; time_t m_startTime; // _local_ to the compiler server time_t m_startOnScheduler; // starttime local to scheduler /** * the end signal from client and daemon is a bit of a race and * in 99.9% of all cases it's catched correctly. 
But for the remaining * 0.1% we need a solution too - otherwise these jobs are eating up slots. * So the solution is to track done jobs (client exited, daemon didn't signal) * and after 10s no signal, kill the daemon (and let it rehup) **/ time_t m_doneTime; std::string m_targetPlatform; std::string m_fileName; std::list m_masterJobFor; unsigned int m_argFlags; std::string m_language; // for debugging std::string m_preferredHost; // for debugging daemons int m_minimalHostVersion; // minimal version required for the the remote server unsigned int m_requiredFeatures; // flags the job requires on the remote server }; #endif icecream-1.3.1/scheduler/jobstat.cpp000066400000000000000000000055131361626760200174310ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. Copyright (c) 2004 Michael Matz 2004 Stephan Kulow This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "jobstat.h" JobStat::JobStat() : m_outputSize(0) , m_compileTimeReal(0) , m_compileTimeUser(0) , m_compileTimeSys(0) , m_jobId(0) { } unsigned long JobStat::outputSize() const { return m_outputSize; } void JobStat::setOutputSize(unsigned long size) { m_outputSize = size; } unsigned long JobStat::compileTimeReal() const { return m_compileTimeReal; } void JobStat::setCompileTimeReal(unsigned long time) { m_compileTimeReal = time; } unsigned long JobStat::compileTimeUser() const { return m_compileTimeUser; } void JobStat::setCompileTimeUser(unsigned long time) { m_compileTimeUser = time; } unsigned long JobStat::compileTimeSys() const { return m_compileTimeSys; } void JobStat::setCompileTimeSys(unsigned long time) { m_compileTimeSys = time; } unsigned int JobStat::jobId() const { return m_jobId; } void JobStat::setJobId(unsigned int id) { m_jobId = id; } JobStat &JobStat::operator+(const JobStat &st) { m_outputSize += st.m_outputSize; m_compileTimeReal += st.m_compileTimeReal; m_compileTimeUser += st.m_compileTimeUser; m_compileTimeSys += st.m_compileTimeSys; m_jobId = 0; return *this; } JobStat &JobStat::operator+=(const JobStat &st) { return *this + st; } JobStat &JobStat::operator-(const JobStat &st) { m_outputSize -= st.m_outputSize; m_compileTimeReal -= st.m_compileTimeReal; m_compileTimeUser -= st.m_compileTimeUser; m_compileTimeSys -= st.m_compileTimeSys; m_jobId = 0; return *this; } JobStat &JobStat::operator-=(const JobStat &st) { return *this - st; } JobStat JobStat::operator/(int d) const { JobStat r = *this; r /= d; return r; } JobStat &JobStat::operator/=(int d) { m_outputSize /= d; m_compileTimeReal /= d; m_compileTimeUser /= d; m_compileTimeSys /= d; m_jobId = 0; return *this; } icecream-1.3.1/scheduler/jobstat.h000066400000000000000000000037021361626760200170740ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. 
    Copyright (c) 2004 Michael Matz
                  2004 Stephan Kulow

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/

#ifndef JOBSTAT_H
#define JOBSTAT_H

/* Accumulated statistics of one compile job (or, after summation via
   operator+=, of many jobs).  The scheduler keeps these records per server
   and per submitter to estimate compile speed. */
struct JobStat {
public:
    JobStat();

    /* Size of the produced output in bytes (uncompressed). */
    unsigned long outputSize() const;
    void setOutputSize(unsigned long size);

    /* Wall-clock time of the compile, in milliseconds. */
    unsigned long compileTimeReal() const;
    void setCompileTimeReal(unsigned long time);

    /* User CPU time, in milliseconds. */
    unsigned long compileTimeUser() const;
    void setCompileTimeUser(unsigned long time);

    /* System CPU time, in milliseconds. */
    unsigned long compileTimeSys() const;
    void setCompileTimeSys(unsigned long time);

    /* Scheduler-assigned job id; 0 once records have been summed. */
    unsigned int jobId() const;
    void setJobId(unsigned int id);

    /* NOTE: operator+ / operator- mutate *this and return a reference;
       they back the compound assignment operators below. */
    JobStat &operator+(const JobStat &st);
    JobStat &operator+=(const JobStat &st);
    JobStat &operator-(const JobStat &st);
    JobStat &operator-=(const JobStat &st);
    /* Averaging over d jobs; operator/ returns a new record. */
    JobStat operator/(int d) const;
    JobStat &operator/=(int d);

private:
    unsigned long m_outputSize;      // output size (uncompressed)
    unsigned long m_compileTimeReal; // in milliseconds
    unsigned long m_compileTimeUser;
    unsigned long m_compileTimeSys;
    unsigned int m_jobId;
};

#endif
Copyright (c) 2004 Michael Matz 2004 Stephan Kulow This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _GNU_SOURCE // getopt_long #define _GNU_SOURCE 1 #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "../services/comm.h" #include "../services/getifaddrs.h" #include "../services/logging.h" #include "../services/job.h" #include "../services/util.h" #include "config.h" #include "compileserver.h" #include "job.h" #include "scheduler.h" /* TODO: * leak check * are all filedescs closed when done? 
* simplify livetime of the various structures (Jobs/Channels/CompileServers know of each other and sometimes take over ownership) */ /* TODO: - iron out differences in code size between architectures + ia64/i686: 1.63 + x86_64/i686: 1.48 + ppc/i686: 1.22 + ppc64/i686: 1.59 (missing data for others atm) */ /* The typical flow of messages for a remote job should be like this: prereq: daemon is connected to scheduler * client does GET_CS * request gets queued * request gets handled * scheduler sends USE_CS * client asks remote daemon * daemon sends JOB_BEGIN * client sends END + closes connection * daemon sends JOB_DONE (this can be swapped with the above one) This means, that iff the client somehow closes the connection we can and must remove all traces of jobs resulting from that client in all lists. */ using namespace std; static string pidFilePath; static map fd2cs; static volatile sig_atomic_t exit_main_loop = false; time_t starttime; time_t last_announce; static string scheduler_interface = ""; static unsigned int scheduler_port = 8765; // A subset of connected_hosts representing the compiler servers static list css; static list monitors; static list controls; static list block_css; static unsigned int new_job_id; static map jobs; /* XXX Uah. Don't use a queue for the job requests. It's a hell to delete anything out of them (for clean up). */ struct UnansweredList { list l; CompileServer *submitter; bool remove_job(Job *); }; static list toanswer; static list all_job_stats; static JobStat cum_job_stats; static float server_speed(CompileServer *cs, Job *job = 0, bool blockDebug = false); /* Searches the queue for JOB and removes it. Returns true if something was deleted. 
*/ bool UnansweredList::remove_job(Job *job) { list::iterator it; for (it = l.begin(); it != l.end(); ++it) if (*it == job) { l.erase(it); return true; } return false; } static void add_job_stats(Job *job, JobDoneMsg *msg) { JobStat st; /* We don't want to base our timings on failed or too small jobs. */ if (msg->out_uncompressed < 4096 || msg->exitcode != 0) { return; } st.setOutputSize(msg->out_uncompressed); st.setCompileTimeReal(msg->real_msec); st.setCompileTimeUser(msg->user_msec); st.setCompileTimeSys(msg->sys_msec); st.setJobId(job->id()); if (job->argFlags() & CompileJob::Flag_g) { st.setOutputSize(st.outputSize() * 10 / 36); // average over 1900 jobs: faktor 3.6 in osize } else if (job->argFlags() & CompileJob::Flag_g3) { st.setOutputSize(st.outputSize() * 10 / 45); // average over way less jobs: factor 1.25 over -g } // the difference between the -O flags isn't as big as the one between -O0 and -O>=1 // the numbers are actually for gcc 3.3 - but they are _very_ rough heurstics anyway) if (job->argFlags() & CompileJob::Flag_O || job->argFlags() & CompileJob::Flag_O2 || job->argFlags() & CompileJob::Flag_Ol2) { st.setOutputSize(st.outputSize() * 58 / 35); } if (job->server()->lastCompiledJobs().size() >= 7) { /* Smooth out spikes by not allowing one job to add more than 20% of the current speed. */ float this_speed = (float) st.outputSize() / (float) st.compileTimeUser(); /* The current speed of the server, but without adjusting to the current job, hence no second argument. 
*/ float cur_speed = server_speed(job->server()); if ((this_speed / 1.2) > cur_speed) { st.setOutputSize((long unsigned) (cur_speed * 1.2 * st.compileTimeUser())); } else if ((this_speed * 1.2) < cur_speed) { st.setOutputSize((long unsigned)(cur_speed / 1.2 * st.compileTimeUser())); } } job->server()->appendCompiledJob(st); job->server()->setCumCompiled(job->server()->cumCompiled() + st); if (job->server()->lastCompiledJobs().size() > 200) { job->server()->setCumCompiled(job->server()->cumCompiled() - *job->server()->lastCompiledJobs().begin()); job->server()->popCompiledJob(); } job->submitter()->appendRequestedJobs(st); job->submitter()->setCumRequested(job->submitter()->cumRequested() + st); if (job->submitter()->lastRequestedJobs().size() > 200) { job->submitter()->setCumRequested(job->submitter()->cumRequested() - *job->submitter()->lastRequestedJobs().begin()); job->submitter()->popRequestedJobs(); } all_job_stats.push_back(st); cum_job_stats += st; if (all_job_stats.size() > 2000) { cum_job_stats -= *all_job_stats.begin(); all_job_stats.pop_front(); } #if DEBUG_SCHEDULER > 1 if (job->argFlags() < 7000) { trace() << "add_job_stats " << job->language() << " " << (time(0) - starttime) << " " << st.compileTimeUser() << " " << (job->argFlags() & CompileJob::Flag_g ? '1' : '0') << (job->argFlags() & CompileJob::Flag_g3 ? '1' : '0') << (job->argFlags() & CompileJob::Flag_O ? '1' : '0') << (job->argFlags() & CompileJob::Flag_O2 ? '1' : '0') << (job->argFlags() & CompileJob::Flag_Ol2 ? 
'1' : '0') << " " << st.outputSize() << " " << msg->out_uncompressed << " " << job->server()->nodeName() << " " << float(msg->out_uncompressed) / st.compileTimeUser() << " " << server_speed(job->server(), NULL, true) << endl; } #endif } static bool handle_end(CompileServer *cs, Msg *); static void notify_monitors(Msg *m) { list::iterator it; list::iterator it_old; for (it = monitors.begin(); it != monitors.end();) { it_old = it++; /* If we can't send it, don't be clever, simply close this monitor. */ if (!(*it_old)->send_msg(*m, MsgChannel::SendNonBlocking /*| MsgChannel::SendBulkOnly*/)) { trace() << "monitor is blocking... removing" << endl; handle_end(*it_old, 0); } } delete m; } static float server_speed(CompileServer *cs, Job *job, bool blockDebug) { #if DEBUG_SCHEDULER <= 2 (void)blockDebug; #endif if (cs->lastCompiledJobs().size() == 0 || cs->cumCompiled().compileTimeUser() == 0) { return 0; } else { float f = (float)cs->cumCompiled().outputSize() / (float) cs->cumCompiled().compileTimeUser(); // we only care for the load if we're about to add a job to it if (job) { if (job->submitter() == cs) { int clientCount = cs->clientCount(); if( clientCount == 0 ) { // Older client/daemon that doesn't send client count. Use the number of jobs // that we've already been told about as the fallback value (it will sometimes // be an underestimate). clientCount = cs->submittedJobsCount(); } if (clientCount > cs->maxJobs()) { // The submitter would be overloaded by building all its jobs locally, // so penalize it heavily in order to send jobs preferably to other nodes, // so that the submitter should preferably do tasks that cannot be distributed, // such as linking or preparing jobs for remote nodes. f *= 0.1; #if DEBUG_SCHEDULER > 2 if(!blockDebug) log_info() << "penalizing local build for job " << job->id() << endl; #endif } else if (clientCount == cs->maxJobs()) { // This means the submitter would be fully loaded by its jobs. 
It is still // preferable to distribute the job, unless the submitter is noticeably faster. f *= 0.8; #if DEBUG_SCHEDULER > 2 if(!blockDebug) log_info() << "slightly penalizing local build for job " << job->id() << endl; #endif } else if (clientCount <= cs->maxJobs() / 2) { // The submitter has only few jobs, slightly prefer building the job locally // in order to save the overhead of distributing. // Note that this is unreliable, the submitter may be in fact running a large // parallel build but this is just the first of the jobs and other icecc instances // haven't been launched yet. There's probably no good way to detect this reliably. f *= 1.1; #if DEBUG_SCHEDULER > 2 if(!blockDebug) log_info() << "slightly preferring local build for job " << job->id() << endl; #endif } else { // the remaining case, don't adjust f *= 1; } // ignoring load for submitter - assuming the load is our own } else { f *= float(1000 - cs->load()) / 1000; } /* Gradually throttle with the number of assigned jobs. This * takes care of the fact that not all slots are equally fast on * CPUs with SMT and dynamic clock ramping. */ f *= (1.0f - (0.5f * cs->jobList().size() / cs->maxJobs())); } // below we add a pessimism factor - assuming the first job a computer got is not representative if (cs->lastCompiledJobs().size() < 7) { f *= (-0.5 * cs->lastCompiledJobs().size() + 4.5); } return f; } } static void handle_monitor_stats(CompileServer *cs, StatsMsg *m = 0) { if (monitors.empty()) { return; } string msg; char buffer[1000]; sprintf(buffer, "Name:%s\n", cs->nodeName().c_str()); msg += buffer; sprintf(buffer, "IP:%s\n", cs->name.c_str()); msg += buffer; sprintf(buffer, "MaxJobs:%d\n", cs->maxJobs()); msg += buffer; sprintf(buffer, "NoRemote:%s\n", cs->noRemote() ? 
"true" : "false"); msg += buffer; sprintf(buffer, "Platform:%s\n", cs->hostPlatform().c_str()); msg += buffer; sprintf(buffer, "Version:%d\n", cs->maximum_remote_protocol); msg += buffer; sprintf(buffer, "Features:%s\n", supported_features_to_string(cs->supportedFeatures()).c_str()); msg += buffer; sprintf(buffer, "Speed:%f\n", server_speed(cs)); msg += buffer; if (m) { sprintf(buffer, "Load:%d\n", m->load); msg += buffer; sprintf(buffer, "LoadAvg1:%u\n", m->loadAvg1); msg += buffer; sprintf(buffer, "LoadAvg5:%u\n", m->loadAvg5); msg += buffer; sprintf(buffer, "LoadAvg10:%u\n", m->loadAvg10); msg += buffer; sprintf(buffer, "FreeMem:%u\n", m->freeMem); msg += buffer; } else { sprintf(buffer, "Load:%u\n", cs->load()); msg += buffer; } notify_monitors(new MonStatsMsg(cs->hostId(), msg)); } static Job *create_new_job(CompileServer *submitter) { ++new_job_id; assert(jobs.find(new_job_id) == jobs.end()); Job *job = new Job(new_job_id, submitter); jobs[new_job_id] = job; return job; } static void enqueue_job_request(Job *job) { if (!toanswer.empty() && toanswer.back()->submitter == job->submitter()) { toanswer.back()->l.push_back(job); } else { UnansweredList *newone = new UnansweredList(); newone->submitter = job->submitter(); newone->l.push_back(job); toanswer.push_back(newone); } } static Job *get_job_request(void) { if (toanswer.empty()) { return 0; } UnansweredList *first = toanswer.front(); assert(!first->l.empty()); return first->l.front(); } /* Removes the first job request (the one returned by get_job_request()) */ static void remove_job_request(void) { if (toanswer.empty()) { return; } UnansweredList *first = toanswer.front(); toanswer.pop_front(); first->l.pop_front(); if (first->l.empty()) { delete first; } else { toanswer.push_back(first); } } static string dump_job(Job *job); static bool handle_cs_request(MsgChannel *cs, Msg *_m) { GetCSMsg *m = dynamic_cast(_m); if (!m) { return false; } CompileServer *submitter = static_cast(cs); 
submitter->setClientCount(m->client_count); Job *master_job = 0; for (unsigned int i = 0; i < m->count; ++i) { Job *job = create_new_job(submitter); job->setEnvironments(m->versions); job->setTargetPlatform(m->target); job->setArgFlags(m->arg_flags); switch(m->lang) { case CompileJob::Lang_C: job->setLanguage("C"); break; case CompileJob::Lang_CXX: job->setLanguage("C++"); break; case CompileJob::Lang_OBJC: job->setLanguage("ObjC"); break; case CompileJob::Lang_OBJCXX: job->setLanguage("ObjC++"); break; case CompileJob::Lang_Custom: job->setLanguage(""); break; default: job->setLanguage("???"); // presumably newer client? break; } job->setFileName(m->filename); job->setLocalClientId(m->client_id); job->setPreferredHost(m->preferred_host); job->setMinimalHostVersion(m->minimal_host_version); job->setRequiredFeatures(m->required_features); enqueue_job_request(job); std::ostream &dbg = log_info(); dbg << "NEW " << job->id() << " client=" << submitter->nodeName() << " versions=["; Environments envs = job->environments(); for (Environments::const_iterator it = envs.begin(); it != envs.end();) { dbg << it->second << "(" << it->first << ")"; if (++it != envs.end()) { dbg << ", "; } } dbg << "] " << m->filename << " " << job->language() << endl; notify_monitors(new MonGetCSMsg(job->id(), submitter->hostId(), m)); if (!master_job) { master_job = job; } else { master_job->appendJob(job); } } return true; } static bool handle_local_job(CompileServer *cs, Msg *_m) { JobLocalBeginMsg *m = dynamic_cast(_m); if (!m) { return false; } ++new_job_id; trace() << "handle_local_job " << m->outfile << " " << m->id << endl; cs->insertClientJobId(m->id, new_job_id); notify_monitors(new MonLocalJobBeginMsg(new_job_id, m->outfile, m->stime, cs->hostId())); return true; } static bool handle_local_job_done(CompileServer *cs, Msg *_m) { JobLocalDoneMsg *m = dynamic_cast(_m); if (!m) { return false; } trace() << "handle_local_job_done " << m->job_id << endl; notify_monitors(new 
JobLocalDoneMsg(cs->getClientJobId(m->job_id))); cs->eraseClientJobId(m->job_id); return true; } /* Given a candidate CS and a JOB, check all installed environments on the CS for a match. Return an empty string if none of the required environments for this job is installed. Otherwise return the host platform of the first found installed environment which is among the requested. That can be send to the client, which then completely specifies which environment to use (name, host platform and target platform). */ static string envs_match(CompileServer *cs, const Job *job) { if (job->submitter() == cs) { return cs->hostPlatform(); // it will compile itself } Environments compilerVersions = cs->compilerVersions(); /* Check all installed envs on the candidate CS ... */ for (Environments::const_iterator it = compilerVersions.begin(); it != compilerVersions.end(); ++it) { if (it->first == job->targetPlatform()) { /* ... IT now is an installed environment which produces code for the requested target platform. Now look at each env which could be installed from the client (i.e. those coming with the job) if it matches in name and additionally could be run by the candidate CS. 
*/ Environments environments = job->environments(); for (Environments::const_iterator it2 = environments.begin(); it2 != environments.end(); ++it2) { if (it->second == it2->second && cs->platforms_compatible(it2->first)) { return it2->first; } } } } return string(); } static CompileServer *pick_server(Job *job) { #if DEBUG_SCHEDULER > 1 trace() << "pick_server " << job->id() << " " << job->targetPlatform() << endl; #endif #if DEBUG_SCHEDULER > 0 /* consistency checking for now */ for (list::iterator it = css.begin(); it != css.end(); ++it) { CompileServer *cs = *it; list jobList = cs->jobList(); for (list::const_iterator it2 = jobList.begin(); it2 != jobList.end(); ++it2) { assert(jobs.find((*it2)->id()) != jobs.end()); } } for (map::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { Job *j = it->second; if (j->state() == Job::COMPILING) { CompileServer *cs = j->server(); list jobList = cs->jobList(); assert(find(jobList.begin(), jobList.end(), j) != jobList.end()); } } #endif /* if the user wants to test/prefer one specific daemon, we look for that one first */ if (!job->preferredHost().empty()) { for (list::iterator it = css.begin(); it != css.end(); ++it) { if ((*it)->matches(job->preferredHost()) && (*it)->is_eligible_now(job)) { #if DEBUG_SCHEDULER > 1 trace() << "taking preferred " << (*it)->nodeName() << " " << server_speed(*it, job, true) << endl; #endif return *it; } } return 0; } /* If we have no statistics simply use any server which is usable. */ if (!all_job_stats.size ()) { CompileServer *selected = NULL; int eligible_count = 0; for (list::iterator it = css.begin(); it != css.end(); ++it) { if ((*it)->is_eligible_now( job )) { ++eligible_count; // Do not select the first one (which could be broken and so we might never get job stats), // but rather select randomly. 
if( random() % eligible_count == 0 ) selected = *it; } } if( selected != NULL ) { trace() << "no job stats - returning randomly selected " << selected->nodeName() << " load: " << selected->load() << " can install: " << selected->can_install(job) << endl; return selected; } return 0; } CompileServer *best = 0; // best uninstalled CompileServer *bestui = 0; // best preloadable host CompileServer *bestpre = 0; uint matches = 0; for (list::iterator it = css.begin(); it != css.end(); ++it) { CompileServer *cs = *it; /* For now ignore overloaded servers. */ /* Pre-loadable (cs->jobList().size()) == (cs->maxJobs()) is checked later. */ if ((int(cs->jobList().size()) > cs->maxJobs()) || (cs->load() >= 1000)) { #if DEBUG_SCHEDULER > 1 trace() << "overloaded " << cs->nodeName() << " " << cs->jobList().size() << "/" << cs->maxJobs() << " jobs, load:" << cs->load() << endl; #endif continue; } // Ignore ineligible servers if (!cs->is_eligible_now(job)) { #if DEBUG_SCHEDULER > 1 trace() << cs->nodeName() << " not eligible" << endl; #endif continue; } // incompatible architecture or busy installing if (!cs->can_install(job).size()) { #if DEBUG_SCHEDULER > 2 trace() << cs->nodeName() << " can't install " << job->id() << endl; #endif continue; } /* Don't use non-chroot-able daemons for remote jobs. 
XXX */ if (!cs->chrootPossible() && cs != job->submitter()) { trace() << cs->nodeName() << " can't use chroot\n"; continue; } // Check if remote & if remote allowed if (!cs->check_remote(job)) { trace() << cs->nodeName() << " fails remote job check\n"; continue; } #if DEBUG_SCHEDULER > 1 trace() << cs->nodeName() << " compiled " << cs->lastCompiledJobs().size() << " got now: " << cs->jobList().size() << " speed: " << server_speed(cs, job, true) << " compile time " << cs->cumCompiled().compileTimeUser() << " produced code " << cs->cumCompiled().outputSize() << " client count: " << cs->clientCount() << endl; #endif if ((cs->lastCompiledJobs().size() == 0) && (cs->jobList().size() == 0) && cs->maxJobs()) { /* Make all servers compile a job at least once, so we'll get an idea about their speed. */ if (!envs_match(cs, job).empty()) { best = cs; matches++; } else { // if there is one server that already got the environment and one that // hasn't compiled at all, pick the one with environment first bestui = cs; } break; } /* Distribute 5% of our jobs to servers which haven't been picked in a long time. This gives us a chance to adjust the server speed rating, which may change due to external influences out of our control. */ if (!cs->lastPickedId() || ((job->id() - cs->lastPickedId()) > (20 * css.size()))) { best = cs; break; } if (!envs_match(cs, job).empty()) { if (!best) { best = cs; } /* Search the server with the earliest projected time to compile the job. (XXX currently this is equivalent to the fastest one) */ else if ((best->lastCompiledJobs().size() != 0) && (server_speed(best, job) < server_speed(cs, job))) { if (int(cs->jobList().size()) < cs->maxJobs()) { best = cs; } else { bestpre = cs; } } matches++; } else { if (!bestui) { bestui = cs; } /* Search the server with the earliest projected time to compile the job. 
(XXX currently this is equivalent to the fastest one) */ else if ((bestui->lastCompiledJobs().size() != 0) && (server_speed(bestui, job) < server_speed(cs, job))) { if (int(cs->jobList().size()) < cs->maxJobs()) { bestui = cs; } else { bestpre = cs; } } } } if (best) { #if DEBUG_SCHEDULER > 1 trace() << "taking best installed " << best->nodeName() << " " << server_speed(best, job, true) << endl; #endif return best; } if (bestui) { #if DEBUG_SCHEDULER > 1 trace() << "taking best uninstalled " << bestui->nodeName() << " " << server_speed(bestui, job, true) << endl; #endif return bestui; } if (bestpre) { #if DEBUG_SCHEDULER > 1 trace() << "taking best preload " << bestpre->nodeName() << " " << server_speed(bestpre, job, true) << endl; #endif } return bestpre; } /* Prunes the list of connected servers by those which haven't answered for a long time. Return the number of seconds when we have to cleanup next time. */ static time_t prune_servers() { list::iterator it; time_t now = time(0); time_t min_time = MAX_SCHEDULER_PING; for (it = controls.begin(); it != controls.end();) { if ((now - (*it)->last_talk) >= MAX_SCHEDULER_PING) { CompileServer *old = *it; ++it; handle_end(old, 0); continue; } min_time = min(min_time, MAX_SCHEDULER_PING - now + (*it)->last_talk); ++it; } for (it = css.begin(); it != css.end();) { (*it)->startInConnectionTest(); time_t cs_in_conn_timeout = (*it)->getNextTimeout(); if(cs_in_conn_timeout != -1) { min_time = min(min_time, cs_in_conn_timeout); } if ((*it)->busyInstalling() && ((now - (*it)->busyInstalling()) >= MAX_BUSY_INSTALLING)) { trace() << "busy installing for a long time - removing " << (*it)->nodeName() << endl; CompileServer *old = *it; ++it; handle_end(old, 0); continue; } /* protocol version 27 and newer use TCP keepalive */ if (IS_PROTOCOL_27(*it)) { ++it; continue; } if ((now - (*it)->last_talk) >= MAX_SCHEDULER_PING) { if ((*it)->maxJobs() >= 0) { trace() << "send ping " << (*it)->nodeName() << endl; 
(*it)->setMaxJobs((*it)->maxJobs() * -1); // better not give it away if ((*it)->send_msg(PingMsg())) { // give it MAX_SCHEDULER_PONG to answer a ping (*it)->last_talk = time(0) - MAX_SCHEDULER_PING + 2 * MAX_SCHEDULER_PONG; min_time = min(min_time, (time_t) 2 * MAX_SCHEDULER_PONG); ++it; continue; } } // R.I.P. trace() << "removing " << (*it)->nodeName() << endl; CompileServer *old = *it; ++it; handle_end(old, 0); continue; } else { min_time = min(min_time, MAX_SCHEDULER_PING - now + (*it)->last_talk); } #if DEBUG_SCHEDULER > 1 if ((random() % 400) < 0) { // R.I.P. trace() << "FORCED removing " << (*it)->nodeName() << endl; CompileServer *old = *it; ++it; handle_end(old, 0); continue; } #endif ++it; } return min_time; } static Job *delay_current_job() { assert(!toanswer.empty()); if (toanswer.size() == 1) { return 0; } UnansweredList *first = toanswer.front(); toanswer.pop_front(); toanswer.push_back(first); return get_job_request(); } static bool empty_queue() { Job *job = get_job_request(); if (!job) { return false; } assert(!css.empty()); Job *first_job = job; CompileServer *cs = 0; while (true) { cs = pick_server(job); if (cs) { break; } /* Ignore the load on the submitter itself if no other host could be found. We only obey to its max job number. */ cs = job->submitter(); if (!((int(cs->jobList().size()) < cs->maxJobs()) && job->preferredHost().empty() /* This should be trivially true. */ && cs->can_install(job).size())) { job = delay_current_job(); if ((job == first_job) || !job) { // no job found in the whole toanswer list job = first_job; for (list::iterator it = css.begin(); it != css.end(); ++it) { if(!job->preferredHost().empty() && !(*it)->matches(job->preferredHost())) continue; if((*it)->is_eligible_ever(job)) { trace() << "No suitable host found, delaying" << endl; return false; } } // This means that there's nobody who could possibly handle the job, // so there's no point in delaying. 
log_info() << "No suitable host found, assigning submitter" << endl; cs = job->submitter(); break; } } else { break; } } remove_job_request(); job->setState(Job::WAITINGFORCS); job->setServer(cs); string host_platform = envs_match(cs, job); bool gotit = true; if (host_platform.empty()) { gotit = false; host_platform = cs->can_install(job); } // mix and match between job ids unsigned matched_job_id = 0; unsigned count = 0; list lastRequestedJobs = job->submitter()->lastRequestedJobs(); for (list::const_iterator l = lastRequestedJobs.begin(); l != lastRequestedJobs.end(); ++l) { unsigned rcount = 0; list lastCompiledJobs = cs->lastCompiledJobs(); for (list::const_iterator r = lastCompiledJobs.begin(); r != lastCompiledJobs.end(); ++r) { if (l->jobId() == r->jobId()) { matched_job_id = l->jobId(); } if (++rcount > 16) { break; } } if (matched_job_id || (++count > 16)) { break; } } if(IS_PROTOCOL_37(job->submitter()) && cs == job->submitter()) { NoCSMsg m2(job->id(), job->localClientId()); if (!job->submitter()->send_msg(m2)) { trace() << "failed to deliver job " << job->id() << endl; handle_end(job->submitter(), 0); // will care for the rest return true; } } else { UseCSMsg m2(host_platform, cs->name, cs->remotePort(), job->id(), gotit, job->localClientId(), matched_job_id); if (!job->submitter()->send_msg(m2)) { trace() << "failed to deliver job " << job->id() << endl; handle_end(job->submitter(), 0); // will care for the rest return true; } } #if DEBUG_SCHEDULER >= 0 if (!gotit) { trace() << "put " << job->id() << " in joblist of " << cs->nodeName() << " (will install now)" << endl; } else { trace() << "put " << job->id() << " in joblist of " << cs->nodeName() << endl; } #endif cs->appendJob(job); /* if it doesn't have the environment, it will get it. 
*/ if (!gotit) { cs->setBusyInstalling(time(0)); } string env; if (!job->masterJobFor().empty()) { Environments environments = job->environments(); for (Environments::const_iterator it = environments.begin(); it != environments.end(); ++it) { if (it->first == cs->hostPlatform()) { env = it->second; break; } } } if (!env.empty()) { list masterJobFor = job->masterJobFor(); for (list::iterator it = masterJobFor.begin(); it != masterJobFor.end(); ++it) { // remove all other environments (*it)->clearEnvironments(); (*it)->appendEnvironment(make_pair(cs->hostPlatform(), env)); } } return true; } static bool handle_login(CompileServer *cs, Msg *_m) { LoginMsg *m = dynamic_cast(_m); if (!m) { return false; } std::ostream &dbg = trace(); cs->setRemotePort(m->port); cs->setCompilerVersions(m->envs); cs->setMaxJobs(m->max_kids); cs->setNoRemote(m->noremote); if (m->nodename.length()) { cs->setNodeName(m->nodename); } else { cs->setNodeName(cs->name); } cs->setHostPlatform(m->host_platform); cs->setChrootPossible(m->chroot_possible); cs->setSupportedFeatures(m->supported_features); cs->pick_new_id(); for (list::const_iterator it = block_css.begin(); it != block_css.end(); ++it) if (cs->matches(*it)) { return false; } dbg << "login " << m->nodename << " protocol version: " << cs->protocol << " features: " << supported_features_to_string(m->supported_features) << " ["; for (Environments::const_iterator it = m->envs.begin(); it != m->envs.end(); ++it) { dbg << it->second << "(" << it->first << "), "; } dbg << "]" << endl; handle_monitor_stats(cs); /* remove any other clients with the same IP and name, they must be stale */ for (list::iterator it = css.begin(); it != css.end();) { if (cs->eq_ip(*(*it)) && cs->nodeName() == (*it)->nodeName()) { CompileServer *old = *it; ++it; handle_end(old, 0); continue; } ++it; } css.push_back(cs); /* Configure the daemon */ if (IS_PROTOCOL_24(cs)) { cs->send_msg(ConfCSMsg()); } return true; } static bool handle_relogin(MsgChannel *mc, Msg *_m) { 
LoginMsg *m = dynamic_cast(_m); if (!m) { return false; } CompileServer *cs = static_cast(mc); cs->setCompilerVersions(m->envs); cs->setBusyInstalling(0); std::ostream &dbg = trace(); dbg << "RELOGIN " << cs->nodeName() << "(" << cs->hostPlatform() << "): ["; for (Environments::const_iterator it = m->envs.begin(); it != m->envs.end(); ++it) { dbg << it->second << "(" << it->first << "), "; } dbg << "]" << endl; /* Configure the daemon */ if (IS_PROTOCOL_24(cs)) { cs->send_msg(ConfCSMsg()); } return false; } static bool handle_mon_login(CompileServer *cs, Msg *_m) { MonLoginMsg *m = dynamic_cast(_m); if (!m) { return false; } monitors.push_back(cs); // monitors really want to be fed lazily cs->setBulkTransfer(); for (list::const_iterator it = css.begin(); it != css.end(); ++it) { handle_monitor_stats(*it); } fd2cs.erase(cs->fd); // no expected data from them return true; } static bool handle_job_begin(CompileServer *cs, Msg *_m) { JobBeginMsg *m = dynamic_cast(_m); if (!m) { return false; } if (jobs.find(m->job_id) == jobs.end()) { trace() << "handle_job_begin: no valid job id " << m->job_id << endl; return false; } Job *job = jobs[m->job_id]; if (job->server() != cs) { trace() << "that job isn't handled by " << cs->name << endl; return false; } cs->setClientCount(m->client_count); job->setState(Job::COMPILING); job->setStartTime(m->stime); job->setStartOnScheduler(time(0)); notify_monitors(new MonJobBeginMsg(m->job_id, m->stime, cs->hostId())); #if DEBUG_SCHEDULER >= 0 trace() << "BEGIN: " << m->job_id << " client=" << job->submitter()->nodeName() << "(" << job->targetPlatform() << ")" << " server=" << job->server()->nodeName() << "(" << job->server()->hostPlatform() << ")" << endl; #endif return true; } static bool handle_job_done(CompileServer *cs, Msg *_m) { JobDoneMsg *m = dynamic_cast(_m); if (!m) { return false; } Job *j = 0; if (uint32_t clientId = m->unknown_job_client_id()) { // The daemon has sent a done message for a job for which it doesn't know the job 
id (happens // if the job is cancelled before we send back the job id). Find the job using the client id. map::iterator mit; for (mit = jobs.begin(); mit != jobs.end(); ++mit) { Job *job = mit->second; trace() << "looking for waitcs " << job->server() << " " << job->submitter() << " " << cs << " " << job->state() << " " << job->localClientId() << " " << clientId << endl; if (job->server() == 0 && job->submitter() == cs && job->localClientId() == clientId) { trace() << "STOP (WAITFORCS) FOR " << mit->first << endl; j = job; m->set_job_id( j->id()); // Now we know the job's id. /* Unfortunately the toanswer queues are also tagged based on the daemon, so we need to clean them up also. */ list::iterator it; for (it = toanswer.begin(); it != toanswer.end(); ++it) if ((*it)->submitter == cs) { UnansweredList *l = *it; list::iterator jit; for (jit = l->l.begin(); jit != l->l.end(); ++jit) { if (*jit == j) { l->l.erase(jit); break; } } if (l->l.empty()) { it = toanswer.erase(it); break; } } } } } else if (jobs.find(m->job_id) != jobs.end()) { j = jobs[m->job_id]; } if (!j) { trace() << "job ID not present " << m->job_id << endl; return false; } if (m->is_from_server() && (j->server() != cs)) { log_info() << "the server isn't the same for job " << m->job_id << endl; log_info() << "server: " << j->server()->nodeName() << endl; log_info() << "msg came from: " << cs->nodeName() << endl; // the daemon is not following matz's rules: kick him handle_end(cs, 0); return false; } if (!m->is_from_server() && (j->submitter() != cs)) { log_info() << "the submitter isn't the same for job " << m->job_id << endl; log_info() << "submitter: " << j->submitter()->nodeName() << endl; log_info() << "msg came from: " << cs->nodeName() << endl; // the daemon is not following matz's rules: kick him handle_end(cs, 0); return false; } cs->setClientCount(m->client_count); if (m->exitcode == 0) { std::ostream &dbg = trace(); dbg << "END " << m->job_id << " status=" << m->exitcode; if 
(m->in_uncompressed) dbg << " in=" << m->in_uncompressed << "(" << int(m->in_compressed * 100 / m->in_uncompressed) << "%)"; else { dbg << " in=0(0%)"; } if (m->out_uncompressed) dbg << " out=" << m->out_uncompressed << "(" << int(m->out_compressed * 100 / m->out_uncompressed) << "%)"; else { dbg << " out=0(0%)"; } dbg << " real=" << m->real_msec << " user=" << m->user_msec << " sys=" << m->sys_msec << " pfaults=" << m->pfaults << " server=" << j->server()->nodeName() << endl; } else { trace() << "END " << m->job_id << " status=" << m->exitcode << endl; } if (j->server()) { j->server()->removeJob(j); } add_job_stats(j, m); notify_monitors(new MonJobDoneMsg(*m)); jobs.erase(m->job_id); delete j; return true; } static bool handle_ping(CompileServer *cs, Msg * /*_m*/) { cs->last_talk = time(0); if (cs->maxJobs() < 0) { cs->setMaxJobs(cs->maxJobs() * -1); } return true; } static bool handle_stats(CompileServer *cs, Msg *_m) { StatsMsg *m = dynamic_cast(_m); if (!m) { return false; } /* Before protocol 25, ping and stat handling was clutched together. 
*/ if (!IS_PROTOCOL_25(cs)) { cs->last_talk = time(0); if (cs && (cs->maxJobs() < 0)) { cs->setMaxJobs(cs->maxJobs() * -1); } } for (list::iterator it = css.begin(); it != css.end(); ++it) if (*it == cs) { (*it)->setLoad(m->load); (*it)->setClientCount(m->client_count); handle_monitor_stats(*it, m); return true; } return false; } static bool handle_blacklist_host_env(CompileServer *cs, Msg *_m) { BlacklistHostEnvMsg *m = dynamic_cast(_m); if (!m) { return false; } for (list::const_iterator it = css.begin(); it != css.end(); ++it) if ((*it)->name == m->hostname) { trace() << "Blacklisting host " << m->hostname << " for environment " << m->environment << " (" << m->target << ")" << endl; cs->blacklistCompileServer(*it, make_pair(m->target, m->environment)); } return true; } static string dump_job(Job *job) { char buffer[1000]; string line; string jobState; switch(job->state()) { case Job::PENDING: jobState = "PEND"; break; case Job::WAITINGFORCS: jobState = "WAIT"; break; case Job::COMPILING: jobState = "COMP"; break; default: jobState = "Huh?"; } snprintf(buffer, sizeof(buffer), "%u %s sub:%s on:%s ", job->id(), jobState.c_str(), job->submitter() ? job->submitter()->nodeName().c_str() : "<>", job->server() ? job->server()->nodeName().c_str() : ""); buffer[sizeof(buffer) - 1] = 0; line = buffer; line = line + job->fileName(); return line; } /* Splits the string S between characters in SET and add them to list L. */ static void split_string(const string &s, const char *set, list &l) { string::size_type end = 0; while (end != string::npos) { string::size_type start = s.find_first_not_of(set, end); if (start == string::npos) { break; } end = s.find_first_of(set, start); /* Do we really need to check end here or is the subtraction defined on every platform correctly (with GCC it's ensured, that (npos - start) is the rest of the string). 
*/ if (end != string::npos) { l.push_back(s.substr(start, end - start)); } else { l.push_back(s.substr(start)); } } } static bool handle_control_login(CompileServer *cs) { cs->setType(CompileServer::LINE); cs->last_talk = time(0); cs->setBulkTransfer(); cs->setState(CompileServer::LOGGEDIN); assert(find(controls.begin(), controls.end(), cs) == controls.end()); controls.push_back(cs); std::ostringstream o; o << "200-ICECC " VERSION ": " << time(0) - starttime << "s uptime, " << css.size() << " hosts, " << jobs.size() << " jobs in queue " << "(" << new_job_id << " total)." << endl; o << "200 Use 'help' for help and 'quit' to quit." << endl; return cs->send_msg(TextMsg(o.str())); } static bool handle_line(CompileServer *cs, Msg *_m) { TextMsg *m = dynamic_cast(_m); if (!m) { return false; } string line; list l; split_string(m->text, " \t\n", l); string cmd; cs->last_talk = time(0); if (l.empty()) { cmd = ""; } else { cmd = l.front(); l.pop_front(); transform(cmd.begin(), cmd.end(), cmd.begin(), ::tolower); } if (cmd == "listcs") { for (list::iterator it = css.begin(); it != css.end(); ++it) { char buffer[1000]; sprintf(buffer, " (%s:%d) ", (*it)->name.c_str(), (*it)->remotePort()); line = " " + (*it)->nodeName() + buffer; line += "[" + (*it)->hostPlatform() + "] speed="; sprintf(buffer, "%.2f jobs=%d/%d load=%d", server_speed(*it), (int)(*it)->jobList().size(), (*it)->maxJobs(), (*it)->load()); line += buffer; if ((*it)->busyInstalling()) { sprintf(buffer, " busy installing since %ld s", time(0) - (*it)->busyInstalling()); line += buffer; } if (!cs->send_msg(TextMsg(line))) { return false; } list jobList = (*it)->jobList(); for (list::const_iterator it2 = jobList.begin(); it2 != jobList.end(); ++it2) { if (!cs->send_msg(TextMsg(" " + dump_job(*it2)))) { return false; } } } } else if (cmd == "listblocks") { for (list::const_iterator it = block_css.begin(); it != block_css.end(); ++it) { if (!cs->send_msg(TextMsg(" " + (*it)))) { return false; } } } else if (cmd == 
"listjobs") { for (map::const_iterator it = jobs.begin(); it != jobs.end(); ++it) if (!cs->send_msg(TextMsg(" " + dump_job(it->second)))) { return false; } } else if (cmd == "quit" || cmd == "exit") { handle_end(cs, 0); return false; } else if (cmd == "removecs" || cmd == "blockcs") { if (l.empty()) { if (!cs->send_msg(TextMsg(string("401 Sure. But which hosts?")))) { return false; } } else { for (list::const_iterator si = l.begin(); si != l.end(); ++si) { if (cmd == "blockcs") block_css.push_back(*si); for (list::iterator it = css.begin(); it != css.end(); ++it) { if ((*it)->matches(*si)) { if (cs->send_msg(TextMsg(string("removing host ") + *si))) { handle_end(*it, 0); } break; } } } } } else if (cmd == "unblockcs") { if (l.empty()) { if(!cs->send_msg (TextMsg (string ("401 Sure. But which host?")))) return false; } else { for (list::const_iterator si = l.begin(); si != l.end(); ++si) { for (list::iterator it = block_css.begin(); it != block_css.end(); ++it) { if (*si == *it) { block_css.erase(it); break; } } } } } else if (cmd == "internals") { for (list::iterator it = css.begin(); it != css.end(); ++it) { Msg *msg = NULL; if (!l.empty()) { list::const_iterator si; for (si = l.begin(); si != l.end(); ++si) { if ((*it)->matches(*si)) { break; } } if (si == l.end()) { continue; } } if ((*it)->send_msg(GetInternalStatus())) { msg = (*it)->get_msg(); } if (msg && msg->type == M_STATUS_TEXT) { if (!cs->send_msg(TextMsg(dynamic_cast(msg)->text))) { return false; } } else { if (!cs->send_msg(TextMsg((*it)->nodeName() + " not reporting\n"))) { return false; } } delete msg; } } else if (cmd == "help") { if (!cs->send_msg(TextMsg( "listcs\nlistblocks\nlistjobs\nremovecs\nblockcs\nunblockcs\ninternals\nhelp\nquit"))) { return false; } } else { string txt = "Invalid command '"; txt += m->text; txt += "'"; if (!cs->send_msg(TextMsg(txt))) { return false; } } return cs->send_msg(TextMsg(string("200 done"))); } // return false if some error occurred, leaves C open. 
*/ static bool try_login(CompileServer *cs, Msg *m) { bool ret = true; switch (m->type) { case M_LOGIN: cs->setType(CompileServer::DAEMON); ret = handle_login(cs, m); break; case M_MON_LOGIN: cs->setType(CompileServer::MONITOR); ret = handle_mon_login(cs, m); break; default: log_info() << "Invalid first message " << (char)m->type << endl; ret = false; break; } if (ret) { cs->setState(CompileServer::LOGGEDIN); } else { handle_end(cs, m); } delete m; return ret; } static bool handle_end(CompileServer *toremove, Msg *m) { #if DEBUG_SCHEDULER > 1 trace() << "Handle_end " << toremove << " " << m << endl; #else (void)m; #endif switch (toremove->type()) { case CompileServer::MONITOR: assert(find(monitors.begin(), monitors.end(), toremove) != monitors.end()); monitors.remove(toremove); #if DEBUG_SCHEDULER > 1 trace() << "handle_end(moni) " << monitors.size() << endl; #endif break; case CompileServer::DAEMON: log_info() << "remove daemon " << toremove->nodeName() << endl; notify_monitors(new MonStatsMsg(toremove->hostId(), "State:Offline\n")); /* A daemon disconnected. We must remove it from the css list, and we have to delete all jobs scheduled on that daemon. There might be still clients connected running on the machine on which the daemon died. We expect that the daemon dying makes the client disconnect soon too. */ css.remove(toremove); /* Unfortunately the toanswer queues are also tagged based on the daemon, so we need to clean them up also. 
*/ for (list::iterator it = toanswer.begin(); it != toanswer.end();) { if ((*it)->submitter == toremove) { UnansweredList *l = *it; list::iterator jit; for (jit = l->l.begin(); jit != l->l.end(); ++jit) { trace() << "STOP (DAEMON) FOR " << (*jit)->id() << endl; notify_monitors(new MonJobDoneMsg(JobDoneMsg((*jit)->id(), 255))); if ((*jit)->server()) { (*jit)->server()->setBusyInstalling(0); } jobs.erase((*jit)->id()); delete(*jit); } delete l; it = toanswer.erase(it); } else { ++it; } } for (map::iterator mit = jobs.begin(); mit != jobs.end();) { Job *job = mit->second; if (job->server() == toremove || job->submitter() == toremove) { trace() << "STOP (DAEMON2) FOR " << mit->first << endl; notify_monitors(new MonJobDoneMsg(JobDoneMsg(job->id(), 255))); /* If this job is removed because the submitter is removed also remove the job from the servers joblist. */ if (job->server() && job->server() != toremove) { job->server()->removeJob(job); } if (job->server()) { job->server()->setBusyInstalling(0); } jobs.erase(mit++); delete job; } else { ++mit; } } for (list::iterator itr = css.begin(); itr != css.end(); ++itr) { (*itr)->eraseCSFromBlacklist(toremove); } break; case CompileServer::LINE: toremove->send_msg(TextMsg("200 Good Bye!")); controls.remove(toremove); break; default: trace() << "remote end had UNKNOWN type?" << endl; break; } fd2cs.erase(toremove->fd); delete toremove; return true; } /* Returns TRUE if C was not closed. */ static bool handle_activity(CompileServer *cs) { Msg *m; bool ret = true; m = cs->get_msg(0, true); if (!m) { handle_end(cs, m); return false; } /* First we need to login. 
*/ if (cs->state() == CompileServer::CONNECTED) { return try_login(cs, m); } switch (m->type) { case M_JOB_BEGIN: ret = handle_job_begin(cs, m); break; case M_JOB_DONE: ret = handle_job_done(cs, m); break; case M_PING: ret = handle_ping(cs, m); break; case M_STATS: ret = handle_stats(cs, m); break; case M_END: handle_end(cs, m); ret = false; break; case M_JOB_LOCAL_BEGIN: ret = handle_local_job(cs, m); break; case M_JOB_LOCAL_DONE: ret = handle_local_job_done(cs, m); break; case M_LOGIN: ret = handle_relogin(cs, m); break; case M_TEXT: ret = handle_line(cs, m); break; case M_GET_CS: ret = handle_cs_request(cs, m); break; case M_BLACKLIST_HOST_ENV: ret = handle_blacklist_host_env(cs, m); break; default: log_info() << "Invalid message type arrived " << (char)m->type << endl; handle_end(cs, m); ret = false; break; } delete m; return ret; } static int open_broad_listener(int port, const string &interface) { int listen_fd; struct sockaddr_in myaddr; if ((listen_fd = socket(PF_INET, SOCK_DGRAM, 0)) < 0) { log_perror("socket()"); return -1; } int optval = 1; if (setsockopt(listen_fd, SOL_SOCKET, SO_BROADCAST, &optval, sizeof(optval)) < 0) { log_perror("setsockopt()"); return -1; } if (!build_address_for_interface(myaddr, interface, port)) { return -1; } if (::bind(listen_fd, (struct sockaddr *) &myaddr, sizeof(myaddr)) < 0) { log_perror("bind()"); return -1; } return listen_fd; } static int open_tcp_listener(short port, const string &interface) { int fd; struct sockaddr_in myaddr; if ((fd = socket(PF_INET, SOCK_STREAM, 0)) < 0) { log_perror("socket()"); return -1; } int optval = 1; if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval)) < 0) { log_perror("setsockopt()"); return -1; } /* Although we poll() on fd we need O_NONBLOCK, due to possible network errors making accept() block although poll() said there was some activity. 
*/ if (fcntl(fd, F_SETFL, O_NONBLOCK) < 0) { log_perror("fcntl()"); return -1; } if (!build_address_for_interface(myaddr, interface, port)) { return -1; } if (::bind(fd, (struct sockaddr *) &myaddr, sizeof(myaddr)) < 0) { log_perror("bind()"); return -1; } if (listen(fd, 1024) < 0) { log_perror("listen()"); return -1; } return fd; } static void usage(const char *reason = 0) { if (reason) { cerr << reason << endl; } cerr << "ICECREAM scheduler " VERSION "\n"; cerr << "usage: icecc-scheduler [options] \n" << "Options:\n" << " -n, --netname \n" << " -i, --interface \n" << " -p, --port \n" << " -h, --help\n" << " -l, --log-file \n" << " -d, --daemonize\n" << " -u, --user-uid\n" << " -v[v[v]]]\n" << " -r, --persistent-client-connection\n" << endl; exit(1); } static void trigger_exit(int signum) { if (!exit_main_loop) { exit_main_loop = true; } else { // hmm, we got killed already. try better static const char msg[] = "forced exit.\n"; ignore_result(write(STDERR_FILENO, msg, strlen( msg ))); _exit(1); } // make BSD happy signal(signum, trigger_exit); } static void handle_scheduler_announce(const char* buf, const char* netname, bool persistent_clients, struct sockaddr_in broad_addr) { /* Another scheduler is announcing it's running, disconnect daemons if it has a better version or the same version but was started earlier. 
*/ time_t other_time; int other_protocol_version; string other_netname; Broadcasts::getSchedulerVersionData(buf, &other_protocol_version, &other_time, &other_netname); trace() << "Received scheduler announcement from " << inet_ntoa(broad_addr.sin_addr) << ":" << ntohs(broad_addr.sin_port) << " (version " << int(other_protocol_version) << ", netname " << other_netname << ")" << endl; if (other_protocol_version >= 36) { if (other_netname == netname) { if (other_protocol_version > PROTOCOL_VERSION || (other_protocol_version == PROTOCOL_VERSION && other_time < starttime)) { if (!persistent_clients){ log_info() << "Scheduler from " << inet_ntoa(broad_addr.sin_addr) << ":" << ntohs(broad_addr.sin_port) << " (version " << int(other_protocol_version) << ") has announced itself as a preferred" " scheduler, disconnecting all connections." << endl; if (!css.empty() || !monitors.empty()) { while (!css.empty()) { handle_end(css.front(), NULL); } while (!monitors.empty()) { handle_end(monitors.front(), NULL); } } } } } } } int main(int argc, char *argv[]) { int listen_fd, remote_fd, broad_fd, text_fd; struct sockaddr_in remote_addr; socklen_t remote_len; const char *netname = "ICECREAM"; bool detach = false; bool persistent_clients = false; int debug_level = Error; string logfile; uid_t user_uid; gid_t user_gid; int warn_icecc_user_errno = 0; if (getuid() == 0) { struct passwd *pw = getpwnam("icecc"); if (pw) { user_uid = pw->pw_uid; user_gid = pw->pw_gid; } else { warn_icecc_user_errno = errno ? 
errno : ENOENT; // apparently errno can be 0 on error here user_uid = 65534; user_gid = 65533; } } else { user_uid = getuid(); user_gid = getgid(); } while (true) { int option_index = 0; static const struct option long_options[] = { { "netname", 1, NULL, 'n' }, { "help", 0, NULL, 'h' }, { "persistent-client-connection", 0, NULL, 'r' }, { "interface", 1, NULL, 'i' }, { "port", 1, NULL, 'p' }, { "daemonize", 0, NULL, 'd'}, { "log-file", 1, NULL, 'l'}, { "user-uid", 1, NULL, 'u'}, { 0, 0, 0, 0 } }; const int c = getopt_long(argc, argv, "n:i:p:hl:vdru:", long_options, &option_index); if (c == -1) { break; // eoo } switch (c) { case 0: (void) long_options[option_index].name; break; case 'd': detach = true; break; case 'r': persistent_clients= true; break; case 'l': if (optarg && *optarg) { logfile = optarg; } else { usage("Error: -l requires argument"); } break; case 'v': if (debug_level < MaxVerboseLevel) { debug_level++; } break; case 'n': if (optarg && *optarg) { netname = optarg; } else { usage("Error: -n requires argument"); } break; case 'i': if (optarg && *optarg) { string interface = optarg; if (interface.empty()) { usage("Error: Invalid network interface specified"); } scheduler_interface = interface; } else { usage("Error: -i requires argument"); } break; case 'p': if (optarg && *optarg) { scheduler_port = atoi(optarg); if (0 == scheduler_port) { usage("Error: Invalid port specified"); } } else { usage("Error: -p requires argument"); } break; case 'u': if (optarg && *optarg) { struct passwd *pw = getpwnam(optarg); if (!pw) { usage("Error: -u requires a valid username"); } else { user_uid = pw->pw_uid; user_gid = pw->pw_gid; warn_icecc_user_errno = 0; if (!user_gid || !user_uid) { usage("Error: -u must not be root"); } } } else { usage("Error: -u requires a valid username"); } break; default: usage(); } } if (warn_icecc_user_errno != 0) { log_errno("No icecc user on system. 
Falling back to nobody.", errno); } if (getuid() == 0) { if (!logfile.size() && detach) { if (mkdir("/var/log/icecc", S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH)) { if (errno == EEXIST) { if (-1 == chmod("/var/log/icecc", S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH)){ log_perror("chmod() failure"); } if (-1 == chown("/var/log/icecc", user_uid, user_gid)){ log_perror("chown() failure"); } } } logfile = "/var/log/icecc/scheduler.log"; } if (setgroups(0, NULL) < 0) { log_perror("setgroups() failed"); return 1; } if (setgid(user_gid) < 0) { log_perror("setgid() failed"); return 1; } if (setuid(user_uid) < 0) { log_perror("setuid() failed"); return 1; } } setup_debug(debug_level, logfile); log_info() << "ICECREAM scheduler " VERSION " starting up, port " << scheduler_port << endl; if (detach) { if (daemon(0, 0) != 0) { log_errno("Failed to detach.", errno); exit(1); } } listen_fd = open_tcp_listener(scheduler_port, scheduler_interface); if (listen_fd < 0) { return 1; } text_fd = open_tcp_listener(scheduler_port + 1, scheduler_interface); if (text_fd < 0) { return 1; } broad_fd = open_broad_listener(scheduler_port, scheduler_interface); if (broad_fd < 0) { return 1; } if (signal(SIGPIPE, SIG_IGN) == SIG_ERR) { log_warning() << "signal(SIGPIPE, ignore) failed: " << strerror(errno) << endl; return 1; } starttime = time(0); if( getenv( "ICECC_FAKE_STARTTIME" ) != NULL ) starttime -= 1000; ofstream pidFile; string progName = argv[0]; progName = find_basename(progName); pidFilePath = string(RUNDIR) + string("/") + progName + string(".pid"); pidFile.open(pidFilePath.c_str()); pidFile << getpid() << endl; pidFile.close(); signal(SIGTERM, trigger_exit); signal(SIGINT, trigger_exit); signal(SIGALRM, trigger_exit); log_info() << "scheduler ready" << endl; time_t next_listen = 0; Broadcasts::broadcastSchedulerVersion(scheduler_port, netname, starttime); last_announce = starttime; while (!exit_main_loop) { int timeout = prune_servers(); while (empty_queue()) { continue; } 
/* Announce ourselves from time to time, to make other possible schedulers disconnect their daemons if we are the preferred scheduler (daemons with version new enough should automatically select the best scheduler, but old daemons connect randomly). */ if (last_announce + 120 < time(NULL)) { Broadcasts::broadcastSchedulerVersion(scheduler_port, netname, starttime); last_announce = time(NULL); } vector< pollfd > pollfds; pollfds.reserve( fd2cs.size() + css.size() + 5 ); pollfd pfd; // tmp variable if (time(0) >= next_listen) { pfd.fd = listen_fd; pfd.events = POLLIN; pollfds.push_back( pfd ); pfd.fd = text_fd; pfd.events = POLLIN; pollfds.push_back( pfd ); } pfd.fd = broad_fd; pfd.events = POLLIN; pollfds.push_back( pfd ); for (map::const_iterator it = fd2cs.begin(); it != fd2cs.end();) { int i = it->first; CompileServer *cs = it->second; bool ok = true; ++it; /* handle_activity() can delete c and make the iterator invalid. */ while (ok && cs->has_msg()) { if (!handle_activity(cs)) { ok = false; } } if (ok) { pfd.fd = i; pfd.events = POLLIN; pollfds.push_back( pfd ); } } list cs_in_tsts; for (list::iterator it = css.begin(); it != css.end(); ++it) { if ((*it)->getConnectionInProgress()) { int csInFd = (*it)->getInFd(); cs_in_tsts.push_back(*it); pfd.fd = csInFd; pfd.events = POLLIN | POLLOUT; pollfds.push_back( pfd ); } } int active_fds = poll(pollfds.data(), pollfds.size(), timeout * 1000); int poll_errno = errno; if (active_fds < 0 && errno == EINTR) { reset_debug_if_needed(); // we possibly got SIGHUP continue; } reset_debug_if_needed(); if (active_fds < 0) { errno = poll_errno; log_perror("poll()"); return 1; } if (pollfd_is_set(pollfds, listen_fd, POLLIN)) { active_fds--; bool pending_connections = true; while (pending_connections) { remote_len = sizeof(remote_addr); remote_fd = accept(listen_fd, (struct sockaddr *) &remote_addr, &remote_len); if (remote_fd < 0) { pending_connections = false; } if (remote_fd < 0 && errno != EAGAIN && errno != EINTR && errno != 
EWOULDBLOCK) { log_perror("accept()"); /* don't quit because of ECONNABORTED, this can happen during * floods */ } if (remote_fd >= 0) { CompileServer *cs = new CompileServer(remote_fd, (struct sockaddr *) &remote_addr, remote_len, false); trace() << "accepted " << cs->name << endl; cs->last_talk = time(0); if (!cs->protocol) { // protocol mismatch delete cs; continue; } fd2cs[cs->fd] = cs; while (!cs->read_a_bit() || cs->has_msg()) { if (! handle_activity(cs)) { break; } } } } next_listen = time(0) + 1; } if (active_fds && pollfd_is_set(pollfds, text_fd, POLLIN)) { active_fds--; remote_len = sizeof(remote_addr); remote_fd = accept(text_fd, (struct sockaddr *) &remote_addr, &remote_len); if (remote_fd < 0 && errno != EAGAIN && errno != EINTR) { log_perror("accept()"); /* Don't quit the scheduler just because a debugger couldn't connect. */ } if (remote_fd >= 0) { CompileServer *cs = new CompileServer(remote_fd, (struct sockaddr *) &remote_addr, remote_len, true); fd2cs[cs->fd] = cs; if (!handle_control_login(cs)) { handle_end(cs, 0); continue; } while (!cs->read_a_bit() || cs->has_msg()) if (!handle_activity(cs)) { break; } } } if (active_fds && pollfd_is_set(pollfds, broad_fd, POLLIN)) { active_fds--; char buf[Broadcasts::BROAD_BUFLEN + 1]; struct sockaddr_in broad_addr; socklen_t broad_len = sizeof(broad_addr); /* We can get either a daemon request for a scheduler (1 byte) or another scheduler announcing itself (4 bytes + time). */ int buflen = recvfrom(broad_fd, buf, Broadcasts::BROAD_BUFLEN, 0, (struct sockaddr *) &broad_addr, &broad_len); if (buflen < 0 || buflen > Broadcasts::BROAD_BUFLEN){ int err = errno; log_perror("recvfrom()"); /* Some linux 2.6 kernels can return from select with data available, and then return from read() with EAGAIN even on a blocking socket (breaking POSIX). Happens when the arriving packet has a wrong checksum. So we ignore EAGAIN here, but still abort for all other errors. 
*/ if (err != EAGAIN && err != EWOULDBLOCK) { return -1; } } int daemon_version; if (DiscoverSched::isSchedulerDiscovery(buf, buflen, &daemon_version)) { /* Daemon is searching for a scheduler, only answer if daemon would be able to talk to us. */ if ( daemon_version >= MIN_PROTOCOL_VERSION){ log_info() << "broadcast from " << inet_ntoa(broad_addr.sin_addr) << ":" << ntohs(broad_addr.sin_port) << " (version " << daemon_version << ")\n"; int reply_len = DiscoverSched::prepareBroadcastReply(buf, netname, starttime); if (sendto(broad_fd, buf, reply_len, 0, (struct sockaddr *) &broad_addr, broad_len) != reply_len) { log_perror("sendto()"); } } } else if(Broadcasts::isSchedulerVersion(buf, buflen)) { handle_scheduler_announce(buf, netname, persistent_clients, broad_addr); } } for (map::const_iterator it = fd2cs.begin(); active_fds > 0 && it != fd2cs.end();) { int i = it->first; CompileServer *cs = it->second; /* handle_activity can delete the channel from the fd2cs list, hence advance the iterator right now, so it doesn't become invalid. 
*/ ++it; if (pollfd_is_set(pollfds, i, POLLIN)) { while (!cs->read_a_bit() || cs->has_msg()) { if (!handle_activity(cs)) { break; } } active_fds--; } } for (list::const_iterator it = cs_in_tsts.begin(); it != cs_in_tsts.end(); ++it) { if(find(css.begin(), css.end(), *it) == css.end()) { continue; // deleted meanwhile } if((*it)->getConnectionInProgress()) { if(active_fds > 0 && pollfd_is_set(pollfds, (*it)->getInFd(), POLLIN | POLLOUT) && (*it)->isConnected()) { active_fds--; (*it)->updateInConnectivity(true); } else if((active_fds == 0 || pollfd_is_set(pollfds, (*it)->getInFd(), POLLIN | POLLOUT)) && !(*it)->isConnected()) { (*it)->updateInConnectivity(false); } } } } shutdown(broad_fd, SHUT_RDWR); while (!css.empty()) handle_end(css.front(), NULL); while (!monitors.empty()) handle_end(monitors.front(), NULL); if ((-1 == close(broad_fd)) && (errno != EBADF)){ log_perror("close failed"); } if (-1 == unlink(pidFilePath.c_str()) && errno != ENOENT){ log_perror("unlink failed") << "\t" << pidFilePath << endl; } return 0; } icecream-1.3.1/scheduler/scheduler.h000066400000000000000000000020751361626760200174060ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. Copyright (c) 2004 Michael Matz 2004 Stephan Kulow This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef SCHEDULER_H #define SCHEDULER_H // Values 0 to 3. #define DEBUG_SCHEDULER 0 #endif icecream-1.3.1/services/000077500000000000000000000000001361626760200151205ustar00rootroot00000000000000icecream-1.3.1/services/Makefile.am000066400000000000000000000011161361626760200171530ustar00rootroot00000000000000lib_LTLIBRARIES = libicecc.la libicecc_la_SOURCES = job.cpp comm.cpp exitcode.cpp getifaddrs.cpp logging.cpp ncpus.c tempfile.c platform.cpp gcc.cpp util.cpp libicecc_la_LIBADD = \ $(LZO_LDADD) \ $(ZSTD_LDADD) \ $(CAPNG_LDADD) \ $(DL_LDADD) libicecc_la_CFLAGS = -fPIC -DPIC libicecc_la_CXXFLAGS = -fPIC -DPIC icedir = $(includedir)/icecc ice_HEADERS = \ job.h \ comm.h \ logging.h noinst_HEADERS = \ exitcode.h \ getifaddrs.h \ logging.h \ ncpus.h \ tempfile.h \ platform.h \ util.h pkgconfigdir = $(libdir)/pkgconfig pkgconfig_DATA = icecc.pc AM_LIBTOOLFLAGS = --silent icecream-1.3.1/services/comm.cpp000066400000000000000000002175471361626760200165770ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. Copyright (c) 2004 Michael Matz 2004 Stephan Kulow 2007 Dirk Mueller This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_NETINET_TCP_VAR_H #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_LIBCAP_NG #include #endif #include "getifaddrs.h" #include #include #include "logging.h" #include "job.h" #include "comm.h" using namespace std; // Prefer least amount of CPU use #undef ZSTD_CLEVEL_DEFAULT #define ZSTD_CLEVEL_DEFAULT 1 // old libzstd? #ifndef ZSTD_COMPRESSBOUND #define ZSTD_COMPRESSBOUND(n) ZSTD_compressBound(n) #endif static int zstd_compression() { const char *level = getenv("ICECC_COMPRESSION"); if (!level || !*level) return ZSTD_CLEVEL_DEFAULT; char *endptr; int n = strtol(level, &endptr, 0); if (*endptr) return ZSTD_CLEVEL_DEFAULT; return n; } /* * A generic DoS protection. The biggest messages are of type FileChunk * which shouldn't be larger than 100kb. so anything bigger than 10 times * of that is definitely fishy, and we must reject it (we're running as root, * so be cautious). */ #define MAX_MSG_SIZE 1 * 1024 * 1024 /* * On a slow and congested network it's possible for a send call to get starved. * This will happen especially when trying to send a huge number of bytes over at * once. We can avoid this situation to a large extend by sending smaller * chunks of data over. */ #define MAX_SLOW_WRITE_SIZE 10 * 1024 /* TODO * buffered in/output per MsgChannel + move read* into MsgChannel, create buffer-fill function + add timeouting poll() there, handle it in the different + read* functions. 
+ write* unbuffered / or per message buffer (flush in send_msg) * think about error handling + saving errno somewhere (in MsgChannel class) * handle unknown messages (implement a UnknownMsg holding the content of the whole data packet?) */ /* Tries to fill the inbuf completely. */ bool MsgChannel::read_a_bit() { chop_input(); size_t count = inbuflen - inofs; if (count < 128) { inbuflen = (inbuflen + 128 + 127) & ~(size_t) 127; inbuf = (char *) realloc(inbuf, inbuflen); assert(inbuf); // Probably unrecoverable if realloc fails anyway. count = inbuflen - inofs; } char *buf = inbuf + inofs; bool error = false; while (count) { if (eof) { break; } ssize_t ret = read(fd, buf, count); if (ret > 0) { count -= ret; buf += ret; } else if (ret < 0 && errno == EINTR) { continue; } else if (ret < 0) { // EOF or some error if (errno != EAGAIN && errno != EWOULDBLOCK) { error = true; } } else if (ret == 0) { eof = true; } break; } inofs = buf - inbuf; if (!update_state()) { error = true; } if (error) { // Daemons sometimes successfully do accept() but then the connection // gets ECONNRESET. Probably a spurious result from accept(), so // just be silent about it in this case. set_error( instate == NEED_PROTO ); return false; } return true; } bool MsgChannel::update_state(void) { switch (instate) { case NEED_PROTO: while (inofs - intogo >= 4) { if (protocol == 0) { return false; } uint32_t remote_prot = 0; unsigned char vers[4]; //readuint32 (remote_prot); memcpy(vers, inbuf + intogo, 4); intogo += 4; for (int i = 0; i < 4; ++i) { remote_prot |= vers[i] << (i * 8); } if (protocol == -1) { /* The first time we read the remote protocol. 
*/ protocol = 0; if (remote_prot < MIN_PROTOCOL_VERSION || remote_prot > (1 << 20)) { remote_prot = 0; set_error(); return false; } maximum_remote_protocol = remote_prot; if (remote_prot > PROTOCOL_VERSION) { remote_prot = PROTOCOL_VERSION; // ours is smaller } for (int i = 0; i < 4; ++i) { vers[i] = remote_prot >> (i * 8); } writefull(vers, 4); if (!flush_writebuf(true)) { set_error(); return false; } protocol = -1 - remote_prot; } else if (protocol < -1) { /* The second time we read the remote protocol. */ protocol = - (protocol + 1); if ((int)remote_prot != protocol) { protocol = 0; set_error(); return false; } instate = NEED_LEN; /* Don't consume bytes from messages. */ break; } else { trace() << "NEED_PROTO but protocol > 0" << endl; set_error(); return false; } } /* FALLTHROUGH if the protocol setup was complete (instate was changed to NEED_LEN then). */ if (instate != NEED_LEN) { break; } // fallthrough case NEED_LEN: if (text_based) { // Skip any leading whitespace for (; inofs < intogo; ++inofs) if (inbuf[inofs] >= ' ') { break; } // Skip until next newline for (inmsglen = 0; inmsglen < inofs - intogo; ++inmsglen) if (inbuf[intogo + inmsglen] < ' ') { instate = HAS_MSG; break; } break; } else if (inofs - intogo >= 4) { (*this) >> inmsglen; if (inmsglen > MAX_MSG_SIZE) { log_error() << "received a too large message (size " << inmsglen << "), ignoring" << endl; set_error(); return false; } if (inbuflen - intogo < inmsglen) { inbuflen = (inmsglen + intogo + 127) & ~(size_t)127; inbuf = (char *) realloc(inbuf, inbuflen); assert(inbuf); // Probably unrecoverable if realloc fails anyway. 
            }

            instate = FILL_BUF;
            /* FALLTHROUGH */
        } else {
            break;
        }

    /* FALLTHROUGH */
    case FILL_BUF:
        /* Wait until the whole message body is buffered before flagging it. */
        if (inofs - intogo >= inmsglen) {
            instate = HAS_MSG;
        }
        /* FALLTHROUGH */
        else {
            break;
        }

    case HAS_MSG:
        /* handled elsewere */
        break;

    case ERROR:
        return false;
    }

    return true;
}

/* Compact the input buffer: drop the already-consumed prefix when it has
   grown large or when moving the remainder is cheap. */
void MsgChannel::chop_input()
{
    /* Make buffer smaller, if there's much already read in front of it, or it is cheap to do.  */
    if (intogo > 8192 || inofs - intogo <= 16) {
        if (inofs - intogo != 0) {
            memmove(inbuf, inbuf + intogo, inofs - intogo);
        }

        inofs -= intogo;
        intogo = 0;
    }
}

/* Compact the output buffer, analogous to chop_input(). */
void MsgChannel::chop_output()
{
    if (msgofs > 8192 || msgtogo <= 16) {
        if (msgtogo) {
            memmove(msgbuf, msgbuf + msgofs, msgtogo);
        }

        msgofs = 0;
    }
}

/* Append COUNT bytes to the output buffer, growing it as needed.
   Actual transmission happens later in flush_writebuf(). */
void MsgChannel::writefull(const void *_buf, size_t count)
{
    if (msgtogo + count >= msgbuflen) {
        /* Realloc to a multiple of 128.  */
        msgbuflen = (msgtogo + count + 127) & ~(size_t)127;
        msgbuf = (char *) realloc(msgbuf, msgbuflen);
        assert(msgbuf); // Probably unrecoverable if realloc fails anyway.
    }

    memcpy(msgbuf + msgtogo, _buf, count);
    msgtogo += count;
}

/* Maximum chunk size for a single send(); reduced when the user sets
   ICECC_SLOW_NETWORK=1. Evaluated once (cached via a function-local static
   at the call site). */
static size_t get_max_write_size()
{
    if( const char* icecc_slow_network = getenv( "ICECC_SLOW_NETWORK" ))
        if( icecc_slow_network[ 0 ] == '1' )
            return MAX_SLOW_WRITE_SIZE;
    return MAX_MSG_SIZE;
}

/* Try to send the buffered output. If BLOCKING, poll on the fd until all of
   it is written, a timeout fires, or a real error occurs. */
bool MsgChannel::flush_writebuf(bool blocking)
{
    const char *buf = msgbuf + msgofs;
    bool error = false;

    while (msgtogo) {
        int send_errno;
        static size_t max_write_size = get_max_write_size();
#ifdef MSG_NOSIGNAL
        ssize_t ret = send(fd, buf, min( msgtogo, max_write_size ), MSG_NOSIGNAL);
        send_errno = errno;
#else
        /* No MSG_NOSIGNAL: temporarily ignore SIGPIPE around the send(). */
        void (*oldsigpipe)(int);

        oldsigpipe = signal(SIGPIPE, SIG_IGN);
        ssize_t ret = send(fd, buf, min( msgtogo, max_write_size ), 0);
        send_errno = errno;
        signal(SIGPIPE, oldsigpipe);
#endif

        if (ret < 0) {
            if (send_errno == EINTR) {
                continue;
            }

            /* If we want to write blocking, but couldn't write anything, select on the fd.
*/ if (blocking && ( send_errno == EAGAIN || send_errno == ENOTCONN || send_errno == EWOULDBLOCK )) { int ready; for (;;) { pollfd pfd; pfd.fd = fd; pfd.events = POLLOUT; ready = poll(&pfd, 1, 30 * 1000); if (ready < 0 && errno == EINTR) { continue; } break; } /* socket ready now for writing ? */ if (ready > 0) { continue; } if (ready == 0) { log_error() << "timed out while trying to send data" << endl; } /* Timeout or real error --> error. */ } errno = send_errno; log_perror("flush_writebuf() failed"); error = true; break; } else if (ret == 0) { // EOF while writing --> error error = true; break; } msgtogo -= ret; buf += ret; } msgofs = buf - msgbuf; chop_output(); if(error) { set_error(); return false; } return true; } MsgChannel &MsgChannel::operator>>(uint32_t &buf) { if (inofs >= intogo + 4) { if (ptrdiff_t(inbuf + intogo) % 4) { uint32_t t_buf[1]; memcpy(t_buf, inbuf + intogo, 4); buf = t_buf[0]; } else { buf = *(uint32_t *)(inbuf + intogo); } intogo += 4; buf = ntohl(buf); } else { buf = 0; } return *this; } MsgChannel &MsgChannel::operator<<(uint32_t i) { i = htonl(i); writefull(&i, 4); return *this; } MsgChannel &MsgChannel::operator>>(string &s) { char *buf; // len is including the (also saved) 0 Byte uint32_t len; *this >> len; if (!len || len > inofs - intogo) { s = ""; } else { buf = inbuf + intogo; intogo += len; s = buf; } return *this; } MsgChannel &MsgChannel::operator<<(const std::string &s) { uint32_t len = 1 + s.length(); *this << len; writefull(s.c_str(), len); return *this; } MsgChannel &MsgChannel::operator>>(list &l) { uint32_t len; l.clear(); *this >> len; while (len--) { string s; *this >> s; l.push_back(s); if (inofs == intogo) { break; } } return *this; } MsgChannel &MsgChannel::operator<<(const std::list &l) { *this << (uint32_t) l.size(); for (list::const_iterator it = l.begin(); it != l.end(); ++it) { *this << *it; } return *this; } void MsgChannel::write_environments(const Environments &envs) { *this << envs.size(); for 
(Environments::const_iterator it = envs.begin(); it != envs.end(); ++it) { *this << it->first; *this << it->second; } } void MsgChannel::read_environments(Environments &envs) { envs.clear(); uint32_t count; *this >> count; for (unsigned int i = 0; i < count; i++) { string plat; string vers; *this >> plat; *this >> vers; envs.push_back(make_pair(plat, vers)); } } void MsgChannel::readcompressed(unsigned char **uncompressed_buf, size_t &_uclen, size_t &_clen) { lzo_uint uncompressed_len; lzo_uint compressed_len; uint32_t tmp; *this >> tmp; uncompressed_len = tmp; *this >> tmp; compressed_len = tmp; uint32_t proto = C_LZO; if (IS_PROTOCOL_40(this)) { *this >> proto; if (proto != C_LZO && proto != C_ZSTD) { log_error() << "Unknown compression protocol " << proto << endl; *uncompressed_buf = 0; _uclen = 0; _clen = compressed_len; set_error(); return; } } /* If there was some input, but nothing compressed, or lengths are bigger than the whole chunk message or we don't have everything to uncompress, there was an error. 
*/ if (uncompressed_len > MAX_MSG_SIZE || compressed_len > (inofs - intogo) || (uncompressed_len && !compressed_len) || inofs < intogo + compressed_len) { log_error() << "failure in readcompressed() length checking" << endl; *uncompressed_buf = 0; uncompressed_len = 0; _uclen = uncompressed_len; _clen = compressed_len; set_error(); return; } *uncompressed_buf = new unsigned char[uncompressed_len]; if (proto == C_ZSTD && uncompressed_len && compressed_len) { const void *compressed_buf = inbuf + intogo; size_t ret = ZSTD_decompress(*uncompressed_buf, uncompressed_len, compressed_buf, compressed_len); if (ZSTD_isError(ret)) { log_error() << "internal error - decompression of data from " << dump().c_str() << " failed: " << ZSTD_getErrorName(ret) << endl; delete[] *uncompressed_buf; *uncompressed_buf = 0; uncompressed_len = 0; } } else if (proto == C_LZO && uncompressed_len && compressed_len) { const lzo_byte *compressed_buf = (lzo_byte *)(inbuf + intogo); lzo_voidp wrkmem = (lzo_voidp) malloc(LZO1X_MEM_COMPRESS); int ret = lzo1x_decompress(compressed_buf, compressed_len, *uncompressed_buf, &uncompressed_len, wrkmem); free(wrkmem); if (ret != LZO_E_OK) { /* This should NEVER happen. Remove the buffer, and indicate there is nothing in it, but don't reset the compressed_len, so our caller know, that there actually was something read in. */ log_error() << "internal error - decompression of data from " << dump().c_str() << " failed: " << ret << endl; delete [] *uncompressed_buf; *uncompressed_buf = 0; uncompressed_len = 0; } } /* Read over everything used, _also_ if there was some error. If we couldn't decode it now, it won't get better in the future, so just ignore this hunk. 
*/ intogo += compressed_len; _uclen = uncompressed_len; _clen = compressed_len; } void MsgChannel::writecompressed(const unsigned char *in_buf, size_t _in_len, size_t &_out_len) { uint32_t proto = C_LZO; if (IS_PROTOCOL_40(this)) proto = C_ZSTD; lzo_uint in_len = _in_len; lzo_uint out_len = _out_len; if (proto == C_LZO) out_len = in_len + in_len / 64 + 16 + 3; else if (proto == C_ZSTD) out_len = ZSTD_COMPRESSBOUND(in_len); *this << in_len; size_t msgtogo_old = msgtogo; *this << (uint32_t) 0; if (IS_PROTOCOL_40(this)) *this << proto; if (msgtogo + out_len >= msgbuflen) { /* Realloc to a multiple of 128. */ msgbuflen = (msgtogo + out_len + 127) & ~(size_t)127; msgbuf = (char *) realloc(msgbuf, msgbuflen); assert(msgbuf); // Probably unrecoverable if realloc fails anyway. } if (proto == C_LZO) { lzo_byte *out_buf = (lzo_byte *)(msgbuf + msgtogo); lzo_voidp wrkmem = (lzo_voidp) malloc(LZO1X_MEM_COMPRESS); int ret = lzo1x_1_compress(in_buf, in_len, out_buf, &out_len, wrkmem); free(wrkmem); if (ret != LZO_E_OK) { /* this should NEVER happen */ log_error() << "internal error - compression failed: " << ret << endl; out_len = 0; } } else if (proto == C_ZSTD) { void *out_buf = msgbuf + msgtogo; size_t ret = ZSTD_compress(out_buf, out_len, in_buf, in_len, zstd_compression()); if (ZSTD_isError(ret)) { /* this should NEVER happen */ log_error() << "internal error - compression failed: " << ZSTD_getErrorName(ret) << endl; out_len = 0; } out_len = ret; } uint32_t _olen = htonl(out_len); if(out_len > MAX_MSG_SIZE) { log_error() << "internal error - size of compressed message to write exceeds max size:" << out_len << endl; } memcpy(msgbuf + msgtogo_old, &_olen, 4); msgtogo += out_len; _out_len = out_len; } void MsgChannel::read_line(string &line) { /* XXX handle DOS and MAC line endings and null bytes as string endings. 
*/ if (!text_based || inofs < intogo) { line = ""; } else { line = string(inbuf + intogo, inmsglen); intogo += inmsglen; while (intogo < inofs && inbuf[intogo] < ' ') { intogo++; } } } void MsgChannel::write_line(const string &line) { size_t len = line.length(); writefull(line.c_str(), len); if (line[len - 1] != '\n') { char c = '\n'; writefull(&c, 1); } } void MsgChannel::set_error(bool silent) { if( instate == ERROR ) { return; } if( !silent && !set_error_recursion ) { trace() << "setting error state for channel " << dump() << endl; // After the state is set to error, get_msg() will not return anything anymore, // so try to fetch last status from the other side, if available. set_error_recursion = true; Msg* msg = get_msg( 2, true ); if (msg && msg->type == M_STATUS_TEXT) { log_error() << "remote status: " << static_cast(msg)->text << endl; } set_error_recursion = false; } instate = ERROR; eof = true; } static int prepare_connect(const string &hostname, unsigned short p, struct sockaddr_in &remote_addr) { int remote_fd; int i = 1; if ((remote_fd = socket(PF_INET, SOCK_STREAM, 0)) < 0) { log_perror("socket()"); return -1; } struct hostent *host = gethostbyname(hostname.c_str()); if (!host) { log_error() << "Connecting to " << hostname << " failed: " << hstrerror( h_errno ) << endl; if ((-1 == close(remote_fd)) && (errno != EBADF)){ log_perror("close failed"); } return -1; } if (host->h_length != 4) { log_error() << "Invalid address length" << endl; if ((-1 == close(remote_fd)) && (errno != EBADF)){ log_perror("close failed"); } return -1; } setsockopt(remote_fd, IPPROTO_TCP, TCP_NODELAY, (char *) &i, sizeof(i)); remote_addr.sin_family = AF_INET; remote_addr.sin_port = htons(p); memcpy(&remote_addr.sin_addr.s_addr, host->h_addr_list[0], host->h_length); return remote_fd; } static bool connect_async(int remote_fd, struct sockaddr *remote_addr, size_t remote_size, int timeout) { fcntl(remote_fd, F_SETFL, O_NONBLOCK); // code majorly derived from lynx's http connect 
(GPL) int status = connect(remote_fd, remote_addr, remote_size); if ((status < 0) && (errno == EINPROGRESS || errno == EAGAIN)) { pollfd pfd; pfd.fd = remote_fd; pfd.events = POLLOUT; int ret; do { /* we poll for a specific time and if that succeeds, we connect one final time. Everything else we ignore */ ret = poll(&pfd, 1, timeout * 1000); if (ret < 0 && errno == EINTR) { continue; } break; } while (1); if (ret > 0) { /* ** Extra check here for connection success, if we try to ** connect again, and get EISCONN, it means we have a ** successful connection. But don't check with SOCKS. */ status = connect(remote_fd, remote_addr, remote_size); if ((status < 0) && (errno == EISCONN)) { status = 0; } } } if (status < 0) { /* ** The connect attempt failed or was interrupted, ** so close up the socket. */ if ((-1 == close(remote_fd)) && (errno != EBADF)){ log_perror("close failed"); } return false; } else { /* ** Make the socket blocking again on good connect. */ fcntl(remote_fd, F_SETFL, 0); } return true; } MsgChannel *Service::createChannel(const string &hostname, unsigned short p, int timeout) { int remote_fd; struct sockaddr_in remote_addr; if ((remote_fd = prepare_connect(hostname, p, remote_addr)) < 0) { return 0; } if (timeout) { if (!connect_async(remote_fd, (struct sockaddr *) &remote_addr, sizeof(remote_addr), timeout)) { return 0; // remote_fd is already closed } } else { int i = 2048; setsockopt(remote_fd, SOL_SOCKET, SO_SNDBUF, &i, sizeof(i)); if (connect(remote_fd, (struct sockaddr *) &remote_addr, sizeof(remote_addr)) < 0) { log_perror_trace("connect"); trace() << "connect failed on " << hostname << endl; if (-1 == close(remote_fd) && (errno != EBADF)){ log_perror("close failed"); } return 0; } } trace() << "connected to " << hostname << endl; return createChannel(remote_fd, (struct sockaddr *)&remote_addr, sizeof(remote_addr)); } MsgChannel *Service::createChannel(const string &socket_path) { int remote_fd; struct sockaddr_un remote_addr; if ((remote_fd 
= socket(AF_UNIX, SOCK_STREAM, 0)) < 0) { log_perror("socket()"); return 0; } remote_addr.sun_family = AF_UNIX; strncpy(remote_addr.sun_path, socket_path.c_str(), sizeof(remote_addr.sun_path) - 1); remote_addr.sun_path[sizeof(remote_addr.sun_path) - 1] = '\0'; if(socket_path.length() > sizeof(remote_addr.sun_path) - 1) { log_error() << "socket_path path too long for sun_path" << endl; } if (connect(remote_fd, (struct sockaddr *) &remote_addr, sizeof(remote_addr)) < 0) { log_perror_trace("connect"); trace() << "connect failed on " << socket_path << endl; if ((-1 == close(remote_fd)) && (errno != EBADF)){ log_perror("close failed"); } return 0; } trace() << "connected to " << socket_path << endl; return createChannel(remote_fd, (struct sockaddr *)&remote_addr, sizeof(remote_addr)); } static std::string shorten_filename(const std::string &str) { std::string::size_type ofs = str.rfind('/'); for (int i = 2; i--;) { if (ofs != string::npos) { ofs = str.rfind('/', ofs - 1); } } return str.substr(ofs + 1); } bool MsgChannel::eq_ip(const MsgChannel &s) const { struct sockaddr_in *s1, *s2; s1 = (struct sockaddr_in *) addr; s2 = (struct sockaddr_in *) s.addr; return (addr_len == s.addr_len && memcmp(&s1->sin_addr, &s2->sin_addr, sizeof(s1->sin_addr)) == 0); } MsgChannel *Service::createChannel(int fd, struct sockaddr *_a, socklen_t _l) { MsgChannel *c = new MsgChannel(fd, _a, _l, false); if (!c->wait_for_protocol()) { delete c; c = 0; } return c; } MsgChannel::MsgChannel(int _fd, struct sockaddr *_a, socklen_t _l, bool text) : fd(_fd) { addr_len = (sizeof(struct sockaddr) > _l) ? 
sizeof(struct sockaddr) : _l; if (addr_len && _a) { addr = (struct sockaddr *)malloc(addr_len); memcpy(addr, _a, _l); if(addr->sa_family == AF_UNIX) { name = "local unix domain socket"; } else { char buf[16384] = ""; if(int error = getnameinfo(addr, _l, buf, sizeof(buf), NULL, 0, NI_NUMERICHOST)) log_error() << "getnameinfo(): " << error << endl; name = buf; } } else { addr = 0; name = ""; } // not using new/delete because of the need of realloc() msgbuf = (char *) malloc(128); msgbuflen = 128; msgofs = 0; msgtogo = 0; inbuf = (char *) malloc(128); inbuflen = 128; inofs = 0; intogo = 0; eof = false; text_based = text; set_error_recursion = false; maximum_remote_protocol = -1; int on = 1; if (!setsockopt(_fd, SOL_SOCKET, SO_KEEPALIVE, (char *) &on, sizeof(on))) { #if defined( TCP_KEEPIDLE ) || defined( TCPCTL_KEEPIDLE ) #if defined( TCP_KEEPIDLE ) int keepidle = TCP_KEEPIDLE; #else int keepidle = TCPCTL_KEEPIDLE; #endif int sec; sec = MAX_SCHEDULER_PING - 3 * MAX_SCHEDULER_PONG; setsockopt(_fd, IPPROTO_TCP, keepidle, (char *) &sec, sizeof(sec)); #endif #if defined( TCP_KEEPINTVL ) || defined( TCPCTL_KEEPINTVL ) #if defined( TCP_KEEPINTVL ) int keepintvl = TCP_KEEPINTVL; #else int keepintvl = TCPCTL_KEEPINTVL; #endif sec = MAX_SCHEDULER_PONG; setsockopt(_fd, IPPROTO_TCP, keepintvl, (char *) &sec, sizeof(sec)); #endif #ifdef TCP_KEEPCNT sec = 3; setsockopt(_fd, IPPROTO_TCP, TCP_KEEPCNT, (char *) &sec, sizeof(sec)); #endif } if (fcntl(fd, F_SETFL, O_NONBLOCK) < 0) { log_perror("MsgChannel fcntl()"); } if (fcntl(fd, F_SETFD, FD_CLOEXEC) < 0) { log_perror("MsgChannel fcntl() 2"); } if (text_based) { instate = NEED_LEN; protocol = PROTOCOL_VERSION; } else { instate = NEED_PROTO; protocol = -1; unsigned char vers[4] = {PROTOCOL_VERSION, 0, 0, 0}; //writeuint32 ((uint32_t) PROTOCOL_VERSION); writefull(vers, 4); if (!flush_writebuf(true)) { protocol = 0; // unusable set_error(); } } last_talk = time(0); } MsgChannel::~MsgChannel() { if (fd >= 0) { if ((-1 == close(fd)) && 
(errno != EBADF)){ log_perror("close failed"); } } fd = -1; if (msgbuf) { free(msgbuf); } if (inbuf) { free(inbuf); } if (addr) { free(addr); } } string MsgChannel::dump() const { return name + ": (" + char((int)instate + 'A') + " eof: " + char(eof + '0') + ")"; } /* Wait blocking until the protocol setup for this channel is complete. Returns false if an error occurred. */ bool MsgChannel::wait_for_protocol() { /* protocol is 0 if we couldn't send our initial protocol version. */ if (protocol == 0 || instate == ERROR) { return false; } while (instate == NEED_PROTO) { pollfd pfd; pfd.fd = fd; pfd.events = POLLIN; int ret = poll(&pfd, 1, 15 * 1000); // 15s if (ret < 0 && errno == EINTR) { continue; } if (ret == 0) { log_warning() << "no response within timeout" << endl; set_error(); return false; /* timeout. Consider it a fatal error. */ } if (ret < 0) { log_perror("select in wait_for_protocol()"); set_error(); return false; } if (!read_a_bit() || eof) { return false; } } return true; } void MsgChannel::setBulkTransfer() { if (fd < 0) { return; } int i = 0; setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char *) &i, sizeof(i)); // would be nice but not portable across non-linux #ifdef __linux__ i = 1; setsockopt(fd, IPPROTO_TCP, TCP_CORK, (char *) &i, sizeof(i)); #endif i = 65536; setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &i, sizeof(i)); } /* This waits indefinitely (well, TIMEOUT seconds) for a complete message to arrive. Returns false if there was some error. */ bool MsgChannel::wait_for_msg(int timeout) { if (instate == ERROR) { return false; } if (has_msg()) { return true; } if (!read_a_bit()) { trace() << "!read_a_bit\n"; set_error(); return false; } if (timeout <= 0) { // trace() << "timeout <= 0\n"; return has_msg(); } while (!has_msg()) { pollfd pfd; pfd.fd = fd; pfd.events = POLLIN; if (poll(&pfd, 1, timeout * 1000) <= 0) { if (errno == EINTR) { continue; } /* Either timeout or real error. For this function also a timeout is an error. 
*/ return false; } if (!read_a_bit()) { trace() << "!read_a_bit 2\n"; set_error(); return false; } } return true; } Msg *MsgChannel::get_msg(int timeout, bool eofAllowed) { Msg *m = 0; enum MsgType type; if (!wait_for_msg(timeout)) { // trace() << "!wait_for_msg()\n"; return 0; } /* If we've seen the EOF, and we don't have a complete message, then we won't see it anymore. Return that to the caller. Don't use has_msg() here, as it returns true for eof. */ if (at_eof()) { if (!eofAllowed) { trace() << "saw eof without complete msg! " << instate << endl; set_error(); } return 0; } if (!has_msg()) { trace() << "saw eof without msg! " << eof << " " << instate << endl; set_error(); return 0; } size_t intogo_old = intogo; if (text_based) { type = M_TEXT; } else { uint32_t t; *this >> t; type = (enum MsgType) t; } switch (type) { case M_UNKNOWN: set_error(); return 0; case M_PING: m = new PingMsg; break; case M_END: m = new EndMsg; break; case M_GET_CS: m = new GetCSMsg; break; case M_USE_CS: m = new UseCSMsg; break; case M_NO_CS: m = new NoCSMsg; break; case M_COMPILE_FILE: m = new CompileFileMsg(new CompileJob, true); break; case M_FILE_CHUNK: m = new FileChunkMsg; break; case M_COMPILE_RESULT: m = new CompileResultMsg; break; case M_JOB_BEGIN: m = new JobBeginMsg; break; case M_JOB_DONE: m = new JobDoneMsg; break; case M_LOGIN: m = new LoginMsg; break; case M_STATS: m = new StatsMsg; break; case M_GET_NATIVE_ENV: m = new GetNativeEnvMsg; break; case M_NATIVE_ENV: m = new UseNativeEnvMsg; break; case M_MON_LOGIN: m = new MonLoginMsg; break; case M_MON_GET_CS: m = new MonGetCSMsg; break; case M_MON_JOB_BEGIN: m = new MonJobBeginMsg; break; case M_MON_JOB_DONE: m = new MonJobDoneMsg; break; case M_MON_STATS: m = new MonStatsMsg; break; case M_JOB_LOCAL_BEGIN: m = new JobLocalBeginMsg; break; case M_JOB_LOCAL_DONE : m = new JobLocalDoneMsg; break; case M_MON_LOCAL_JOB_BEGIN: m = new MonLocalJobBeginMsg; break; case M_TRANFER_ENV: m = new EnvTransferMsg; break; case M_TEXT: 
m = new TextMsg; break; case M_GET_INTERNALS: m = new GetInternalStatus; break; case M_STATUS_TEXT: m = new StatusTextMsg; break; case M_CS_CONF: m = new ConfCSMsg; break; case M_VERIFY_ENV: m = new VerifyEnvMsg; break; case M_VERIFY_ENV_RESULT: m = new VerifyEnvResultMsg; break; case M_BLACKLIST_HOST_ENV: m = new BlacklistHostEnvMsg; break; case M_TIMEOUT: break; } if (!m) { trace() << "no message type" << endl; set_error(); return 0; } m->fill_from_channel(this); if (!text_based) { if( intogo - intogo_old != inmsglen ) { log_error() << "internal error - message not read correctly, message size " << inmsglen << " read " << (intogo - intogo_old) << endl; delete m; set_error(); return 0; } } instate = NEED_LEN; update_state(); return m; } bool MsgChannel::send_msg(const Msg &m, int flags) { if (instate == ERROR) { return false; } if (instate == NEED_PROTO && !wait_for_protocol()) { return false; } chop_output(); size_t msgtogo_old = msgtogo; if (text_based) { m.send_to_channel(this); } else { *this << (uint32_t) 0; m.send_to_channel(this); uint32_t out_len = msgtogo - msgtogo_old - 4; if(out_len > MAX_MSG_SIZE) { log_error() << "internal error - size of message to write exceeds max size:" << out_len << endl; set_error(); return false; } uint32_t len = htonl(out_len); memcpy(msgbuf + msgtogo_old, &len, 4); } if ((flags & SendBulkOnly) && msgtogo < 4096) { return true; } return flush_writebuf((flags & SendBlocking)); } static int get_second_port_for_debug( int port ) { // When running tests, we want to check also interactions between 2 schedulers, but // when they are both local, they cannot bind to the same port. So make sure to // send all broadcasts to both. 
static bool checkedDebug = false; static int debugPort1 = 0; static int debugPort2 = 0; if( !checkedDebug ) { checkedDebug = true; if( const char* env = getenv( "ICECC_TEST_SCHEDULER_PORTS" )) { debugPort1 = atoi( env ); const char* env2 = strchr( env, ':' ); if( env2 != NULL ) debugPort2 = atoi( env2 + 1 ); } } int secondPort = 0; if( port == debugPort1 ) secondPort = debugPort2; else if( port == debugPort2 ) secondPort = debugPort1; return secondPort ? secondPort : -1; } void Broadcasts::broadcastSchedulerVersion(int scheduler_port, const char* netname, time_t starttime) { // Code for older schedulers than version 38. Has endianness problems, the message size // is not BROAD_BUFLEN and the netname is possibly not null-terminated. const char length_netname = strlen(netname); const int schedbuflen = 5 + sizeof(uint64_t) + length_netname; char *buf = new char[ schedbuflen ]; buf[0] = 'I'; buf[1] = 'C'; buf[2] = 'E'; buf[3] = PROTOCOL_VERSION; uint64_t tmp_time = starttime; memcpy(buf + 4, &tmp_time, sizeof(uint64_t)); buf[4 + sizeof(uint64_t)] = length_netname; strncpy(buf + 5 + sizeof(uint64_t), netname, length_netname - 1); buf[ schedbuflen - 1 ] = '\0'; broadcastData(scheduler_port, buf, schedbuflen); delete[] buf; // Latest version. 
buf = new char[ BROAD_BUFLEN ]; memset(buf, 0, BROAD_BUFLEN ); buf[0] = 'I'; buf[1] = 'C'; buf[2] = 'F'; // one up buf[3] = PROTOCOL_VERSION; uint32_t tmp_time_low = starttime & 0xffffffffUL; uint32_t tmp_time_high = uint64_t(starttime) >> 32; tmp_time_low = htonl( tmp_time_low ); tmp_time_high = htonl( tmp_time_high ); memcpy(buf + 4, &tmp_time_high, sizeof(uint32_t)); memcpy(buf + 4 + sizeof(uint32_t), &tmp_time_low, sizeof(uint32_t)); const int OFFSET = 4 + 2 * sizeof(uint32_t); snprintf(buf + OFFSET, BROAD_BUFLEN - OFFSET, "%s", netname); buf[BROAD_BUFLEN - 1] = 0; broadcastData(scheduler_port, buf, BROAD_BUFLEN); delete[] buf; } bool Broadcasts::isSchedulerVersion(const char* buf, int buflen) { if( buflen != BROAD_BUFLEN ) return false; // Ignore versions older than 38, they are older than us anyway, so not interesting. if( buf[0] == 'I' && buf[1] == 'C' && buf[2] == 'F') { return true; } return false; } void Broadcasts::getSchedulerVersionData( const char* buf, int* protocol, time_t* time, string* netname ) { assert( isSchedulerVersion( buf, BROAD_BUFLEN )); const unsigned char other_scheduler_protocol = buf[3]; uint32_t tmp_time_low, tmp_time_high; memcpy(&tmp_time_high, buf + 4, sizeof(uint32_t)); memcpy(&tmp_time_low, buf + 4 + sizeof(uint32_t), sizeof(uint32_t)); tmp_time_low = ntohl( tmp_time_low ); tmp_time_high = ntohl( tmp_time_high ); time_t other_time = ( uint64_t( tmp_time_high ) << 32 ) | tmp_time_low;; string recv_netname = string(buf + 4 + 2 * sizeof(uint32_t)); if( protocol != NULL ) *protocol = other_scheduler_protocol; if( time != NULL ) *time = other_time; if( netname != NULL ) *netname = recv_netname; } /* Returns a filedesc. or a negative value for errors. 
*/ static int open_send_broadcast(int port, const char* buf, int size) { int ask_fd; struct sockaddr_in remote_addr; if ((ask_fd = socket(PF_INET, SOCK_DGRAM, 0)) < 0) { log_perror("open_send_broadcast socket"); return -1; } if (fcntl(ask_fd, F_SETFD, FD_CLOEXEC) < 0) { log_perror("open_send_broadcast fcntl"); if (-1 == close(ask_fd)){ log_perror("close failed"); } return -1; } int optval = 1; if (setsockopt(ask_fd, SOL_SOCKET, SO_BROADCAST, &optval, sizeof(optval)) < 0) { log_perror("open_send_broadcast setsockopt"); if (-1 == close(ask_fd)){ log_perror("close failed"); } return -1; } struct kde_ifaddrs *addrs; int ret = kde_getifaddrs(&addrs); if (ret < 0) { return ret; } for (struct kde_ifaddrs *addr = addrs; addr != NULL; addr = addr->ifa_next) { /* * See if this interface address is IPv4... */ if (addr->ifa_addr == NULL || addr->ifa_addr->sa_family != AF_INET || addr->ifa_netmask == NULL || addr->ifa_name == NULL) { continue; } static bool in_tests = getenv( "ICECC_TESTS" ) != NULL; if (!in_tests) { if (ntohl(((struct sockaddr_in *) addr->ifa_addr)->sin_addr.s_addr) == 0x7f000001) { trace() << "ignoring localhost " << addr->ifa_name << " for broadcast" << endl; continue; } if ((addr->ifa_flags & IFF_POINTOPOINT) || !(addr->ifa_flags & IFF_BROADCAST)) { log_info() << "ignoring tunnels " << addr->ifa_name << " for broadcast" << endl; continue; } } else { if (ntohl(((struct sockaddr_in *) addr->ifa_addr)->sin_addr.s_addr) != 0x7f000001) { trace() << "ignoring non-localhost " << addr->ifa_name << " for broadcast" << endl; continue; } } if (addr->ifa_broadaddr) { log_info() << "broadcast " << addr->ifa_name << " " << inet_ntoa(((sockaddr_in *)addr->ifa_broadaddr)->sin_addr) << endl; remote_addr.sin_family = AF_INET; remote_addr.sin_port = htons(port); remote_addr.sin_addr = ((sockaddr_in *)addr->ifa_broadaddr)->sin_addr; if (sendto(ask_fd, buf, size, 0, (struct sockaddr *)&remote_addr, sizeof(remote_addr)) != size) { log_perror("open_send_broadcast sendto"); } } } 
kde_freeifaddrs(addrs); return ask_fd; } void Broadcasts::broadcastData(int port, const char* buf, int len) { int fd = open_send_broadcast(port, buf, len); if (fd >= 0) { if ((-1 == close(fd)) && (errno != EBADF)){ log_perror("close failed"); } } int secondPort = get_second_port_for_debug( port ); if( secondPort > 0 ) { int fd2 = open_send_broadcast(secondPort, buf, len); if (fd2 >= 0) { if ((-1 == close(fd2)) && (errno != EBADF)){ log_perror("close failed"); } } } } DiscoverSched::DiscoverSched(const std::string &_netname, int _timeout, const std::string &_schedname, int port) : netname(_netname) , schedname(_schedname) , timeout(_timeout) , ask_fd(-1) , ask_second_fd(-1) , sport(port) , best_version(0) , best_start_time(0) , best_port(0) , multiple(false) { time0 = time(0); if (schedname.empty()) { const char *get = getenv("ICECC_SCHEDULER"); if( get == NULL ) get = getenv("USE_SCHEDULER"); if (get) { string scheduler = get; size_t colon = scheduler.rfind( ':' ); if( colon == string::npos ) { schedname = scheduler; } else { schedname = scheduler.substr(0, colon); sport = atoi( scheduler.substr( colon + 1 ).c_str()); } } } if (netname.empty()) { netname = "ICECREAM"; } if (sport == 0 ) { sport = 8765; } if (!schedname.empty()) { netname = ""; // take whatever the machine is giving us attempt_scheduler_connect(); } else { sendSchedulerDiscovery( PROTOCOL_VERSION ); } } DiscoverSched::~DiscoverSched() { if (ask_fd >= 0) { if ((-1 == close(ask_fd)) && (errno != EBADF)){ log_perror("close failed"); } } if (ask_second_fd >= 0) { if ((-1 == close(ask_second_fd)) && (errno != EBADF)){ log_perror("close failed"); } } } bool DiscoverSched::timed_out() { return (time(0) - time0 >= timeout); } void DiscoverSched::attempt_scheduler_connect() { time0 = time(0) + MAX_SCHEDULER_PONG; log_info() << "scheduler is on " << schedname << ":" << sport << " (net " << netname << ")" << endl; if ((ask_fd = prepare_connect(schedname, sport, remote_addr)) >= 0) { fcntl(ask_fd, F_SETFL, 
O_NONBLOCK); } } void DiscoverSched::sendSchedulerDiscovery( int version ) { assert( version < 128 ); char buf = version; ask_fd = open_send_broadcast(sport, &buf, 1); int secondPort = get_second_port_for_debug( sport ); if( secondPort > 0 ) ask_second_fd = open_send_broadcast(secondPort, &buf, 1); } bool DiscoverSched::isSchedulerDiscovery(const char* buf, int buflen, int* daemon_version) { if( buflen != 1 ) return false; if( daemon_version != NULL ) { *daemon_version = buf[ 0 ]; } return true; } static const int BROAD_BUFLEN = 268; static const int BROAD_BUFLEN_OLD_2 = 32; static const int BROAD_BUFLEN_OLD_1 = 16; int DiscoverSched::prepareBroadcastReply(char* buf, const char* netname, time_t starttime) { if (buf[0] < 33) { // old client buf[0]++; memset(buf + 1, 0, BROAD_BUFLEN_OLD_1 - 1); snprintf(buf + 1, BROAD_BUFLEN_OLD_1 - 1, "%s", netname); buf[BROAD_BUFLEN_OLD_1 - 1] = 0; return BROAD_BUFLEN_OLD_1; } else if (buf[0] < 36) { // This is like 36, but 36 silently changed the size of BROAD_BUFLEN from 32 to 268. // Since get_broad_answer() explicitly null-terminates the data, this wouldn't lead // to those receivers reading a shorter string that would not be null-terminated, // but still, this is what versions 33-35 actually worked with. 
buf[0] += 2; memset(buf + 1, 0, BROAD_BUFLEN_OLD_2 - 1); uint32_t tmp_version = PROTOCOL_VERSION; uint64_t tmp_time = starttime; memcpy(buf + 1, &tmp_version, sizeof(uint32_t)); memcpy(buf + 1 + sizeof(uint32_t), &tmp_time, sizeof(uint64_t)); const int OFFSET = 1 + sizeof(uint32_t) + sizeof(uint64_t); snprintf(buf + OFFSET, BROAD_BUFLEN_OLD_2 - OFFSET, "%s", netname); buf[BROAD_BUFLEN_OLD_2 - 1] = 0; return BROAD_BUFLEN_OLD_2; } else if (buf[0] < 38) { // exposes endianess because of not using htonl() buf[0] += 2; memset(buf + 1, 0, BROAD_BUFLEN - 1); uint32_t tmp_version = PROTOCOL_VERSION; uint64_t tmp_time = starttime; memcpy(buf + 1, &tmp_version, sizeof(uint32_t)); memcpy(buf + 1 + sizeof(uint32_t), &tmp_time, sizeof(uint64_t)); const int OFFSET = 1 + sizeof(uint32_t) + sizeof(uint64_t); snprintf(buf + OFFSET, BROAD_BUFLEN - OFFSET, "%s", netname); buf[BROAD_BUFLEN - 1] = 0; return BROAD_BUFLEN; } else { // latest version buf[0] += 3; memset(buf + 1, 0, BROAD_BUFLEN - 1); uint32_t tmp_version = PROTOCOL_VERSION; uint32_t tmp_time_low = starttime & 0xffffffffUL; uint32_t tmp_time_high = uint64_t(starttime) >> 32; tmp_version = htonl( tmp_version ); tmp_time_low = htonl( tmp_time_low ); tmp_time_high = htonl( tmp_time_high ); memcpy(buf + 1, &tmp_version, sizeof(uint32_t)); memcpy(buf + 1 + sizeof(uint32_t), &tmp_time_high, sizeof(uint32_t)); memcpy(buf + 1 + 2 * sizeof(uint32_t), &tmp_time_low, sizeof(uint32_t)); const int OFFSET = 1 + 3 * sizeof(uint32_t); snprintf(buf + OFFSET, BROAD_BUFLEN - OFFSET, "%s", netname); buf[BROAD_BUFLEN - 1] = 0; return BROAD_BUFLEN; } } void DiscoverSched::get_broad_data(const char* buf, const char** name, int* version, time_t* start_time) { if (buf[0] == PROTOCOL_VERSION + 1) { // Scheduler version 32 or older, didn't send us its version, assume it's 32. if (name != NULL) *name = buf + 1; if (version != NULL) *version = 32; if (start_time != NULL) *start_time = 0; // Unknown too. 
} else if(buf[0] == PROTOCOL_VERSION + 2) { if (version != NULL) { uint32_t tmp_version; memcpy(&tmp_version, buf + 1, sizeof(uint32_t)); *version = tmp_version; } if (start_time != NULL) { uint64_t tmp_time; memcpy(&tmp_time, buf + 1 + sizeof(uint32_t), sizeof(uint64_t)); *start_time = tmp_time; } if (name != NULL) *name = buf + 1 + sizeof(uint32_t) + sizeof(uint64_t); } else if(buf[0] == PROTOCOL_VERSION + 3) { if (version != NULL) { uint32_t tmp_version; memcpy(&tmp_version, buf + 1, sizeof(uint32_t)); *version = ntohl( tmp_version ); } if (start_time != NULL) { uint32_t tmp_time_low, tmp_time_high; memcpy(&tmp_time_high, buf + 1 + sizeof(uint32_t), sizeof(uint32_t)); memcpy(&tmp_time_low, buf + 1 + 2 * sizeof(uint32_t), sizeof(uint32_t)); tmp_time_low = ntohl( tmp_time_low ); tmp_time_high = ntohl( tmp_time_high ); *start_time = ( uint64_t( tmp_time_high ) << 32 ) | tmp_time_low;; } if (name != NULL) *name = buf + 1 + 3 * sizeof(uint32_t); } else { abort(); } } MsgChannel *DiscoverSched::try_get_scheduler() { if (schedname.empty()) { socklen_t remote_len; char buf2[BROAD_BUFLEN]; /* Try to get the scheduler with the newest version, and if there are several with the same version, choose the one that's been running for the longest time. It should work like this (and it won't work perfectly if there are schedulers and/or daemons with old (<33) version): Whenever a daemon starts, it broadcasts for a scheduler. Schedulers all see the broadcast and respond with their version, start time and netname. Here we select the best one. If a new scheduler is started, it'll broadcast its version and all other schedulers will drop their daemon connections if they have an older version. If the best scheduler quits, all daemons will get their connections closed and will re-discover and re-connect. */ /* Read/test all packages arrived until now. 
*/ while (get_broad_answer(ask_fd, 0/*timeout*/, buf2, (struct sockaddr_in *) &remote_addr, &remote_len) || ( ask_second_fd != -1 && get_broad_answer(ask_second_fd, 0/*timeout*/, buf2, (struct sockaddr_in *) &remote_addr, &remote_len))) { int version; time_t start_time; const char* name; get_broad_data(buf2, &name, &version, &start_time); if (strcasecmp(netname.c_str(), name) == 0) { if( version >= 128 || version < 1 ) { log_warning() << "Ignoring bogus version " << version << " from scheduler found at " << inet_ntoa(remote_addr.sin_addr) << ":" << ntohs(remote_addr.sin_port) << endl; continue; } else if (version < 33) { log_info() << "Suitable scheduler found at " << inet_ntoa(remote_addr.sin_addr) << ":" << ntohs(remote_addr.sin_port) << " (unknown version)" << endl; } else { log_info() << "Suitable scheduler found at " << inet_ntoa(remote_addr.sin_addr) << ":" << ntohs(remote_addr.sin_port) << " (version: " << version << ")" << endl; } if (best_version != 0) multiple = true; if (best_version < version || (best_version == version && best_start_time > start_time)) { best_schedname = inet_ntoa(remote_addr.sin_addr); best_port = ntohs(remote_addr.sin_port); best_version = version; best_start_time = start_time; } } else { log_info() << "Ignoring scheduler at " << inet_ntoa(remote_addr.sin_addr) << ":" << ntohs(remote_addr.sin_port) << " because of a different netname (" << name << ")" << endl; } } if (timed_out()) { if (best_version == 0) { return 0; } schedname = best_schedname; sport = best_port; if (multiple) log_info() << "Selecting scheduler at " << schedname << ":" << sport << endl; if (-1 == close(ask_fd)){ log_perror("close failed"); } ask_fd = -1; if( get_second_port_for_debug( sport ) > 0 ) { if (-1 == close(ask_second_fd)){ log_perror("close failed"); } ask_second_fd = -1; } else { assert( ask_second_fd == -1 ); } attempt_scheduler_connect(); if (ask_fd >= 0) { int status = connect(ask_fd, (struct sockaddr *) &remote_addr, sizeof(remote_addr)); if (status 
== 0 || (status < 0 && (errno == EISCONN || errno == EINPROGRESS))) { int fd = ask_fd; ask_fd = -1; return Service::createChannel(fd, (struct sockaddr *) &remote_addr, sizeof(remote_addr)); } } } } else if (ask_fd >= 0) { assert( ask_second_fd == -1 ); int status = connect(ask_fd, (struct sockaddr *) &remote_addr, sizeof(remote_addr)); if (status == 0 || (status < 0 && errno == EISCONN)) { int fd = ask_fd; ask_fd = -1; return Service::createChannel(fd, (struct sockaddr *) &remote_addr, sizeof(remote_addr)); } } return 0; } bool DiscoverSched::get_broad_answer(int ask_fd, int timeout, char *buf2, struct sockaddr_in *remote_addr, socklen_t *remote_len) { char buf = PROTOCOL_VERSION; pollfd pfd; assert(ask_fd > 0); pfd.fd = ask_fd; pfd.events = POLLIN; errno = 0; if (poll(&pfd, 1, timeout) <= 0 || (pfd.revents & POLLIN) == 0) { /* Normally this is a timeout, i.e. no scheduler there. */ if (errno && errno != EINTR) { log_perror("waiting for scheduler"); } return false; } *remote_len = sizeof(struct sockaddr_in); int len = recvfrom(ask_fd, buf2, BROAD_BUFLEN, 0, (struct sockaddr *) remote_addr, remote_len); if (len != BROAD_BUFLEN && len != BROAD_BUFLEN_OLD_1 && len != BROAD_BUFLEN_OLD_2) { log_perror("get_broad_answer recvfrom()"); return false; } if (! 
((len == BROAD_BUFLEN_OLD_1 && buf2[0] == buf + 1) // PROTOCOL <= 32 scheduler || (len == BROAD_BUFLEN_OLD_2 && buf2[0] == buf + 2) // PROTOCOL >= 33 && < 36 scheduler || (len == BROAD_BUFLEN && buf2[0] == buf + 2) // PROTOCOL >= 36 && < 38 scheduler || (len == BROAD_BUFLEN && buf2[0] == buf + 3))) { // PROTOCOL >= 38 scheduler log_error() << "Wrong scheduler discovery answer (size " << len << ", mark " << int(buf2[0]) << ")" << endl; return false; } buf2[len - 1] = 0; return true; } list DiscoverSched::getNetnames(int timeout, int port) { list l; int ask_fd; struct sockaddr_in remote_addr; socklen_t remote_len; time_t time0 = time(0); char buf = PROTOCOL_VERSION; ask_fd = open_send_broadcast(port, &buf, 1); do { char buf2[BROAD_BUFLEN]; bool first = true; /* Wait at least two seconds to give all schedulers a chance to answer (unless that'd be longer than the timeout).*/ time_t timeout_time = time(NULL) + min(2 + 1, timeout); /* Read/test all arriving packages. */ while (get_broad_answer(ask_fd, first ? 
timeout : 0, buf2, &remote_addr, &remote_len) && time(NULL) < timeout_time) { first = false; const char* name; get_broad_data(buf2, &name, NULL, NULL); l.push_back(name); } } while (time(0) - time0 < (timeout / 1000)); if ((-1 == close(ask_fd)) && (errno != EBADF)){ log_perror("close failed"); } return l; } list get_netnames(int timeout, int port) { return DiscoverSched::getNetnames(timeout, port); } void Msg::fill_from_channel(MsgChannel *) { } void Msg::send_to_channel(MsgChannel *c) const { if (c->is_text_based()) { return; } *c << (uint32_t) type; } GetCSMsg::GetCSMsg(const Environments &envs, const std::string &f, CompileJob::Language _lang, unsigned int _count, std::string _target, unsigned int _arg_flags, const std::string &host, int _minimal_host_version, unsigned int _required_features, unsigned int _client_count) : Msg(M_GET_CS) , versions(envs) , filename(f) , lang(_lang) , count(_count) , target(_target) , arg_flags(_arg_flags) , client_id(0) , preferred_host(host) , minimal_host_version(_minimal_host_version) , required_features(_required_features) , client_count(_client_count) { // These have been introduced in protocol version 42. if( required_features & ( NODE_FEATURE_ENV_XZ | NODE_FEATURE_ENV_ZSTD )) minimal_host_version = max( minimal_host_version, 42 ); } void GetCSMsg::fill_from_channel(MsgChannel *c) { Msg::fill_from_channel(c); c->read_environments(versions); *c >> filename; uint32_t _lang; *c >> _lang; *c >> count; *c >> target; lang = static_cast(_lang); *c >> arg_flags; *c >> client_id; preferred_host = string(); if (IS_PROTOCOL_22(c)) { *c >> preferred_host; } minimal_host_version = 0; if (IS_PROTOCOL_31(c)) { uint32_t ign; *c >> ign; // Versions 31-33 had this as a separate field, now set a minimal // remote version if needed. 
if (ign != 0 && minimal_host_version < 31) minimal_host_version = 31; } if (IS_PROTOCOL_34(c)) { uint32_t version; *c >> version; minimal_host_version = max( minimal_host_version, int( version )); } if (IS_PROTOCOL_39(c)) { *c >> client_count; } required_features = 0; if (IS_PROTOCOL_42(c)) { *c >> required_features; } } void GetCSMsg::send_to_channel(MsgChannel *c) const { Msg::send_to_channel(c); c->write_environments(versions); *c << shorten_filename(filename); *c << (uint32_t) lang; *c << count; *c << target; *c << arg_flags; *c << client_id; if (IS_PROTOCOL_22(c)) { *c << preferred_host; } if (IS_PROTOCOL_31(c)) { *c << uint32_t(minimal_host_version >= 31 ? 1 : 0); } if (IS_PROTOCOL_34(c)) { *c << minimal_host_version; } if (IS_PROTOCOL_39(c)) { *c << client_count; } if (IS_PROTOCOL_42(c)) { *c << required_features; } } void UseCSMsg::fill_from_channel(MsgChannel *c) { Msg::fill_from_channel(c); *c >> job_id; *c >> port; *c >> hostname; *c >> host_platform; *c >> got_env; *c >> client_id; if (IS_PROTOCOL_28(c)) { *c >> matched_job_id; } else { matched_job_id = 0; } } void UseCSMsg::send_to_channel(MsgChannel *c) const { Msg::send_to_channel(c); *c << job_id; *c << port; *c << hostname; *c << host_platform; *c << got_env; *c << client_id; if (IS_PROTOCOL_28(c)) { *c << matched_job_id; } } void NoCSMsg::fill_from_channel(MsgChannel *c) { Msg::fill_from_channel(c); *c >> job_id; *c >> client_id; } void NoCSMsg::send_to_channel(MsgChannel *c) const { Msg::send_to_channel(c); *c << job_id; *c << client_id; } void CompileFileMsg::fill_from_channel(MsgChannel *c) { Msg::fill_from_channel(c); uint32_t id, lang; string version; *c >> lang; *c >> id; ArgumentsList l; if( IS_PROTOCOL_41(c)) { list largs; *c >> largs; // Whe compiling remotely, we no longer care about the Arg_Remote vs Arg_Rest // difference, so treat them all as Arg_Remote. 
for (list::const_iterator it = largs.begin(); it != largs.end(); ++it) l.append(*it, Arg_Remote); } else { list _l1, _l2; *c >> _l1; *c >> _l2; for (list::const_iterator it = _l1.begin(); it != _l1.end(); ++it) l.append(*it, Arg_Remote); for (list::const_iterator it = _l2.begin(); it != _l2.end(); ++it) l.append(*it, Arg_Rest); } *c >> version; job->setLanguage((CompileJob::Language) lang); job->setJobID(id); job->setFlags(l); job->setEnvironmentVersion(version); string target; *c >> target; job->setTargetPlatform(target); if (IS_PROTOCOL_30(c)) { string compilerName; *c >> compilerName; job->setCompilerName(compilerName); } if( IS_PROTOCOL_34(c)) { string inputFile; string workingDirectory; *c >> inputFile; *c >> workingDirectory; job->setInputFile(inputFile); job->setWorkingDirectory(workingDirectory); } if (IS_PROTOCOL_35(c)) { string outputFile; uint32_t dwarfFissionEnabled = 0; *c >> outputFile; *c >> dwarfFissionEnabled; job->setOutputFile(outputFile); job->setDwarfFissionEnabled(dwarfFissionEnabled); } } void CompileFileMsg::send_to_channel(MsgChannel *c) const { Msg::send_to_channel(c); *c << (uint32_t) job->language(); *c << job->jobID(); if (IS_PROTOCOL_41(c)) { // By the time we're compiling, the args are all Arg_Remote or Arg_Rest and // we no longer care about the differences, but we may care about the ordering. // So keep them all in one list. *c << job->nonLocalFlags(); } else { if (IS_PROTOCOL_30(c)) { *c << job->remoteFlags(); } else { if (job->compilerName().find("clang") != string::npos) { // Hack for compilerwrapper. 
std::list flags = job->remoteFlags(); flags.push_front("clang"); *c << flags; } else { *c << job->remoteFlags(); } } *c << job->restFlags(); } *c << job->environmentVersion(); *c << job->targetPlatform(); if (IS_PROTOCOL_30(c)) { *c << remote_compiler_name(); } if( IS_PROTOCOL_34(c)) { *c << job->inputFile(); *c << job->workingDirectory(); } if (IS_PROTOCOL_35(c)) { *c << job->outputFile(); *c << (uint32_t) job->dwarfFissionEnabled(); } } // Environments created by icecc-create-env always use the same binary name // for compilers, so even if local name was e.g. c++, remote needs to // be g++ (before protocol version 30 remote CS even had /usr/bin/{gcc|g++} // hardcoded). For clang, the binary is just clang for both C/C++. string CompileFileMsg::remote_compiler_name() const { if (job->compilerName().find("clang") != string::npos) { return "clang"; } return job->language() == CompileJob::Lang_CXX ? "g++" : "gcc"; } CompileJob *CompileFileMsg::takeJob() { assert(deleteit); deleteit = false; return job; } void FileChunkMsg::fill_from_channel(MsgChannel *c) { if (del_buf) { delete [] buffer; } buffer = 0; del_buf = true; Msg::fill_from_channel(c); c->readcompressed(&buffer, len, compressed); } void FileChunkMsg::send_to_channel(MsgChannel *c) const { Msg::send_to_channel(c); c->writecompressed(buffer, len, compressed); } FileChunkMsg::~FileChunkMsg() { if (del_buf) { delete [] buffer; } } void CompileResultMsg::fill_from_channel(MsgChannel *c) { Msg::fill_from_channel(c); uint32_t _status = 0; *c >> err; *c >> out; *c >> _status; status = _status; uint32_t was = 0; *c >> was; was_out_of_memory = was; if (IS_PROTOCOL_35(c)) { uint32_t dwo = 0; *c >> dwo; have_dwo_file = dwo; } } void CompileResultMsg::send_to_channel(MsgChannel *c) const { Msg::send_to_channel(c); *c << err; *c << out; *c << status; *c << (uint32_t) was_out_of_memory; if (IS_PROTOCOL_35(c)) { *c << (uint32_t) have_dwo_file; } } void JobBeginMsg::fill_from_channel(MsgChannel *c) { 
Msg::fill_from_channel(c); *c >> job_id; *c >> stime; if (IS_PROTOCOL_39(c)) { *c >> client_count; } } void JobBeginMsg::send_to_channel(MsgChannel *c) const { Msg::send_to_channel(c); *c << job_id; *c << stime; if (IS_PROTOCOL_39(c)) { *c << client_count; } } void JobLocalBeginMsg::fill_from_channel(MsgChannel *c) { Msg::fill_from_channel(c); *c >> stime; *c >> outfile; *c >> id; } void JobLocalBeginMsg::send_to_channel(MsgChannel *c) const { Msg::send_to_channel(c); *c << stime; *c << outfile; *c << id; } void JobLocalDoneMsg::fill_from_channel(MsgChannel *c) { Msg::fill_from_channel(c); *c >> job_id; } void JobLocalDoneMsg::send_to_channel(MsgChannel *c) const { Msg::send_to_channel(c); *c << job_id; } JobDoneMsg::JobDoneMsg(int id, int exit, unsigned int _flags, unsigned int _client_count) : Msg(M_JOB_DONE) , exitcode(exit) , flags(_flags) , job_id(id) , client_count(_client_count) { real_msec = 0; user_msec = 0; sys_msec = 0; pfaults = 0; in_compressed = 0; in_uncompressed = 0; out_compressed = 0; out_uncompressed = 0; } void JobDoneMsg::fill_from_channel(MsgChannel *c) { Msg::fill_from_channel(c); uint32_t _exitcode = 255; *c >> job_id; *c >> _exitcode; *c >> real_msec; *c >> user_msec; *c >> sys_msec; *c >> pfaults; *c >> in_compressed; *c >> in_uncompressed; *c >> out_compressed; *c >> out_uncompressed; *c >> flags; exitcode = (int) _exitcode; // Older versions used this special exit code to identify // EndJob messages for jobs with unknown job id. 
if (!IS_PROTOCOL_39(c) && exitcode == 200) { flags |= UnknownJobId; } if (IS_PROTOCOL_39(c)) { *c >> client_count; } } void JobDoneMsg::send_to_channel(MsgChannel *c) const { Msg::send_to_channel(c); *c << job_id; if (!IS_PROTOCOL_39(c) && (flags & UnknownJobId)) { *c << (uint32_t) 200; } else { *c << (uint32_t) exitcode; } *c << real_msec; *c << user_msec; *c << sys_msec; *c << pfaults; *c << in_compressed; *c << in_uncompressed; *c << out_compressed; *c << out_uncompressed; *c << flags; if (IS_PROTOCOL_39(c)) { *c << client_count; } } void JobDoneMsg::set_unknown_job_client_id( uint32_t clientId ) { flags |= UnknownJobId; job_id = clientId; } uint32_t JobDoneMsg::unknown_job_client_id() const { if( flags & UnknownJobId ) { return job_id; } return 0; } void JobDoneMsg::set_job_id( uint32_t jobId ) { job_id = jobId; flags &= ~ (uint32_t) UnknownJobId; } LoginMsg::LoginMsg(unsigned int myport, const std::string &_nodename, const std::string &_host_platform, unsigned int myfeatures) : Msg(M_LOGIN) , port(myport) , max_kids(0) , noremote(false) , chroot_possible(false) , nodename(_nodename) , host_platform(_host_platform) , supported_features(myfeatures) { #ifdef HAVE_LIBCAP_NG chroot_possible = capng_have_capability(CAPNG_EFFECTIVE, CAP_SYS_CHROOT); #else // check if we're root chroot_possible = (geteuid() == 0); #endif } void LoginMsg::fill_from_channel(MsgChannel *c) { Msg::fill_from_channel(c); *c >> port; *c >> max_kids; c->read_environments(envs); *c >> nodename; *c >> host_platform; uint32_t net_chroot_possible = 0; *c >> net_chroot_possible; chroot_possible = net_chroot_possible != 0; uint32_t net_noremote = 0; if (IS_PROTOCOL_26(c)) { *c >> net_noremote; } noremote = (net_noremote != 0); supported_features = 0; if (IS_PROTOCOL_42(c)) { *c >> supported_features; } } void LoginMsg::send_to_channel(MsgChannel *c) const { Msg::send_to_channel(c); *c << port; *c << max_kids; c->write_environments(envs); *c << nodename; *c << host_platform; *c << chroot_possible; 
if (IS_PROTOCOL_26(c)) { *c << noremote; } if (IS_PROTOCOL_42(c)) { *c << supported_features; } } void ConfCSMsg::fill_from_channel(MsgChannel *c) { Msg::fill_from_channel(c); *c >> max_scheduler_pong; *c >> max_scheduler_ping; string bench_source; // unused, kept for backwards compatibility *c >> bench_source; } void ConfCSMsg::send_to_channel(MsgChannel *c) const { Msg::send_to_channel(c); *c << max_scheduler_pong; *c << max_scheduler_ping; string bench_source; *c << bench_source; } void StatsMsg::fill_from_channel(MsgChannel *c) { Msg::fill_from_channel(c); *c >> load; *c >> loadAvg1; *c >> loadAvg5; *c >> loadAvg10; *c >> freeMem; } void StatsMsg::send_to_channel(MsgChannel *c) const { Msg::send_to_channel(c); *c << load; *c << loadAvg1; *c << loadAvg5; *c << loadAvg10; *c << freeMem; } void GetNativeEnvMsg::fill_from_channel(MsgChannel *c) { Msg::fill_from_channel(c); if (IS_PROTOCOL_32(c)) { *c >> compiler; *c >> extrafiles; } compression = string(); if (IS_PROTOCOL_42(c)) *c >> compression; } void GetNativeEnvMsg::send_to_channel(MsgChannel *c) const { Msg::send_to_channel(c); if (IS_PROTOCOL_32(c)) { *c << compiler; *c << extrafiles; } if (IS_PROTOCOL_42(c)) *c << compression; } void UseNativeEnvMsg::fill_from_channel(MsgChannel *c) { Msg::fill_from_channel(c); *c >> nativeVersion; } void UseNativeEnvMsg::send_to_channel(MsgChannel *c) const { Msg::send_to_channel(c); *c << nativeVersion; } void EnvTransferMsg::fill_from_channel(MsgChannel *c) { Msg::fill_from_channel(c); *c >> name; *c >> target; } void EnvTransferMsg::send_to_channel(MsgChannel *c) const { Msg::send_to_channel(c); *c << name; *c << target; } void MonGetCSMsg::fill_from_channel(MsgChannel *c) { if (IS_PROTOCOL_29(c)) { Msg::fill_from_channel(c); *c >> filename; uint32_t _lang; *c >> _lang; lang = static_cast(_lang); } else { GetCSMsg::fill_from_channel(c); } *c >> job_id; *c >> clientid; } void MonGetCSMsg::send_to_channel(MsgChannel *c) const { if (IS_PROTOCOL_29(c)) { 
Msg::send_to_channel(c); *c << shorten_filename(filename); *c << (uint32_t) lang; } else { GetCSMsg::send_to_channel(c); } *c << job_id; *c << clientid; } void MonJobBeginMsg::fill_from_channel(MsgChannel *c) { Msg::fill_from_channel(c); *c >> job_id; *c >> stime; *c >> hostid; } void MonJobBeginMsg::send_to_channel(MsgChannel *c) const { Msg::send_to_channel(c); *c << job_id; *c << stime; *c << hostid; } void MonLocalJobBeginMsg::fill_from_channel(MsgChannel *c) { Msg::fill_from_channel(c); *c >> hostid; *c >> job_id; *c >> stime; *c >> file; } void MonLocalJobBeginMsg::send_to_channel(MsgChannel *c) const { Msg::send_to_channel(c); *c << hostid; *c << job_id; *c << stime; *c << shorten_filename(file); } void MonStatsMsg::fill_from_channel(MsgChannel *c) { Msg::fill_from_channel(c); *c >> hostid; *c >> statmsg; } void MonStatsMsg::send_to_channel(MsgChannel *c) const { Msg::send_to_channel(c); *c << hostid; *c << statmsg; } void TextMsg::fill_from_channel(MsgChannel *c) { c->read_line(text); } void TextMsg::send_to_channel(MsgChannel *c) const { c->write_line(text); } void StatusTextMsg::fill_from_channel(MsgChannel *c) { Msg::fill_from_channel(c); *c >> text; } void StatusTextMsg::send_to_channel(MsgChannel *c) const { Msg::send_to_channel(c); *c << text; } void VerifyEnvMsg::fill_from_channel(MsgChannel *c) { Msg::fill_from_channel(c); *c >> environment; *c >> target; } void VerifyEnvMsg::send_to_channel(MsgChannel *c) const { Msg::send_to_channel(c); *c << environment; *c << target; } void VerifyEnvResultMsg::fill_from_channel(MsgChannel *c) { Msg::fill_from_channel(c); uint32_t read_ok; *c >> read_ok; ok = read_ok != 0; } void VerifyEnvResultMsg::send_to_channel(MsgChannel *c) const { Msg::send_to_channel(c); *c << uint32_t(ok); } void BlacklistHostEnvMsg::fill_from_channel(MsgChannel *c) { Msg::fill_from_channel(c); *c >> environment; *c >> target; *c >> hostname; } void BlacklistHostEnvMsg::send_to_channel(MsgChannel *c) const { Msg::send_to_channel(c); *c 
<< environment; *c << target; *c << hostname; } /* vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4: */ icecream-1.3.1/services/comm.h000066400000000000000000000623531361626760200162350ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. Copyright (c) 2004 Michael Matz 2004 Stephan Kulow This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef ICECREAM_COMM_H #define ICECREAM_COMM_H #ifdef __linux__ # include #endif #include #include #include #include #include "job.h" // if you increase the PROTOCOL_VERSION, add a macro below and use that #define PROTOCOL_VERSION 42 // if you increase the MIN_PROTOCOL_VERSION, comment out macros below and clean up the code #define MIN_PROTOCOL_VERSION 21 #define MAX_SCHEDULER_PONG 3 // MAX_SCHEDULER_PING must be multiple of MAX_SCHEDULER_PONG #define MAX_SCHEDULER_PING 12 * MAX_SCHEDULER_PONG // maximum amount of time in seconds a daemon can be busy installing #define MAX_BUSY_INSTALLING 120 #define IS_PROTOCOL_22(c) ((c)->protocol >= 22) #define IS_PROTOCOL_23(c) ((c)->protocol >= 23) #define IS_PROTOCOL_24(c) ((c)->protocol >= 24) #define IS_PROTOCOL_25(c) ((c)->protocol >= 25) #define IS_PROTOCOL_26(c) ((c)->protocol >= 26) #define IS_PROTOCOL_27(c) ((c)->protocol >= 27) #define IS_PROTOCOL_28(c) ((c)->protocol >= 28) #define IS_PROTOCOL_29(c) ((c)->protocol >= 29) #define IS_PROTOCOL_30(c) ((c)->protocol >= 30) #define IS_PROTOCOL_31(c) ((c)->protocol >= 31) #define IS_PROTOCOL_32(c) ((c)->protocol >= 32) #define IS_PROTOCOL_33(c) ((c)->protocol >= 33) #define IS_PROTOCOL_34(c) ((c)->protocol >= 34) #define IS_PROTOCOL_35(c) ((c)->protocol >= 35) #define IS_PROTOCOL_36(c) ((c)->protocol >= 36) #define IS_PROTOCOL_37(c) ((c)->protocol >= 37) #define IS_PROTOCOL_38(c) ((c)->protocol >= 38) #define IS_PROTOCOL_39(c) ((c)->protocol >= 39) #define IS_PROTOCOL_40(c) ((c)->protocol >= 40) #define IS_PROTOCOL_41(c) ((c)->protocol >= 41) #define IS_PROTOCOL_42(c) ((c)->protocol >= 42) // Terms used: // S = scheduler // C = client // CS = daemon enum MsgType { // so far unknown M_UNKNOWN = 'A', /* When the scheduler didn't get M_STATS from a CS for a specified time (e.g. 
10m), then he sends a ping */ M_PING, /* Either the end of file chunks or connection (A<->A) */ M_END, M_TIMEOUT, // unused // C --> CS M_GET_NATIVE_ENV, // CS -> C M_NATIVE_ENV, // C --> S M_GET_CS, // S --> C M_USE_CS, // = 'H' // C --> CS M_COMPILE_FILE, // = 'I' // generic file transfer M_FILE_CHUNK, // CS --> C M_COMPILE_RESULT, // CS --> S (after the C got the CS from the S, the CS tells the S when the C asks him) M_JOB_BEGIN, M_JOB_DONE, // = 'M' // C --> CS, CS --> S (forwarded from C), _and_ CS -> C as start ping M_JOB_LOCAL_BEGIN, // = 'N' M_JOB_LOCAL_DONE, // CS --> S, first message sent M_LOGIN, // CS --> S (periodic) M_STATS, // messages between monitor and scheduler M_MON_LOGIN, M_MON_GET_CS, M_MON_JOB_BEGIN, // = 'T' M_MON_JOB_DONE, M_MON_LOCAL_JOB_BEGIN, M_MON_STATS, M_TRANFER_ENV, // = 'X' M_TEXT, M_STATUS_TEXT, // = 'Z' M_GET_INTERNALS, // S --> CS, answered by M_LOGIN M_CS_CONF, // C --> CS, after installing an environment M_VERIFY_ENV, // CS --> C M_VERIFY_ENV_RESULT, // C --> CS, CS --> S (forwarded from C), to not use given host for given environment M_BLACKLIST_HOST_ENV, // S --> CS M_NO_CS }; enum Compression { C_LZO = 0, C_ZSTD = 1 }; // The remote node is capable of unpacking environment compressed as .tar.xz . const int NODE_FEATURE_ENV_XZ = ( 1 << 0 ); // The remote node is capable of unpacking environment compressed as .tar.zst . const int NODE_FEATURE_ENV_ZSTD = ( 1 << 1 ); class MsgChannel; // a list of pairs of host platform, filename typedef std::list > Environments; class Msg { public: Msg(enum MsgType t) : type(t) {} virtual ~Msg() {} virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; enum MsgType type; }; class MsgChannel { public: enum SendFlags { SendBlocking = 1 << 0, SendNonBlocking = 1 << 1, SendBulkOnly = 1 << 2 }; virtual ~MsgChannel(); void setBulkTransfer(); std::string dump() const; // NULL <--> channel closed or timeout // Will warn in log if EOF and !eofAllowed. 
Msg *get_msg(int timeout = 10, bool eofAllowed = false); // false <--> error (msg not send) bool send_msg(const Msg &, int SendFlags = SendBlocking); bool has_msg(void) const { return eof || instate == HAS_MSG; } // Returns ture if there were no errors filling inbuf. bool read_a_bit(void); bool at_eof(void) const { return instate != HAS_MSG && eof; } bool is_text_based(void) const { return text_based; } void readcompressed(unsigned char **buf, size_t &_uclen, size_t &_clen); void writecompressed(const unsigned char *in_buf, size_t _in_len, size_t &_out_len); void write_environments(const Environments &envs); void read_environments(Environments &envs); void read_line(std::string &line); void write_line(const std::string &line); bool eq_ip(const MsgChannel &s) const; MsgChannel &operator>>(uint32_t &); MsgChannel &operator>>(std::string &); MsgChannel &operator>>(std::list &); MsgChannel &operator<<(uint32_t); MsgChannel &operator<<(const std::string &); MsgChannel &operator<<(const std::list &); // our filedesc int fd; // the minimum protocol version between me and him int protocol; // the actual maximum protocol the remote supports int maximum_remote_protocol; std::string name; time_t last_talk; protected: MsgChannel(int _fd, struct sockaddr *, socklen_t, bool text = false); bool wait_for_protocol(); // returns false if there was an error sending something bool flush_writebuf(bool blocking); void writefull(const void *_buf, size_t count); // returns false if there was an error in the protocol setup bool update_state(void); void chop_input(void); void chop_output(void); bool wait_for_msg(int timeout); void set_error(bool silent = false); char *msgbuf; size_t msgbuflen; size_t msgofs; size_t msgtogo; char *inbuf; size_t inbuflen; size_t inofs; size_t intogo; enum { NEED_PROTO, NEED_LEN, FILL_BUF, HAS_MSG, ERROR } instate; uint32_t inmsglen; bool eof; bool text_based; private: friend class Service; // deep copied struct sockaddr *addr; socklen_t addr_len; bool 
set_error_recursion; }; // just convenient functions to create MsgChannels class Service { public: static MsgChannel *createChannel(const std::string &host, unsigned short p, int timeout); static MsgChannel *createChannel(const std::string &domain_socket); static MsgChannel *createChannel(int remote_fd, struct sockaddr *, socklen_t); }; class Broadcasts { public: // Broadcasts a message about this scheduler and its information. static void broadcastSchedulerVersion(int scheduler_port, const char* netname, time_t starttime); // Checks if the data received is a scheduler version broadcast. static bool isSchedulerVersion(const char* buf, int buflen); // Reads data from a scheduler version broadcast. static void getSchedulerVersionData( const char* buf, int* protocol, time_t* time, std::string* netname ); /// Broadcasts the given data on the given port. static const int BROAD_BUFLEN = 268; private: static void broadcastData(int port, const char* buf, int size); }; // -------------------------------------------------------------------------- // this class is also used by icecream-monitor class DiscoverSched { public: /* Connect to a scheduler waiting max. TIMEOUT seconds. schedname can be the hostname of a box running a scheduler, to avoid broadcasting, port can be specified explicitly */ DiscoverSched(const std::string &_netname = std::string(), int _timeout = 2, const std::string &_schedname = std::string(), int port = 0); ~DiscoverSched(); bool timed_out(); int listen_fd() const { return schedname.empty() ? ask_fd : -1; } int connect_fd() const { return schedname.empty() ? -1 : ask_fd; } // compat for icecream monitor int get_fd() const { return listen_fd(); } /* Attempt to get a conenction to the scheduler. Continue to call this while it returns NULL and timed_out() returns false. If this returns NULL you should wait for either more data on listen_fd() (use select), or a timeout of your own. 
*/ MsgChannel *try_get_scheduler(); // Returns the hostname of the scheduler - set by constructor or by try_get_scheduler std::string schedulerName() const { return schedname; } // Returns the network name of the scheduler - set by constructor or by try_get_scheduler std::string networkName() const { return netname; } /* Return a list of all reachable netnames. We wait max. WAITTIME milliseconds for answers. */ static std::list getNetnames(int waittime = 2000, int port = 8765); // Checks if the data is from a scheduler discovery broadcast, returns version of the sending // daemon is yes. static bool isSchedulerDiscovery(const char* buf, int buflen, int* daemon_version); // Prepares data for sending a reply to a scheduler discovery broadcast. static int prepareBroadcastReply(char* buf, const char* netname, time_t starttime); private: struct sockaddr_in remote_addr; std::string netname; std::string schedname; int timeout; int ask_fd; int ask_second_fd; // for debugging time_t time0; unsigned int sport; int best_version; time_t best_start_time; std::string best_schedname; int best_port; bool multiple; void attempt_scheduler_connect(); void sendSchedulerDiscovery( int version ); static bool get_broad_answer(int ask_fd, int timeout, char *buf2, struct sockaddr_in *remote_addr, socklen_t *remote_len); static void get_broad_data(const char* buf, const char** name, int* version, time_t* start_time); }; // -------------------------------------------------------------------------- /* Return a list of all reachable netnames. We wait max. WAITTIME milliseconds for answers. 
*/ std::list get_netnames(int waittime = 2000, int port = 8765); class PingMsg : public Msg { public: PingMsg() : Msg(M_PING) {} }; class EndMsg : public Msg { public: EndMsg() : Msg(M_END) {} }; class GetCSMsg : public Msg { public: GetCSMsg() : Msg(M_GET_CS) , count(1) , arg_flags(0) , client_id(0) , client_count(0) {} GetCSMsg(const Environments &envs, const std::string &f, CompileJob::Language _lang, unsigned int _count, std::string _target, unsigned int _arg_flags, const std::string &host, int _minimal_host_version, unsigned int _required_features, unsigned int _client_count = 0); virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; Environments versions; std::string filename; CompileJob::Language lang; uint32_t count; // the number of UseCS messages to answer with - usually 1 std::string target; uint32_t arg_flags; uint32_t client_id; std::string preferred_host; int minimal_host_version; uint32_t required_features; uint32_t client_count; // number of CS -> C connections at the moment }; class UseCSMsg : public Msg { public: UseCSMsg() : Msg(M_USE_CS) {} UseCSMsg(std::string platform, std::string host, unsigned int p, unsigned int id, bool gotit, unsigned int _client_id, unsigned int matched_host_jobs) : Msg(M_USE_CS), job_id(id), hostname(host), port(p), host_platform(platform), got_env(gotit), client_id(_client_id), matched_job_id(matched_host_jobs) {} virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; uint32_t job_id; std::string hostname; uint32_t port; std::string host_platform; uint32_t got_env; uint32_t client_id; uint32_t matched_job_id; }; class NoCSMsg : public Msg { public: NoCSMsg() : Msg(M_NO_CS) {} NoCSMsg(unsigned int id, unsigned int _client_id) : Msg(M_NO_CS), job_id(id), client_id(_client_id) {} virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; uint32_t job_id; uint32_t client_id; }; class GetNativeEnvMsg 
: public Msg { public: GetNativeEnvMsg() : Msg(M_GET_NATIVE_ENV) {} GetNativeEnvMsg(const std::string &c, const std::list &e, const std::string &comp) : Msg(M_GET_NATIVE_ENV) , compiler(c) , extrafiles(e) , compression(comp) {} virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; std::string compiler; // "gcc", "clang" or the actual binary std::list extrafiles; std::string compression; // "" (=default), "none", "gzip", "xz", etc. }; class UseNativeEnvMsg : public Msg { public: UseNativeEnvMsg() : Msg(M_NATIVE_ENV) {} UseNativeEnvMsg(std::string _native) : Msg(M_NATIVE_ENV) , nativeVersion(_native) {} virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; std::string nativeVersion; }; class CompileFileMsg : public Msg { public: CompileFileMsg(CompileJob *j, bool delete_job = false) : Msg(M_COMPILE_FILE) , deleteit(delete_job) , job(j) {} ~CompileFileMsg() { if (deleteit) { delete job; } } virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; CompileJob *takeJob(); private: std::string remote_compiler_name() const; bool deleteit; CompileJob *job; }; class FileChunkMsg : public Msg { public: FileChunkMsg(unsigned char *_buffer, size_t _len) : Msg(M_FILE_CHUNK) , buffer(_buffer) , len(_len) , del_buf(false) {} FileChunkMsg() : Msg(M_FILE_CHUNK) , buffer(0) , len(0) , del_buf(true) {} ~FileChunkMsg(); virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; unsigned char *buffer; size_t len; mutable size_t compressed; bool del_buf; private: FileChunkMsg(const FileChunkMsg &); FileChunkMsg &operator=(const FileChunkMsg &); }; class CompileResultMsg : public Msg { public: CompileResultMsg() : Msg(M_COMPILE_RESULT) , status(0) , was_out_of_memory(false) , have_dwo_file(false) {} virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; int status; std::string out; 
std::string err; bool was_out_of_memory; bool have_dwo_file; }; class JobBeginMsg : public Msg { public: JobBeginMsg() : Msg(M_JOB_BEGIN) , client_count(0) {} JobBeginMsg(unsigned int j, unsigned int _client_count) : Msg(M_JOB_BEGIN) , job_id(j) , stime(time(0)) , client_count(_client_count) {} virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; uint32_t job_id; uint32_t stime; uint32_t client_count; // number of CS -> C connections at the moment }; class JobDoneMsg : public Msg { public: /* FROM_SERVER: this message was generated by the daemon responsible for remotely compiling the job (i.e. job->server). FROM_SUBMITTER: this message was generated by the daemon connected to the submitting client. */ enum from_type { FROM_SERVER = 0, FROM_SUBMITTER = 1 }; // other flags enum { UnknownJobId = (1 << 1) }; JobDoneMsg(int job_id = 0, int exitcode = -1, unsigned int flags = FROM_SERVER, unsigned int _client_count = 0); void set_from(from_type from) { flags |= (uint32_t)from; } bool is_from_server() { return (flags & FROM_SUBMITTER) == 0; } void set_unknown_job_client_id( uint32_t clientId ); uint32_t unknown_job_client_id() const; void set_job_id( uint32_t jobId ); virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; uint32_t real_msec; /* real time it used */ uint32_t user_msec; /* user time used */ uint32_t sys_msec; /* system time used */ uint32_t pfaults; /* page faults */ int exitcode; /* exit code */ uint32_t flags; uint32_t in_compressed; uint32_t in_uncompressed; uint32_t out_compressed; uint32_t out_uncompressed; uint32_t job_id; uint32_t client_count; // number of CS -> C connections at the moment }; class JobLocalBeginMsg : public Msg { public: JobLocalBeginMsg(int job_id = 0, const std::string &file = "") : Msg(M_JOB_LOCAL_BEGIN) , outfile(file) , stime(time(0)) , id(job_id) {} virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) 
const; std::string outfile; uint32_t stime; uint32_t id; }; class JobLocalDoneMsg : public Msg { public: JobLocalDoneMsg(unsigned int id = 0) : Msg(M_JOB_LOCAL_DONE) , job_id(id) {} virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; uint32_t job_id; }; class LoginMsg : public Msg { public: LoginMsg(unsigned int myport, const std::string &_nodename, const std::string &_host_platform, unsigned int my_features); LoginMsg() : Msg(M_LOGIN) , port(0) {} virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; uint32_t port; Environments envs; uint32_t max_kids; bool noremote; bool chroot_possible; std::string nodename; std::string host_platform; uint32_t supported_features; // bitmask of various features the node supports }; class ConfCSMsg : public Msg { public: ConfCSMsg() : Msg(M_CS_CONF) , max_scheduler_pong(MAX_SCHEDULER_PONG) , max_scheduler_ping(MAX_SCHEDULER_PING) {} virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; uint32_t max_scheduler_pong; uint32_t max_scheduler_ping; }; class StatsMsg : public Msg { public: StatsMsg() : Msg(M_STATS) , load(0) , client_count(0) { } virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; /** * For now the only load measure we have is the * load from 0-1000. * This is defined to be a daemon defined value * on how busy the machine is. The higher the load * is, the slower a given job will compile (preferably * linear scale). Load of 1000 means to not schedule * another job under no circumstances. 
*/ uint32_t load; uint32_t loadAvg1; uint32_t loadAvg5; uint32_t loadAvg10; uint32_t freeMem; uint32_t client_count; // number of CS -> C connections at the moment }; class EnvTransferMsg : public Msg { public: EnvTransferMsg() : Msg(M_TRANFER_ENV) {} EnvTransferMsg(const std::string &_target, const std::string &_name) : Msg(M_TRANFER_ENV) , name(_name) , target(_target) {} virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; std::string name; std::string target; }; class GetInternalStatus : public Msg { public: GetInternalStatus() : Msg(M_GET_INTERNALS) {} GetInternalStatus(const GetInternalStatus &) : Msg(M_GET_INTERNALS) {} }; class MonLoginMsg : public Msg { public: MonLoginMsg() : Msg(M_MON_LOGIN) {} }; class MonGetCSMsg : public GetCSMsg { public: MonGetCSMsg() : GetCSMsg() { // overwrite type = M_MON_GET_CS; clientid = job_id = 0; } MonGetCSMsg(int jobid, int hostid, GetCSMsg *m) : GetCSMsg(Environments(), m->filename, m->lang, 1, m->target, 0, std::string(), false, m->client_count) , job_id(jobid) , clientid(hostid) { type = M_MON_GET_CS; } virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; uint32_t job_id; uint32_t clientid; }; class MonJobBeginMsg : public Msg { public: MonJobBeginMsg() : Msg(M_MON_JOB_BEGIN) , job_id(0) , stime(0) , hostid(0) {} MonJobBeginMsg(unsigned int id, unsigned int time, int _hostid) : Msg(M_MON_JOB_BEGIN) , job_id(id) , stime(time) , hostid(_hostid) {} virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; uint32_t job_id; uint32_t stime; uint32_t hostid; }; class MonJobDoneMsg : public JobDoneMsg { public: MonJobDoneMsg() : JobDoneMsg() { type = M_MON_JOB_DONE; } MonJobDoneMsg(const JobDoneMsg &o) : JobDoneMsg(o) { type = M_MON_JOB_DONE; } }; class MonLocalJobBeginMsg : public Msg { public: MonLocalJobBeginMsg() : Msg(M_MON_LOCAL_JOB_BEGIN) {} MonLocalJobBeginMsg(unsigned int id, const 
std::string &_file, unsigned int time, int _hostid) : Msg(M_MON_LOCAL_JOB_BEGIN) , job_id(id) , stime(time) , hostid(_hostid) , file(_file) {} virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; uint32_t job_id; uint32_t stime; uint32_t hostid; std::string file; }; class MonStatsMsg : public Msg { public: MonStatsMsg() : Msg(M_MON_STATS) {} MonStatsMsg(int id, const std::string &_statmsg) : Msg(M_MON_STATS) , hostid(id) , statmsg(_statmsg) {} virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; uint32_t hostid; std::string statmsg; }; class TextMsg : public Msg { public: TextMsg() : Msg(M_TEXT) {} TextMsg(const std::string &_text) : Msg(M_TEXT) , text(_text) {} TextMsg(const TextMsg &m) : Msg(M_TEXT) , text(m.text) {} virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; std::string text; }; class StatusTextMsg : public Msg { public: StatusTextMsg() : Msg(M_STATUS_TEXT) {} StatusTextMsg(const std::string &_text) : Msg(M_STATUS_TEXT) , text(_text) {} virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; std::string text; }; class VerifyEnvMsg : public Msg { public: VerifyEnvMsg() : Msg(M_VERIFY_ENV) {} VerifyEnvMsg(const std::string &_target, const std::string &_environment) : Msg(M_VERIFY_ENV) , environment(_environment) , target(_target) {} virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; std::string environment; std::string target; }; class VerifyEnvResultMsg : public Msg { public: VerifyEnvResultMsg() : Msg(M_VERIFY_ENV_RESULT) {} VerifyEnvResultMsg(bool _ok) : Msg(M_VERIFY_ENV_RESULT) , ok(_ok) {} virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; bool ok; }; class BlacklistHostEnvMsg : public Msg { public: BlacklistHostEnvMsg() : Msg(M_BLACKLIST_HOST_ENV) {} BlacklistHostEnvMsg(const 
std::string &_target, const std::string &_environment, const std::string &_hostname) : Msg(M_BLACKLIST_HOST_ENV) , environment(_environment) , target(_target) , hostname(_hostname) {} virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; std::string environment; std::string target; std::string hostname; }; #endif icecream-1.3.1/services/exitcode.cpp000066400000000000000000000025161361626760200174340ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* * distcc -- A simple distributed compiler system * * Copyright (C) 2002, 2003 by Martin Pool * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "exitcode.h" #include #include /* Converts exit status from waitpid() to exit status to be returned by the process. 
*/ int shell_exit_status(int status) { if (WIFEXITED(status)) { return WEXITSTATUS(status); } else if (WIFSIGNALED(status)) { return WTERMSIG(status) + 128; // shell does this } else { return -1; } } icecream-1.3.1/services/exitcode.h000066400000000000000000000040431361626760200170760ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* * distcc -- A simple distributed compiler system * * Copyright (C) 2002, 2003 by Martin Pool * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef DISTCC_EXITCODE_H #define DISTCC_EXITCODE_H /** * @file * * Common exit codes. **/ /** * Common exit codes for both client and server. * * These need to be in [1,255] so that they can be used as exit() * codes. 
**/ enum dcc_exitcode { EXIT_DISTCC_FAILED = 100, /**< General failure */ EXIT_BAD_ARGUMENTS = 101, EXIT_BIND_FAILED = 102, EXIT_CONNECT_FAILED = 103, EXIT_COMPILER_CRASHED = 104, EXIT_OUT_OF_MEMORY = 105, EXIT_BAD_HOSTSPEC = 106, EXIT_IO_ERROR = 107, EXIT_TRUNCATED = 108, EXIT_PROTOCOL_ERROR = 109, EXIT_COMPILER_MISSING = 110, /**< Compiler executable not found */ EXIT_RECURSION = 111, /**< icecc called itself */ EXIT_SETUID_FAILED = 112, /**< Failed to discard privileges */ EXIT_ACCESS_DENIED = 113, /**< Network access denied */ EXIT_BUSY = 114, /**< In use by another process. */ EXIT_NO_SUCH_FILE = 115, EXIT_NO_HOSTS = 116, EXIT_GONE = 117, /**< No longer relevant */ EXIT_CLIENT_KILLED = 118, EXIT_TEST_SOCKET_ERROR = 119 }; extern int shell_exit_status(int status); #endif /* _DISTCC_EXITCODE_H */ icecream-1.3.1/services/gcc.cpp000066400000000000000000000020301361626760200163530ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ // code based on gcc - Copyright (C) 1999, 2000, 2001, 2002 Free Software Foundation, Inc. #include /* Heuristic to set a default for GGC_MIN_EXPAND. */ int ggc_min_expand_heuristic(unsigned int mem_limit) { double min_expand = mem_limit; /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB). */ min_expand /= 1024; min_expand *= 70; min_expand = std::min(min_expand, 70.); min_expand += 30; return int(min_expand); } /* Heuristic to set a default for GGC_MIN_HEAPSIZE. */ unsigned int ggc_min_heapsize_heuristic(unsigned int mem_limit) { /* The heuristic is RAM/8, with a lower bound of 4M and an upper bound of 128M (when RAM >= 1GB). 
*/ mem_limit /= 8; mem_limit = std::max(mem_limit, 4U); mem_limit = std::min(mem_limit, 128U); return mem_limit * 1024; } icecream-1.3.1/services/getifaddrs.cpp000066400000000000000000000220771361626760200177500ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* getifaddrs -- get names and addresses of all network interfaces Copyright (C) 1999,2002 Free Software Foundation, Inc. This file is part of the GNU C Library. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * 02-12-26, tim@tjansen.de: put in kde_ namespace, C++ fixes, * included ifreq.h * removed glibc dependencies */ #include "config.h" #include "getifaddrs.h" #include "logging.h" #include #ifndef HAVE_IFADDRS_H #include #include #include #include #include #include #include #include #ifndef IF_NAMESIZE #define IF_NAMESIZE IFNAMSIZ #endif #ifdef SIOCGIFCONF #define old_siocgifconf 0 static inline void __ifreq(struct ifreq **ifreqs, int *num_ifs, int sockfd) { int fd = sockfd; struct ifconf ifc; int rq_len; int nifs; # define RQ_IFS 4 if (fd < 0) { fd = socket(AF_INET, SOCK_DGRAM, 0); } if (fd < 0) { *num_ifs = 0; *ifreqs = NULL; return; } ifc.ifc_buf = NULL; /* We may be able to get the needed buffer size directly, rather than guessing. */ if (! 
old_siocgifconf) { ifc.ifc_buf = NULL; ifc.ifc_len = 0; if (ioctl(fd, SIOCGIFCONF, &ifc) < 0 || ifc.ifc_len == 0) { rq_len = RQ_IFS * sizeof(struct ifreq); } else { rq_len = ifc.ifc_len; } } else { rq_len = RQ_IFS * sizeof(struct ifreq); } /* Read all the interfaces out of the kernel. */ while (1) { ifc.ifc_len = rq_len; ifc.ifc_buf = (char *) realloc(ifc.ifc_buf, ifc.ifc_len); if (ifc.ifc_buf == NULL || ioctl(fd, SIOCGIFCONF, &ifc) < 0) { if (ifc.ifc_buf) { free(ifc.ifc_buf); } if (fd != sockfd) { if ((-1 == close(fd)) && (errno != EBADF)){ log_perror("close failed"); } } *num_ifs = 0; *ifreqs = NULL; return; } if (!old_siocgifconf || ifc.ifc_len < rq_len) { break; } rq_len *= 2; } nifs = ifc.ifc_len / sizeof(struct ifreq); if (fd != sockfd) { if ((-1 == close(fd)) && (errno != EBADF)){ log_perror("close failed"); } } *num_ifs = nifs; *ifreqs = (ifreq *)realloc(ifc.ifc_buf, nifs * sizeof(struct ifreq)); } static inline struct ifreq *__if_nextreq(struct ifreq *ifr) { return ifr + 1; } static inline void __if_freereq(struct ifreq *ifreqs, int num_ifs) { free(ifreqs); } /* Create a linked list of `struct kde_ifaddrs' structures, one for each network interface on the host machine. If successful, store the list in *IFAP and return 0. On errors, return -1 and set `errno'. */ int kde_getifaddrs(struct kde_ifaddrs **ifap) { /* This implementation handles only IPv4 interfaces. The various ioctls below will only work on an AF_INET socket. Some different mechanism entirely must be used for IPv6. */ int fd = socket(AF_INET, SOCK_DGRAM, 0); struct ifreq *ifreqs; int nifs; if (fd < 0) { return -1; } __ifreq(&ifreqs, &nifs, fd); if (ifreqs == NULL) { /* XXX doesn't distinguish error vs none */ if (-1 == close(fd)){ log_perror("close failed"); } return -1; } /* Now we have the list of interfaces and each one's address. Put it into the expected format and fill in the remaining details. 
*/ if (nifs == 0) { *ifap = NULL; } else { struct Storage { struct kde_ifaddrs ia; struct sockaddr addr, netmask, broadaddr; char name[IF_NAMESIZE]; } *storage; struct ifreq *ifr; int i; storage = (Storage *) malloc(nifs * sizeof storage[0]); if (storage == NULL) { if (-1 == close(fd)){ log_perror("close failed"); } __if_freereq(ifreqs, nifs); return -1; } i = 0; ifr = ifreqs; do { /* Fill in all pointers to the storage we've already allocated. */ storage[i].ia.ifa_next = &storage[i + 1].ia; storage[i].ia.ifa_addr = &storage[i].addr; storage[i].ia.ifa_netmask = &storage[i].netmask; storage[i].ia.ifa_broadaddr = &storage[i].broadaddr; /* & dstaddr */ /* Now copy the information we already have from SIOCGIFCONF. */ storage[i].ia.ifa_name = strncpy(storage[i].name, ifr->ifr_name, sizeof storage[i].name); storage[i].addr = ifr->ifr_addr; /* The SIOCGIFCONF call filled in only the name and address. Now we must also ask for the other information we need. */ if (ioctl(fd, SIOCGIFFLAGS, ifr) < 0) { break; } storage[i].ia.ifa_flags = ifr->ifr_flags; ifr->ifr_addr = storage[i].addr; if (ioctl(fd, SIOCGIFNETMASK, ifr) < 0) { break; } storage[i].netmask = ifr->ifr_netmask; if (ifr->ifr_flags & IFF_BROADCAST) { ifr->ifr_addr = storage[i].addr; if (ioctl(fd, SIOCGIFBRDADDR, ifr) < 0) { break; } storage[i].broadaddr = ifr->ifr_broadaddr; } else if (ifr->ifr_flags & IFF_POINTOPOINT) { ifr->ifr_addr = storage[i].addr; // Needed on Cygwin #ifndef SIOCGIFDSTADDR #define SIOCGIFDSTADDR 0x8917 #endif if (ioctl(fd, SIOCGIFDSTADDR, ifr) < 0) { break; } #if HAVE_IFR_DSTADDR storage[i].broadaddr = ifr->ifr_dstaddr; #else // Fix for Cygwin storage[i].broadaddr = ifr->ifr_broadaddr; #endif } else /* Just 'cause. */ { memset(&storage[i].broadaddr, 0, sizeof storage[i].broadaddr); } storage[i].ia.ifa_data = NULL; /* Nothing here for now. */ ifr = __if_nextreq(ifr); } while (++i < nifs); if (i < nifs) { /* Broke out early on error. 
*/ if (-1 == close(fd)){ log_perror("close failed"); } free(storage); __if_freereq(ifreqs, nifs); return -1; } storage[i - 1].ia.ifa_next = NULL; *ifap = &storage[0].ia; if (-1 == close(fd)){ log_perror("close failed"); } __if_freereq(ifreqs, nifs); } return 0; } void kde_freeifaddrs(struct kde_ifaddrs *ifa) { free(ifa); } #else int kde_getifaddrs(struct kde_ifaddrs **) { return 1; } void kde_freeifaddrs(struct kde_ifaddrs *) { } struct { } kde_ifaddrs; #endif #endif bool build_address_for_interface(struct sockaddr_in &myaddr, const std::string &interface, int port) { // Pre-fill the output parameter with the default address (port, INADDR_ANY) myaddr.sin_family = AF_INET; myaddr.sin_port = htons(port); myaddr.sin_addr.s_addr = htonl( INADDR_ANY ); // If no interface was specified, return the default address if (interface.empty()) { return true; } // Explicit case for loopback. if (interface == "lo") { myaddr.sin_addr.s_addr = htonl( INADDR_LOOPBACK ); return true; } // Otherwise, search for the IP address of the given interface struct kde_ifaddrs *addrs; if (kde_getifaddrs(&addrs) < 0) { log_perror("kde_getifaddrs()"); return false; } bool found = false; for (struct kde_ifaddrs *addr = addrs; addr != NULL; addr = addr->ifa_next) { if (interface != addr->ifa_name) { continue; } if (addr->ifa_addr == NULL || addr->ifa_addr->sa_family != AF_INET) { continue; } myaddr.sin_addr.s_addr = reinterpret_cast(addr->ifa_addr)->sin_addr.s_addr; found = true; break; } kde_freeifaddrs(addrs); if (!found) { log_error() << "No IP address found for interface \"" << interface << "\"" << std::endl; return false; } return true; } icecream-1.3.1/services/getifaddrs.h000066400000000000000000000073711361626760200174150ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* ifaddrs.h -- declarations for getting network interface addresses Copyright (C) 2002 Free Software Foundation, Inc. 
This file is part of the GNU C Library. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef GETIFADDRS_H #define GETIFADDRS_H /** * 02-12-26, tim@tjansen.de: added kde_ prefix, fallback-code, * removed glibs dependencies */ #include "config.h" #include #include #include #include #ifndef IFF_POINTOPOINT # define IFF_POINTOPOINT 0x10 #endif #ifdef HAVE_IFADDRS_H #include #define kde_getifaddrs(a) getifaddrs(a) #define kde_freeifaddrs(a) freeifaddrs(a) #define kde_ifaddrs ifaddrs #else #include /* The `getifaddrs' function generates a linked list of these structures. Each element of the list describes one network interface. */ struct kde_ifaddrs { struct kde_ifaddrs *ifa_next; /* Pointer to the next structure. */ char *ifa_name; /* Name of this network interface. */ unsigned int ifa_flags; /* Flags as from SIOCGIFFLAGS ioctl. */ struct sockaddr *ifa_addr; /* Network address of this interface. */ struct sockaddr *ifa_netmask; /* Netmask of this interface. */ union { /* At most one of the following two is valid. If the IFF_BROADCAST bit is set in `ifa_flags', then `ifa_broadaddr' is valid. If the IFF_POINTOPOINT bit is set, then `ifa_dstaddr' is valid. It is never the case that both these bits are set at once. */ struct sockaddr *ifu_broadaddr; /* Broadcast address of this interface. 
*/ struct sockaddr *ifu_dstaddr; /* Point-to-point destination address. */ } ifa_ifu; /* These very same macros are defined by for `struct ifaddr'. So if they are defined already, the existing definitions will be fine. */ # ifndef ifa_broadaddr # define ifa_broadaddr ifa_ifu.ifu_broadaddr # endif # ifndef ifa_dstaddr # define ifa_dstaddr ifa_ifu.ifu_dstaddr # endif void *ifa_data; /* Address-specific data (may be unused). */ }; /* Create a linked list of `struct kde_ifaddrs' structures, one for each network interface on the host machine. If successful, store the list in *IFAP and return 0. On errors, return -1 and set `errno'. The storage returned in *IFAP is allocated dynamically and can only be properly freed by passing it to `freeifaddrs'. */ extern int kde_getifaddrs(struct kde_ifaddrs **__ifap); /* Reclaim the storage allocated by a previous `getifaddrs' call. */ extern void kde_freeifaddrs(struct kde_ifaddrs *__ifa); #endif /** * Constructs an IPv4 socket address for a given port and network interface. * * The address is suitable for use by a subsequent call to bind(). * If the interface argument is an empty string, the socket will listen on all interfaces. */ bool build_address_for_interface(struct sockaddr_in &myaddr, const std::string &interface, int port); #endif icecream-1.3.1/services/icecc.pc.in000066400000000000000000000005111361626760200171140ustar00rootroot00000000000000# icecc pkg-config file prefix=@prefix@ exec_prefix=@exec_prefix@ libdir=@libdir@ includedir=@includedir@ Name: icecc Description: icecc is a library for connecting to icecc schedulers Version: @VERSION@ Requires: Conflicts: Libs: -L${libdir} -licecc Libs.private: @CAPNG_LDADD@ -llzo2 -lzstd -larchive Cflags: -I${includedir} icecream-1.3.1/services/job.cpp000066400000000000000000000065411361626760200164040ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. 
Copyright (c) 2004 Stephan Kulow This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "job.h" #include "logging.h" #include "exitcode.h" #include "platform.h" #include using namespace std; list CompileJob::flags(Argument_Type argumentType) const { list args; for (ArgumentsList::const_iterator it = m_flags.begin(); it != m_flags.end(); ++it) { if (it->second == argumentType) { args.push_back(it->first); } } return args; } list CompileJob::localFlags() const { return flags(Arg_Local); } list CompileJob::remoteFlags() const { return flags(Arg_Remote); } list CompileJob::restFlags() const { return flags(Arg_Rest); } list CompileJob::nonLocalFlags() const { list args; for (ArgumentsList::const_iterator it = m_flags.begin(); it != m_flags.end(); ++it) { if (it->second != Arg_Local) { args.push_back(it->first); } } return args; } list CompileJob::allFlags() const { list args; for (ArgumentsList::const_iterator it = m_flags.begin(); it != m_flags.end(); ++it) { args.push_back(it->first); } return args; } void CompileJob::setTargetPlatform() { m_target_platform = determine_platform(); } unsigned int CompileJob::argumentFlags() const { unsigned int result = Flag_None; for (ArgumentsList::const_iterator it = m_flags.begin(); it != m_flags.end(); ++it) { const string arg = it->first; if (arg.at(0) == '-') { if (arg.length() == 1) { continue; } if 
(arg.at(1) == 'g') { if (arg.length() > 2 && arg.at(2) == '3') { result &= ~Flag_g; result |= Flag_g3; } else { result &= ~Flag_g3; result |= Flag_g; } } else if (arg.at(1) == 'O') { result &= ~(Flag_O | Flag_O2 | Flag_Ol2); if (arg.length() == 2) { result |= Flag_O; } else { assert(arg.length() > 2); if (arg.at(2) == '2') { result |= Flag_O2; } else if (arg.at(2) == '1') { result |= Flag_O; } else if (arg.at(2) != '0') { result |= Flag_Ol2; } } } } } return result; } icecream-1.3.1/services/job.h000066400000000000000000000136751361626760200160570ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. Copyright (c) 2004-2014 Stephan Kulow This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef ICECREAM_COMPILE_JOB_H #define ICECREAM_COMPILE_JOB_H #include #include #include #include typedef enum { Arg_Local, // Local-only args. Arg_Remote, // Remote-only args. Arg_Rest // Args to use both locally and remotely. 
} Argument_Type; class ArgumentsList : public std::list > { public: void append(std::string s, Argument_Type t) { push_back(make_pair(s, t)); } }; class CompileJob { public: typedef enum { Lang_C, Lang_CXX, Lang_OBJC, Lang_OBJCXX, Lang_Custom } Language; typedef enum { Flag_None = 0, Flag_g = 0x1, Flag_g3 = 0x2, Flag_O = 0x4, Flag_O2 = 0x8, Flag_Ol2 = 0x10 } Flag; CompileJob() : m_id(0) , m_dwarf_fission(false) , m_block_rewrite_includes(false) { setTargetPlatform(); } void setCompilerName(const std::string &name) { m_compiler_name = name; } std::string compilerName() const { return m_compiler_name; } void setLanguage(Language lg) { m_language = lg; } Language language() const { return m_language; } // Not used remotely. void setCompilerPathname(const std::string& pathname) { m_compiler_pathname = pathname; } // Not used remotely. // Use find_compiler(), as this may be empty. std::string compilerPathname() const { return m_compiler_pathname; } void setEnvironmentVersion(const std::string &ver) { m_environment_version = ver; } std::string environmentVersion() const { return m_environment_version; } unsigned int argumentFlags() const; void setFlags(const ArgumentsList &flags) { m_flags = flags; } std::list localFlags() const; std::list remoteFlags() const; std::list restFlags() const; std::list nonLocalFlags() const; std::list allFlags() const; void setInputFile(const std::string &file) { m_input_file = file; } std::string inputFile() const { return m_input_file; } void setOutputFile(const std::string &file) { m_output_file = file; } std::string outputFile() const { return m_output_file; } // Since protocol 41 this is just a shortcut saying that allFlags() contains "-gsplit-dwarf". 
void setDwarfFissionEnabled(bool flag) { m_dwarf_fission = flag; } bool dwarfFissionEnabled() const { return m_dwarf_fission; } void setWorkingDirectory(const std::string& dir) { m_working_directory = dir; } std::string workingDirectory() const { return m_working_directory; } void setJobID(unsigned int id) { m_id = id; } unsigned int jobID() const { return m_id; } void appendFlag(std::string arg, Argument_Type argumentType) { m_flags.append(arg, argumentType); } std::string targetPlatform() const { return m_target_platform; } void setTargetPlatform(const std::string &_target) { m_target_platform = _target; } // Not used remotely. void setBlockRewriteIncludes(bool flag) { m_block_rewrite_includes = flag; } // Not used remotely. bool blockRewriteIncludes() const { return m_block_rewrite_includes; } private: std::list flags(Argument_Type argumentType) const; void setTargetPlatform(); unsigned int m_id; Language m_language; std::string m_compiler_pathname; std::string m_compiler_name; std::string m_environment_version; ArgumentsList m_flags; std::string m_input_file, m_output_file; std::string m_working_directory; std::string m_target_platform; bool m_dwarf_fission; bool m_block_rewrite_includes; }; inline void appendList(std::list &list, const std::list &toadd) { // Cannot splice since toadd is a reference-to-const list.insert(list.end(), toadd.begin(), toadd.end()); } inline std::ostream &operator<<( std::ostream &output, const CompileJob::Language &l ) { switch (l) { case CompileJob::Lang_CXX: output << "C++"; break; case CompileJob::Lang_C: output << "C"; break; case CompileJob::Lang_Custom: output << ""; break; case CompileJob::Lang_OBJC: output << "ObjC"; break; case CompileJob::Lang_OBJCXX: output << "ObjC++"; break; } return output; } inline std::string concat_args(const std::list &args) { std::stringstream str; str << "'"; for (std::list::const_iterator it = args.begin(); it != args.end();) { str << *it++; if (it != args.end()) str << ", "; } return str.str() 
+ "'"; } #endif icecream-1.3.1/services/logging.cpp000066400000000000000000000133601361626760200172550ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. Copyright (c) 2004 Stephan Kulow This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include #include #include "logging.h" #include #include #include #include #ifdef __linux__ #include #endif using namespace std; int debug_level = Error; ostream *logfile_trace = 0; ostream *logfile_info = 0; ostream *logfile_warning = 0; ostream *logfile_error = 0; string logfile_prefix; volatile sig_atomic_t reset_debug_needed = 0; static ofstream logfile_null("/dev/null"); static ofstream logfile_file; static string logfile_filename; static void reset_debug_signal_handler(int); // Implementation of an iostream helper that allows redirecting output to a given file descriptor. // This seems to be the only portable way to do it. 
namespace { class ofdbuf : public streambuf { public: explicit ofdbuf( int fd ) : fd( fd ) {} virtual int_type overflow( int_type c ); virtual streamsize xsputn( const char* c, streamsize n ); private: int fd; }; ofdbuf::int_type ofdbuf::overflow( int_type c ) { if( c != EOF ) { char cc = c; if( write( fd, &cc, 1 ) != 1 ) return EOF; } return c; } streamsize ofdbuf::xsputn( const char* c, streamsize n ) { return write( fd, c, n ); } ostream* ccache_stream( int fd ) { int status = fcntl( fd, F_GETFL ); if( status < 0 || ( status & ( O_WRONLY | O_RDWR )) == 0 ) { // As logging is not set up yet, this will log to stderr. log_warning() << "UNCACHED_ERR_FD provides an invalid file descriptor, using stderr" << endl; return &cerr; // fd is not valid fd for writting } static ofdbuf buf( fd ); static ostream stream( &buf ); return &stream; } } // namespace void setup_debug(int level, const string &filename, const string &prefix) { debug_level = level; logfile_prefix = prefix; logfile_filename = filename; if (logfile_file.is_open()) { logfile_file.close(); } ostream *output = 0; if (filename.length()) { logfile_file.clear(); logfile_file.open(filename.c_str(), fstream::out | fstream::app); #ifdef __linux__ string fname = filename; if (fname[0] != '/') { char buf[PATH_MAX]; if (getcwd(buf, sizeof(buf))) { fname.insert(0, "/"); fname.insert(0, buf); } } setenv("SEGFAULT_OUTPUT_NAME", fname.c_str(), false); #endif output = &logfile_file; } else if( const char* ccache_err_fd = getenv( "UNCACHED_ERR_FD" )) { output = ccache_stream( atoi( ccache_err_fd )); } else { output = &cerr; } #ifdef __linux__ (void) dlopen("libSegFault.so", RTLD_NOW | RTLD_LOCAL); #endif if (debug_level >= Debug) { logfile_trace = output; } else { logfile_trace = &logfile_null; } if (debug_level >= Info) { logfile_info = output; } else { logfile_info = &logfile_null; } if (debug_level >= Warning) { logfile_warning = output; } else { logfile_warning = &logfile_null; } if (debug_level >= Error) { 
logfile_error = output; } else { logfile_error = &logfile_null; } signal(SIGHUP, reset_debug_signal_handler); } void reset_debug() { setup_debug(debug_level, logfile_filename); } void reset_debug_signal_handler(int) { reset_debug_needed = 1; } void reset_debug_if_needed() { if( reset_debug_needed ) { reset_debug_needed = 0; reset_debug(); if( const char* env = getenv( "ICECC_TEST_FLUSH_LOG_MARK" )) { ifstream markfile( env ); string mark; getline( markfile, mark ); if( !mark.empty()) { assert( logfile_trace != NULL ); *logfile_trace << "flush log mark: " << mark << endl; } } if( const char* env = getenv( "ICECC_TEST_LOG_HEADER" )) { ifstream markfile( env ); string header1, header2, header3; getline( markfile, header1 ); getline( markfile, header2 ); getline( markfile, header3 ); if( !header1.empty()) { assert( logfile_trace != NULL ); *logfile_trace << header1 << endl; *logfile_trace << header2 << endl; *logfile_trace << header3 << endl; } } } } void close_debug() { if (logfile_null.is_open()) { logfile_null.close(); } if (logfile_file.is_open()) { logfile_file.close(); } logfile_trace = logfile_info = logfile_warning = logfile_error = 0; } /* Flushes all ostreams used for debug messages. You need to call this before forking. */ void flush_debug() { if (logfile_null.is_open()) { logfile_null.flush(); } if (logfile_file.is_open()) { logfile_file.flush(); } } unsigned log_block::nesting; icecream-1.3.1/services/logging.h000066400000000000000000000102621361626760200167200ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. Copyright (c) 2004 Stephan Kulow This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef ICECREAM_LOGGING_H #define ICECREAM_LOGGING_H #include #include #include #include #include #include #include #include // Verbosity level, from least to most. enum VerbosityLevel { Error = 0, Warning = 1, Info = 2, Debug = 3, MaxVerboseLevel = Debug }; extern std::ostream *logfile_info; extern std::ostream *logfile_warning; extern std::ostream *logfile_error; extern std::ostream *logfile_trace; extern std::string logfile_prefix; void setup_debug(int level, const std::string &logfile = "", const std::string &prefix = ""); void reset_debug_if_needed(); // if we get SIGHUP, this will handle the reset void reset_debug(); void close_debug(); void flush_debug(); static inline std::ostream &output_date(std::ostream &os) { time_t t = time(0); struct tm *tmp = localtime(&t); char buf[64]; strftime(buf, sizeof(buf), "%Y-%m-%d %T: ", tmp); if (logfile_prefix.size()) { os << logfile_prefix; } os << "[" << getpid() << "] "; os << buf; return os; } static inline std::ostream &log_info() { if (!logfile_info) { return std::cerr; } return output_date(*logfile_info); } static inline std::ostream &log_warning() { if (!logfile_warning) { return std::cerr; } return output_date(*logfile_warning); } static inline std::ostream &log_error() { if (!logfile_error) { return std::cerr; } return output_date(*logfile_error); } static inline std::ostream &trace() { if (!logfile_trace) { return std::cerr; } return output_date(*logfile_trace); } static inline std::ostream & log_errno(const char *prefix, int tmp_errno) { return log_error() << prefix << "(Error: 
" << strerror(tmp_errno) << ")" << std::endl; } static inline std::ostream & log_perror(const char *prefix) { return log_errno(prefix, errno); } static inline std::ostream & log_perror(const std::string &prefix) { return log_perror(prefix.c_str()); } static inline std::ostream & log_errno_trace(const char *prefix, int tmp_errno) { return trace() << prefix << "(Error: " << strerror(tmp_errno) << ")" << std::endl; } static inline std::ostream & log_perror_trace(const char *prefix) { return log_errno_trace(prefix, errno); } class log_block { static unsigned nesting; timeval m_start; char *m_label; public: log_block(const char *label = 0) { for (unsigned i = 0; i < nesting; ++i) { log_info() << " "; } log_info() << "<" << (label ? label : "") << ">\n"; m_label = strdup(label ? label : ""); ++nesting; gettimeofday(&m_start, 0); } ~log_block() { timeval end; gettimeofday(&end, 0); --nesting; for (unsigned i = 0; i < nesting; ++i) { log_info() << " "; } log_info() << "\n"; free(m_label); } }; #include #include template std::string toString(const T &val) { std::ostringstream os; os << val; return os.str(); } #endif icecream-1.3.1/services/ncpus.c000066400000000000000000000076361361626760200164300ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* * distcc -- A simple distributed compiler system * * Copyright (C) 2003 by Martin Pool * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ /* Thanks to Dimitri PAPADOPOULOS-ORFANOS for researching many of the methods * in this file. */ #include "config.h" #include #include #include #include #include #include #include "ncpus.h" #include "exitcode.h" /** * Determine number of processors online. * * We will in the future use this to gauge how many concurrent tasks * should run on this machine. Obviously this is only very rough: the * correct number needs to take into account disk buffers, IO * bandwidth, other tasks, etc. **/ #if defined(__hpux__) || defined(__hpux) #include #include int dcc_ncpus(int *ncpus) { struct pst_dynamic psd; if (pstat_getdynamic(&psd, sizeof(psd), 1, 0) != -1) { *ncpus = psd.psd_proc_cnt; return 0; } rs_log_error("pstat_getdynamic failed: %s", strerror(errno)); *ncpus = 1; return EXIT_DISTCC_FAILED; } #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__APPLE__) || defined(__bsdi__) || defined(__DragonFly__) /* http://www.FreeBSD.org/cgi/man.cgi?query=sysctl&sektion=3&manpath=FreeBSD+4.6-stable http://www.openbsd.org/cgi-bin/man.cgi?query=sysctl&sektion=3&manpath=OpenBSD+Current http://www.tac.eu.org/cgi-bin/man-cgi?sysctl+3+NetBSD-current */ #include #include #include #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) #undef HAVE_RS_LOG_ERROR #else #define HAVE_RS_LOG_ERROR #endif int dcc_ncpus(int *ncpus) { int mib[2]; size_t len = sizeof(*ncpus); mib[0] = CTL_HW; mib[1] = HW_NCPU; if (sysctl(mib, 2, ncpus, &len, NULL, 0) == 0) { return 0; } #ifdef have_rs_log_error rs_log_error("sysctl(CTL_HW:HW_NCPU) failed: %s", strerror(errno)); #else fprintf(stderr, "sysctl(CTL_HW:HW_NCPU) failed: %s", strerror(errno)); #endif *ncpus = 1; return EXIT_DISTCC_FAILED; } #else /* every other system */ /* 
http://www.opengroup.org/onlinepubs/007904975/functions/sysconf.html http://docs.sun.com/?p=/doc/816-0213/6m6ne38dd&a=view http://www.tru64unix.compaq.com/docs/base_doc/DOCUMENTATION/V40G_HTML/MAN/MAN3/0629____.HTM http://techpubs.sgi.com/library/tpl/cgi-bin/getdoc.cgi?coll=0650&db=man&fname=/usr/share/catman/p_man/cat3c/sysconf.z */ int dcc_ncpus(int *ncpus) { #if defined(_SC_NPROCESSORS_ONLN) /* Linux, Solaris, Tru64, UnixWare 7, and Open UNIX 8 */ *ncpus = sysconf(_SC_NPROCESSORS_ONLN); #elif defined(_SC_NPROC_ONLN) /* IRIX */ *ncpus = sysconf(_SC_NPROC_ONLN); #else #warning "Please port this function" *ncpus = -1; /* unknown */ #endif if (*ncpus == -1) { *ncpus = 1; return EXIT_DISTCC_FAILED; } if (*ncpus == 0) { /* if there are no cpus, what are we running on? But it has * apparently been observed to happen on ARM Linux */ *ncpus = 1; } return 0; } #endif icecream-1.3.1/services/ncpus.h000066400000000000000000000021221361626760200164160ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. Copyright (C) 2002, 2003 by Martin Pool This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef _ICECREAM_NCPUS_H_ #define _ICECREAM_NCPUS_H_ #ifdef __cplusplus extern "C" { #endif int dcc_ncpus(int *); #ifdef __cplusplus } #endif #endif icecream-1.3.1/services/platform.cpp000066400000000000000000000042331361626760200174520ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. Copyright (c) 2006 Mirko Boehm This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ extern "C" { #include } #include "logging.h" #include "platform.h" std::string determine_platform_once() { using namespace std; string platform; struct utsname uname_buf; if (uname(&uname_buf)) { log_perror("uname call failed"); throw("determine_platform: cannot determine OS version and machine architecture"); // return platform; } string os = uname_buf.sysname; if (os == "Darwin") { const std::string release = uname_buf.release; const string::size_type pos = release.find('.'); if (pos == string::npos) { throw(std::string("determine_platform: Cannot determine Darwin release from release string \"") + release + "\""); } os += release.substr(0, pos); } if (os != "Linux") { platform = os + '_' + uname_buf.machine; } else { // Linux platform = uname_buf.machine; } while (true) { string::size_type pos = platform.find(" "); if (pos == string::npos) { break; } platform.erase(pos, 1); } return platform; } const std::string &determine_platform() { const static std::string platform(determine_platform_once()); return platform; } icecream-1.3.1/services/platform.h000066400000000000000000000022431361626760200171160ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. Copyright (c) 2006 Mirko Boehm This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef PLATFORM_H #define PLATFORM_H #include extern const std::string &determine_platform(); extern int ggc_min_expand_heuristic(unsigned int mem_limit); extern unsigned int ggc_min_heapsize_heuristic(unsigned int mem_limit); #endif icecream-1.3.1/services/tempfile.c000066400000000000000000000115641361626760200171000ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* * distcc -- A simple distributed compiler system * * Copyright (C) 2002, 2003 by Martin Pool * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ /* "More computing sins are committed in the name of * efficiency (without necessarily achieving it) than * for any other single reason - including blind * stupidity." -- W.A. Wulf */ #include "config.h" #include #include #include #include #include #include #include #include #include #include #include #include "tempfile.h" #include "exitcode.h" #ifndef _PATH_TMP #define _PATH_TMP "/tmp" #endif /** * Create a file inside the temporary directory and register it for * later cleanup, and return its name. 
* * The file will be reopened later, possibly in a child. But we know * that it exists with appropriately tight permissions. **/ int dcc_make_tmpnam(const char *prefix, const char *suffix, char **name_ret, int relative) { unsigned long random_bits; unsigned long tries = 0; size_t tmpname_length; char *tmpname; tmpname_length = strlen(_PATH_TMP) + 1 + strlen(prefix) + 1 + 8 + strlen(suffix) + 1; tmpname = malloc(tmpname_length); if (!tmpname) { return EXIT_OUT_OF_MEMORY; } random_bits = (unsigned long) getpid() << 16; { struct timeval tv; gettimeofday(&tv, NULL); random_bits ^= tv.tv_usec << 16; random_bits ^= tv.tv_sec; } #if 0 random_bits = 0; /* FOR TESTING */ #endif do { if (snprintf(tmpname, tmpname_length, "%s/%s_%08lx%s", (relative ? _PATH_TMP + 1 : _PATH_TMP), prefix, random_bits & 0xffffffffUL, suffix) == -1) { free(tmpname); return EXIT_OUT_OF_MEMORY; } /* Note that if the name already exists as a symlink, this * open call will fail. * * The permissions are tight because nobody but this process * and our children should do anything with it. */ int fd = open(tmpname, O_WRONLY | O_CREAT | O_EXCL, 0600); if (fd == -1) { /* Don't try getting a file too often. Safety net against endless loops. Probably just paranoia. */ if (++tries > 1000000) { free(tmpname); return EXIT_IO_ERROR; } /* Some errors won't change by changing the filename, e.g. ENOENT means that the directory where we try to create the file was removed from under us. Don't endlessly loop in that case. */ switch (errno) { case EACCES: case EEXIST: case EISDIR: case ELOOP: /* try again */ random_bits += 7777; /* fairly prime */ continue; } free(tmpname); return EXIT_IO_ERROR; } if (close(fd) == -1) { /* huh? 
*/ free(tmpname); return EXIT_IO_ERROR; } break; } while (1); *name_ret = tmpname; return 0; } int dcc_make_tmpdir(char **name_ret) { unsigned long tries = 0; char template[] = "icecc-XXXXXX"; size_t tmpname_length = strlen(_PATH_TMP) + 1 + strlen(template) + 1; char *tmpname = malloc(tmpname_length); if (!tmpname) { return EXIT_OUT_OF_MEMORY; } if (snprintf(tmpname, tmpname_length, "%s/%s", _PATH_TMP, template) == -1) { free(tmpname); return EXIT_OUT_OF_MEMORY; } do { if (!mkdtemp(tmpname)) { if (++tries > 1000000) { free(tmpname); return EXIT_IO_ERROR; } switch (errno) { case EACCES: case EEXIST: case EISDIR: case ELOOP: continue; } free(tmpname); return EXIT_IO_ERROR; } break; } while (1); *name_ret = tmpname; return 0; } icecream-1.3.1/services/tempfile.h000066400000000000000000000024041361626760200170760ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* * distcc -- A simple distributed compiler system * * Copyright (C) 2002, 2003 by Martin Pool * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef _ICECREAM_TEMPFILE_H_ #define _ICECREAM_TEMPFILE_H_ #ifdef __cplusplus extern "C" { #endif int dcc_make_tmpnam(const char *prefix, const char *suffix, char **name_ret, int relative); int dcc_make_tmpdir(char **name_ret); #ifdef __cplusplus } #endif #endif icecream-1.3.1/services/util.cpp000066400000000000000000000120641361626760200166040ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* * distcc -- A simple distributed compiler system * * Copyright (C) 2002, 2003 by Martin Pool * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "util.h" #include #include #include "comm.h" using namespace std; /** * Return a pointer to the basename of the file (everything after the * last slash.) If there is no slash, return the whole filename, * which is presumably in the current directory. **/ string find_basename(const string &sfile) { size_t index = sfile.rfind('/'); if (index == string::npos) { return sfile; } return sfile.substr(index + 1); } string find_prefix(const string &basename) { size_t index = basename.rfind('-'); if (index == string::npos) { return ""; } return basename.substr(0, index); } /* We support these compilers: - cc/c++ Only in this form, no prefix or suffix. 
It is usually a symlink to the real compiler, we will always detect it as !clang (FIXME?). - gcc/g++ Including a possible prefix or postfix separated by '-' (e.g. aarch64-suse-linux-gcc-6) - clang/clang++ Including a possible prefix or postfix separated by '-' (e.g. clang-8). */ bool is_c_compiler(const string& compiler) { string name = find_basename(compiler); if( name.find("++") != string::npos ) return false; return name.find("gcc") != string::npos || name.find("clang") != string::npos || name == "cc"; } bool is_cpp_compiler(const string& compiler) { string name = find_basename(compiler); return name.find("g++") != string::npos || name.find("clang++") != string::npos || name == "c++"; } string get_c_compiler(const string& compiler) { if(compiler.empty()) return compiler; size_t slash = compiler.rfind('/'); size_t pos = compiler.rfind( "++" ); if( pos == string::npos || pos < slash ) return compiler; pos = compiler.rfind( "clang++" ); if( pos != string::npos && pos >= slash + 1 ) return compiler.substr( 0, pos ) + "clang" + compiler.substr( pos + strlen( "clang++" )); pos = compiler.rfind( "g++" ); // g++ must go after clang++, it's a substring if( pos != string::npos && pos >= slash + 1 ) return compiler.substr( 0, pos ) + "gcc" + compiler.substr( pos + strlen( "g++" )); pos = compiler.rfind( "c++" ); if( pos != string::npos && pos == slash + 1 ) // only exactly "c++" return compiler.substr( 0, pos ) + "cc" + compiler.substr( pos + strlen( "c++" )); assert( false ); return string(); } string get_cpp_compiler(const string& compiler) { if(compiler.empty()) return compiler; size_t slash = compiler.rfind('/'); size_t pos = compiler.rfind( "++" ); if( pos != string::npos && pos >= slash + 1 ) return compiler; pos = compiler.rfind( "gcc" ); if( pos != string::npos && pos >= slash + 1 ) return compiler.substr( 0, pos ) + "g++" + compiler.substr( pos + strlen( "gcc" )); pos = compiler.rfind( "clang" ); if( pos != string::npos && pos >= slash + 1 ) return 
compiler.substr( 0, pos ) + "clang++" + compiler.substr( pos + strlen( "clang" )); pos = compiler.rfind( "cc" ); if( pos != string::npos && pos == slash + 1 ) // only exactly "cc" return compiler.substr( 0, pos ) + "c++" + compiler.substr( pos + strlen( "cc" )); assert( false ); return string(); } bool pollfd_is_set(const vector& pollfds, int fd, int flags, bool check_errors) { for( size_t i = 0; i < pollfds.size(); ++i ) { if( pollfds[ i ].fd == fd ) { if( pollfds[ i ].revents & flags ) return true; // Unlike with select(), where readfds gets set even on EOF, with poll() // POLLIN doesn't imply EOF and we need to check explicitly. if( check_errors && ( pollfds[ i ].revents & ( POLLERR | POLLHUP | POLLNVAL ))) return true; return false; } } return false; } string supported_features_to_string(unsigned int features) { string ret; if( features & NODE_FEATURE_ENV_XZ ) ret += " env_xz"; if( features & NODE_FEATURE_ENV_ZSTD ) ret += " env_zstd"; if( ret.empty()) ret = "--"; else ret.erase( 0, 1 ); // remove leading " " return ret; } icecream-1.3.1/services/util.h000066400000000000000000000037221361626760200162520ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ /* vim: set ts=4 sw=4 et tw=99: */ /* This file is part of Icecream. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef ICECREAM_UTIL_H #define ICECREAM_UTIL_H #include #include #include extern std::string find_basename(const std::string &sfile); extern std::string find_prefix(const std::string &basename); // These two detect if the given binary is a C/C++ compiler based on its name(gcc->C,clang++->C++) extern bool is_c_compiler(const std::string& compiler); extern bool is_cpp_compiler(const std::string& compiler); // These two return the given binary for the C or C++ compiler based on a compiler. // E.g. get_c_compiler("clang++-8") -> "clang-8". extern std::string get_c_compiler(const std::string& compiler); extern std::string get_cpp_compiler(const std::string& compiler); template inline T ignore_result(T x __attribute__((unused))) { return x; } // Returns true if _any_ of the given flags are set. // If check_errors is set, then errors such as POLLHUP are also considered as matching. bool pollfd_is_set(const std::vector& pollfds, int fd, int flags, bool check_errors = true); std::string supported_features_to_string(unsigned int features); #endif icecream-1.3.1/suse/000077500000000000000000000000001361626760200142545ustar00rootroot00000000000000icecream-1.3.1/suse/Makefile.am000066400000000000000000000035171361626760200163160ustar00rootroot00000000000000EXTRA_DIST = icecream.spec.in \ init.icecream \ logrotate \ SuSEfirewall.iceccd \ SuSEfirewall.scheduler \ sysconfig.icecream initdir=$(sysconfdir)/init.d templatesdir=$(localstatedir)/adm/fillup-templates icecreamcachedir=$(localstatedir)/cache/icecream logdir=$(localstatedir)/log/icecream logrotatedir=$(sysconfdir)/logrotate.d susefirewallservicesdir=$(sysconfdir)/sysconfig/SuSEfirewall2.d/services install: init.icecream logrotate SuSEfirewall.iceccd SuSEfirewall.scheduler sysconfig.icecream if test $(host_vendor) = "suse" -o $(host_vendor) = "ibm" ; then \ mkdir -p $(DESTDIR)/$(initdir) ;\ $(INSTALL) -m 755 $(srcdir)/init.icecream $(DESTDIR)$(initdir)/icecream ;\ ln -sf $(initdir)/icecream 
$(DESTDIR)$(sbindir)/rcicecream ;\ mkdir -p $(DESTDIR)$(templatesdir) ;\ $(INSTALL) -m 644 $(srcdir)/sysconfig.icecream $(DESTDIR)$(templatesdir)/sysconfig.icecream ;\ mkdir -p $(DESTDIR)$(icecreamcachedir) ;\ mkdir -p $(DESTDIR)$(logdir) ;\ $(INSTALL) -m 644 -D $(srcdir)/logrotate $(DESTDIR)$(logrotatedir)/icecream ;\ mkdir -p $(DESTDIR)$(susefirewallservicesdir) ;\ $(INSTALL) -m 644 $(srcdir)/SuSEfirewall.iceccd $(DESTDIR)$(susefirewallservicesdir)/iceccd ;\ $(INSTALL) -m 644 $(srcdir)/SuSEfirewall.scheduler $(DESTDIR)$(susefirewallservicesdir)/icecream-scheduler ;\ fi uninstall: if test $(host_vendor) = "suse"; then \ rm $(DESTDIR)$(initdir)/icecream ;\ rmdir $(DESTDIR)/$(initdir) ;\ rm $(DESTDIR)$(sbindir)/rcicecream ;\ rm $(DESTDIR)$(templatesdir)/sysconfig.icecream ;\ rmdir $(DESTDIR)$(templatesdir) ;\ rmdir $(DESTDIR)$(icecreamcachedir) ;\ rmdir $(DESTDIR)$(logdir) ;\ rm $(DESTDIR)$(logrotatedir)/icecream ;\ rm $(DESTDIR)$(susefirewallservicesdir)/iceccd ;\ rm $(DESTDIR)$(susefirewallservicesdir)/icecream-scheduler ;\ rmdir $(DESTDIR)$(susefirewallservicesdir) ;\ fi icecream-1.3.1/suse/SuSEfirewall.iceccd000066400000000000000000000011551361626760200177570ustar00rootroot00000000000000# Only the variables TCP, UDP, RPC, IP and BROADCAST are allowed. # More may be supported in the future. 
# # For a more detailed description of the individual variables see # the comments for FW_SERVICES_*_EXT in /etc/sysconfig/SuSEfirewall2 # ## Name: icecream daemon ## Description: opens socket for the icecream compilation daemon # space separated list of allowed TCP ports TCP="10245" # space separated list of allowed UDP ports UDP="" # space separated list of allowed RPC services RPC="" # space separated list of allowed IP protocols IP="" # space separated list of allowed UDP broadcast ports BROADCAST="" icecream-1.3.1/suse/SuSEfirewall.scheduler000066400000000000000000000011561361626760200205240ustar00rootroot00000000000000# Only the variables TCP, UDP, RPC, IP and BROADCAST are allowed. # More may be supported in the future. # # For a more detailed description of the individual variables see # the comments for FW_SERVICES_*_EXT in /etc/sysconfig/SuSEfirewall2 # ## Name: icecream scheduler ## Description: Opens ports for the icecream scheduler # space separated list of allowed TCP ports TCP="8765 8766" # space separated list of allowed UDP ports UDP="" # space separated list of allowed RPC services RPC="" # space separated list of allowed IP protocols IP="" # space separated list of allowed UDP broadcast ports BROADCAST="8765" icecream-1.3.1/suse/icecc-scheduler.xml000066400000000000000000000004301361626760200200150ustar00rootroot00000000000000 icecream scheduler Icecream distributed compilation scheduler. icecream-1.3.1/suse/iceccd.xml000066400000000000000000000003141361626760200162060ustar00rootroot00000000000000 icecream daemon Icecream distributed compilation scheduler. icecream-1.3.1/suse/icecream.spec.in000066400000000000000000000105001361626760200173010ustar00rootroot00000000000000# # spec file for package icecream # # Copyright (c) 2014 SUSE LINUX Products GmbH, Nuernberg, Germany. # # All modifications and additions to the file contributed by third parties # remain the property of their copyright owners, unless otherwise agreed # upon. 
The license for this file, and modifications and additions to the # file, is the same license as for the pristine package itself (unless the # license for the pristine package is not an Open Source License, in which # case the license is the MIT License). An "Open Source License" is a # license that conforms to the Open Source Definition (Version 1.9) # published by the Open Source Initiative. # Please submit bugfixes or comments via http://bugs.opensuse.org/ # # icecream 0 Name: icecream BuildRequires: gcc-c++ BuildRequires: lzo-devel %if 0%{?suse_version} > 1110 BuildRequires: libcap-ng-devel %endif Summary: For Distributed Compile in the Network License: GPL-2.0+ and LGPL-2.1+ Group: Development/Tools/Building Url: https://github.com/icecc/icecream Requires: /bin/tar Requires: /usr/bin/bzip2 %if 0%{?suse_version} PreReq: %fillup_prereq PreReq: %insserv_prereq %endif PreReq: /usr/sbin/useradd PreReq: /usr/sbin/groupadd Recommends: gcc-c++ Requires: logrotate Version: @VERSION@ Release: 0 Source0: ftp://ftp.suse.com/pub/projects/icecream/icecc-%{version}.tar.bz2 BuildRoot: %{_tmppath}/%{name}-%{version}-build %description Distributed compiler with a central scheduler to share build load %package -n libicecream-devel Summary: For Distributed Compile in the Network Group: Development/Tools/Building Requires: libstdc++-devel %if 0%{?suse_version} > 1110 Requires: libcap-ng-devel %endif %description -n libicecream-devel icecream is the next generation distcc. %package -n icecream-clang-wrappers Summary: Distributed Compile Wrappers for Clang Group: Development/Tools/Building Requires: clang Requires: icecream Supplements: packageand(icecream:clang) %description -n icecream-clang-wrappers Wrapper symlinks for clang/clang++ for icecream distributed building. 
%prep %setup -q -n icecc-%{version} # DO NOT ADD PATCHES without github reference %build export CFLAGS="$RPM_OPT_FLAGS" export CXXFLAGS="$RPM_OPT_FLAGS" %configure \ %if 0%{?suse_version} >= 1230 --enable-clang-rewrite-includes \ %endif --enable-clang-wrappers \ --libexecdir %_libexecdir make %{?jobs:-j %jobs} %install make DESTDIR=$RPM_BUILD_ROOT install # Create symlinks in /opt/icecream/bin on openSUSE <= 1220 %if 0%{?suse_version} <= 1220 mkdir -p $RPM_BUILD_ROOT/opt/icecream/bin for i in g++ gcc cc c++ clang++ clang; do ln -sf %_bindir/icecc $RPM_BUILD_ROOT/opt/icecream/bin/$i done %endif %preun %stop_on_removal icecream %pre /usr/sbin/groupadd -r icecream 2> /dev/null || : /usr/sbin/useradd -r -g icecream -s /bin/false -c "Icecream Daemon" -d /var/cache/icecream icecream 2> /dev/null || : %post # older icecream versions may have left some files owned by root:root in the cache rm -rf -- %_localstatedir/cache/icecream/* %if 0%{?suse_version} %{fillup_and_insserv -n icecream icecream} %endif %postun %restart_on_update icecream %{insserv_cleanup} %files %defattr(-,root,root) %doc COPYING README.md NEWS %config %_sysconfdir/logrotate.d/icecream %config %_sysconfdir/init.d/icecream %_bindir/icecc-create-env %_bindir/icecc %_bindir/icerun %_sbindir/icecc-scheduler %_sbindir/iceccd %_sbindir/rcicecream %_mandir/man*/* %_libexecdir/icecc %exclude %_libexecdir/icecc/bin/clang %exclude %_libexecdir/icecc/bin/clang++ %if 0%{?suse_version} %config %_sysconfdir/sysconfig/SuSEfirewall2.d/services/* %_localstatedir/adm/fillup-templates/sysconfig.icecream %if 0%{?suse_version} <= 1220 /opt/icecream %exclude /opt/icecream/bin/clang %exclude /opt/icecream/bin/clang++ %endif %endif %attr(-,icecream,icecream) %_localstatedir/cache/icecream %attr(-,icecream,icecream) %_localstatedir/log/icecream %files -n libicecream-devel %defattr(-,root,root) %_includedir/icecc %_libdir/libicecc.* %_libdir/pkgconfig/icecc.pc %files -n icecream-clang-wrappers %defattr(-,root,root) 
%_libexecdir/icecc/bin/clang %_libexecdir/icecc/bin/clang++ %if 0%{?suse_version} <= 1220 /opt/icecream/bin/clang /opt/icecream/bin/clang++ %endif %changelog icecream-1.3.1/suse/init.icecream000066400000000000000000000074071361626760200167210ustar00rootroot00000000000000#! /bin/sh # Copyright (c) 2003 SuSE Linux AG Nuernberg, Germany. # # Author: Stephan Kulow # # /etc/init.d/icecream # and its symbolic link # /usr/sbin/rcicecream # ### BEGIN INIT INFO # Provides: icecream # Required-Start: $network $syslog $remote_fs # Required-Stop: $network $remote_fs # Default-Start: 3 5 # Default-Stop: # Description: distributed compiler daemon # Short-Description: icecc ### END INIT INFO # Determine the base and follow a runlevel link name. base=${0##*/} link=${base#*[SK][0-9][0-9]} # Force execution if not called by a runlevel directory. test -x /usr/sbin/iceccd || exit 0 . /etc/rc.status . /etc/sysconfig/icecream rc_reset case "$1" in start) echo -n "Starting Distributed Compiler Daemon" netname= if test -n "$ICECREAM_NETNAME"; then netname="-n $ICECREAM_NETNAME" fi if test "$ICECREAM_RUN_SCHEDULER" == "yes"; then logfile="" if test -z "$ICECREAM_SCHEDULER_LOG_FILE"; then ICECREAM_SCHEDULER_LOG_FILE="/var/log/icecream/scheduler" fi logfile="-l $ICECREAM_SCHEDULER_LOG_FILE" : > $ICECREAM_SCHEDULER_LOG_FILE chown icecream:icecream $ICECREAM_SCHEDULER_LOG_FILE startproc -u icecream /usr/sbin/icecc-scheduler -d $logfile $netname fi logfile="" if test -n "$ICECREAM_LOG_FILE"; then touch $ICECREAM_LOG_FILE chown icecream:icecream $ICECREAM_LOG_FILE logfile="-l $ICECREAM_LOG_FILE" else touch /var/log/icecream/iceccd chown icecream:icecream /var/log/icecream/iceccd fi nice= if test -n "$ICECREAM_NICE_LEVEL"; then nice="--nice $ICECREAM_NICE_LEVEL" fi scheduler= if test -n "$ICECREAM_SCHEDULER_HOST"; then scheduler="-s $ICECREAM_SCHEDULER_HOST" fi noremote= if test "$ICECREAM_ALLOW_REMOTE" = "no" 2> /dev/null; then noremote="--no-remote" fi maxjobs= if test -n "$ICECREAM_MAX_JOBS"; 
then if test "$ICECREAM_MAX_JOBS" -eq 0 2> /dev/null; then maxjobs="-m 1" noremote="--no-remote" else maxjobs="-m $ICECREAM_MAX_JOBS" fi fi startproc /usr/sbin/iceccd -d $logfile $nice $scheduler $netname -u icecream -b "$ICECREAM_BASEDIR" $maxjobs $noremote rc_status -v ;; stop) echo -n "Shutting down Distributed Compiler Daemon" killproc -TERM /usr/sbin/iceccd if test "$ICECREAM_RUN_SCHEDULER" == "yes"; then killproc -TERM /usr/sbin/icecc-scheduler fi rc_status -v ;; restart) ## If first returns OK call the second, if first or ## second command fails, set echo return value. $0 stop; sleep 1 && $0 start rc_status ;; try-restart|condrestart) ## Do a restart only if the service was active before. ## Note: try-restart is now part of LSB (as of 1.9). ## RH has a similar command named condrestart. if test "$1" = "condrestart"; then echo "${attn} Use try-restart ${done}(LSB)${attn} rather than condrestart ${warn}(RH)${norm}" fi $0 status if test $? = 0; then $0 restart else rc_reset # Not running is not a failure. 
fi # Remember status and be quiet rc_status ;; reload|force-reload) if test "$ICECREAM_RUN_SCHEDULER" == "yes"; then killproc -HUP /usr/sbin/icecc-scheduler fi killproc -HUP /usr/sbin/iceccd rc_status ;; status) echo -n "Checking for Distributed Compiler Daemon: " checkproc /usr/sbin/iceccd rc_status -v ;; *) echo "Usage: $0 {start|stop|status|restart|try-restart|reload}" exit 1 ;; esac rc_exit icecream-1.3.1/suse/logrotate000066400000000000000000000004511361626760200161770ustar00rootroot00000000000000/var/log/icecream/iceccd /var/log/icecream/scheduler { compress dateext maxage 30 rotate 99 missingok notifempty size +4096k create 644 icecream icecream su icecream icecream sharedscripts postrotate /etc/init.d/icecream reload endscript } icecream-1.3.1/suse/sysconfig.icecream000066400000000000000000000036731361626760200177630ustar00rootroot00000000000000# ## Type: integer(0:19) ## Path: Applications/icecream ## Description: Icecream settings ## ServiceRestart: icecream ## Default: 5 # # Nice level of running compilers # ICECREAM_NICE_LEVEL="5" # ## Type: string ## Path: Applications/icecream ## Default: /var/log/icecream/iceccd # # icecream daemon log file # ICECREAM_LOG_FILE="/var/log/icecream/iceccd" # ## Type: string ## Path: Applications/icecream ## Default: no # # Start also the scheduler? # ICECREAM_RUN_SCHEDULER="no" # ## Type: string ## Path: Applications/icecream ## Default: /var/log/icecream/scheduler # # icecream scheduler log file # ICECREAM_SCHEDULER_LOG_FILE="/var/log/icecream/scheduler" # ## Type: string ## Path: Applications/icecream ## Default: "" # # Identification for the network the scheduler and daemon run on. # You can have several distinct icecream networks in the same LAN # for whatever reason. # ICECREAM_NETNAME="" # ## Type: string ## Path: Applications/icecream ## Default: "" # # If the daemon can't find the scheduler by broadcast (e.g. because # of a firewall) you can specify it. 
# ICECREAM_SCHEDULER_HOST="" # ## Type: integer ## Path: Applications/icecream ## Default: "" # # You can overwrite here the number of jobs to run in parallel. Per # default this depends on the number of (virtual) CPUs installed. # # Note: a value of "0" is actually interpreted as "1", however it # also sets ICECREAM_ALLOW_REMOTE="no". # ICECREAM_MAX_JOBS="" # ## Type: yesno ## Path: Applications/icecream ## Default: "yes" # # Specifies whether jobs submitted by other nodes are allowed to run on # this one. # ICECREAM_ALLOW_REMOTE="yes" # ## Type: string ## Path: Applications/icecream ## Default: "/var/cache/icecream" # # This is the directory where the icecream daemon stores the environments # it compiles in. In a big network this can grow quite a bit, so use some # path if your /tmp is small - but the user icecream has to write to it. # ICECREAM_BASEDIR="/var/cache/icecream" icecream-1.3.1/suse/update_rpm000077500000000000000000000013271361626760200163450ustar00rootroot00000000000000#! /bin/sh export PATH=/opt/kde3/bin:$PATH opwd=$PWD tdir=`mktemp -d` cd $tdir || exit 1 svn export svn+ssh://svn.kde.org/home/kde/trunk/icecream rm icecream/suse/update_rpm version=`grep INIT_AUTOMAKE icecream/configure.in | cut -d, -f2 | sed -e 's,[^"]*",,; s,".*,,'` tar -cvj -f $opwd/icecc-$version.tar.bz2 icecream sed -e "s,Version:.*,Version:$version," icecream/suse/icecream.spec.in > $opwd/icecream.spec mkdir mans omans=$PWD/mans cd icecream/doc for i in *.docbook; do output=${i/man-/} output=${output/.docbook/} meinproc4 --stylesheet /opt/kde3/share/apps/ksgmltools2/customization/kde-man.xsl $i && mv manpage.troff $omans/$output done cd ../.. 
tar cvjf $opwd/icecream-manpages.tar.bz2 mans cd / rm -rf $tdir icecream-1.3.1/tests/000077500000000000000000000000001361626760200144375ustar00rootroot00000000000000icecream-1.3.1/tests/Makefile.am000066400000000000000000000015431361626760200164760ustar00rootroot00000000000000# By default be lenient and don't fail if some tests are skipped. # Strict mode will fail in such case. test: test-full test-strict: $(MAKE) test STRICT=1 test-prepare: if test -n "$(VALGRIND)"; then \ true; \ elif test -x /sbin/setcap; then \ sudo /sbin/setcap cap_sys_chroot+ep ${sbindir}/iceccd ; \ elif test -x /usr/sbin/setcap; then \ sudo /usr/sbin/setcap cap_sys_chroot+ep ${sbindir}/iceccd ; \ elif command -v filecap >/dev/null 2>/dev/null; then \ sudo filecap ${sbindir}/iceccd sys_chroot ; \ else \ true ; \ fi test-full: test-prepare $(MAKE) test-run test-run: test-setup.sh results=`realpath -s ${builddir}/results` && builddir2=`realpath -s ${builddir}` && cd ${srcdir} && /bin/bash test.sh ${prefix} $$results --builddir=$$builddir2 --strict=$(STRICT) --valgrind=$(VALGRIND) check_SCRIPTS = test.sh test-setup.sh icecream-1.3.1/tests/Makefile.test000066400000000000000000000005171361626760200170600ustar00rootroot00000000000000all: maketest SOURCES = make1.cpp make2.cpp make3.cpp make4.cpp make5.cpp make6.cpp make7.cpp make8.cpp make9.cpp make10.cpp OBJS = $(patsubst %.cpp,$(OUTDIR)/%.o,$(SOURCES)) maketest: $(OUTDIR)/maketest $(OUTDIR)/maketest: $(OBJS) $(CXX) -o $@ $^ $(OUTDIR)/%.o: %.cpp $(CXX) -o $@ -c $^ clean: rm -f $(OBJS) $(OUTDIR)/maketest icecream-1.3.1/tests/README000066400000000000000000000106201361626760200153160ustar00rootroot00000000000000Testuite for Icecream ===================== In this directory are tests for Icecream. These tests run Icecream binaries and test them in practice in a testing setup. This makes these tests a bit slower than unit tests, but it allows testing functionality that would be hard or impossible to test using unit tests. 
How to run the tests: ===================== The tests do not conflict with system Icecream, so it is possible to compile and test while system Icecream is active. Since Icecream hardcodes locations of some binaries, it is necessary to install Icecream for the tests (really install, no $DESTDIR). It is however not necessary to do a system install, simply install somewhere. If you want to test also remote builds (recommended), it is necessary for the install to have the capability to chroot. This is most easily done by giving the CAP_SYS_CHROOT Linux capability to the iceccd binary (which is done is triggered to be done automatically by "make test" using sudo). An example of building Icecream and testing it: ./configure --prefix=$HOME/iceinstall make test The 'make test' step roughly performs: make make install sudo /sbin/setcap cap_sys_chroot+ep ${prefix}/sbin/iceccd cd tests ./test.sh ${prefix} ${builddir}/tests/results --builddir=${builddir}/tests This will build and install Icecream into $HOME/iceinstall prefix and all logs and temporary files will be in ${builddir}/tests/results. It is also possible to use 'filecap' from cap-ng instead of 'setcap', but 'filecap' does not report failures. Note that the limitations (real install needed in a helper location, chroot capability) mean that these tests are not suitable to be run as a part of building binary packages, they are meant to be run by developers. If a test fails, all relevant files should be in the log and temporary directory passed to test.sh . The {daemon}.log files contain logs relevant only to the last run test, files {daemon}_all.log include complete logs (up to but not including the failed test, if any). Exit status of test.sh is 0 for all tests passing, 1 for all tests passing but some being skipped and 2 for errors. If you want to use icemon for the tests, use 'ICECC_SCHEDULER=localhost:8767 icemon'. Note that icemon needs to be built with recent (Feb 2014) libicecc library for this to work. 
Valgrind: ========= It is possible to pass --valgrind to test.sh in order to test (almost) everything with Valgrind. Valgrind may be also automatically invoked using: make test VALGRIND=1 Note that in order to be able to chroot, it is necessary to give the capability to the actual Valgrind binary instead of iceccd (this is because this binary is what actually runs as the process): sudo /sbin/setcap cap_sys_chroot+ep /usr/lib/valgrind/memcheck-x86-linux Do not forget to reset it back when done: sudo /sbin/setcap -r /usr/lib/valgrind/memcheck-x86-linux Adding new tests: ================= The test.sh script is hopefully straightforward and commented enough. If you want to test handling of specific flags, see the run_ice function. If you want more complex/specific tests, you can also check logs using the various check_log_* functions. Common log messages: (icecc.log) "Have to use host 127.0.0.1:" - will be built remotely (one one of the remote hosts) (icecc.log) "Have to use host 127.0.0.1:10246" - will be built remotely on remoteice1 (icecc.log) "Have to use host 127.0.0.1:10247" - will be built remotely on remoteice1 (icecc.log) "building myself, but telling localhost" - job could be built remotely, but the scheduler selected the local daemon for the build (icecc.log) "" - the job is forced to be built locally (cannot be built remotely) Internals: ========== For most tests, there is a new scheduler run and three daemons, one (localice) that serves as the "local" daemon that icecc uses, and two more (remoteice1/2) that serve as "remote" daemons that which remote jobs will be distributed. In order not to interfere with system Icecream, they all use different ports and a special socket is used for communication with localice daemon. All daemons are set with 2 jobs max, in order to have reliable results when testing paralellism. ICECC_PREFERRED_HOST is used when forcing whether a job should be built locally or remotely. 
The localice daemon also has ICECC_TEST_REMOTEBUILD=1 to avoid building locally when it in fact should forward to "remote" daemons even though they are technically local. icecream-1.3.1/tests/assembler.args000066400000000000000000000000201361626760200172620ustar00rootroot00000000000000-al=listing.txt icecream-1.3.1/tests/brokenenvfile.tar.gz000066400000000000000000000240001361626760200204130ustar00rootroot00000000000000 2016-11-30T19:26:45.213722795P0D1LibreOfficeDev/6.4.0.0.alpha0$Linux_X86_64 LibreOffice_project/d34ca00c3cb9f5acf1f5080155129229ceacea5b 0 0 120104 16192 view1 2 8 0 0 0 0 2 0 0 0 0 0 75 60 true false 0 16 0 0 0 0 2 0 0 0 0 0 75 60 true false Sheet2 1859 0 75 60 false true true true 12632256 true true true true true false false 1270 1270 1 1 true false 7 true true true false false false true true #include #include #include #include #include #include #include using namespace clang; using namespace std; namespace IcecreamTest { void report( const CompilerInstance& compiler, DiagnosticsEngine::Level level, const char* txt, SourceLocation loc = SourceLocation()); class Action : public PluginASTAction { public: #if (CLANG_VERSION_MAJOR == 3 && CLANG_VERSION_MINOR >= 6) || CLANG_VERSION_MAJOR > 3 virtual std::unique_ptr CreateASTConsumer( CompilerInstance& compiler, StringRef infile ); #else virtual ASTConsumer* CreateASTConsumer( CompilerInstance& compiler, StringRef infile ); #endif virtual bool ParseArgs( const CompilerInstance& compiler, const vector< string >& args ); private: vector< string > _args; }; class Consumer : public RecursiveASTVisitor< Consumer >, public ASTConsumer { public: Consumer( CompilerInstance& compiler, const vector< string >& args ); bool VisitReturnStmt( const ReturnStmt* returnstmt ); virtual void HandleTranslationUnit( ASTContext& context ); private: CompilerInstance& compiler; }; #if (CLANG_VERSION_MAJOR == 3 && CLANG_VERSION_MINOR >= 6) || CLANG_VERSION_MAJOR > 3 std::unique_ptr Action::CreateASTConsumer( CompilerInstance& 
compiler, StringRef ) { return unique_ptr( new Consumer( compiler, _args )); } #else ASTConsumer* Action::CreateASTConsumer( CompilerInstance& compiler, StringRef ) { return new Consumer( compiler, _args ); } #endif bool Action::ParseArgs( const CompilerInstance& /*compiler*/, const vector< string >& args ) { _args = args; return true; } Consumer::Consumer( CompilerInstance& compiler, const vector< string >& args ) : compiler( compiler ) { // Check that the file passed as argument really exists (was included in ICECC_EXTRAFILES). if( args.size() != 1 ) { report( compiler, DiagnosticsEngine::Error, "Incorrect number of arguments" ); return; } ifstream is( args[ 0 ].c_str()); if( !is.good()) report( compiler, DiagnosticsEngine::Error, "Extra file open error" ); else { char buf[ 20 ]; is.getline( buf, 20 ); if( strcmp( buf, "testfile" ) != 0 ) report( compiler, DiagnosticsEngine::Error, "File contents do not match" ); else report( compiler, DiagnosticsEngine::Warning, "Extra file check successful" ); } } void Consumer::HandleTranslationUnit( ASTContext& context ) { if( context.getDiagnostics().hasErrorOccurred()) return; TraverseDecl( compiler.getASTContext().getTranslationUnitDecl()); } bool Consumer::VisitReturnStmt( const ReturnStmt* returnstmt ) { // Get the expression in the return statement (see ReturnStmt API docs). const Expr* expression = returnstmt->getRetValue(); if( expression == NULL ) return true; // plain 'return;' without expression // Check if the expression is a bool literal (Clang uses dyn_cast<> instead of dynamic_cast<>). if( const CXXBoolLiteralExpr* boolliteral = dyn_cast< CXXBoolLiteralExpr >( expression )) { // It is. 
if( boolliteral->getValue() == false ) report( compiler, DiagnosticsEngine::Warning, "Icecream plugin found return false", #if CLANG_VERSION_MAJOR >= 8 returnstmt->getBeginLoc()); #else returnstmt->getLocStart()); #endif } return true; } void report( const CompilerInstance& compiler, DiagnosticsEngine::Level level, const char* txt, SourceLocation loc ) { DiagnosticsEngine& engine = compiler.getDiagnostics(); #if (CLANG_VERSION_MAJOR == 3 && CLANG_VERSION_MINOR >= 5) || CLANG_VERSION_MAJOR > 3 if( loc.isValid()) engine.Report( loc, engine.getDiagnosticIDs()->getCustomDiagID( static_cast< DiagnosticIDs::Level >( level ), txt )); else engine.Report( engine.getDiagnosticIDs()->getCustomDiagID( static_cast< DiagnosticIDs::Level >( level ), txt )); #else if( loc.isValid()) engine.Report( loc, engine.getCustomDiagID( level, txt )); else engine.Report( engine.getCustomDiagID( level, txt )); #endif } } // namespace static FrontendPluginRegistry::Add< IcecreamTest::Action > X( "icecreamtest", "Icecream test plugin" ); icecream-1.3.1/tests/clangpluginextra.txt000066400000000000000000000000111361626760200205370ustar00rootroot00000000000000testfile icecream-1.3.1/tests/clangplugintest.cpp000066400000000000000000000000511361626760200203420ustar00rootroot00000000000000bool foo() { return false; } icecream-1.3.1/tests/countermacro.c000066400000000000000000000001111361626760200172750ustar00rootroot00000000000000int f() { #if __COUNTER__ > 1 return 1; #else return 2; #endif } icecream-1.3.1/tests/debug-gdb.txt000066400000000000000000000000701361626760200170150ustar00rootroot00000000000000start list main,main step print debugObject.debugMember icecream-1.3.1/tests/debug.cpp000066400000000000000000000003241361626760200162300ustar00rootroot00000000000000struct DebugStruct { int debugMember; }; int main() { // first line of main function static DebugStruct debugObject; debugObject.debugMember = 1243; return debugObject.debugMember; } 
icecream-1.3.1/tests/debug/000077500000000000000000000000001361626760200155255ustar00rootroot00000000000000icecream-1.3.1/tests/debug/debug2.cpp000066400000000000000000000003241361626760200174000ustar00rootroot00000000000000struct DebugStruct { int debugMember; }; int main() { // first line of main function static DebugStruct debugObject; debugObject.debugMember = 1423; return debugObject.debugMember; } icecream-1.3.1/tests/fsanitize-blacklist.txt000066400000000000000000000000361361626760200211410ustar00rootroot00000000000000fun:*test_fsanitize_function* icecream-1.3.1/tests/fsanitize.cpp000066400000000000000000000003011361626760200171310ustar00rootroot00000000000000void test_fsanitize_function() { int* arr = new int[10]; delete[] arr; int r = arr[ 0 ]; (void)r; } int main() { test_fsanitize_function(); return 0; } icecream-1.3.1/tests/icerun-test.sh000077500000000000000000000003361361626760200172420ustar00rootroot00000000000000#! /bin/bash dir="$1" num="$2" test -z "$dir" -o -z "$num" && exit 1 touch "$dir"/running$num if test -z "$ICERUN_TEST_VALGRIND"; then sleep 0.2 else sleep 1 fi rm "$dir"/running$num touch "$dir"/done$num exit 0 icecream-1.3.1/tests/includes-without.cpp000066400000000000000000000002641361626760200204540ustar00rootroot00000000000000// #include "includes.h" - will be done using -include includes.h #include void f() { std::cout << std::endl; // use something included only by includes.h } icecream-1.3.1/tests/includes.cpp000066400000000000000000000002101361626760200167420ustar00rootroot00000000000000#include "includes.h" #include void f() { std::cout << std::endl; // use something included only by includes.h } icecream-1.3.1/tests/includes.h000066400000000000000000000001271361626760200164160ustar00rootroot00000000000000#ifndef INCLUDES_H #define INCLUDES_H #include #include #endif icecream-1.3.1/tests/macros.s000066400000000000000000000000311361626760200161010ustar00rootroot00000000000000 .macro asm_macro .endm 
icecream-1.3.1/tests/make.h000066400000000000000000000006161361626760200155300ustar00rootroot00000000000000#ifndef MAKE_H #define MAKE_H // some includes that'll make the compile take at least a little time #include #include #include #include #include // This is to prevent scheduler from ignoring stats for the compile job, // as jobs with too small .o result are ignored in add_job_stats(). static volatile const int largedata[ 16384 ] = { 1, 2 }; #endif icecream-1.3.1/tests/make1.cpp000066400000000000000000000001221361626760200161340ustar00rootroot00000000000000#include "make.h" void make1() { } int main() { return 0; } icecream-1.3.1/tests/make10.cpp000066400000000000000000000000551361626760200162210ustar00rootroot00000000000000#include "make.h" void make10() { } icecream-1.3.1/tests/make2.cpp000066400000000000000000000000541361626760200161410ustar00rootroot00000000000000#include "make.h" void make2() { } icecream-1.3.1/tests/make3.cpp000066400000000000000000000000541361626760200161420ustar00rootroot00000000000000#include "make.h" void make3() { } icecream-1.3.1/tests/make4.cpp000066400000000000000000000000541361626760200161430ustar00rootroot00000000000000#include "make.h" void make4() { } icecream-1.3.1/tests/make5.cpp000066400000000000000000000000541361626760200161440ustar00rootroot00000000000000#include "make.h" void make5() { } icecream-1.3.1/tests/make6.cpp000066400000000000000000000000541361626760200161450ustar00rootroot00000000000000#include "make.h" void make6() { } icecream-1.3.1/tests/make7.cpp000066400000000000000000000000541361626760200161460ustar00rootroot00000000000000#include "make.h" void make7() { } icecream-1.3.1/tests/make8.cpp000066400000000000000000000000541361626760200161470ustar00rootroot00000000000000#include "make.h" void make8() { } icecream-1.3.1/tests/make9.cpp000066400000000000000000000000541361626760200161500ustar00rootroot00000000000000#include "make.h" void make9() { } 
icecream-1.3.1/tests/messages.cpp000066400000000000000000000001551361626760200167530ustar00rootroot00000000000000#include "messages.h" void f() { int unused; // this should give a warning about being unused } icecream-1.3.1/tests/messages.h000066400000000000000000000002121361626760200164120ustar00rootroot00000000000000#ifndef MESSAGES_H #define MESSAGES_H void g() { int unused; // this should also give a warning about being unused } #endif icecream-1.3.1/tests/plain000066400000000000000000000001501361626760200154610ustar00rootroot00000000000000// C++ feature (so that C++ mode is required) template< typename T > T func( T ); void f() { } icecream-1.3.1/tests/plain.c000066400000000000000000000000251361626760200157030ustar00rootroot00000000000000void f() { } icecream-1.3.1/tests/plain.cpp000066400000000000000000000000251361626760200162430ustar00rootroot00000000000000void f() { } icecream-1.3.1/tests/rawliterals.cpp000066400000000000000000000001211361626760200174660ustar00rootroot00000000000000int main() { const auto vert = R"( #hello)"; (void)vert; return 0; } icecream-1.3.1/tests/recursive_clang++000077500000000000000000000001061361626760200176630ustar00rootroot00000000000000#! /bin/sh # Indirectly invoke icecc again. exec icecc clang++ "$@" icecream-1.3.1/tests/recursive_g++000077500000000000000000000001021361626760200170210ustar00rootroot00000000000000#! /bin/sh # Indirectly invoke icecc again. exec icecc g++ "$@" icecream-1.3.1/tests/syntaxerror.cpp000066400000000000000000000000771361626760200175470ustar00rootroot00000000000000void f() { catch throw break auto; nonsense; } icecream-1.3.1/tests/test-setup.sh.in000066400000000000000000000002041361626760200175110ustar00rootroot00000000000000# Sourced by test.sh , not to be used directly. # Needed for locating our compiler wrapper symlinks. pkglibexecdir=@PKGLIBEXECDIR@ icecream-1.3.1/tests/test.sh000077500000000000000000002701371361626760200157670ustar00rootroot00000000000000#! 
/bin/bash prefix="$1" testdir="$2" shift shift valgrind= builddir=. strict= usage() { echo Usage: "$0 [--builddir=dir] [--valgrind[=command]] [--strict[=value]]" exit 3 } get_default_valgrind_flags() { default_valgrind_args="--num-callers=50 --suppressions=valgrind_suppressions --log-file=$testdir/valgrind-%p.log" # Check if valgrind knows --error-markers, which makes it simpler to find out if log contains any error. valgrind_error_markers="--error-marker2s=ICEERRORBEGIN,ICEERROREND" valgrind $valgrind_error_markers true 2>/dev/null if test $? -eq 0; then default_valgrind_args="$default_valgrind_args $valgrind_error_markers" else valgrind_error_markers= fi } while test -n "$1"; do case "$1" in --valgrind|--valgrind=1) get_default_valgrind_flags valgrind="valgrind --leak-check=no $default_valgrind_args --" ;; --valgrind=) # when invoked from Makefile, no valgrind ;; --valgrind=*) get_default_valgrind_flags valgrind="${1#--valgrind=} $default_valgrind_args --" ;; --builddir=*) builddir="${1#--builddir=}" ;; --strict) strict=1 ;; --strict=*) strict="${1#--strict=}" if test "$strict" = "0"; then strict= fi ;; *) usage ;; esac shift done . $builddir/test-setup.sh if test $? -ne 0; then echo Error sourcing test-setup.sh file, aborting. exit 4 fi icecc="${prefix}/bin/icecc" iceccd="${prefix}/sbin/iceccd" icecc_scheduler="${prefix}/sbin/icecc-scheduler" icecc_create_env="${prefix}/bin/icecc-create-env" icecc_test_env="${prefix}/bin/icecc-test-env" icerun="${prefix}/bin/icerun" wrapperdir="${pkglibexecdir}/bin" netname="icecctestnetname$$" protocolversion=$(grep '#define PROTOCOL_VERSION ' ../services/comm.h | sed 's/#define PROTOCOL_VERSION //') schedulerprotocolversion=$protocolversion daemonprotocolversion=$protocolversion # For testing compatibility of different versions: # The only 2 communications are client<->daemon and daemon<->scheduler. 
# So it should be necessary to test only these settings: # - other scheduler # - other daemon # - other client # Change the settings below to enable such runs (false->true). # Note that older versions may not pass successfully all tests. Either comment out what does not work # (if it's not an actual incompatibility problem) or use the test.sh script from the older version # to test with a current version. OTHERVERSIONPREFIX=/usr if false; then icecc="$OTHERVERSIONPREFIX"/bin/icecc icerun="$OTHERVERSIONPREFIX"/bin/icerun if test -d "$OTHERVERSIONPREFIX"/lib/icecc/bin; then wrapperdir="$OTHERVERSIONPREFIX"/lib/icecc/bin elif test -d "$OTHERVERSIONPREFIX"/lib64/icecc/bin; then wrapperdir="$OTHERVERSIONPREFIX"/lib64/icecc/bin else Cannot find wrapper dir for "$OTHERVERSIONPREFIX" . exit 1 fi fi if false; then # Make sure the daemon is capable of doing chroot (see e.g. how Makefile.am sets it in test-prepare). iceccd="$OTHERVERSIONPREFIX"/sbin/iceccd daemonprotocolversion=$(grep '#define PROTOCOL_VERSION ' "$OTHERVERSIONPREFIX"/include/icecc/comm.h | sed 's/#define PROTOCOL_VERSION //') if test -z "$daemonprotocolversion"; then Cannot find "$OTHERVERSIONPREFIX"/include/icecc/comm.h . exit 1 fi fi if false; then icecc_scheduler="$OTHERVERSIONPREFIX"/sbin/icecc-scheduler schedulerprotocolversion=$(grep '#define PROTOCOL_VERSION ' "$OTHERVERSIONPREFIX"/include/icecc/comm.h | sed 's/#define PROTOCOL_VERSION //') if test -z "$schedulerprotocolversion"; then Cannot find "$OTHERVERSIONPREFIX"/include/icecc/comm.h . exit 1 fi # If the scheduler is older than 1.3 (protocol 42), then it reported the lowest # of the protocol version of the scheduler and the daemon, so possibly set it here as well. #daemonprotocolversion=$schedulerprotocolversion fi if test -z "$prefix" -o ! 
-x "$icecc"; then usage fi # Remote compiler pretty much runs with this setting (and there are no locale files in the chroot anyway), # so force it also locally, otherwise comparing stderr would easily fail because of locale differences (different quotes). # Until somebody complains and has a good justification for the effort, don't bother with actually doing # anything about this for real. export LC_ALL=C unset MAKEFLAGS unset ICECC unset ICECC_VERSION unset ICECC_DEBUG unset ICECC_LOGFILE unset ICECC_REPEAT_RATE unset ICECC_PREFERRED_HOST unset ICECC_CC unset ICECC_CXX unset ICECC_REMOTE_CPP unset ICECC_CLANG_REMOTE_CPP unset ICECC_IGNORE_UNVERIFIED unset ICECC_EXTRAFILES unset ICECC_COLOR_DIAGNOSTICS unset ICECC_CARET_WORKAROUND # Make the tests faster. export ICECC_ENV_COMPRESSION=none mkdir -p "$testdir" skipped_tests= chroot_disabled= flush_log_mark=1 last_reset_log_mark= last_section_log_mark= check_compilers() { if test -z "$TESTCC"; then if cc -v >/dev/null 2>/dev/null; then TESTCC=/usr/bin/cc elif gcc -v >/dev/null 2>/dev/null; then TESTCC=/usr/bin/gcc elif clang -v >/dev/null 2>/dev/null; then TESTCC=/usr/bin/clang else echo Cannot find gcc or clang, explicitly set TESTCC. exit 5 fi fi if test -z "$TESTCXX"; then if c++ -v >/dev/null 2>/dev/null; then TESTCXX=/usr/bin/c++ elif g++ -v >/dev/null 2>/dev/null; then TESTCXX=/usr/bin/g++ elif clang -v >/dev/null 2>/dev/null; then TESTCXX=/usr/bin/clang++ else echo Cannot find g++ or clang++, explicitly set TESTCXX. exit 5 fi fi using_gcc= if $TESTCC -v 2>&1 | grep ^gcc >/dev/null; then using_gcc=1 fi using_clang= if $TESTCC --version | grep clang >/dev/null; then using_clang=1 fi echo Using C compiler: $TESTCC $TESTCC --version if test $? -ne 0; then echo Compiler $TESTCC failed. exit 5 fi echo Using C++ compiler: $TESTCXX $TESTCXX --version if test $? -ne 0; then echo Compiler $TESTCXX failed. 
exit 5 fi if test -z "$using_gcc" -a -z "$using_clang"; then echo "Unknown compiler type (neither GCC nor Clang), aborting." exit 5 fi echo } abort_tests() { dump_logs exit 2 } start_iceccd() { name=$1 shift ICECC_TEST_SOCKET="$testdir"/socket-${name} ICECC_SCHEDULER=:8767 ICECC_TESTS=1 ICECC_TEST_SCHEDULER_PORTS=8767:8769 \ ICECC_TEST_FLUSH_LOG_MARK="$testdir"/flush_log_mark.txt ICECC_TEST_LOG_HEADER="$testdir"/log_header.txt \ $valgrind "${iceccd}" -b "$testdir"/envs-${name} -l "$testdir"/${name}.log -n ${netname} -N ${name} -v -v -v "$@" & pid=$! eval ${name}_pid=${pid} echo ${pid} > "$testdir"/${name}.pid } kill_daemon() { daemon=$1 pid=${daemon}_pid if test -n "${!pid}"; then kill "${!pid}" 2>/dev/null if test $check_type -eq 1; then wait ${!pid} exitcode=$? if test $exitcode -ne 0; then echo Daemon $daemon exited with code $exitcode. stop_ice 0 abort_tests fi fi fi rm -f "$testdir"/$daemon.pid rm -rf "$testdir"/envs-${daemon} rm -f "$testdir"/socket-${daemon} eval ${pid}= } start_ice() { ICECC_TESTS=1 ICECC_TEST_SCHEDULER_PORTS=8767:8769 \ ICECC_TEST_FLUSH_LOG_MARK="$testdir"/flush_log_mark.txt ICECC_TEST_LOG_HEADER="$testdir"/log_header.txt \ $valgrind "${icecc_scheduler}" -p 8767 -l "$testdir"/scheduler.log -n ${netname} -v -v -v & scheduler_pid=$! echo $scheduler_pid > "$testdir"/scheduler.pid start_iceccd localice --no-remote -m 2 start_iceccd remoteice1 -p 10246 -m 2 start_iceccd remoteice2 -p 10247 -m 2 wait_for_ice_startup_complete scheduler localice remoteice1 remoteice2 flush_logs cat_log_last_mark remoteice1 | grep -q "Cannot use chroot, no remote jobs accepted." && chroot_disabled=1 cat_log_last_mark remoteice2 | grep -q "Cannot use chroot, no remote jobs accepted." && chroot_disabled=1 if test -n "$chroot_disabled"; then skipped_tests="$skipped_tests CHROOT" echo Chroot not available, remote tests will be skipped. 
fi } # start only local daemon, no scheduler start_only_daemon() { ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_SCHEDULER=:8767 ICECC_TESTS=1 ICECC_TEST_SCHEDULER_PORTS=8767:8769 \ ICECC_TEST_FLUSH_LOG_MARK="$testdir"/flush_log_mark.txt ICECC_TEST_LOG_HEADER="$testdir"/log_header.txt \ $valgrind "${iceccd}" --no-remote -b "$testdir"/envs-localice -l "$testdir"/localice.log -n ${netname} -N localice -m 2 -v -v -v & localice_pid=$! echo $localice_pid > "$testdir"/localice.pid wait_for_ice_startup_complete "noscheduler" localice } stop_ice() { # 0 - do not check # 1 - check normally # 2 - do not check, do not wait (wait would fail, started by previous shell) check_type="$1" if test $check_type -eq 2; then scheduler_pid=$(cat "$testdir"/scheduler.pid 2>/dev/null) localice_pid=$(cat "$testdir"/localice.pid 2>/dev/null) remoteice1_pid=$(cat "$testdir"/remoteice1.pid 2>/dev/null) remoteice2_pid=$(cat "$testdir"/remoteice2.pid 2>/dev/null) fi if test $check_type -eq 1; then if test -n "$scheduler_pid"; then if ! kill -0 $scheduler_pid; then echo Scheduler no longer running. stop_ice 0 abort_tests fi fi for daemon in localice remoteice1 remoteice2; do pid=${daemon}_pid if ! kill -0 ${!pid}; then echo Daemon $daemon no longer running. stop_ice 0 abort_tests fi done fi for daemon in localice remoteice1 remoteice2; do kill_daemon $daemon done if test -n "$scheduler_pid"; then kill "$scheduler_pid" 2>/dev/null if test $check_type -eq 1; then wait $scheduler_pid exitcode=$? if test $exitcode -ne 0; then echo Scheduler exited with code $exitcode. stop_ice 0 abort_tests fi fi scheduler_pid= fi rm -f "$testdir"/scheduler.pid stop_secondary_scheduler $check_type } stop_secondary_scheduler() { check_type="$1" if test $check_type -eq 2; then scheduler2_pid=$(cat "$testdir"/scheduler2.pid 2>/dev/null) fi if test $check_type -eq 1; then if test -n "$scheduler2_pid"; then if ! kill -0 $scheduler2_pid; then echo Secondary scheduler no longer running. 
stop_ice 0 abort_tests fi fi fi if test -n "$scheduler2_pid"; then kill "$scheduler2_pid" 2>/dev/null if test $check_type -eq 1; then wait $scheduler2_pid exitcode=$? if test $exitcode -ne 0; then echo Secondary scheduler exited with code $exitcode. stop_ice 0 abort_tests fi fi scheduler2_pid= fi rm -f "$testdir"/scheduler2.pid } stop_only_daemon() { check_first="$1" if test $check_first -ne 0; then if ! kill -0 $localice_pid; then echo Daemon localice no longer running. stop_only_daemon 0 abort_tests fi fi kill $localice_pid 2>/dev/null rm -f "$testdir"/localice.pid rm -rf "$testdir"/envs-localice rm -f "$testdir"/socket-localice localice_pid= } wait_for_ice_startup_complete() { noscheduler= if test "$1" == "noscheduler"; then noscheduler=1 shift fi processes="$@" timeout=10 if test -n "$valgrind"; then # need time to set up SIGHUP handler sleep 5 timeout=15 fi notready= for ((i=0; i/dev/null; then echo "$header" echo ================ diff -u "$file1" "$file2" echo ================ stop_ice 0 abort_tests fi } # First argument is the expected output file, if any (otherwise specify ""). # Second argument is "remote" (should be compiled on a remote host) or "local" (cannot be compiled remotely). 
# Third argument is expected exit code - if this is greater than 128 the exit code will be determined by invoking the compiler locally # Follow optional arguments, in this order: # - localrebuild - specifies that the command may result in local recompile # - keepoutput - will keep the file specified using $output (the remotely compiled version) # - split_dwarf - compilation is done with -gsplit-dwarf # - noresetlogs - will not use reset_logs at the start (needs to be done explicitly before calling run_ice) # - remoteabort - remote compilation will abort (as a result of local processing failing and remote daemon killing the remote compiler) # - nostderrcheck - will not compare stderr output # - unusedmacrohack - hack for Wunused-macros test # Rest is the command to pass to icecc. # Command will be run both locally and using icecc and results compared. run_ice() { output="$1" shift remote_type="$1" shift expected_exit=$1 shift localrebuild= localrebuildforlog= if test "$1" = "localrebuild"; then localrebuild=1 localrebuildforlog=localrebuild shift fi keepoutput= if test "$1" = "keepoutput"; then keepoutput=1 shift fi split_dwarf= if test "$1" = "split_dwarf"; then if test -n "$output"; then split_dwarf=$(echo $output | sed 's/\.[^.]*//g').dwo fi shift fi noresetlogs= if test "$1" = "noresetlogs"; then noresetlogs=1 shift fi remoteabort= if test "$1" = "remoteabort"; then remoteabort=1 shift fi nostderrcheck= if test "$1" = "nostderrcheck"; then nostderrcheck=1 shift fi unusedmacrohack= if test "$1" = "unusedmacrohack"; then unusedmacrohack=1 shift fi if [[ $expected_exit -gt 128 ]]; then $@ 2>/dev/null expected_exit=$? fi if test -z "$noresetlogs"; then reset_logs local "$@" else mark_logs local "$@" fi echo Running: "$@" ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_PREFERRED_HOST=localice ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log $valgrind "${icecc}" "$@" 2>"$testdir"/stderr.localice localice_exit=$? 
if test -n "$output"; then mv "$output" "$output".localice fi if test -n "$split_dwarf"; then mv "$split_dwarf" "$split_dwarf".localice fi cat "$testdir"/stderr.localice >> "$testdir"/stderr.localice.log flush_logs check_logs_for_generic_errors $localrebuildforlog check_everything_is_idle if test "$remote_type" = "remote"; then check_log_message icecc "building myself, but telling localhost" if test -z "$localrebuild"; then check_log_error icecc "" fi else check_log_message icecc "" check_log_error icecc "building myself, but telling localhost" fi check_log_error icecc "Have to use host 127.0.0.1:10246" check_log_error icecc "Have to use host 127.0.0.1:10247" if test -z "$localrebuild"; then check_log_error icecc "local build forced" fi if test -z "$chroot_disabled"; then mark_logs remote "$@" ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_PREFERRED_HOST=remoteice1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log $valgrind "${icecc}" "$@" 2>"$testdir"/stderr.remoteice remoteice_exit=$? 
if test -n "$output"; then mv "$output" "$output".remoteice fi if test -n "$split_dwarf"; then mv "$split_dwarf" "$split_dwarf".remoteice fi cat "$testdir"/stderr.remoteice >> "$testdir"/stderr.remoteice.log flush_logs check_logs_for_generic_errors $localrebuildforlog check_everything_is_idle if test "$remote_type" = "remote"; then check_log_message icecc "Have to use host 127.0.0.1:10246" if test -z "$localrebuild"; then check_log_error icecc "" fi if test -n "$remoteabort"; then check_log_message remoteice1 "Remote compilation aborted with exit code" check_log_error remoteice1 "Remote compilation completed with exit code 0" check_log_error remoteice1 "Remote compilation exited with exit code" elif test -n "$output"; then check_log_message remoteice1 "Remote compilation completed with exit code 0" check_log_error remoteice1 "Remote compilation aborted with exit code" check_log_error remoteice1 "Remote compilation exited with exit code" else check_log_message remoteice1 "Remote compilation exited with exit code $expected_exit" check_log_error remoteice1 "Remote compilation completed with exit code 0" check_log_error remoteice1 "Remote compilation aborted with exit code" fi else check_log_message icecc "" check_log_error icecc "Have to use host 127.0.0.1:10246" fi check_log_error icecc "Have to use host 127.0.0.1:10247" check_log_error icecc "building myself, but telling localhost" if test -z "$localrebuild"; then check_log_error icecc "local build forced" fi fi mark_logs noice "$@" "$@" 2>"$testdir"/stderr normal_exit=$? 
cat "$testdir"/stderr >> "$testdir"/stderr.log flush_logs check_logs_for_generic_errors $localrebuildforlog check_everything_is_idle check_log_error icecc "Have to use host 127.0.0.1:10246" check_log_error icecc "Have to use host 127.0.0.1:10247" check_log_error icecc "" check_log_error icecc "building myself, but telling localhost" check_log_error icecc "local build forced" if test $localice_exit -ne $expected_exit; then echo "Local run exit code mismatch ($localice_exit vs $expected_exit)" stop_ice 0 abort_tests fi if test $localice_exit -ne $expected_exit; then echo "Run without icecc exit code mismatch ($normal_exit vs $expected_exit)" stop_ice 0 abort_tests fi if test -z "$chroot_disabled" -a "$remoteice_exit" != "$expected_exit"; then echo "Remote run exit code mismatch ($remoteice_exit vs $expected_exit)" stop_ice 0 abort_tests fi if test -z "$nostderrcheck"; then compare_outputs "$testdir"/stderr.localice "$testdir"/stderr "Stderr mismatch ($testdir/stderr.localice)" if test -z "$chroot_disabled"; then skipstderrcheck= if test -n "$unusedmacrohack" -a -n "$using_gcc"; then # gcc -Wunused-macro gives different location for the error depending on whether -E is used or not if ! 
diff "$testdir"/stderr.remoteice "$testdir"/stderr >/dev/null; then if diff "$testdir"/stderr.remoteice unusedmacro1.txt >/dev/null; then skipstderrcheck=1 fi if diff "$testdir"/stderr.remoteice unusedmacro2.txt >/dev/null; then skipstderrcheck=1 fi if diff "$testdir"/stderr.remoteice unusedmacro3.txt >/dev/null; then skipstderrcheck=1 fi fi fi if test -z "$skipstderrcheck"; then compare_outputs "$testdir"/stderr.remoteice "$testdir"/stderr "Stderr mismatch ($testdir/stderr.remoteice)" fi fi fi local remove_offset_number="s/<[A-Fa-f0-9]*>/<>/g" local remove_debug_info="s/\(Length\|DW_AT_\(GNU_dwo_\(id\|name\)\|comp_dir\|producer\|linkage_name\|name\)\).*/\1/g" local remove_debug_pubnames="/^\s*Offset\s*Name/,/^\s*$/s/\s*[A-Fa-f0-9]*\s*//" local remove_size_of_area="s/\(Size of area in.*section:\)\s*[0-9]*/\1/g" if test -n "$output"; then if file "$output" | grep -q ELF; then readelf -wlLiaprmfFoRt "$output" | sed -e "$remove_debug_info" \ -e "$remove_offset_number" \ -e "$remove_debug_pubnames" \ -e "$remove_size_of_area" > "$output".readelf.txt || cp "$output" "$output".readelf.txt readelf -wlLiaprmfFoRt "$output".localice | sed -e "$remove_debug_info" \ -e "$remove_offset_number" \ -e "$remove_debug_pubnames" \ -e "$remove_size_of_area" > "$output".local.readelf.txt || cp "$output" "$output".local.readelf.txt compare_outputs "$output".local.readelf.txt "$output".readelf.txt "Output mismatch ($output.localice)" if test -z "$chroot_disabled"; then readelf -wlLiaprmfFoRt "$output".remoteice | sed -e "$remove_debug_info" \ -e "$remove_offset_number" \ -e "$remove_debug_pubnames" \ -e "$remove_size_of_area" > "$output".remote.readelf.txt || cp "$output" "$output".remote.readelf.txt compare_outputs "$output".remote.readelf.txt "$output".readelf.txt "Output mismatch ($output.remoteice)" fi elif echo "$output" | grep -q '\.gch$'; then # PCH file, no idea how to check they are the same if they are not 100% identical # Make silent. 
            # .gch branch body: 'true' is a no-op placeholder so the branch is
            # syntactically non-empty; PCH files are deliberately not compared.
            true
        elif file "$output" | grep -q Mach; then
            # No idea how to check they are the same if they are not 100% identical
            # (Mach-O: a mismatch is only reported, it does not abort the test run)
            if ! diff "$output".localice "$output" >/dev/null; then
                echo "Output mismatch ($output.localice), Mach object files, not knowing how to verify"
            fi
            if test -z "$chroot_disabled"; then
                if ! diff "$output".remoteice "$output" >/dev/null; then
                    echo "Output mismatch ($output.remoteice), Mach object files, not knowing how to verify"
                fi
            fi
        elif echo "$output" | grep -q -e '\.o$' -e '\.dwo$'; then
            # possibly cygwin .o file, no idea how to check they are the same if they are not 100% identical
            # (reached only when the 'file' checks above matched neither ELF nor Mach)
            if ! diff "$output".localice "$output" >/dev/null; then
                echo "Output mismatch ($output.localice), assuming Cygwin object files, not knowing how to verify"
            fi
            if test -z "$chroot_disabled"; then
                if ! diff "$output".remoteice "$output" >/dev/null; then
                    echo "Output mismatch ($output.remoteice), assuming Cygwin object files, not knowing how to verify"
                fi
            fi
        elif echo "$output" | grep -q '\.s$'; then
            # Filter out .file directive, which may be '-' for the remote file.
grep -v '[[:space:]]\.file[[:space:]]' "$output" > "$output".asm.text grep -v '[[:space:]]\.file[[:space:]]' "$output".localice > "$output".localice.asm.text grep -v '[[:space:]]\.file[[:space:]]' "$output".remoteice > "$output".remoteice.asm.text compare_outputs "$output".localice.asm.text "$output".asm.text "Output mismatch ($output.localice)" if test -z "$chroot_disabled"; then compare_outputs "$output".remoteice.asm.text "$output".asm.text "Output mismatch ($output.remoteice)" fi else compare_outputs "$output".localice "$output" "Output mismatch ($output.localice)" if test -z "$chroot_disabled"; then compare_outputs "$output".remoteice "$output" "Output mismatch ($output.remoteice)" fi fi fi if test -n "$split_dwarf"; then if file "$output" | grep ELF >/dev/null; then readelf -wlLiaprmfFoRt "$split_dwarf" | \ sed -e "$remove_debug_info" -e "$remove_offset_number" > "$split_dwarf".readelf.txt || cp "$split_dwarf" "$split_dwarf".readelf.txt readelf -wlLiaprmfFoRt "$split_dwarf".localice | \ sed -e $remove_debug_info -e "$remove_offset_number" > "$split_dwarf".local.readelf.txt || cp "$split_dwarf" "$split_dwarf".local.readelf.txt compare_outputs "$split_dwarf".local.readelf.txt "$split_dwarf".readelf.txt "Output DWO mismatch ($split_dwarf.localice)" if test -z "$chroot_disabled"; then readelf -wlLiaprmfFoRt "$split_dwarf".remoteice | \ sed -e "$remove_debug_info" -e "$remove_offset_number" > "$split_dwarf".remote.readelf.txt || cp "$split_dwarf" "$split_dwarf".remote.readelf.txt compare_outputs "$split_dwarf".remote.readelf.txt "$split_dwarf".readelf.txt "Output DWO mismatch ($split_dwarf.remoteice)" fi elif file "$output" | grep Mach >/dev/null; then # No idea how to check they are the same if they are not 100% identical if ! diff "$split_dwarf".localice "$split_dwarf" >/dev/null; then echo "Output mismatch ($split_dwarf.localice), Mach object files, not knowing how to verify" fi if test -z "$chroot_disabled"; then if ! 
diff "$split_dwarf".remoteice "$split_dwarf" >/dev/null; then echo "Output mismatch ($split_dwarf.remoteice), Mach object files, not knowing how to verify" fi fi elif echo "$output" | grep -q -e '\.o$' -e '\.dwo$'; then # possibly cygwin .o file, no idea how to check they are the same if they are not 100% identical if ! diff "$split_dwarf".localice "$split_dwarf" >/dev/null; then echo "Output mismatch ($split_dwarf.localice), assuming Cygwin object files, not knowing how to verify" fi if test -z "$chroot_disabled"; then if ! diff "$split_dwarf".remoteice "$split_dwarf" >/dev/null; then echo "Output mismatch ($split_dwarf.remoteice), assuming Cygwin object files, not knowing how to verify" fi fi fi fi if test $localice_exit -ne 0; then echo "Command failed as expected." echo else echo Command successful. echo fi if test -n "$output"; then if test -n "$keepoutput"; then if test -z "$chroot_disabled"; then mv "$output".remoteice "$output" else mv "$output".localice "$output" fi else rm -f "output" fi rm -f "$output".localice "$output".remoteice "$output".readelf.txt "$output".local.readelf.txt "$output".remote.readelf.txt fi if test -n "$split_dwarf"; then rm -f "$split_dwarf" "$split_dwarf".localice "$split_dwarf".remoteice "$split_dwarf".readelf.txt "$split_dwarf".local.readelf.txt "$split_dwarf".remote.readelf.txt fi rm -f "$testdir"/stderr "$testdir"/stderr.localice "$testdir"/stderr.remoteice } make_test() { # make test - actually try something somewhat realistic. Since each node is set up to serve # only 2 jobs max, at least some of the 10 jobs should be built remotely. echo Running make test. reset_logs "" "make test" make -f Makefile.test OUTDIR="$testdir" clean -s ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log \ make -f Makefile.test OUTDIR="$testdir" CXX="${icecc} $TESTCXX" -j10 -s 2>>"$testdir"/stderr.log if test $? -ne 0 -o ! -x "$testdir"/maketest; then echo Make test failed. 
stop_ice 0 abort_tests fi flush_logs check_logs_for_generic_errors check_everything_is_idle check_log_message icecc "Have to use host 127.0.0.1:10246" check_log_message icecc "Have to use host 127.0.0.1:10247" check_log_message_count icecc 1 "" check_log_message remoteice1 "Remote compilation completed with exit code 0" check_log_error remoteice1 "Remote compilation aborted with exit code" check_log_error remoteice1 "Remote compilation exited with exit code $expected_exit" check_log_message remoteice2 "Remote compilation completed with exit code 0" check_log_error remoteice2 "Remote compilation aborted with exit code" check_log_error remoteice2 "Remote compilation exited with exit code $expected_exit" echo Make test successful. echo make -f Makefile.test OUTDIR="$testdir" clean -s } # 1st argument, if set, means we run without scheduler icerun_serialize_test() { # test that icerun really serializes jobs and only up to 2 (max jobs of the local daemon) are run at any time noscheduler= test -n "$1" && noscheduler=" (no scheduler)" echo "Running icerun${noscheduler} test." reset_logs "" "icerun${noscheduler} test" # remove . 
from PATH if set save_path=$PATH export PATH=$(echo $PATH | sed 's/:.:/:/' | sed 's/^.://' | sed 's/:.$//') rm -rf "$testdir"/icerun mkdir -p "$testdir"/icerun if test -n "$valgrind"; then export ICERUN_TEST_VALGRIND=1 fi for i in $(seq 1 10); do path=$PATH if test $i -eq 1; then # check icerun with absolute path testbin=$(pwd)/icerun-test.sh elif test $i -eq 2; then # check with relative path testbin=../tests/icerun-test.sh elif test $i -eq 3; then # test with PATH testbin=icerun-test.sh path=$(pwd):$PATH else testbin=./icerun-test.sh fi PATH=$path ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log \ $valgrind "${icerun}" $testbin "$testdir"/icerun $i & done unset ICERUN_TEST_VALGRIND timeout=100 if test -n "$valgrind"; then timeout=500 fi seen2= while true; do runcount=$(ls -1 "$testdir"/icerun/running* 2>/dev/null | wc -l) if test $runcount -gt 2; then echo "Icerun${noscheduler} test failed, more than expected 2 processes running." stop_ice 0 abort_tests fi test $runcount -eq 2 && seen2=1 donecount=$(ls -1 "$testdir"/icerun/done* 2>/dev/null | wc -l) if test $donecount -eq 10; then break fi sleep 0.1 timeout=$((timeout-1)) if test $timeout -eq 0; then echo "Icerun${noscheduler} test timed out." stop_ice 0 abort_tests fi done if test -z "$seen2"; then # Daemon is set up to run 2 jobs max, which means icerun should serialize only up to (and including) 2 jobs at the same time. echo "Icerun${noscheduler} test failed, 2 processes were never run at the same time." stop_ice 0 abort_tests fi flush_logs check_logs_for_generic_errors if test -z "$noscheduler"; then check_everything_is_idle fi check_log_message_count icecc 10 "" check_log_error icecc "Have to use host 127.0.0.1:10246" check_log_error icecc "Have to use host 127.0.0.1:10247" check_log_error icecc "building myself, but telling localhost" check_log_error icecc "local build forced" echo "Icerun${noscheduler} test successful." 
echo rm -r "$testdir"/icerun export PATH=$save_path } icerun_nopath_test() { reset_logs "" "icerun nopath test" echo "Running icerun nopath test." # check that plain 'icerun-test.sh' doesn't work for the current directory (i.e. ./ must be required just like with normal execution) ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log \ $valgrind "${icerun}" icerun-test.sh check_log_error icecc "invoking:" check_log_message icecc "couldn't find any" check_log_message icecc "could not find icerun-test.sh in PATH." echo "Icerun nopath test successful." echo } icerun_nocompile_test() { # check that 'icerun gcc' still only runs the command without trying a remote compile reset_logs "" "icerun${noscheduler} nocompile test" echo "Running icerun nocompile test." rm -rf -- "$testdir"/fakegcc mkdir -p "$testdir"/fakegcc echo '#! /bin/sh' > "$testdir"/fakegcc/gcc echo 'echo "$@" >' "$testdir"/fakegcc/output >> "$testdir"/fakegcc/gcc echo 'exit 44' >> "$testdir"/fakegcc/gcc chmod +x "$testdir"/fakegcc/gcc args="-Wall a.c b.c -c -s" ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log \ PATH="$testdir"/fakegcc:$PATH $valgrind "${icerun}" gcc $args if test $? -ne 44; then echo Error, icerun gcc failed. stop_ice 0 abort_tests fi check_log_message icecc "invoking: $testdir/fakegcc/gcc $args\$" rm -rf -- "$testdir"/fakegcc echo "Icerun nocompile test successful." echo } symlink_wrapper_test() { cxxwrapper="$wrapperdir/$(basename $TESTCXX)" if ! test -e "$cxxwrapper"; then echo Cannot find wrapper symlink for $TESTCXX, symlink wrapper test skipped. echo skipped_tests="$skipped_tests symlink_wrapper" return fi reset_logs "local" "symlink wrapper test" echo "Running symlink wrapper test." 
ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log \ PATH=$(dirname $TESTCXX):$PATH ICECC_PREFERRED_HOST=localice $valgrind "$cxxwrapper" -Wall -c plain.cpp if test $? -ne 0; then echo Error, local symlink wrapper test failed. stop_ice 0 abort_tests fi flush_logs check_logs_for_generic_errors check_everything_is_idle check_log_error icecc "" check_log_error icecc "Have to use host 127.0.0.1:10246" check_log_error icecc "Have to use host 127.0.0.1:10247" check_log_message icecc "building myself, but telling localhost" check_log_message icecc "invoking: $(command -v $TESTCXX) -Wall" if test -z "$chroot_disabled"; then mark_logs "remote" "symlink wrapper test" ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log \ PATH=$(dirname $TESTCXX):$PATH ICECC_PREFERRED_HOST=remoteice1 $valgrind "$cxxwrapper" -Wall -c plain.cpp if test $? -ne 0; then echo Error, remote symlink wrapper test failed. stop_ice 0 abort_tests fi flush_logs check_logs_for_generic_errors check_everything_is_idle check_log_error icecc "" check_log_message icecc "Have to use host 127.0.0.1:10246" check_log_error icecc "Have to use host 127.0.0.1:10247" check_log_error icecc "building myself, but telling localhost" check_log_message icecc "preparing source to send: $(command -v $TESTCXX) -Wall" fi echo "Symlink wrapper test successful." echo } # Check that remote daemons handle gracefully when they get environment they cannot handle. unhandled_environment_test() { if test -n "$chroot_disabled"; then skipped_tests="$skipped_tests unhandled_environment" return fi reset_logs "broken" "unhandled environment test" echo "Running unhandled environment test." # Use a .tar.gz that's not an archive at all, to fake a tarball compressed by something the remote can't uncompress. 
ICECC_VERSION=brokenenvfile.tar.gz \ ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log \ PATH=$(dirname $TESTCXX):$PATH ICECC_PREFERRED_HOST=remoteice1 $valgrind "${icecc}" $TESTCXX -Wall -c plain.cpp if test $? -ne 0; then echo Error, unhandled environment test failed. stop_ice 0 abort_tests fi flush_logs check_logs_for_generic_errors "ignoreexception25" check_everything_is_idle # it will first try to build remotely, but because of the broken environment it'll have to retry locally check_log_message icecc "Have to use host 127.0.0.1:10246" check_log_message icecc "" check_log_error icecc "Have to use host 127.0.0.1:10247" check_log_error icecc "building myself, but telling localhost" check_log_message icecc "" check_log_message icecc "got exception Error 25 - other error verifying environment on remote" local compression= if grep -q "supported features:.* env_zstd" "$testdir"/remoteice1.log && command -v zstd >/dev/null; then compression=zstd elif grep -q "supported features:.* env_xz" "$testdir"/remoteice1.log && command -v xz >/dev/null; then compression=xz fi if test -n "$compression"; then # remoteice1 supports xz/zstd, but remoteice2 not (set in sources) mark_logs "supported" "unhandled environment test" # use ICECC_EXTRAFILES to force creating a new environment, otherwise the remote might already have it local extrafile="$testdir"/uhandled_env_extrafile.txt touch "$extrafile" ICECC_ENV_COMPRESSION="$compression" ICECC_EXTRAFILES="$extrafile" \ ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log \ PATH=$(dirname $TESTCXX):$PATH ICECC_PREFERRED_HOST=remoteice1 $valgrind "${icecc}" $TESTCXX -Wall -c plain.cpp if test $? -ne 0; then echo Error, unhandled environment test failed. 
stop_ice 0 abort_tests fi flush_logs check_everything_is_idle check_log_message icecc "Have to use host 127.0.0.1:10246" check_log_error icecc "" check_log_error icecc "Have to use host 127.0.0.1:10247" check_log_error icecc "building myself, but telling localhost" check_log_message icecc "" mark_logs "unsupported" "unhandled environment test" ICECC_ENV_COMPRESSION="$compression" ICECC_EXTRAFILES="$extrafile" \ ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log \ PATH=$(dirname $TESTCXX):$PATH ICECC_PREFERRED_HOST=remoteice2 $valgrind "${icecc}" $TESTCXX -Wall -c plain.cpp if test $? -ne 0; then echo Error, unhandled environment test failed. stop_ice 0 abort_tests fi flush_logs check_everything_is_idle check_log_message icecc "building myself, but telling localhost" check_log_error icecc "" check_log_error icecc "Have to use host 127.0.0.1:10246" check_log_error icecc "Have to use host 127.0.0.1:10247" check_log_message scheduler "No suitable host found, assigning submitter" check_log_error icecc "" rm -f "$extrafile" else skipped_tests="$skipped_tests unhandled_environment_type" fi echo "Unhandled environment test successful." echo } # Check that icecc --build-native works. buildnativetest() { echo Running icecc --build-native test. reset_logs "local" "Build native" test_build_native_helper $TESTCC 1 if test $? -ne 0; then echo Icecc --build-native test failed. cat "$testdir"/icecc-build-native-output stop_ice 0 abort_tests fi echo Icecc --build-native test successful. echo } buildnativewithsymlinktest() { reset_logs local "Native environment with symlink" echo Testing native environment with a compiler symlink. rm -rf -- "$testdir"/wrappers mkdir -p "$testdir"/wrappers ln -s $(command -v $TESTCC) "$testdir"/wrappers/ ln -s $(command -v $TESTCXX) "$testdir"/wrappers/ test_build_native_helper "$testdir"/wrappers/$(basename $TESTCC) 0 if test $? 
-ne 0; then echo Testing native environment with a compiler symlink failed. cat "$testdir"/icecc-build-native-output stop_ice 0 abort_tests fi rm -rf -- "$testdir"/wrappers echo Testing native environment with a compiler symlink successful. echo } buildnativewithwrappertest() { reset_logs local "Native environment with a compiler wrapper" echo Testing native environment with a compiler wrapper. rm -rf -- "$testdir"/wrappers mkdir -p "$testdir"/wrappers echo '#! /bin/sh' > "$testdir"/wrappers/$(basename $TESTCC) echo exec $TESTCC '"$@"' >> "$testdir"/wrappers/$(basename $TESTCC) echo '#! /bin/sh' > "$testdir"/wrappers/$(basename $TESTCXX) echo exec $TESTCXX '"$@"' >> "$testdir"/wrappers/$(basename $TESTCXX) chmod +x "$testdir"/wrappers/$(basename $TESTCC) "$testdir"/wrappers/$(basename $TESTCXX) test_build_native_helper "$testdir"/wrappers/$(basename $TESTCC) 0 if test $? -ne 0; then echo Testing native environment with a compiler symlink failed. cat "$testdir"/icecc-build-native-output stop_ice 0 abort_tests fi rm -rf -- "$testdir"/wrappers echo Testing native environment with a compiler symlink successful. echo } test_build_native_helper() { compiler=$1 add_skip=$2 pushd "$testdir" >/dev/null ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log ${icecc} --build-native $compiler > "$testdir"/icecc-build-native-output if test $? -ne 0; then return 1 fi local tarball=$(sed -En '/^creating (.*\.tar.*)/s//\1/p' "$testdir"/icecc-build-native-output) if test -z "$tarball"; then return 2 fi sudo -n -- ${icecc_test_env} -q "$tarball" retcode=$? if test $retcode -eq 1; then echo Cannot verify environment, use sudo, skipping test. if test "$add_skip" = "1"; then skipped_tests="$skipped_tests $testtype" fi elif test $retcode -ne 0; then echo icecc_test_env failed to validate the environment return 3 fi rm -f $tarball "$testdir"/icecc-build-native-output popd >/dev/null return 0 } # Check that icecc recursively invoking itself is detected. 
recursive_test() { echo Running recursive check test. reset_logs "" "recursive check" recursive_tester= if test -n "$using_clang"; then recursive_tester=./recursive_clang++ elif test -n "$using_gcc"; then recursive_tester=./recursive_g++ fi # We need to avoid automatic environment creation, which would normally be triggered # since the path of the "compiler" is different. So force ICECC_VERSION. mkdir -p "$testdir"/recursive_env pushd "$testdir"/recursive_env >/dev/null "${icecc}" --build-native $TESTCXX > "$testdir"/icecc-build-native-output if test $? -ne 0; then popd >/dev/null echo Creating environment for recursive check test failed. stop_ice 0 abort_tests fi popd >/dev/null local tarball=$(sed -En '/^creating (.*\.tar.*)/s//\1/p' "$testdir"/icecc-build-native-output) test_env="$testdir"/recursive_env/${tarball} PATH="$prefix"/lib/icecc/bin:"$prefix"/bin:/usr/local/bin:/usr/bin:/bin ICECC_TEST_SOCKET="$testdir"/socket-localice \ ICECC_VERSION=$test_env ICECC_TEST_REMOTEBUILD=1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log \ "${icecc}" ./${recursive_tester} -Wall -c plain.c -o plain.o 2>>"$testdir"/stderr.log if test $? -ne 111; then echo Recursive check test failed. stop_ice 0 abort_tests fi flush_logs check_logs_for_generic_errors "localrebuild" check_everything_is_idle check_log_message icecc "icecream seems to have invoked itself recursively!" echo Recursive check test successful. echo # But a recursive invocations in the style of icerun->icecc is allowed. echo Running recursive icerun check test. reset_logs "" "recursive icerun check" PATH="$prefix"/lib/icecc/bin:"$prefix"/bin:/usr/local/bin:/usr/bin:/bin ICECC_TEST_SOCKET="$testdir"/socket-localice \ ICECC_VERSION=$test_env ICECC_TEST_REMOTEBUILD=1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log \ "${icerun}" ${icecc} $TESTCC -Wall -c plain.c -o "$testdir"/plain.o 2>>"$testdir"/stderr.log if test $? -ne 0; then echo Recursive icerun check test failed. 
stop_ice 0 abort_tests fi rm -f "$testdir"/plain.o flush_logs check_logs_for_generic_errors "localrebuild" check_everything_is_idle check_log_error icecc "icecream seems to have invoked itself recursively!" check_log_message_count icecc 1 "recursive invocation from icerun" echo Recursive icerun check test successful. echo rm -rf "$testdir"/recursive_env } # Check that transfering Clang plugin(s) works. While at it, also test ICECC_EXTRAFILES. clangplugintest() { echo Running Clang plugin test. reset_logs "" "clang plugin" if test -z "$LLVM_CONFIG"; then LLVM_CONFIG=llvm-config fi clangcxxflags=$($LLVM_CONFIG --cxxflags 2>>"$testdir"/stderr.log) if test $? -ne 0; then echo Cannot find Clang development headers, clang plugin test skipped. echo skipped_tests="$skipped_tests clangplugin" return fi echo Clang plugin compile flags: $clangcxxflags $TESTCXX -shared -fPIC -g -o "$testdir"/clangplugin.so clangplugin.cpp $clangcxxflags 2>>"$testdir"/stderr.log if test $? -ne 0; then echo Failed to compile clang plugin, clang plugin test skipped. echo skipped_tests="$skipped_tests clangplugin" return fi # TODO This should be able to also handle the clangpluginextra.txt argument without the absolute path. 
    # ICECC_EXTRAFILES makes the client ship extra files to the remote; the
    # plugin reads clangpluginextra.txt there and reports success/failure.
    export ICECC_EXTRAFILES=clangpluginextra.txt
    run_ice "$testdir/clangplugintest.o" "remote" 0 $TESTCXX -Wall -c -Xclang -load -Xclang "$testdir"/clangplugin.so \
        -Xclang -add-plugin -Xclang icecreamtest -Xclang -plugin-arg-icecreamtest -Xclang $(realpath -s clangpluginextra.txt) \
        clangplugintest.cpp -o "$testdir"/clangplugintest.o
    unset ICECC_EXTRAFILES
    also_remote=
    if test -z "$chroot_disabled"; then
        also_remote=".remoteice"
    fi
    # The plugin diagnostics must appear in each stderr variant (plain/local/remote).
    for type in "" ".localice" $also_remote; do
        check_section_log_message_count stderr${type} 1 "clangplugintest.cpp:3:5: warning: Icecream plugin found return false"
        check_section_log_message_count stderr${type} 1 "warning: Extra file check successful"
        check_section_log_error stderr${type} "Extra file open error"
        check_section_log_error stderr${type} "Incorrect number of arguments"
        check_section_log_error stderr${type} "File contents do not match"
    done
    echo Clang plugin test successful.
    echo
}

# Both clang and gcc4.8+ produce different debuginfo depending on whether the source file is
# given on the command line or using stdin (which is how icecream does it), so do not compare output.
# But check the functionality is identical to local build.
# First argument is the compiler.
# Second argument is compile command, without -o argument.
# Third argument is first line of debug at which to start comparing.
# Follow optional arguments, in this order:
# - hasdebug - specifies that there should be debug info present (will check for a variable value)
debug_test() {
    compiler="$1"
    args="$2"
    cmd="$1 $2"
    debugstart="$3"
    shift
    shift
    shift
    hasdebug=
    if test "$1" = "hasdebug"; then
        hasdebug=1
        shift
    fi
    echo "Running debug test ($cmd)."
    reset_logs "" "debug test ($cmd)"
    # Prefer building on a remote daemon, unless chroot (and thus remote mode) is unavailable.
    preferred=remoteice1
    if test -n "$chroot_disabled"; then
        preferred=localice
    fi
    ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_PREFERRED_HOST=$preferred ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log $valgrind "${icecc}" \
        $cmd -o "$testdir"/debug-remote.o 2>>"$testdir"/stderr.log
    if test $? -ne 0; then
        echo Debug test compile failed.
        stop_ice 0
        abort_tests
    fi
    flush_logs
    check_logs_for_generic_errors
    check_everything_is_idle
    # Verify the job went where expected: remoteice1 (port 10246) normally,
    # the local daemon when chroot is disabled.
    if test -z "$chroot_disabled"; then
        check_log_message icecc "Have to use host 127.0.0.1:10246"
        check_log_error icecc "Have to use host 127.0.0.1:10247"
        check_log_error icecc "building myself, but telling localhost"
        check_log_error icecc "local build forced"
        check_log_error icecc ""
        check_log_message remoteice1 "Remote compilation completed with exit code 0"
        check_log_error remoteice1 "Remote compilation aborted with exit code"
        check_log_error remoteice1 "Remote compilation exited with exit code"
    else
        check_log_message icecc "building myself, but telling localhost"
        check_log_error icecc "Have to use host 127.0.0.1:10246"
        check_log_error icecc "Have to use host 127.0.0.1:10247"
        check_log_error icecc "local build forced"
        check_log_error icecc ""
    fi
    # Link the remotely-built object and capture gdb output for comparison.
    $compiler -o "$testdir"/debug-remote "$testdir"/debug-remote.o
    if test $? -ne 0; then
        echo Linking in debug test failed.
        stop_ice 0
        abort_tests
    fi
    gdb -nx -batch -x debug-gdb.txt "$testdir"/debug-remote >"$testdir"/debug-stdout-remote.txt 2>/dev/null
    if ! grep -A 1000 "$debugstart" "$testdir"/debug-stdout-remote.txt >"$testdir"/debug-output-remote.txt ; then
        echo "Debug check failed (remote)."
        stop_ice 0
        abort_tests
    fi
    # Now build the same thing locally (plain compiler, no icecc) as the reference.
    $cmd -o "$testdir"/debug-local.o 2>>"$testdir"/stderr.log
    if test $? -ne 0; then
        echo Debug test compile failed.
        stop_ice 0
        abort_tests
    fi
    $compiler -o "$testdir"/debug-local "$testdir"/debug-local.o
    if test $? -ne 0; then
        echo Linking in debug test failed.
        stop_ice 0
        abort_tests
    fi
    gdb -nx -batch -x debug-gdb.txt "$testdir"/debug-local >"$testdir"/debug-stdout-local.txt 2>/dev/null
    if ! grep -A 1000 "$debugstart" "$testdir"/debug-stdout-local.txt >"$testdir"/debug-output-local.txt ; then
        echo "Debug check failed (local)."
        stop_ice 0
        abort_tests
    fi
    if test -n "$hasdebug"; then
        # debug-gdb.txt prints the value of one variable, check it. It has to be present twice, once in the listing, once when printed.
        local value=$(grep "debugMember =" "$testdir"/debug-output-local.txt | sed 's/.*debugMember = \(.*\);/\1/')
        if test -z "$value"; then
            echo "Debug check variable value failed (not found)."
            stop_ice 0
            abort_tests
        fi
        local count=$(grep "$value" "$testdir"/debug-output-local.txt | wc -l)
        if test "$count" != 2; then
            echo "Debug check variable value failed (count $count)."
            stop_ice 0
            abort_tests
        fi
    fi
    # Binaries without debug infos use hex addresses for some symbols, which may differ between runs
    # or builds, but is technically harmless. So remove symbol and stack addresses and let the readelf check handle that.
    sed -i -e 's/=0x[0-9a-fA-F]*//g' "$testdir"/debug-output-remote.txt
    sed -i -e 's/=0x[0-9a-fA-F]*//g' "$testdir"/debug-output-local.txt
    if ! diff "$testdir"/debug-output-local.txt "$testdir"/debug-output-remote.txt >/dev/null; then
        echo Gdb output different.
        echo =====================
        diff -u "$testdir"/debug-output-local.txt "$testdir"/debug-output-remote.txt
        echo =====================
        stop_ice 0
        abort_tests
    fi
    # gcc-4.8+ has -grecord-gcc-switches, which makes the .o differ because of the extra flags the daemon adds,
    # this changes DW_AT_producer and also offsets
    local remove_debug_info="s/\(Length\|DW_AT_\(GNU_dwo_\(id\|name\)\|comp_dir\|producer\|linkage_name\|name\)\).*/\1/g"
    local remove_offset_number="s/<[A-Fa-f0-9]*>/<>/g"
    local remove_size_of_area="s/\(Size of area in.*section:\)\s*[0-9]*/\1/g"
    local remove_debug_pubnames="/^\s*Offset\s*Name/,/^\s*$/s/\s*[A-Fa-f0-9]*\s*//"
    if file "$testdir"/debug-remote.o | grep ELF >/dev/null; then
        # Compare normalized readelf dumps of the two objects.
        readelf -wlLiaprmfFoRt "$testdir"/debug-remote.o | sed -e 's/offset: 0x[0-9a-fA-F]*//g' \
            -e 's/[ ]*--param ggc-min-expand.*heapsize\=[0-9]\+//g' \
            -e "$remove_debug_info" \
            -e "$remove_offset_number" \
            -e "$remove_size_of_area" \
            -e "$remove_debug_pubnames" > "$testdir"/readelf-remote.txt
        readelf -wlLiaprmfFoRt "$testdir"/debug-local.o | sed -e 's/offset: 0x[0-9a-fA-F]*//g' \
            -e "$remove_debug_info" \
            -e "$remove_offset_number" \
            -e "$remove_size_of_area" \
            -e "$remove_debug_pubnames" > "$testdir"/readelf-local.txt
        if ! diff "$testdir"/readelf-local.txt "$testdir"/readelf-remote.txt >/dev/null; then
            echo Readelf output different.
            echo =====================
            diff -u "$testdir"/readelf-local.txt "$testdir"/readelf-remote.txt
            echo =====================
            stop_ice 0
            abort_tests
        fi
    elif file "$testdir"/debug-remote.o | grep Mach >/dev/null; then
        # No idea how to check they are the same if they are not 100% identical
        if ! diff "$testdir"/debug-local.o "$testdir"/debug-remote.o >/dev/null; then
            echo "Output mismatch, Mach object files, not knowing how to verify"
        fi
    else
        # possibly cygwin .o file, no idea how to check they are the same if they are not 100% identical
        if ! diff "$testdir"/debug-local.o "$testdir"/debug-remote.o >/dev/null; then
            echo "Output mismatch, assuming Cygwin object files, not knowing how to verify"
        fi
    fi
    rm -f "$testdir"/debug-remote.o "$testdir"/debug-local.o "$testdir"/debug-remote "$testdir"/debug-local "$testdir"/debug-*-*.txt "$testdir"/readelf-*.txt
    echo Debug test successful.
    echo
}

# Run the local daemon with -m 0 (accept no local compile jobs), compile two
# objects via the remote daemons, then link locally; the resulting app must
# run and return its expected exit code (123).
zero_local_jobs_test() {
    echo Running zero local jobs test.
    reset_logs "" "Running zero local jobs test"
    kill_daemon localice
    start_iceccd localice --no-remote -m 0
    wait_for_ice_startup_complete localice
    libdir="${testdir}/libs"
    rm -rf "${libdir}"
    mkdir "${libdir}"
    mark_logs remote $TESTCXX -Wall -Werror -c testfunc.cpp -o "${testdir}/testfunc.o"
    echo Running: $TESTCXX -Wall -Werror -c testfunc.cpp -o "${testdir}/testfunc.o"
    ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_PREFERRED_HOST=remoteice1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log $valgrind "${icecc}" $TESTCXX -Wall -Werror -c testfunc.cpp -o "${testdir}/testfunc.o"
    if [[ $? -ne 0 ]]; then
        echo "failed to build testfunc.o"
        stop_ice 0
        abort_tests
    fi
    mark_logs remote $TESTCXX -Wall -Werror -c testmainfunc.cpp -o "${testdir}/testmainfunc.o"
    echo Running: $TESTCXX -Wall -Werror -c testmainfunc.cpp -o "${testdir}/testmainfunc.o"
    ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_PREFERRED_HOST=remoteice2 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log $valgrind "${icecc}" $TESTCXX -Wall -Werror -c testmainfunc.cpp -o "${testdir}/testmainfunc.o"
    if test $? -ne 0; then
        echo "Error, failed to compile testfunc.cpp"
        stop_ice 0
        abort_tests
    fi
    # Archive the two objects into static libraries for the link step.
    ar rcs "${libdir}/libtestlib1.a" "${testdir}/testmainfunc.o"
    if test $? -ne 0; then
        echo "Error, 'ar' failed to create the ${libdir}/libtestlib1.a static library from object ${testdir}/testmainfunc.o"
        stop_ice 0
        abort_tests
    fi
    ar rcs "${libdir}/libtestlib2.a" "${testdir}/testfunc.o"
    if test $? -ne 0; then
        echo "Error, 'ar' failed to create the ${libdir}/libtestlib2.a static library from object ${testdir}/testfunc.o"
        stop_ice 0
        abort_tests
    fi
    # Linking must be done locally (-m 0 forbids local compile jobs, not links).
    mark_logs local $TESTCXX -Wall -Werror "-L${libdir}" "-ltestlib1" "-ltestlib2" -o "${testdir}/linkedapp"
    echo Running: $TESTCXX -Wall -Werror "-L${libdir}" "-ltestlib1" "-ltestlib2" -o "${testdir}/linkedapp"
    ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_PREFERRED_HOST=localice ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log $valgrind "${icecc}" $TESTCXX -Wall -Werror "-L${libdir}" "-ltestlib1" "-ltestlib2" -o "${testdir}/linkedapp" 2>>"$testdir"/stderr.log
    if test $? -ne 0; then
        echo "Error, failed to link testlib1 and testlib2 into linkedapp"
        stop_ice 0
        abort_tests
    fi
    "${testdir}/linkedapp" 2>>"$testdir"/stderr.log
    app_ret=$?
    if test ${app_ret} -ne 123; then
        echo "Error, failed to create a test app by building remotely and linking locally"
        stop_ice 0
        abort_tests
    fi
    rm -rf "${libdir}"
    # Restore the local daemon to its normal configuration.
    kill_daemon localice
    start_iceccd localice --no-remote -m 2
    wait_for_ice_startup_complete localice
    echo Zero local jobs test successful.
    echo
}

# Verify that icecc's stderr output is passed through correctly when icecc is
# invoked via ccache (CCACHE_PREFIX), including ccache's UNCACHED_ERR_FD handling.
ccache_test() {
    if ! command -v ccache >/dev/null; then
        echo Could not find ccache, ccache tests skipped.
        echo
        skipped_tests="$skipped_tests ccache"
        return
    fi
    reset_logs "verify" "Testing ccache error redirect"
    echo Testing ccache error redirect.
    # First check that everything actually works (the test itself doesn't have icecc debug enabled and uses only stderr, because of ccache).
rm -rf "$testdir/ccache" ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_PREFERRED_HOST=remoteice1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log \ CCACHE_PREFIX=${icecc} CCACHE_DIR="$testdir"/ccache ICECC_VERSION=testbrokenenv ccache $TESTCXX -Wall -Werror -c plain.cpp -o "$testdir/"plain.o 2>>"$testdir"/stderr.log check_log_message icecc "ICECC_VERSION has to point to an existing file to be installed testbrokenenv" # Second run, will get cached result, so there's no icecc error in ccache's cached stderr ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_PREFERRED_HOST=remoteice1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log \ CCACHE_PREFIX=${icecc} CCACHE_DIR="$testdir"/ccache ICECC_VERSION=testbrokenenv ccache $TESTCXX -Wall -Werror -c plain.cpp -o "$testdir/"plain.o 2>>"$testdir"/stderr.log check_log_message_count icecc 1 "ICECC_VERSION has to point to an existing file to be installed testbrokenenv" # Now run it again, this time without icecc debug redirected, so that ccache has to handle icecc's stderr. reset_logs "cache" "Testing ccache error redirect" rm -rf "$testdir/ccache" ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_PREFERRED_HOST=remoteice1 ICECC_DEBUG=debug \ CCACHE_PREFIX=${icecc} CCACHE_DIR="$testdir"/ccache ICECC_VERSION=testbrokenenv ccache $TESTCXX -Wall -Werror -c plain.cpp -o "$testdir/"plain.o 2>>"$testdir"/stderr.log if cat_log_last_mark stderr | grep -q "UNCACHED_ERR_FD provides an invalid file descriptor"; then echo UNCACHED_ERR_FD provided by ccache is invalid, skipping test. echo skipped_tests="$skipped_tests ccache" return fi if ! cat_log_last_mark stderr | grep -q "ICECC_VERSION has to point to an existing file to be installed testbrokenenv"; then # If ccache's UNCACHED_ERR_FD handling is broken, the fd number may match an unrelated open fd, in which case the log message just disappears. 
echo Missing icecc stderr output from ccache, assuming broken ccache, skipping test. echo skipped_tests="$skipped_tests ccache" return fi # second run, will get cached result, so there's no icecc error in ccache's cached stderr reset_logs "test" "Testing ccache error redirect" ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_PREFERRED_HOST=remoteice1 \ CCACHE_PREFIX=${icecc} CCACHE_DIR="$testdir"/ccache ICECC_VERSION=testbrokenenv ccache $TESTCXX -Wall -Werror -c plain.cpp -o "$testdir/"plain.o 2>>"$testdir"/stderr.log check_section_log_error stderr "ICECC_VERSION has to point to an existing file to be installed testbrokenenv" echo Testing ccache error redirect successful. echo } # Try to find a different version of the used compiler, use both of them and verify the remote compilation # uses the matching version (e.g. /usr/bin/gcc -> gcc-8 and there's also /usr/bin/gcc-7). differentcompilerversiontest() { # First check $TESTCC. Compile to just assembler, which will output .ident "version". # If remote uses a different compiler, the test will find the difference and fail. run_ice "$testdir/plain.s" "remote" 0 "$TESTCC" -Wall -Werror -S plain.c -o "$testdir/"plain.s # Try to find a different version of $TESTCC. # Just search /usr/bin/. if test -n "$using_gcc"; then files="$(ls /usr/bin/gcc-[0-9\.-]* 2>/dev/null)" elif test -n "$using_clang"; then files="$(ls /usr/bin/clang-[0-9\.-]* 2>/dev/null)" fi different= if test -n "$files"; then for file in $files; do if test "$($TESTCC --version | head -1)" != "$($files --version | head -1)"; then different="$file" break fi done fi if test -z "$different"; then echo Could not find a different version for $TESTCC, skipping test. echo skipped_tests="$skipped_tests different_compiler" return fi echo Different compiler version: "$different" --version echo # Run a normal compile test for it first, this one already may find a difference. 
    run_ice "$testdir/plain.o" "remote" 0 "$different" -Wall -Werror -c plain.c -o "$testdir/"plain.o
    # And now again compile to just assembler, which will output .ident "version".
    # If remote uses a different compiler (e.g. $TESTCC), the test will find the difference and fail.
    run_ice "$testdir/plain.s" "remote" 0 "$different" -Wall -Werror -S plain.c -o "$testdir/"plain.s
}

# All log files that are used by tests. Done here to keep the list in just one place.
daemonlogs="scheduler scheduler2 localice remoteice1 remoteice2"
otherlogs="icecc stderr stderr.localice stderr.remoteice"
alltestlogs="$daemonlogs $otherlogs"

# Call this at the start of a complete test (e.g. testing a feature). If a test fails, logs before this point will not be dumped.
reset_logs() {
    local type="$1"
    shift
    last_reset_log_mark=$flush_log_mark
    mark_logs $type "$@"
}

# Call this at the start of a sub-test (e.g. remote vs local build). Functions such as check_log_message will not check before the mark.
mark_logs() {
    local type="$1"
    shift
    last_section_log_mark=$flush_log_mark
    # Write a visually distinct header describing the (sub-)test into each log.
    echo ================ > "$testdir"/log_header.txt
    if test -n "$type"; then
        echo "= Test ($type): $@" >> "$testdir"/log_header.txt
    else
        echo "= Test : $@" >> "$testdir"/log_header.txt
    fi
    echo ================ >> "$testdir"/log_header.txt
    # Make daemons write the header.
    flush_logs
    # Logs not owned by a running daemon get the header appended manually.
    manual="$otherlogs"
    for daemon in $daemonlogs; do
        pid=${daemon}_pid
        if test -n "${!pid}"; then
            kill -0 ${!pid}
            if test $? -ne 0; then
                manual="$manual $daemon"
            fi
        else
            manual="$manual $daemon"
        fi
    done
    for log in $manual; do
        cat "$testdir"/log_header.txt >> "$testdir"/${log}.log
    done
    rm "$testdir"/log_header.txt
}

# Ask every running daemon (via SIGHUP) to write the current flush mark into its
# log, wait until all of them did, and write the mark manually into the other logs.
flush_logs() {
    echo "=${flush_log_mark}=" > "$testdir"/flush_log_mark.txt
    wait_for=
    manual="$otherlogs"
    for daemon in $daemonlogs; do
        pid=${daemon}_pid
        if test -n "${!pid}"; then
            kill -HUP ${!pid}
            if test $? -eq 0; then
                wait_for="$wait_for $daemon"
            else
                manual="$manual $daemon"
            fi
        else
            manual="$manual $daemon"
        fi
    done
    # wait for all daemons to log the mark in their log
    while test -n "$wait_for"; do
        ready=1
        for daemon in $wait_for; do
            if ! grep -q "flush log mark: =${flush_log_mark}=" "$testdir"/${daemon}.log; then
                ready=
            fi
        done
        if test -n "$ready"; then
            break
        fi
    done
    for log in $manual; do
        echo "flush log mark: =${flush_log_mark}=" >> "$testdir"/${log}.log
    done
    rm "$testdir"/flush_log_mark.txt
    flush_log_mark=$((flush_log_mark + 1))
}

# Print the interesting tail of every test log (and valgrind logs with errors),
# used when a test failed.
dump_logs() {
    for log in $alltestlogs; do
        # Skip logs that have only headers and flush marks
        if cat_log_last_section ${log} | grep -q -v "^="; then
            echo ------------------------------------------------
            echo "Log: ${log}"
            cat_log_last_section ${log}
        fi
    done
    valgrind_logs=$(ls "$testdir"/valgrind-*.log 2>/dev/null)
    for log in $valgrind_logs; do
        has_error=
        if test -n "$valgrind_error_markers"; then
            if grep -q ICEERRORBEGIN ${log}; then
                has_error=1
            fi
        else
            # Let's guess that every error message has this.
if grep -q '^==[0-9]*== at ' ${log}; then has_error=1 fi fi if test -n "$has_error"; then echo ------------------------------------------------ echo "Log: ${log}" | sed "s#${testdir}/##" grep -v ICEERRORBEGIN ${log} | grep -v ICEERROREND fi done } cat_log_last_mark() { log="$1" grep -A 100000 "flush log mark: =${last_section_log_mark}=" "$testdir"/${log}.log | grep -v "flush log mark: " } cat_log_last_section() { log="$1" grep -A 100000 "flush log mark: =${last_reset_log_mark}=" "$testdir"/${log}.log | grep -v "flush log mark: " } # Optional arguments, in this order: # - localrebuild - specifies that the command might have resulted in local recompile # - ignoreexception25 - ignore expection 25 (cannot verify environment) # - ignorenosuitablehost - ignore scheduler's "No suitable host found, assigning submitter" check_logs_for_generic_errors() { localrebuild= ignoreexception25= ignorenosuitablehost= if test "$1" = "localrebuild"; then localrebuild=1 shift fi if test "$1" = "ignoreexception25"; then ignoreexception25=1 shift fi if test "$1" = "ignorenosuitablehost"; then ignorenosuitablehost=1 shift fi check_log_error scheduler "that job isn't handled by" check_log_error scheduler "the server isn't the same for job" if test -z "ignoreexception25"; then check_log_error icecc "got exception " else check_log_error_except icecc "got exception " "got exception Error 25" fi check_log_error icecc "found another non option on command line. Two input files" for log in localice remoteice1 remoteice2; do check_log_error $log "Ignoring bogus version" check_log_error $log "scheduler closed connection" done for log in scheduler icecc localice remoteice1 remoteice2; do check_log_error $log "internal error" if test -n "$localrebuild"; then # If the client finds out it needs to do a local rebuild because of the need to fix # stderr, it will simply close the connection to the remote daemon, so the remote # daemon may get broken pipe when trying to write the object file. 
That's harmless. if test "$log" != remoteice1 -a "$log" != "remoteice2"; then check_log_error $log "setting error state for channel" fi fi done # consider all non-fatal errors such as running out of memory on the remote # still as problems, except for: # 102 - -fdiagnostics-show-caret forced local build (gcc-4.8+) if test -n "$localrebuild"; then check_log_error_except icecc "local build forced" "local build forced by remote exception" else check_log_error icecc "local build forced" fi if test -z "$ignorenosuitablehost"; then check_log_error scheduler "No suitable host found, assigning submitter" fi has_valgrind_error= if test -n "$valgrind_error_markers"; then if grep -q "ICEERRORBEGIN" "$testdir"/valgrind-*.log 2>/dev/null; then has_valgrind_error=1 fi else if grep -q '^==[0-9]*== at ' "$testdir"/valgrind-*.log 2>/dev/null; then has_valgrind_error=1 fi fi if test -n "$has_valgrind_error"; then echo Valgrind detected an error, aborting. stop_ice 0 abort_tests fi } check_log_error() { log="$1" if cat_log_last_mark ${log} | grep -q "$2"; then echo "Error, $log log contains error: $2" stop_ice 0 abort_tests fi } check_section_log_error() { log="$1" if cat_log_last_section ${log} | grep -q "$2"; then echo "Error, $log log contains error: $2" stop_ice 0 abort_tests fi } # check the error message ($2) is not present in log ($1), # but the exception ($3) is allowed check_log_error_except() { log="$1" if cat_log_last_mark ${log} | grep -v "$3" | grep -q "$2" ; then echo "Error, $log log contains error: $2" stop_ice 0 abort_tests fi } check_log_message() { log="$1" if ! cat_log_last_mark ${log} | grep -q "$2"; then echo "Error, $log log does not contain: $2" stop_ice 0 abort_tests fi } check_section_log_message() { log="$1" if ! 
cat_log_last_section ${log} | grep -q "$2"; then echo "Error, $log log does not contain: $2" stop_ice 0 abort_tests fi } check_log_message_count() { log="$1" expected_count="$2" count=$(cat_log_last_mark ${log} | grep "$3" | wc -l) if test $count -ne $expected_count; then echo "Error, $log log does not contain expected count (${count} vs ${expected_count}): $3" stop_ice 0 abort_tests fi } check_section_log_message_count() { log="$1" expected_count="$2" count=$(cat_log_last_section ${log} | grep "$3" | wc -l) if test $count -ne $expected_count; then echo "Error, $log log does not contain expected count (${count} vs ${expected_count}): $3" stop_ice 0 abort_tests fi } # Check that there are no pending jobs. check_everything_is_idle() { # Use expect, using telnet is broken as plain "echo foo | telnet" quits before getting the reply. local output=$(expect << EOF spawn telnet localhost 8768 expect "200 Use 'help' for help and 'quit' to quit." send "listcs\r" expect "200 done" send "quit\r" interact EOF ) echo "$output" | grep -q "200 Use 'help' for help and 'quit' to quit." if test $? -ne 0; then echo "Error, cannot reach scheduler control interface." echo "$output" stop_ice 0 abort_tests fi echo "$output" | grep -q " 3 hosts," if test $? -ne 0; then echo "Error, not all 3 nodes connected to the scheduler." echo "$output" stop_ice 0 abort_tests fi echo "$output" | grep -q " 0 jobs in queue" if test $? -ne 0; then echo "Error, there are still pending jobs." echo "$output" stop_ice 0 abort_tests fi echo "$output" | grep -E "jobs=[0-9]+/[0-9]+" | sed -E 's#^.*jobs=([0-9]+)/[0-9]+.*$#\1#' | grep -q -v "0" if test $? -eq 0; then echo "Error, nodes still have pending jobs." 
echo "$output" stop_ice 0 abort_tests fi } # ================================================================== # Main code starts here # ================================================================== echo check_compilers stop_ice 2 for log in $alltestlogs; do rm -f "$testdir"/${log}.log rm -f "$testdir"/${log}_section.log rm -f "$testdir"/${log}_all.log echo -n >"$testdir"/${log}.log done rm -f "$testdir"/valgrind-*.log 2>/dev/null buildnativetest echo Starting icecream. reset_logs local "Starting" start_ice check_logs_for_generic_errors check_everything_is_idle echo Starting icecream successful. echo run_ice "$testdir/plain.o" "remote" 0 $TESTCXX -Wall -Werror -c plain.cpp -o "$testdir/"plain.o run_ice "$testdir/plain.o" "remote" 0 $TESTCC -Wall -Werror -c plain.c -o "$testdir/"plain.o run_ice "$testdir/plain.o" "remote" 0 $TESTCXX -Wall -Werror -c plain.cpp -O2 -o "$testdir/"plain.o run_ice "$testdir/plain.ii" "local" 0 $TESTCXX -Wall -Werror -E plain.cpp -o "$testdir/"plain.ii run_ice "$testdir/includes.o" "remote" 0 $TESTCXX -Wall -Werror -c includes.cpp -o "$testdir"/includes.o run_ice "$testdir/includes.o" "remote" 0 $TESTCXX -Wall -Werror -c includes-without.cpp -include includes.h -o "$testdir"/includes.o run_ice "$testdir/plain.o" "local" 0 $TESTCXX -Wall -Werror -c plain.cpp -mtune=native -o "$testdir"/plain.o run_ice "$testdir/plain.o" "remote" 0 $TESTCC -Wall -Werror -x c++ -c plain -o "$testdir"/plain.o run_ice "$testdir/plain.s" "remote" 0 $TESTCC -Wall -Werror -S plain.c -o "$testdir"/plain.s $TESTCC -Wa,-al=listing.txt -Wall -Werror -c plain.c -o "$testdir/"plain.o 2>/dev/null if test $? -eq 0; then run_ice "$testdir/plain.o" "local" 0 $TESTCC -Wa,-al=listing.txt -Wall -Werror -c plain.c -o "$testdir/"plain.o else skipped_tests="$skipped_tests asm_listing" fi $TESTCC -Wa,macros.s -Wall -Werror -c plain.c -o "$testdir/"plain.o 2>/dev/null if test $? 
-eq 0; then run_ice "$testdir/plain.o" "remote" 0 $TESTCC -Wa,macros.s -Wall -Werror -c plain.c -o "$testdir/"plain.o else skipped_tests="$skipped_tests asm_macros" fi $TESTCC -Wa,--defsym,MYSYM=yes -Wall -Werror -c plain.c -o "$testdir/"plain.o 2>/dev/null if test $? -eq 0; then run_ice "$testdir/plain.o" "remote" 0 $TESTCC -Wa,--defsym,MYSYM=yes -Wall -Werror -c plain.c -o "$testdir/"plain.o else skipped_tests="$skipped_tests asm_defsym" fi $TESTCC -Wa,@assembler.args -Wall -Werror -c plain.c -o "$testdir/"plain.o 2>/dev/null if test $? -eq 0; then run_ice "$testdir/plain.o" "local" 0 $TESTCC -Wa,@assembler.args -Wall -Werror -c plain.c -o "$testdir/"plain.o else skipped_tests="$skipped_tests asm_defsym" fi run_ice "$testdir/testdefine.o" "remote" 0 $TESTCXX -Wall -Werror -DICECREAM_TEST_DEFINE=test -c testdefine.cpp -o "$testdir/"testdefine.o run_ice "$testdir/testdefine.o" "remote" 0 $TESTCXX -Wall -Werror -D ICECREAM_TEST_DEFINE=test -c testdefine.cpp -o "$testdir/"testdefine.o run_ice "" "remote" 300 "localrebuild" "remoteabort" "nostderrcheck" $TESTCXX -c nonexistent.cpp if test -e /bin/true; then run_ice "" "local" 0 /bin/true elif test -e /usr/bin/true; then run_ice "" "local" 0 /usr/bin/true else skipped_tests="$skipped_tests run-true" fi run_ice "" "local" 300 "nostderrcheck" /bin/nonexistent-at-all-doesnt-exist run_ice "$testdir/warninginmacro.o" "remote" 0 $TESTCXX -Wall -Wextra -Werror -c warninginmacro.cpp -o "$testdir/"warninginmacro.o run_ice "$testdir/unusedmacro.o" "remote" 0 "unusedmacrohack" $TESTCXX -Wall -Wunused-macros -c unusedmacro.cpp -o "$testdir/unusedmacro.o" if test -n "$using_gcc"; then # These all break because of -fdirectives-only bugs, check we manage to build them somehow. 
    run_ice "$testdir/countermacro.o" "remote" 0 "localrebuild" "remoteabort" "nostderrcheck" $TESTCC -Wall -Werror -c countermacro.c -o "$testdir"/countermacro.o
    if $TESTCXX -std=c++11 -fsyntax-only -Werror -c rawliterals.cpp 2>/dev/null; then
        run_ice "$testdir/rawliterals.o" "remote" 0 "localrebuild" "remoteabort" "nostderrcheck" $TESTCXX -std=c++11 -Wall -Werror -c rawliterals.cpp -o "$testdir"/rawliterals.o
    fi
fi
# Clang's -cxx-isystem option (probed first; skipped if unsupported).
if $TESTCXX -cxx-isystem ./ -fsyntax-only -Werror -c includes.cpp 2>/dev/null; then
    run_ice "$testdir/includes.o" "remote" 0 $TESTCXX -Wall -Werror -cxx-isystem ./ -c includes.cpp -o "$testdir"/includes.o
else
    skipped_tests="$skipped_tests cxx-isystem"
fi
# Check that -target/--target= is forwarded to (or implied on) the remote side.
if test -n "$using_clang"; then
    target=$($TESTCXX -dumpmachine)
    run_ice "$testdir/plain.o" "remote" 0 $TESTCXX -Wall -Werror -target $target -c plain.cpp -o "$testdir"/plain.o
    if test -z "$chroot_disabled"; then
        check_section_log_message remoteice1 "remote compile arguments:.*-target $target"
        run_ice "$testdir/plain.o" "remote" 0 $TESTCXX -Wall -Werror -c plain.cpp -o "$testdir"/plain.o
        check_section_log_message remoteice1 "remote compile arguments:.*-target $target"
    fi
    run_ice "$testdir/plain.o" "remote" 0 $TESTCXX -Wall -Werror --target=$target -c plain.cpp -o "$testdir"/plain.o
    if test -z "$chroot_disabled"; then
        check_section_log_message remoteice1 "remote compile arguments:.*--target=$target"
        check_section_log_error remoteice1 "remote compile arguments:.*-target $target"
    fi
else
    skipped_tests="$skipped_tests target"
fi
# Probe whether split-dwarf (-gsplit-dwarf) actually works with this toolchain.
debug_fission_disabled=1
$TESTCXX -gsplit-dwarf true.cpp -o "$testdir"/true 2>/dev/null >/dev/null
if test $? -eq 0; then
    "$testdir"/true
    if test $? -eq 0; then
        debug_fission_disabled=
    fi
    rm -f "$testdir"/true "$testdir"/true.dwo true.dwo
fi
if test -n "$debug_fission_disabled"; then
    skipped_tests="$skipped_tests split-dwarf"
fi
if test -z "$debug_fission_disabled"; then
    run_ice "$testdir/plain.o" "remote" 0 "split_dwarf" $TESTCXX -Wall -Werror -gsplit-dwarf -g -c plain.cpp -o "$testdir/"plain.o
    run_ice "$testdir/plain.o" "remote" 0 "split_dwarf" $TESTCC -Wall -Werror -gsplit-dwarf -c plain.c -o "$testdir/"plain.o
    run_ice "$testdir/plain.o" "remote" 0 "split_dwarf" $TESTCC -Wall -Werror -gsplit-dwarf -c plain.c -o "../../../../../../../..$testdir/plain.o"
    run_ice "" "remote" 300 "localrebuild" "split_dwarf" "remoteabort" "nostderrcheck" $TESTCXX -gsplit-dwarf -c nonexistent.cpp
fi
# gcc stderr/caret workaround (Error 102 forces a local recompile); clang does not need it.
if test -z "$chroot_disabled"; then
    if test -z "$using_gcc"; then
        run_ice "" "remote" 1 $TESTCXX -c syntaxerror.cpp
        check_section_log_error icecc "local build forced by remote exception: Error 102 - command needs stdout/stderr workaround, recompiling locally"
        run_ice "$testdir/messages.o" "remote" 0 $TESTCXX -Wall -c messages.cpp -o "$testdir"/messages.o
        check_log_message stderr "warning: unused variable 'unused'"
        check_section_log_error icecc "local build forced by remote exception: Error 102 - command needs stdout/stderr workaround, recompiling locally"
    else
        if $TESTCXX -E -fdiagnostics-show-caret -Werror messages.cpp >/dev/null 2>/dev/null; then
            # check gcc stderr workaround, icecream will force a local recompile
            run_ice "" "remote" 1 "localrebuild" $TESTCXX -c -fdiagnostics-show-caret syntaxerror.cpp
            run_ice "$testdir/messages.o" "remote" 0 "localrebuild" $TESTCXX -Wall -c -fdiagnostics-show-caret messages.cpp -o "$testdir"/messages.o
            check_log_message stderr "warning: unused variable 'unused'"
            check_section_log_message icecc "local build forced by remote exception: Error 102 - command needs stdout/stderr workaround, recompiling locally"
            # try again without the local recompile
            run_ice "" "remote" 1 $TESTCXX -c -fno-diagnostics-show-caret syntaxerror.cpp
            run_ice "$testdir/messages.o" "remote" 0 $TESTCXX -Wall -c -fno-diagnostics-show-caret messages.cpp -o "$testdir"/messages.o
            check_log_message stderr "warning: unused variable 'unused'"
            check_section_log_error icecc "local build forced by remote exception: Error 102 - command needs stdout/stderr workaround, recompiling locally"
        else
            # This gcc is too old to have this problem, but we do not check the gcc version in icecc.
            run_ice "" "remote" 1 "localrebuild" $TESTCXX -c syntaxerror.cpp
            check_section_log_message icecc "local build forced by remote exception: Error 102 - command needs stdout/stderr workaround, recompiling locally"
            run_ice "$testdir/messages.o" "remote" 0 "localrebuild" $TESTCXX -Wall -c messages.cpp -o "$testdir"/messages.o
            check_log_message stderr "warning: unused variable 'unused'"
            check_section_log_message icecc "local build forced by remote exception: Error 102 - command needs stdout/stderr workaround, recompiling locally"
        fi
    fi
else
    skipped_tests="$skipped_tests gcc-caret"
fi
# Debug-info comparison tests; need both gdb and readelf.
if command -v gdb >/dev/null; then
    if command -v readelf >/dev/null; then
        debug_test "$TESTCXX" "-c -g debug.cpp" "Temporary breakpoint 1, main () at debug.cpp:8" "hasdebug"
        debug_test "$TESTCXX" "-c -g $(pwd)/debug/debug2.cpp" "Temporary breakpoint 1, main () at .*debug/debug2.cpp:8" "hasdebug"
        debug_test "$TESTCXX" "-c -g0 debug.cpp" "Temporary breakpoint 1, 0x"
        if test -z "$debug_fission_disabled"; then
            debug_test "$TESTCXX" "-c -g debug.cpp -gsplit-dwarf" "Temporary breakpoint 1, main () at debug.cpp:8" "hasdebug"
            debug_test "$TESTCXX" "-c -g $(pwd)/debug/debug2.cpp -gsplit-dwarf" "Temporary breakpoint 1, main () at .*debug/debug2.cpp:8" "hasdebug"
            debug_test "$TESTCXX" "-c debug.cpp -gsplit-dwarf -g0" "Temporary breakpoint 1, 0x"
        fi
    fi
else
    skipped_tests="$skipped_tests debug"
fi
# AddressSanitizer: compile remotely, link and run locally, check ASAN reports.
if $TESTCXX -fsanitize=address -Werror fsanitize.cpp -o /dev/null >/dev/null 2>/dev/null; then
    run_ice "$testdir/fsanitize.o" "remote" 0 keepoutput $TESTCXX -c -fsanitize=address -g fsanitize.cpp -o "$testdir"/fsanitize.o
    $TESTCXX -fsanitize=address -g "$testdir"/fsanitize.o -o "$testdir"/fsanitize 2>>"$testdir"/stderr.log
    if test $? -ne 0; then
        echo "Linking for -fsanitize test failed."
        stop_ice 0
        abort_tests
    fi
    "$testdir"/fsanitize 2>>"$testdir"/stderr.log
    check_log_message stderr "ERROR: AddressSanitizer: heap-use-after-free"
    # Only newer versions of ASAN have the SUMMARY line.
    if grep -q "^SUMMARY:" "$testdir"/stderr.log; then
        check_log_message stderr "SUMMARY: AddressSanitizer: heap-use-after-free .*fsanitize.cpp:5.* test_fsanitize_function()"
    fi
    rm "$testdir"/fsanitize.o
    # -fsanitize-blacklist: the blacklist file must be shipped to the remote,
    # and the blacklisted function must then produce no ASAN report.
    if $TESTCXX -fsanitize=address -fsanitize-blacklist=fsanitize-blacklist.txt -c -fsyntax-only fsanitize.cpp >/dev/null 2>/dev/null; then
        run_ice "$testdir/fsanitize.o" "remote" 0 keepoutput $TESTCXX -c -fsanitize=address -fsanitize-blacklist=fsanitize-blacklist.txt -g fsanitize.cpp -o "$testdir"/fsanitize.o
        $TESTCXX -fsanitize=address -fsanitize-blacklist=fsanitize-blacklist.txt -g "$testdir"/fsanitize.o -o "$testdir"/fsanitize 2>>"$testdir"/stderr.log
        if test $? -ne 0; then
            echo "Linking for -fsanitize test failed."
            stop_ice 0
            abort_tests
        fi
        "$testdir"/fsanitize 2>>"$testdir"/stderr.log
        check_log_error stderr "ERROR: AddressSanitizer: heap-use-after-free"
        if grep -q "^SUMMARY:" "$testdir"/stderr.log; then
            check_log_error stderr "SUMMARY: AddressSanitizer: heap-use-after-free .*fsanitize.cpp:5 in test()"
        fi
        rm "$testdir"/fsanitize.o
        # A missing blacklist file must force a local build.
        run_ice "" "local" 300 $TESTCXX -c -fsanitize=address -fsanitize-blacklist=nonexistent -g fsanitize.cpp -o "$testdir"/fsanitize.o
        check_section_log_message icecc "file for argument -fsanitize-blacklist=nonexistent missing, building locally"
        # Check that a path with /../ is resolved properly (use the debug/ subdir of another test).
run_ice "$testdir/fsanitize.o" "remote" 0 $TESTCXX -c -fsanitize=address -fsanitize-blacklist=debug/../fsanitize-blacklist.txt -g fsanitize.cpp -o "$testdir"/fsanitize.o check_section_log_error icecc "file for argument -fsanitize-blacklist=.*/fsanitize-blacklist.txt missing, building locally" rm -rf "$testdir"/fsanitize else skipped_tests="$skipped_tests fsanitize-blacklist" fi else skipped_tests="$skipped_tests fsanitize" fi # test -frewrite-includes usage $TESTCXX -E -Werror -frewrite-includes messages.cpp 2>/dev/null | grep -q '^# 1 "messages.cpp"$' >/dev/null 2>/dev/null if test $? -eq 0; then run_ice "$testdir/messages.o" "remote" 0 $TESTCXX -Wall -c messages.cpp -o "$testdir"/messages.o check_log_message stderr "warning: unused variable 'unused'" else echo $TESTCXX does not provide functional -frewrite-includes, skipping test. echo skipped_tests="$skipped_tests clang_rewrite_includes" fi run_ice "$testdir/includes.h.gch" "local" 0 "keepoutput" $TESTCXX -x c++-header -Wall -Werror -c includes.h -o "$testdir"/includes.h.gch run_ice "$testdir/includes.o" "remote" 0 $TESTCXX -Wall -Werror -c includes.cpp -include "$testdir"/includes.h -Winvalid-pch -o "$testdir"/includes.o if test -n "$using_clang"; then run_ice "$testdir/includes.o" "remote" 0 $TESTCXX -Wall -Werror -c includes.cpp -include-pch "$testdir"/includes.h.gch -o "$testdir"/includes.o $TESTCXX -Werror -fsyntax-only -Xclang -building-pch-with-obj -c includes.cpp -include-pch "$testdir"/includes.h.gch 2>/dev/null if test $? 
-eq 0; then run_ice "$testdir/includes.o" "local" 0 $TESTCXX -Wall -Werror -Xclang -building-pch-with-obj -c includes.cpp -include-pch "$testdir"/includes.h.gch -o "$testdir"/includes.o # local and remote run will leave a message => 2 check_section_log_message_count icecc 2 "invoking: $(command -v $TESTCXX) -Wall -Werror -Xclang -building-pch-with-obj" else skipped_tests="$skipped_tests clang_building_pch_with_obj" fi fi rm "$testdir"/includes.h.gch if test -n "$using_clang"; then clangplugintest else skipped_tests="$skipped_tests clangplugin" fi differentcompilerversiontest icerun_serialize_test icerun_nopath_test icerun_nocompile_test recursive_test ccache_test unhandled_environment_test symlink_wrapper_test if test -z "$chroot_disabled"; then make_test else skipped_tests="$skipped_tests make_test" fi if test -z "$chroot_disabled"; then zero_local_jobs_test else skipped_tests="$skipped_tests zero_local_jobs_test" fi if test -z "$chroot_disabled"; then echo Testing different netnames. reset_logs remote "Different netnames" stop_ice 1 # Start the secondary scheduler before the primary, so that besides the different netname it would be the preferred scheduler. ICECC_TESTS=1 ICECC_TEST_SCHEDULER_PORTS=8767:8769 \ ICECC_TEST_FLUSH_LOG_MARK="$testdir"/flush_log_mark.txt ICECC_TEST_LOG_HEADER="$testdir"/log_header.txt \ $valgrind "${icecc_scheduler}" -p 8769 -l "$testdir"/scheduler2.log -n ${netname}_secondary -v -v -v & scheduler2_pid=$! 
echo $scheduler2_pid > "$testdir"/scheduler2.pid wait_for_ice_startup_complete scheduler2 start_ice check_log_message scheduler2 "Received scheduler announcement from .* (version $schedulerprotocolversion, netname ${netname})" check_log_error scheduler "has announced itself as a preferred scheduler, disconnecting all connections" check_log_message localice "Ignoring scheduler at .*:8769 because of a different netname (${netname}_secondary)" check_log_message remoteice1 "Ignoring scheduler at .*:8769 because of a different netname (${netname}_secondary)" check_log_message remoteice2 "Ignoring scheduler at .*:8769 because of a different netname (${netname}_secondary)" stop_secondary_scheduler 1 echo Different netnames test successful. echo echo Testing multiple schedulers. reset_logs remote "Multiple schedulers" # Make this scheduler fake its start time to appear to have been running a longer time, # so it should be the preferred scheduler. We could similarly fake the version to be higher, # but this should be safer. ICECC_TESTS=1 ICECC_TEST_SCHEDULER_PORTS=8767:8769 ICECC_FAKE_STARTTIME=1 \ ICECC_TEST_FLUSH_LOG_MARK="$testdir"/flush_log_mark.txt ICECC_TEST_LOG_HEADER="$testdir"/log_header.txt \ $valgrind "${icecc_scheduler}" -p 8769 -l "$testdir"/scheduler2.log -n ${netname} -v -v -v & scheduler2_pid=$! echo $scheduler2_pid > "$testdir"/scheduler2.pid wait_for_ice_startup_complete scheduler2 # Give the primary scheduler time to disconnect all clients. 
sleep 1 check_log_message scheduler "Received scheduler announcement from .* (version $schedulerprotocolversion, netname ${netname})" check_log_message scheduler "has announced itself as a preferred scheduler, disconnecting all connections" check_log_error scheduler2 "has announced itself as a preferred scheduler, disconnecting all connections" check_log_message localice "scheduler closed connection" check_log_message remoteice1 "scheduler closed connection" check_log_message remoteice2 "scheduler closed connection" # Daemons will not connect to the secondary debug scheduler (not implemented). stop_secondary_scheduler 1 echo Multiple schedulers test successful. echo echo Testing reconnect. reset_logs remote "Reconnect" wait_for_ice_startup_complete localice remoteice1 remoteice2 flush_logs check_log_message scheduler "login localice protocol version: ${daemonprotocolversion}" check_log_message scheduler "login remoteice1 protocol version: ${daemonprotocolversion}" check_log_message scheduler "login remoteice2 protocol version: ${daemonprotocolversion}" check_log_message localice "Connected to scheduler" check_log_message remoteice1 "Connected to scheduler" check_log_message remoteice2 "Connected to scheduler" echo Reconnect test successful. 
echo else skipped_tests="$skipped_tests scheduler_multiple" fi reset_logs local "Closing down" stop_ice 1 check_logs_for_generic_errors reset_logs local "Starting only daemon" start_only_daemon # even without scheduler, icerun should still serialize, but still run up to local number of jobs in parallel icerun_serialize_test "noscheduler" reset_logs local "Closing down (only daemon)" stop_only_daemon 1 buildnativewithsymlinktest buildnativewithwrappertest if test -n "$valgrind"; then rm -f "$testdir"/valgrind-*.log fi ignore= if test -n "$using_gcc"; then # gcc (as of now) doesn't know these options, ignore these tests if they fail ignore="cxx-isystem target fsanitize-blacklist clangplugin clang_rewrite_includes clang_building_pch_with_obj" elif test -n "$using_clang"; then # clang (as of now) doesn't know these ignore="asm_listing asm_macros asm_defsym asm_defsym" # This one is fairly new (clang7?), so do not require it. ignore="$ignore clang_building_pch_with_obj" fi ignored_tests= for item in $ignore; do if echo " $skipped_tests " | grep -q "$item"; then ignored_tests="$ignored_tests $item" skipped_tests="${skipped_tests/$item/}" fi skipped_tests=$(echo $skipped_tests | sed 's/ / /g' | sed 's/^ //') done if test -n "$ignored_tests"; then echo Ignored tests: $ignored_tests fi if test -n "$skipped_tests"; then if test -n "$strict"; then echo "All executed tests passed, but some were skipped: $skipped_tests" echo "Strict mode enabled, failing." echo ================================================== exit 1 else echo "All tests OK, some were skipped: $skipped_tests" echo ================================= fi else echo All tests OK. echo ============= fi exit 0 icecream-1.3.1/tests/testdefine.cpp000066400000000000000000000002271361626760200172760ustar00rootroot00000000000000#ifndef ICECREAM_TEST_DEFINE #error Failed. 
#endif // this should expand to test() void ICECREAM_TEST_DEFINE(); void test2() { test(); } icecream-1.3.1/tests/testfunc.cpp000066400000000000000000000000341361626760200167730ustar00rootroot00000000000000int f() { return 123; } icecream-1.3.1/tests/testmainfunc.cpp000066400000000000000000000000601361626760200176370ustar00rootroot00000000000000extern int f(); int main() { return f(); } icecream-1.3.1/tests/true.cpp000066400000000000000000000000451361626760200161210ustar00rootroot00000000000000int main() { return 0; } icecream-1.3.1/tests/unusedmacro.cpp000066400000000000000000000001111361626760200174610ustar00rootroot00000000000000#define FOO bar #define NUMBER 10 int f() { return NUMBER; } icecream-1.3.1/tests/unusedmacro1.txt000066400000000000000000000001331361626760200176030ustar00rootroot00000000000000unusedmacro.cpp:1:0: warning: macro "FOO" is not used [-Wunused-macros] #define FOO bar icecream-1.3.1/tests/unusedmacro2.txt000066400000000000000000000001341361626760200176050ustar00rootroot00000000000000unusedmacro.cpp:1:0: warning: macro "FOO" is not used [-Wunused-macros] #define FOO bar ^ icecream-1.3.1/tests/unusedmacro3.txt000066400000000000000000000001471361626760200176120ustar00rootroot00000000000000unusedmacro.cpp:1: warning: macro "FOO" is not used [-Wunused-macros] 1 | #define FOO bar | icecream-1.3.1/tests/valgrind_suppressions000066400000000000000000000004751361626760200210330ustar00rootroot00000000000000# The error seems to be about the second argument to the capget syscall, # but that seems to be actually no problem (at least to me). Either way, # not icecream's problem. 
{ capget Memcheck:Param capget(data) fun:capget fun:init fun:capng_get_caps_process fun:capng_have_capability fun:main } icecream-1.3.1/tests/warninginmacro.cpp000066400000000000000000000002321361626760200201560ustar00rootroot00000000000000// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80369 #define MACRO if( arg != arg ) return 1; int f( int arg ) { MACRO return 2; } icecream-1.3.1/unittests/000077500000000000000000000000001361626760200153375ustar00rootroot00000000000000icecream-1.3.1/unittests/Makefile.am000066400000000000000000000004471361626760200174000ustar00rootroot00000000000000TESTS = testargs AM_CPPFLAGS = -I$(top_srcdir)/client -I$(top_srcdir)/services -I$(top_srcdir)/ testargs_LDADD = ../client/libclient.a ../services/libicecc.la check_PROGRAMS = testargs testargs_SOURCES = args.cpp # Make the tests also print the test log if they fail. check: export VERBOSE=1 icecream-1.3.1/unittests/README000066400000000000000000000005431361626760200162210ustar00rootroot00000000000000Unit Tests for Icecream ===================== In this directory are unit tests for Icecream. Unit tests check functionality by creating small binaries that test internal functionality (e.g. functions that analyze arguments). Unit tests are faster than actually testing Icecream binaries, but some tests may be difficult or impossible to implement here. icecream-1.3.1/unittests/TODO000066400000000000000000000006101361626760200160240ustar00rootroot00000000000000- More tests in args.cpp? - The existing ones are basic, if there are more things to be tested, it should preferably be done here rather than in tests/. - Test for Msg::send_to_channel() and Msg::fill_from_channel() - Test for various protocol versions (loop for all of them?), fill data, read it back and verify. - Will need a fake MsgChannel for the writing/reading? 
icecream-1.3.1/unittests/args.cpp000066400000000000000000000041351361626760200170020ustar00rootroot00000000000000#include "client.h" #include #include #include using namespace std; void test_run(const string &prefix, const char * const *argv, bool icerun, const string& expected) { list extrafiles; CompileJob job; bool local = analyse_argv(argv, job, icerun, &extrafiles); std::stringstream str; str << "local:" << local; str << " language:" << job.language(); str << " compiler:" << job.compilerName(); str << " local:" << concat_args(job.localFlags()); str << " remote:" << concat_args(job.remoteFlags()); str << " rest:" << concat_args(job.restFlags()); if (str.str() != expected) { cerr << prefix << " failed\n"; cerr << " got: \"" << str.str() << "\"\nexpected: \"" << expected << "\"\n"; exit(1); } } static void test_1() { const char * argv[] = { "gcc", "-D", "TEST=1", "-c", "main.cpp", "-o", "main.o", 0 }; test_run("1", argv, false, "local:0 language:C++ compiler:gcc local:'-D, TEST=1' remote:'-c' rest:''"); } static void test_2() { const char * argv[] = { "gcc", "-DTEST=1", "-c", "main.cpp", "-o", "main.o", 0 }; test_run("2", argv, false, "local:0 language:C++ compiler:gcc local:'-DTEST=1' remote:'-c' rest:''"); } static void test_3() { const char * argv[] = { "clang", "-D", "TEST1=1", "-I.", "-c", "make1.cpp", "-o", "make.o", "-target", "x86_64-unknown-linux-gnu", 0}; test_run("3", argv, false, "local:0 language:C++ compiler:clang local:'-D, TEST1=1, -I.' 
remote:'-c' rest:'-target, x86_64-unknown-linux-gnu'"); } int main() { unsetenv( "ICECC_COLOR_DIAGNOSTICS" ); unsetenv( "ICECC" ); unsetenv( "ICECC_VERSION" ); unsetenv( "ICECC_DEBUG" ); unsetenv( "ICECC_LOGFILE" ); unsetenv( "ICECC_REPEAT_RATE" ); unsetenv( "ICECC_PREFERRED_HOST" ); unsetenv( "ICECC_CC" ); unsetenv( "ICECC_CXX" ); unsetenv( "ICECC_CLANG_REMOTE_CPP" ); unsetenv( "ICECC_IGNORE_UNVERIFIED" ); unsetenv( "ICECC_EXTRAFILES" ); unsetenv( "ICECC_COLOR_DIAGNOSTICS" ); unsetenv( "ICECC_CARET_WORKAROUND" ); setenv( "ICECC_REMOTE_CPP", "0", 1 ); test_1(); test_2(); test_3(); return 0; }