pax_global_header00006660000000000000000000000064141023545320014511gustar00rootroot0000000000000052 comment=ded13ed9fe5cb6d6066d9e33287e78469a1a81c2 laminar-1.1/000077500000000000000000000000001410235453200127755ustar00rootroot00000000000000laminar-1.1/CMakeLists.txt000066400000000000000000000150551410235453200155430ustar00rootroot00000000000000### ### Copyright 2015-2021 Oliver Giles ### ### This file is part of Laminar ### ### Laminar is free software: you can redistribute it and/or modify ### it under the terms of the GNU General Public License as published by ### the Free Software Foundation, either version 3 of the License, or ### (at your option) any later version. ### ### Laminar is distributed in the hope that it will be useful, ### but WITHOUT ANY WARRANTY; without even the implied warranty of ### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ### GNU General Public License for more details. ### ### You should have received a copy of the GNU General Public License ### along with Laminar. If not, see ### project(laminar) cmake_minimum_required(VERSION 3.6) set(CMAKE_INCLUDE_CURRENT_DIR ON) set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Wno-unused-parameter -Wno-sign-compare") set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -Werror -DDEBUG") # Allow passing in the version string, for e.g. patched/packaged versions if(NOT LAMINAR_VERSION AND EXISTS ${CMAKE_SOURCE_DIR}/.git) execute_process(COMMAND git describe --tags --abbrev=8 --dirty WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE LAMINAR_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE) endif() if(NOT LAMINAR_VERSION) set(LAMINAR_VERSION xx-unversioned) endif() set_source_files_properties(src/version.cpp PROPERTIES COMPILE_DEFINITIONS LAMINAR_VERSION=${LAMINAR_VERSION}) # This macro takes a list of files, gzips them and converts the output into # object files so they can be linked directly into the application. # ld generates symbols based on the string argument given to its executable, # so it is significant from which directory it is called. 
BASEDIR will be # removed from the beginning of paths to the remaining arguments macro(generate_compressed_bins BASEDIR) foreach(FILE ${ARGN}) set(COMPRESSED_FILE "${FILE}.z") set(OUTPUT_FILE "${FILE}.o") get_filename_component(DIR ${FILE} PATH) if(DIR) file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${DIR}) endif() add_custom_command(OUTPUT ${COMPRESSED_FILE} COMMAND gzip < ${BASEDIR}/${FILE} > ${COMPRESSED_FILE} DEPENDS ${BASEDIR}/${FILE} ) add_custom_command(OUTPUT ${OUTPUT_FILE} COMMAND ${CMAKE_LINKER} -r -b binary -o ${OUTPUT_FILE} ${COMPRESSED_FILE} COMMAND ${CMAKE_OBJCOPY} --rename-section .data=.rodata.alloc,load,readonly,data,contents --add-section .note.GNU-stack=/dev/null --set-section-flags .note.GNU-stack=contents,readonly ${OUTPUT_FILE} DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${COMPRESSED_FILE} ) list(APPEND COMPRESSED_BINS ${OUTPUT_FILE}) endforeach() endmacro() # Generates Cap'n Proto interface from definition file add_custom_command(OUTPUT laminar.capnp.c++ laminar.capnp.h COMMAND capnp compile -oc++:${CMAKE_BINARY_DIR} --src-prefix=${CMAKE_SOURCE_DIR}/src ${CMAKE_SOURCE_DIR}/src/laminar.capnp DEPENDS src/laminar.capnp) # Zip and compile statically served resources generate_compressed_bins(${CMAKE_SOURCE_DIR}/src/resources index.html js/app.js style.css manifest.webmanifest favicon.ico favicon-152.png icon.png) # The code that allows dynamic modifying of index.html requires knowing its original size add_custom_command(OUTPUT index_html_size.h COMMAND sh -c '( echo -n "\\#define INDEX_HTML_UNCOMPRESSED_SIZE " && wc -c < "${CMAKE_SOURCE_DIR}/src/resources/index.html" ) > index_html_size.h' DEPENDS src/resources/index.html) # Download 3rd-party frontend JS libs... file(DOWNLOAD https://cdnjs.cloudflare.com/ajax/libs/vue/2.6.12/vue.min.js js/vue.min.js EXPECTED_MD5 fb192338844efe86ec759a40152fcb8e) file(DOWNLOAD https://raw.githubusercontent.com/drudru/ansi_up/v4.0.4/ansi_up.js js/ansi_up.js EXPECTED_MD5 b31968e1a8fed0fa82305e978161f7f5) file(DOWNLOAD https://cdnjs.cloudflare.com/ajax/libs/Chart.js/2.7.2/Chart.min.js js/Chart.min.js EXPECTED_MD5 f6c8efa65711e0cbbc99ba72997ecd0e) # ...and compile them generate_compressed_bins(${CMAKE_BINARY_DIR} js/vue.min.js js/ansi_up.js js/Chart.min.js) # (see resources.cpp where these are fetched) set(LAMINARD_CORE_SOURCES src/conf.cpp src/database.cpp src/laminar.cpp src/leader.cpp src/http.cpp src/resources.cpp src/rpc.cpp src/run.cpp src/server.cpp src/version.cpp laminar.capnp.c++ index_html_size.h ) ## Server add_executable(laminard ${LAMINARD_CORE_SOURCES} src/main.cpp ${COMPRESSED_BINS}) target_link_libraries(laminard capnp-rpc capnp kj-http kj-async kj pthread sqlite3 z) ## Client add_executable(laminarc src/client.cpp src/version.cpp laminar.capnp.c++) target_link_libraries(laminarc capnp-rpc capnp kj-async kj pthread) ## Manpages macro(gzip SOURCE) get_filename_component(OUT_FILE ${SOURCE} NAME) add_custom_command(OUTPUT ${OUT_FILE}.gz COMMAND gzip < ${CMAKE_CURRENT_SOURCE_DIR}/${SOURCE} > ${OUT_FILE}.gz DEPENDS ${SOURCE}) endmacro() add_custom_target(laminar-manpages ALL DEPENDS laminard.8.gz laminarc.1.gz) gzip(etc/laminard.8) gzip(etc/laminarc.1) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/laminard.8.gz DESTINATION share/man/man8) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/laminarc.1.gz DESTINATION share/man/man1) ## Tests set(BUILD_TESTS FALSE CACHE BOOL "Build tests") if(BUILD_TESTS) find_package(GTest REQUIRED) include_directories(${GTEST_INCLUDE_DIRS} src) add_executable(laminar-tests ${LAMINARD_CORE_SOURCES} ${COMPRESSED_BINS} 
test/main.cpp test/laminar-functional.cpp test/unit-conf.cpp test/unit-database.cpp) target_link_libraries(laminar-tests ${GTEST_LIBRARIES} capnp-rpc capnp kj-http kj-async kj pthread sqlite3 z) endif() set(SYSTEMD_UNITDIR /lib/systemd/system CACHE PATH "Path to systemd unit files") set(BASH_COMPLETIONS_DIR /usr/share/bash-completion/completions CACHE PATH "Path to bash completions directory") set(ZSH_COMPLETIONS_DIR /usr/share/zsh/site-functions CACHE PATH "Path to zsh completions directory") install(TARGETS laminard RUNTIME DESTINATION sbin) install(TARGETS laminarc RUNTIME DESTINATION bin) install(FILES etc/laminar.conf DESTINATION /etc) install(FILES etc/laminarc-completion.bash DESTINATION ${BASH_COMPLETIONS_DIR} RENAME laminarc) install(FILES etc/laminarc-completion.zsh DESTINATION ${ZSH_COMPLETIONS_DIR} RENAME _laminarc) configure_file(etc/laminar.service.in laminar.service @ONLY) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/laminar.service DESTINATION ${SYSTEMD_UNITDIR}) laminar-1.1/COPYING000066400000000000000000001045151410235453200140360ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. 
Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. 
The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. 
When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 
b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. 
If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. 
Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. 
It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see . The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . laminar-1.1/README.md000066400000000000000000000045261410235453200142630ustar00rootroot00000000000000# Laminar CI [![status](https://ci.ohwg.net/badge/laminar.svg)](https://ci.ohwg.net/jobs/laminar) Laminar (https://laminar.ohwg.net) is a lightweight and modular Continuous Integration service for Linux. It is self-hosted and developer-friendly, eschewing a configuration UI in favour of simple version-controllable configuration files and scripts. Laminar encourages the use of existing GNU/Linux tools such as `bash` and `cron` instead of reinventing them. Although the status and progress front-end is very user-friendly, administering a Laminar instance requires writing shell scripts and manually editing configuration files. That being said, there is nothing esoteric here and the [guide](http://laminar.ohwg.net/docs.html) should be straightforward for anyone with even very basic Linux server administration experience. See [the website](https://laminar.ohwg.net) and the [documentation](https://laminar.ohwg.net/docs.html) for more information. ## Building from source First install development packages for `capnproto (version 0.7.0 or newer)`, `rapidjson`, `sqlite` and `boost` (for the header-only `multi_index_container` library) from your distribution's repository or other source. 
On Debian Bullseye, this can be done with:

```bash
sudo apt install \
    capnproto cmake g++ libboost-dev libcapnp-dev libsqlite3-dev rapidjson-dev zlib1g-dev
```

Then compile and install laminar with:

```bash
git clone https://github.com/ohwgiles/laminar.git
cd laminar
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr .
make -j "$(nproc)"
# Warning: the following will overwrite an existing /etc/laminar.conf
sudo make install
```

`make install` includes a systemd unit file. If you intend to use it, consider creating a new user `laminar` or modifying the user specified in the unit file.

## Packaging for distributions

The `pkg` directory contains shell scripts which use docker to build native packages (deb,rpm) for common Linux distributions. Note that these are very simple packages which may not completely conform to the distribution's packaging guidelines, however they may serve as a starting point for creating an official package, or may be useful if the official package lags.

## Contributing

Issues and pull requests via GitHub are most welcome. All pull requests must adhere to the [Developer Certificate of Origin](https://developercertificate.org/).

laminar-1.1/UserManual.md000066400000000000000000000742171410235453200154040ustar00rootroot00000000000000
# Introduction

[Laminar](http://laminar.ohwg.net) is a lightweight and modular Continuous Integration service for Linux. It is self-hosted and developer-friendly, eschewing a configuration web UI in favor of simple version-controllable configuration files and scripts.

Laminar encourages the use of existing GNU/Linux tools such as `bash` and `cron` instead of reinventing them.

Although the status and progress front-end is very user-friendly, administering a Laminar instance requires writing shell scripts and manually editing configuration files. That being said, there is nothing esoteric here and the tutorial below should be straightforward for anyone with even very basic Linux server administration experience.

Throughout this document, the fixed base path `/var/lib/laminar` is used. This is the default path and can be changed by setting `LAMINAR_HOME` in `/etc/laminar.conf` as desired.

## Terminology

- *job*: a task, identified by a name, comprising one or more executable scripts.
- *run*: a numbered execution of a *job*

---

# Installing Laminar

Since Debian Bullseye, Laminar is available in [the official repositories](https://packages.debian.org/search?searchon=sourcenames&keywords=laminar).

Alternatively, pre-built upstream packages are available for Debian 10 (Buster) on x86_64 and armhf, and for Rocky/CentOS/RHEL 7 and 8 on x86_64.

Finally, Laminar may be built from source for any Linux distribution.

## Installation from upstream packages

Under Debian:

```bash
wget https://github.com/ohwgiles/laminar/releases/download/1.1/laminar_1.1-1.upstream-debian10_amd64.deb
sudo apt install ./laminar_1.1-1.upstream-debian10_amd64.deb
```

Under Rocky/CentOS/RHEL:

```bash
wget https://github.com/ohwgiles/laminar/releases/download/1.1/laminar-1.1.upstream_rocky8-1.x86_64.rpm
sudo dnf install ./laminar-1.1.upstream_rocky8-1.x86_64.rpm
```

Both install packages will create a new `laminar` user and install (but not activate) a systemd service for launching the laminar daemon.

## Building from source

See the [development README](https://github.com/ohwgiles/laminar) for instructions on installing from source.
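In short, the procedure is the same as in the README quoted above; a condensed sketch for a Debian-based system is shown below (the package list and the in-source `cmake .` invocation are one reasonable choice; adjust for your distribution):

```bash
sudo apt install capnproto cmake g++ libboost-dev libcapnp-dev libsqlite3-dev rapidjson-dev zlib1g-dev
git clone https://github.com/ohwgiles/laminar.git
cd laminar
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr .
make -j "$(nproc)"
sudo make install   # overwrites any existing /etc/laminar.conf
```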
## Building for Docker

You can build an image based on `alpine:edge` that runs `laminard` by default and also contains `laminarc`, using the `Dockerfile` in the `docker/` directory.

```bash
# from the repository root:
docker build [-t image:tag] -f docker/Dockerfile .
```

Keep in mind that this is meant to be used as a base image to build from, so it contains only the minimum packages required to run laminar. The only shell available by default is sh (so scripts with `#!/bin/bash` will fail to execute) and it does not have `ssh` or `git`. You can use this image to run a basic build server, but it is recommended that you build a custom image from this base to better suit your needs.

The container will execute `laminard` by default. To start a laminar server with docker you can simply run the image as a daemon, for example:

```bash
docker run -d --name laminar_server -p 8080:8080 -v path/to/laminardir:/var/lib/laminar --env-file path/to/laminar.conf laminar:latest
```

The [`-v` flag](https://docs.docker.com/storage/volumes/#choose-the--v-or---mount-flag) is necessary to persist job scripts and artefacts beyond the container lifetime. The [`--env-file` flag](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables--e---env---env-file) is necessary to pass configuration from `laminar.conf` to `laminard`, because `laminard` does not read `/etc/laminar.conf` directly but expects the variables within to be exported by `systemd` or another process supervisor.

Executing `laminarc` may be done in any of the usual ways, for example:

```bash
docker exec -i laminar_server laminarc queue example_task
```

Alternatively, you might [use an external `laminarc`](#Triggering-on-a-remote-laminar-instance).

---

# Service configuration

Use `systemctl start laminar` to start the laminar system service and `systemctl enable laminar` to launch it automatically on system boot.

After starting the service, an empty laminar dashboard should be available at http://localhost:8080

Laminar's configuration file may be found at `/etc/laminar.conf`. Laminar will start with reasonable defaults if no configuration can be found.

## Running on a different HTTP port or Unix socket

Edit `/etc/laminar.conf` and change `LAMINAR_BIND_HTTP` to `IPADDR:PORT`, `unix:PATH/TO/SOCKET` or `unix-abstract:SOCKETNAME`. `IPADDR` may be `*` to bind on all interfaces. The default is `*:8080`.

Do not attempt to run laminar on port 80. This requires running as `root`, and Laminar will not drop privileges when executing job scripts! For a more complete integrated solution (including SSL), run laminar behind a regular webserver acting as a reverse proxy.

## Running behind a reverse proxy

A reverse proxy is required if you want Laminar to share a port with other web services. It is also recommended to improve performance by serving artefacts directly or providing a caching layer for static assets.

If you use [artefacts](#Archiving-artefacts), note that Laminar is not designed as a file server, and better performance will be achieved by allowing the frontend web server to serve the archive directory directly (e.g. using a `Location` directive).

Laminar uses Server Sent Events to provide a responsive, auto-updating display without polling. Most frontend webservers should handle this without any extra configuration.

If you use a reverse proxy to host Laminar at a subfolder instead of a subdomain root, the `<base href>` tag needs to be updated to ensure all links point to their proper targets. This can be done by setting `LAMINAR_BASE_URL` in `/etc/laminar.conf`. See [this example configuration file for nginx](https://github.com/ohwgiles/laminar/blob/master/examples/nginx-ssl-reverse-proxy.conf).
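As a rough starting point, a minimal plain-HTTP nginx configuration along the following lines will proxy the web UI and serve archived artefacts directly; the server name and filesystem paths are placeholders, and the linked example above shows a complete SSL setup:

```
server {
    listen 80;
    server_name laminar.example.com;

    # serve archived artefacts directly instead of via laminard
    location /archive/ {
        alias /var/lib/laminar/archive/;
    }

    location / {
        proxy_pass http://127.0.0.1:8080;
        proxy_set_header Host $host;
        # Laminar uses Server Sent Events; disable buffering so
        # status updates are forwarded to the browser immediately
        proxy_buffering off;
    }
}
```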
## More configuration options

See the [reference section](#Service-configuration-file).

---

# Defining a job

To create a job that downloads and compiles [GNU Hello](https://www.gnu.org/software/hello/), create the file `/var/lib/laminar/cfg/jobs/hello.run` with the following content:

```bash
#!/bin/bash -ex
wget ftp://ftp.gnu.org/gnu/hello/hello-2.10.tar.gz
tar xzf hello-2.10.tar.gz
cd hello-2.10
./configure
make
```

Laminar uses your script's exit code to determine whether to mark the run as successful or failed. If your script is written in bash, the [`-e` option](http://tldp.org/LDP/abs/html/options.html) is helpful for this. See also [Exit and Exit Status](http://tldp.org/LDP/abs/html/exit-status.html).

Don't forget to mark the script executable:

```bash
chmod +x /var/lib/laminar/cfg/jobs/hello.run
```

---

# Triggering a run

To queue execution of the `hello` job, run

```bash
laminarc queue hello
```

In this case, `laminarc` returns immediately, with its error code indicating whether adding the job to the queue was successful. The run number will be printed to standard output.

If the server is busy, a run may wait in the queue for some time. To have `laminarc` instead block until the run leaves the queue and starts executing, use

```bash
laminarc start hello
```

In this case, `laminarc` blocks until the job starts executing, or returns immediately if queueing failed. The run number will be printed to standard output.

Finally, to launch and run the `hello` job to completion, execute

```bash
laminarc run hello
```

In this case, laminarc's return value indicates whether the run completed successfully.

In all cases, a started run means the `/var/lib/laminar/cfg/jobs/hello.run` script will be executed, with a working directory of `/var/lib/laminar/run/hello/1` (or the current run number).

The result and log output should be visible in the Web UI at http://localhost:8080/jobs/hello/1

Also note that all the above commands can simultaneously trigger multiple different jobs:

```bash
laminarc queue test-host test-target
```

## Isn't there a "Build Now" button I can click?

This is against the design principles of Laminar and was deliberately excluded. Laminar's web UI is strictly read-only, making it simple to deploy in mixed-permission or public environments without an authentication layer. Furthermore, Laminar tries to encourage ideal continuous integration, where manual triggering is an anti-pattern. Want to make a release? Push a git tag and implement a post-receive hook. Want to re-run a build due to sporadic failure/flaky tests? Fix the tests locally and push a patch. Experience shows that a manual trigger such as a "Build Now" button is often used as a crutch to avoid doing the correct thing, negatively impacting traceability and quality.

## Listing jobs from the command line

`laminarc` may be used to inspect the server state:

- `laminarc show-jobs`: Lists all files matching `/var/lib/laminar/cfg/jobs/*.run` on the server side.
- `laminarc show-running`: Lists all currently running jobs and their run numbers.
- `laminarc show-queued`: Lists all jobs waiting in the queue.

## Triggering a job at a certain time

This is what `cron` is for. To trigger a build of `hello` every day at 0300, add

```
0 3 * * * LAMINAR_REASON="Nightly build" laminarc queue hello
```

to `laminar`'s crontab.
For more information about `cron`, see `man crontab`.

`LAMINAR_REASON` is an optional human-readable string that will be displayed in the web UI as the cause of the build.

## Triggering on a git commit

This is what [git hooks](https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks) are for. To create a hook that triggers the `example-build` job when a push is made to the `example` repository, create the file `hooks/post-receive` in the `example.git` bare repository.

```bash
#!/bin/bash
LAMINAR_REASON="Push to git repository" laminarc queue example-build
```

For a more advanced example, see [examples/git-post-receive-hook-notes](https://github.com/ohwgiles/laminar/blob/master/examples/git-post-receive-hook-notes)

What if your git server is not the same machine as the laminar instance?

## Triggering on a remote laminar instance

`laminarc` and `laminard` communicate by default over an [abstract unix socket](http://man7.org/linux/man-pages/man7/unix.7.html). This means that any user **on the same machine** can send commands to the laminar service.

On a trusted network, you might want `laminard` to listen for commands on a TCP port instead. To achieve this, in `/etc/laminar.conf`, set

```
LAMINAR_BIND_RPC=*:9997
```

or any interface/port combination you like. This option uses the same syntax as `LAMINAR_BIND_HTTP`.

Then, point `laminarc` to the new location using an environment variable:

```bash
LAMINAR_HOST=192.168.1.1:9997 laminarc queue example
```

If you need more flexibility, consider running the communication channel as a regular unix socket and applying user and group permissions to the file. To achieve this, set

```
LAMINAR_BIND_RPC=unix:/var/run/laminar.sock
```

or similar path in `/etc/laminar.conf`.

This can be securely and flexibly combined with remote triggering using `ssh`. There is no need to allow the client full shell access to the server machine; the ssh server can restrict certain users to certain commands (in this case `laminarc`). See [the authorized_keys section of the sshd man page](https://man.openbsd.org/sshd#AUTHORIZED_KEYS_FILE_FORMAT) for further information.

## Triggering on a push to GitHub

Consider using [webhook](https://github.com/adnanh/webhook) or a similar application to call `laminarc`.

## Viewing job logs

A job's console output can be viewed on the Web UI at http://localhost:8080/jobs/$NAME/$NUMBER. Additionally, the raw log output may be fetched over a plain HTTP request to http://localhost:8080/log/$NAME/$NUMBER. The response will be chunked, allowing this mechanism to also be used for in-progress jobs. Furthermore, the special endpoint http://localhost:8080/log/$NAME/latest will redirect to the most recent log output. Be aware that the use of this endpoint may be subject to races when new jobs start.
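As a quick illustration (not a `laminarc` feature, just plain HTTP with `curl` against the default bind address), the log of the `hello` job defined earlier could be fetched or streamed from a shell:

```bash
# fetch (or stream, for an in-progress run) the raw log of run 1
curl http://localhost:8080/log/hello/1

# follow the redirect to the most recent run's log
curl -L http://localhost:8080/log/hello/latest
```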
---

# Job chains

A typical pipeline may involve several steps, such as build, test and deploy. Depending on the project, these may be broken up into separate laminar jobs for maximal flexibility.

The preferred way to accomplish this in Laminar is to use the same method as [regular run triggering](#Triggering-a-run), that is, calling `laminarc` directly in your `example.run` scripts.

```bash
#!/bin/bash -xe
# simultaneously starts example-test-qemu and example-test-target
# and returns a non-zero error code if either of them fail
laminarc run example-test-qemu example-test-target
```

An advantage to using this `laminarc` approach from bash or other scripting language is that it enables highly dynamic pipelines, since you can execute commands like

```bash
if [ ... ]; then
  laminarc run example-downstream-special
else
  laminarc run example-downstream-regular
fi

laminarc run example-test-$TARGET_PLATFORM
```

`laminarc` reads the `$JOB` and `$RUN` variables set by `laminard` and passes them as part of the queue/start/run request so the dependency chain can always be traced back.

---

# Parameterized runs

Any argument passed to `laminarc` of the form `var=value` will be exposed as an environment variable in the corresponding build scripts. For example:

```bash
laminarc queue example foo=bar
```

In `/var/lib/laminar/cfg/jobs/example.run`:

```bash
#!/bin/bash
if [ "$foo" == "bar" ]; then
   ...
else
   ...
fi
```

---

# Pre- and post-build actions

If the script `/var/lib/laminar/cfg/jobs/example.before` exists, it will be executed as part of the `example` job, before the primary `/var/lib/laminar/cfg/jobs/example.run` script.

Similarly, if the script `/var/lib/laminar/cfg/jobs/example.after` exists, it will be executed as part of the `example` job, after the primary `/var/lib/laminar/cfg/jobs/example.run` script. In this script, the `$RESULT` variable will be `success`, `failed`, or `aborted` according to the result of `example.run`. See also [script execution order](#Script-execution-order)

## Conditionally trigger a downstream job

Often, you may wish to only trigger the `example-test` job if the `example-build` job completed successfully. `example-build.after` might look like this:

```bash
#!/bin/bash -xe
if [ "$RESULT" == "success" ]; then
  laminarc queue example-test
fi
```

## Passing data between scripts

Any script can set environment variables that will stay exposed for subsequent scripts of the same run using `laminarc set`. In `example.before`:

```bash
#!/bin/bash
laminarc set foo=bar
```

Then in `example.run`

```bash
#!/bin/bash
echo $foo # prints "bar"
```

---

# Archiving artefacts

Laminar's default behaviour is to remove the run directory `/var/lib/laminar/run/JOB/RUN` after its completion. This prevents the typical CI disk usage explosion and encourages the user to judiciously select artefacts for archive.

Laminar provides an archive directory `/var/lib/laminar/archive/JOB/RUN` and exposes its path in `$ARCHIVE`. `example-build.after` might look like this:

```bash
#!/bin/bash -xe
cp example.out $ARCHIVE/
```

This folder structure has been chosen to make it easy for system administrators to host the archive on a separate partition or network drive.

## Accessing artefacts from an upstream build

Rather than implementing a separate mechanism for this, the path of the upstream's archive should be passed to the downstream run as a parameter. See [Parameterized runs](#Parameterized-runs).

---

# Email and IM Notifications

As well as per-job `.after` scripts, a common use case is to send a notification for every job completion. If the global `after` script at `/var/lib/laminar/cfg/after` exists, it will be executed after every job.
One way to use this might be: ```bash #!/bin/bash -xe if [ "$RESULT" != "$LAST_RESULT" ]; then sendmail -t < MyProject.tar.gz # Archive the artefact (consider moving this to the .after script) mv MyProject.tar.gz $ARCHIVE/ ``` For a project with a large git history, it can be more efficient to store the sources in the workspace: ```bash #!/bin/bash -ex cd $WORKSPACE/myproject git pull cd - cmake $WORKSPACE/myproject make -j4 ``` Laminar will automatically create the workspace for a job if it doesn't exist when a job is executed. In this case, the `/var/lib/laminar/cfg/jobs/JOBNAME.init` will be executed if it exists. This is an excellent place to prepare the workspace to a state where subsequent builds can rely on its content: ```bash #!/bin/bash -e echo Initializing workspace git clone git@example.com:company/project.git . ``` **CAUTION**: By default, laminar permits multiple simultaneous runs of the same job. If a job can **modify** the workspace, this might result in inconsistent builds when simultaneous runs access the same content. This is unlikely to be an issue for nightly builds, but for SCM-triggered builds it will be. To solve this, use [contexts](#Contexts) to restrict simultaneous execution of jobs, or consider [flock](https://linux.die.net/man/1/flock). The following example uses [flock](https://linux.die.net/man/1/flock) to efficiently share a git repository workspace between multiple simultaneous builds: ```bash #!/bin/bash -xe # This script expects to be passed the parameter 'rev' which # should refer to a specific git commit in its source repository. # The commit ids could have been read from a server-side # post-commit git hook, where many commits could have been pushed # at once, but we want to check them all individually. This means # this job can be executed several times (with different values # for $rev) simultaneously. # Locked subshell for modifying the workspace ( flock 200 cd $WORKSPACE # Download all the latest commits git fetch git checkout $rev cd - # Fast copy (hard-link) the source from the specific checkout # to the build dir. This relies on the fact that git unlinks # during checkout, effectively implementing copy-on-write. cp -al $WORKSPACE/src src ) 200>$WORKSPACE # run the (much longer) regular build process make -C src ``` --- # Aborting running jobs ## After a timeout To configure a maximum execution time in seconds for a job, add a line to `/var/lib/laminar/cfg/jobs/JOBNAME.conf`: ``` TIMEOUT=120 ``` ## Manually `laminarc abort $JOBNAME $NUMBER` --- # Contexts In Laminar, each run of a job is associated with a context. The context defines an integer number of *executors*, which is the amount of runs which the context will accept simultaneously. A context may also provide additional environment variables. Uses for this feature include limiting the amount of concurrent CPU-intensive jobs (such as compilation); and controlling access to jobs [executed remotely](#Remote-jobs). If no contexts are defined, Laminar will behave as if there is a single context named "default", with `6` executors. This is a reasonable default that allows simple setups to work without any consideration of contexts. ## Defining a context To create a context named "my-env" which only allows a single run at once, create `/var/lib/laminar/cfg/contexts/my-env.conf` with the content: ``` EXECUTORS=1 ``` ## Associating a job with a context When trying to start a job, laminar will wait until the job can be matched to a context which has at least one free executor. 
There are two ways to associate jobs and contexts. You can specify a comma-separated list of patterns `JOBS` in the context configuration file `/var/lib/laminar/cfg/contexts/CONTEXT.conf`: ``` JOBS=amd64-target-*,usage-monitor ``` This approach is often preferred when you have many jobs that need to share limited resources. Alternatively, you can set ``` CONTEXTS=my-env-*,special_context ``` in `/var/lib/laminar/cfg/jobs/JOB.conf`. This approach is often preferred when you have a small number of jobs that require exclusive access to an environment and you can supply alternative environments (e.g. target devices), because new contexts can be added without modifying the job configuration. In both cases, Laminar will iterate over the known contexts and associate the run with the first matching context with free executors. Patterns are [glob expressions](http://man7.org/linux/man-pages/man7/glob.7.html). If `CONTEXTS` is empty or absent (or if `JOB.conf` doesn't exist), laminar will behave as if `CONTEXTS=default` were defined. ## Adding environment to a context Append desired environment variables to `/var/lib/laminar/cfg/contexts/CONTEXT_NAME.conf`: ``` DUT_IP=192.168.3.2 FOO=bar ``` This environment will then be available the run script of jobs associated with this context. Note that these definitions are not expanded by a shell, so `FOO="bar"` would result in a variable `FOO` whose contents *include* double-quotes. --- # Remote jobs Laminar provides no specific support, `bash`, `ssh` and possibly NFS are all you need. For example, consider two identical target devices on which test jobs can be run in parallel. You might create a [context](#Contexts) for each, `/var/lib/laminar/cfg/contexts/target{1,2}.conf`: ``` EXECUTORS=1 ``` In each context's `.env` file, set the individual device's IP address: ``` TARGET_IP=192.168.0.123 ``` And mark the job accordingly in `/var/lib/laminar/cfg/jobs/myproject-test.conf`: ``` CONTEXTS=target* ``` This means the job script `/var/lib/laminar/cfg/jobs/myproject-test.run` can be generic: ```bash #!/bin/bash -e ssh root@$TARGET_IP /bin/bash -xe <<"EOF" uname -a ... EOF scp root@$TARGET_IP:result.xml "$ARCHIVE/" ``` Don't forget to add the `laminar` user's public ssh key to the remote's `authorized_keys`. --- # Docker container jobs Laminar provides no specific support, but just like [remote jobs](#Remote-jobs) these are easily implementable in plain bash: ```bash #!/bin/bash docker run --rm -ti -v $PWD:/root ubuntu /bin/bash -xe <unescaped. ``` ## Setting the page title Change `LAMINAR_TITLE` in `/etc/laminar.conf` to your preferred page title. Laminar must be restarted for this change to take effect. ## Custom HTML template If it exists, the file `/var/lib/laminar/custom/index.html` will be served by laminar instead of the default markup that is bundled into the Laminar binary. This file can be used to change any aspect of Laminar's WebUI, for example adding menu links or adding a custom stylesheet. Any required assets will need to be served directly from your [HTTP reverse proxy](#Service-configuration) or other HTTP server. An example customization can be found at [cweagans/semantic-laminar-theme](https://github.com/cweagans/semantic-laminar-theme). --- # Badges Laminar will serve a job's current status as a pretty badge at the url `/badge/JOBNAME.svg`. 
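For example, a Markdown README might embed the badge like this (a sketch only; the host and job name are placeholders for your own instance):
```
[![example-build](http://laminar.example.com/badge/example-build.svg)](http://laminar.example.com/jobs/example-build)
```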
This can be used as a link to your server instance from your Github README.md file or cat blog: ``` ``` --- # Reference ## Service configuration file `laminard` reads the following variables from the environment, which are expected to be sourced by `systemd` from `/etc/laminar.conf`: - `LAMINAR_HOME`: The directory in which `laminard` should find job configuration and create run directories. Default `/var/lib/laminar` - `LAMINAR_BIND_HTTP`: The interface/port or unix socket on which `laminard` should listen for incoming connections to the web frontend. Default `*:8080` - `LAMINAR_BIND_RPC`: The interface/port or unix socket on which `laminard` should listen for incoming commands such as build triggers. Default `unix-abstract:laminar` - `LAMINAR_TITLE`: The page title to show in the web frontend. - `LAMINAR_KEEP_RUNDIRS`: Set to an integer defining how many rundirs to keep per job. The lowest-numbered ones will be deleted. The default is 0, meaning all run dirs will be immediately deleted. - `LAMINAR_ARCHIVE_URL`: If set, the web frontend served by `laminard` will use this URL to form links to artefacts archived jobs. Must be synchronized with web server configuration. ## Script execution order When `$JOB` is triggered, the following scripts (relative to `$LAMINAR_HOME/cfg`) may be executed: - `jobs/$JOB.init` if the [workspace](#Data-sharing-and-Workspaces) did not exist - `before` - `jobs/$JOB.before` - `jobs/$JOB.run` - `jobs/$JOB.after` - `after` ## Environment variables The following variables are available in run scripts: - `RUN` integer number of this *run* - `JOB` string name of this *job* - `RESULT` string run status: "success", "failed", etc. - `LAST_RESULT` string previous run status - `WORKSPACE` path to this job's workspace - `ARCHIVE` path to this run's archive - `CONTEXT` the context of this run In addition, `$LAMINAR_HOME/cfg/scripts` is prepended to `$PATH`. See [helper scripts](#Helper-scripts). Laminar will also export variables in the form `KEY=VALUE` found in these files: - `env` - `contexts/$CONTEXT.env` - `jobs/$JOB.env` Note that definitions in these files are not expanded by a shell, so `FOO="bar"` would result in a variable `FOO` whose contents *include* double-quotes. Finally, variables supplied on the command-line call to `laminarc queue`, `laminarc start` or `laminarc run` will be available. See [parameterized runs](#Parameterized-runs) ## laminarc `laminarc` commands are: - `queue [JOB [PARAMS...]]...` adds one or more jobs to the queue with optional parameters, returning immediately. - `start [JOB [PARAMS...]]...` starts one or more jobs with optional parameters, returning when the jobs begin execution. - `run [JOB [PARAMS...]]...` triggers one or more jobs with optional parameters and waits for the completion of all jobs. - `set [VARIABLE=VALUE]...` sets one or more variables to be exported in subsequent scripts for the run identified by the `$JOB` and `$RUN` environment variables - `show-jobs` shows the known jobs on the server (`$LAMINAR_HOME/cfg/jobs/*.run`). - `show-running` shows the currently running jobs with their numbers. - `show-queued` shows the names of the jobs waiting in the queue. - `abort JOB NUMBER` manually aborts a currently running job by name and number. `laminarc` connects to `laminard` using the address supplied by the `LAMINAR_HOST` environment variable. If it is not set, `laminarc` will first attempt to use `LAMINAR_BIND_RPC`, which will be available if `laminarc` is executed from a script within `laminard`. 
If neither `LAMINAR_HOST` nor `LAMINAR_BIND_RPC` is set, `laminarc` will assume a default host of `unix-abstract:laminar`. All commands return zero on success or a non-zero code if the command could not be executed. `laminarc run` will return a non-zero exit status if any executed job failed. laminar-1.1/docker/000077500000000000000000000000001410235453200142445ustar00rootroot00000000000000laminar-1.1/docker/Dockerfile000066400000000000000000000025101410235453200162340ustar00rootroot00000000000000FROM alpine:edge EXPOSE 8080 LABEL org.label-schema.name="laminar" \ org.label-schema.description="Fast and lightweight Continuous Integration" \ org.label-schema.usage="/usr/doc/UserManual.md" \ org.label-schema.url="https://laminar.ohwg.net" \ org.label-schema.vcs-url="https://github.com/ohwgiles/laminar" \ org.label-schema.schema-version="1.0" \ org.label-schema.docker.cmd="docker run -d -p 8080:8080 laminar" RUN apk add --no-cache -X http://dl-3.alpinelinux.org/alpine/edge/testing/ \ sqlite-dev \ zlib \ capnproto \ tini ADD UserManual.md /usr/doc/ ADD . /build/laminar RUN apk add --no-cache --virtual .build -X http://dl-3.alpinelinux.org/alpine/edge/testing/ \ build-base \ cmake \ capnproto-dev \ boost-dev \ zlib-dev \ rapidjson-dev && \ cd /build/laminar && \ cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr && \ make -j4 && \ make install && \ apk del .build && \ rm -rf /build # Create laminar system user in "users" group RUN adduser -SDh /var/lib/laminar -g 'Laminar' -G users laminar # Set the working directory to the laminar user's home WORKDIR /var/lib/laminar # Run the preceeding as the user laminar USER laminar ENTRYPOINT [ "/sbin/tini", "--" ] CMD [ "laminard" ] laminar-1.1/etc/000077500000000000000000000000001410235453200135505ustar00rootroot00000000000000laminar-1.1/etc/laminar.conf000066400000000000000000000030621410235453200160430ustar00rootroot00000000000000### ### LAMINAR_HOME ### ### Root location containing laminar configuration, database, ### build workspaces and archive. ### ### Default: /var/lib/laminar ### #LAMINAR_HOME=/var/lib/laminar ### LAMINAR_BIND_HTTP ### ### Interface on which laminard will bind to serve the Web UI. ### May be of the form IP:PORT, unix:PATH/TO/SOCKET or unix-abstract:NAME ### ### Default: *:8080 ### #LAMINAR_BIND_HTTP=*:8080 ### LAMINAR_BIND_RPC ### ### Interface on which laminard will bind to accept RPC from laminarc. ### May be of the form IP:PORT, unix:PATH/TO/SOCKET or unix-abstract:NAME ### ### Default: unix-abstract:laminar #LAMINAR_BIND_RPC=unix-abstract:laminar ### ### LAMINAR_TITLE ### ### Page title to show in web frontend ### #LAMINAR_TITLE= ### ### LAMINAR_KEEP_RUNDIRS ### ### Setting this prevents the immediate deletion of job rundirs ### $LAMINAR_HOME/run/$JOB/$RUN. Value should be an integer represeting ### the number of rundirs to keep. ### ### Default: 0 ### #LAMINAR_KEEP_RUNDIRS=0 ### ### LAMINAR_BASE_URL ### ### Base url for the frontend. This affects the tag and needs ### to be set if Laminar runs behind a reverse-proxy that hosts Laminar ### within a subfolder (rather than at a subdomain root) ### #LAMINAR_BASE_URL=/ ### ### LAMINAR_ARCHIVE_URL ### ### Base url used to request artifacts. Laminar can serve build ### artifacts (and it will if you leave this unset), but it ### uses a very naive and inefficient method. Best to let a real ### webserver handle serving those requests. 
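### The nginx configuration in examples/nginx-ssl-reverse-proxy.conf
### shows one way of serving the archive directly from the webserver.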
### #LAMINAR_ARCHIVE_URL=http://backbone.example.com/ci/archive/ laminar-1.1/etc/laminar.service.in000066400000000000000000000004571410235453200171700ustar00rootroot00000000000000[Unit] Description=Laminar continuous integration service After=network.target Documentation=man:laminard(8) Documentation=https://laminar.ohwg.net/docs.html [Service] User=laminar EnvironmentFile=-/etc/laminar.conf ExecStart=@CMAKE_INSTALL_PREFIX@/sbin/laminard [Install] WantedBy=multi-user.target laminar-1.1/etc/laminarc-completion.bash000066400000000000000000000013461410235453200203500ustar00rootroot00000000000000# Bash completion file for laminarc # vim: ft=sh _laminarc() { local cur prev words cword _init_completion || return if [ "$cword" -gt 1 ]; then case "${words[1]}" in queue|start|run) if [ "$cword" -eq 2 ]; then COMPREPLY+=($(compgen -W "$(laminarc show-jobs)" -- ${cur})) fi ;; abort) if [ "$cword" -eq 2 ]; then COMPREPLY+=($(compgen -W "$(laminarc show-running | cut -d : -f 1)" -- ${cur})) elif [ "$cword" -eq 3 ]; then COMPREPLY+=($(compgen -W "$(laminarc show-running | cut -d : -f 2)" -- ${cur})) fi ;; esac else local cmds="queue start run set show-jobs show-queued show-running abort" COMPREPLY+=($(compgen -W "${cmds}" -- ${cur})) fi } complete -F _laminarc laminarc laminar-1.1/etc/laminarc-completion.zsh000066400000000000000000000011271410235453200202340ustar00rootroot00000000000000#compdef laminarc #autoload _laminarc() { if (( CURRENT == 2 )); then _values "Operation" \ "queue" \ "start" \ "run" \ "set" \ "show-jobs" \ "show-queued" \ "show-running" \ "abort" else case "${words[2]}" in queue|start|run) if (( CURRENT == 3 )); then _values "Jobs" $(laminarc show-jobs) fi ;; abort) if (( CURRENT == 3 )); then _values "Jobs" $(laminarc show-running | cut -d : -f 1) elif (( CURRENT == 4 )); then _values "Runs" $(laminarc show-running | cut -d : -f 2) fi ;; esac fi } _laminarc # vim: ft=zsh laminar-1.1/etc/laminarc.1000066400000000000000000000041551410235453200154250ustar00rootroot00000000000000.Dd Apr 04, 2019 .Dt LAMINARC 1 .Sh NAME .Nm laminarc \- Laminar CI client application .Sh SYNOPSIS .Nm laminarc Li queue \fIJOB\fR [\fIPARAM=VALUE...\fR] ... .Nm laminarc Li queue \fIJOB\fR [\fIPARAM=VALUE...\fR] ... .Nm laminarc Li queue \fIJOB\fR [\fIPARAM=VALUE...\fR] ... .Nm laminarc Li set \fIPARAM=VALUE...\fR .Nm laminarc Li show-jobs .Nm laminarc Li show-running .Nm laminarc Li show-queued .Nm laminarc Li abort \fIJOB\fR \fINUMBER\fR .Sh DESCRIPTION The .Nm laminarc program connects to a Laminar server and perform one of following operations: .Bl -tag .It Sy queue adds job(s) (with optional parameters) to the queue and returns immediately. .It Sy start adds job(s) (with optional parameters) to the queue and returns when the jobs begin execution. .It Sy run adds job(s) (with optional parameters) to the queue and returns when the jobs complete execution. The exit code will be non-zero if any of the runs does not complete successfully. .It Sy set sets one or more parameters to be exported as environment variables in subsequent scripts for the run identified by the $JOB and $RUN environment variables. This is primarily intended for use from within a job execution, where those variables are already set by the server. .It Sy show-jobs list jobs known to the server. .It Sy show-running list the currently running jobs with their numbers. .It Sy show-queued list the names and numbers of the jobs waiting in the queue. .It Sy abort manually abort a currently running job by name and number. 
.El .Pp The laminar server to connect to is read from the .Ev LAMINAR_HOST environment variable. If empty, it falls back to .Ev LAMINAR_BIND_RPC and finally defaults to .Ad unix-abstract:laminar .Sh ENVIRONMENT .Bl -tag .It Ev LAMINAR_HOST address of server to connect. May be of the form .Ad IP:PORT, .Ad unix:PATH/TO/SOCKET or .Ad unix-abstract:NAME .It Ev LAMINAR_BIND_RPC fallback server address variable. It is set by .Nm laminard during execution of scripts. .El .Sh SEE ALSO .Xr laminard 8 .Sh AUTHORS .An Oliver Giles created Laminar CI. .An Dmitry Bogatov created this manual page for the Debian project (but it can be used by others). laminar-1.1/etc/laminard.8000066400000000000000000000032001410235453200154230ustar00rootroot00000000000000.Dd Apr 03, 2019 .Dt LAMINARD 1 .Sh NAME .Nm laminard \- Laminar CI server .Sh SYNOPSIS .Nm laminard Op Fl v .Sh DESCRIPTION Start Laminar CI server in the foreground. If option .Fl v is specified, verbose logging is enabled. Other aspects of operation are controlled by environment variables. .Sh ENVIRONMENT .Bl -tag .It Ev LAMINAR_HOME Root location containing laminar configuration, database, build workspaces and archive. .Pp Default: /var/lib/laminar .It Ev LAMINAR_BIND_HTTP Interface on which laminard will bind to serve the Web UI. May be of the form IP:PORT, unix:PATH/TO/SOCKET or unix-abstract:NAME .Pp Default: *:8080 .It Ev LAMINAR_BIND_HRPC Interface on which laminard will bind to accept RPC from laminarc. May be of the form IP:PORT, unix:PATH/TO/SOCKET or unix-abstract:NAME .Pp Default: unix-abstract:laminar .It Ev LAMINAR_TITLE Page title to show in web frontend .It Ev LAMINAR_KEEP_RUNDIRS Setting this prevents the immediate deletion of job rundirs $LAMINAR_HOME/run/$JOB/$RUN. Value should be an integer represeting the number of rundirs to keep. .Pp Default: 0 .It Ev LAMINAR_ARCHIVE_URL Base url used to request artifacts. Laminar can serve build artifacts (and it will if you leave this unset), but it uses a very naive and inefficient method. Best to let a real webserver handle serving those requests. .El .Sh FILES .Bl -tag .It Pa /etc/laminar.conf Variable assignments in this file are exported by systemd or other init system before launching the system-wide installation of Laminar. .El .Sh AUTHORS .An Oliver Giles created Laminar CI. .An Dmitry Bogatov created this manual page for Debian project (but it can be used by others). laminar-1.1/examples/000077500000000000000000000000001410235453200146135ustar00rootroot00000000000000laminar-1.1/examples/docker-advanced000077500000000000000000000027261410235453200175620ustar00rootroot00000000000000#!/bin/bash -eu # Any failing command in a pipe will cause an error, instead # of just an error in the last command in the pipe set -o pipefail # Log commands executed set -x # Simple way of getting the docker build tag: tag=$(docker build -q - <<\EOF FROM debian:bullseye RUN apt-get update && apt-get install -y build-essential EOF ) # But -q suppresses the log output. If you want to keep it, # you could use the following fancier way: exec {pfd}<><(:) # get a new pipe docker build - <<\EOF | FROM debian:bullseye RUN apt-get update && apt-get install -y build-essential EOF tee >(awk '/Successfully built/{print $3}' >&$pfd) # parse output to pipe read tag <&$pfd # read tag back from pipe exec {pfd}<&- # close pipe # Alternatively, you can use the -t option to docker build # to give the built image a name to refer to later. 
But then # you need to ensure that it does not conflict with any other # images, and handle cases where multiple instances of the # job attempt to update the tagged image. # If you want the image to be cleaned up on exit: trap "docker rmi $tag" EXIT # Now use the image to build something: docker run -i --rm \ -v "$PWD:$PWD" \ -w "$PWD" \ -u $(id -u):$(id -g) \ $tag /bin/bash -eux \ < output.html if [ ! -f "$1" ]; then echo "File not found: \"$1\"" exit 1 fi xsltproc --stringparam JOB $JOB --stringparam RUN $RUN <(cat <<\EOF Test report for <xsl:value-of select="$JOB" /> #<xsl:value-of select="$RUN" />

[HTML/XSLT report template body: page titled "Test Report", heading "Test report for $JOB #$RUN", and a results table with columns Suite, Tests run, Failures, Errors and Elapsed time; rows are styled "failure" or "success"]

EOF ) "$1" laminar-1.1/examples/git-post-receive-hook-notes000077500000000000000000000030341410235453200220130ustar00rootroot00000000000000#!/bin/bash -e # Simple post-receive hook that triggers a laminar run # for every commit pushed to every branch, and annotates # the commit with the run number using a git note. # On the cloned repository, useful config is # git config --add remote.origin.fetch "+refs/notes/*:refs/notes/*" # to automatically fetch all notes from the origin, and # git config --add notes.displayRef "refs/notes/*" # to display all notes in the git log by default # The laminar job to trigger LAMINAR_JOB=my-project # Default notes ref is refs/notes/commits NOTES_REF=refs/notes/ci # For each ref pushed... while read old new ref; do # Skip tags, notes, etc. Only do heads. # Extend this to only trigger on specific branches. if [[ $ref != refs/heads/* ]]; then continue fi # Otherwise, for each new commit in the ref... # (to only trigger on the newest, set commit=$new and delete the loop) git rev-list $([[ $old =~ ^0+$ ]] && echo $new || echo $old..$new) | while read commit; do # Queue the laminar run run=$(laminarc queue $LAMINAR_JOB commit=$commit ref=$ref) echo "Started Laminar $run for commit $commit to ref $ref" # Add a git note about the run blob=$(echo -n "Laminar-Run: $run" | git hash-object -w --stdin) if last_note=$(git show-ref -s $NOTES_REF); then git read-tree $last_note p_arg=-p fi git update-index --add --cacheinfo 100644 $blob $commit tree=$(git write-tree) new_note=$(echo "Notes added by post-receive hook" | git commit-tree $tree $p_arg $last_note) git update-ref $NOTES_REF $new_note $last_note done done laminar-1.1/examples/nginx-ssl-reverse-proxy.conf000066400000000000000000000036021410235453200222350ustar00rootroot00000000000000server { listen [::]:80; listen 80; server_name laminar.example.com; # rule for letsencrypt ACME challenge requests location ^~ /.well-known/acme-challenge/ { default_type "text/plain"; alias /srv/www/acme-challenge/; } # redirect all other http to https return 301 https://$server_name$request_uri; } server { # http2 is recommended because browsers will only open a small number of concurrent SSE streams over http1 listen [::]:443 ssl http2; listen 443 ssl http2; server_name laminar.example.com; # modern tls only, see https://syslink.pl/cipherlist/ for a more complete example ssl_protocols TLSv1.3; ssl_ciphers EECDH+AESGCM:EDH+AESGCM; # set according to ACME/letsencrypt client ssl_certificate /path/to/certificate.crt; ssl_certificate_key /path/to/private.key; # use "location /" if laminar is to be accessible at the (sub)domain root. # alteratively, use a subdirectory such as "location /my-laminar/" and ensure that # LAMINAR_BASE_URL=/my-laminar/ accordingly. location / { # set proxy_pass according to LAMINAR_BIND_HTTP. # note that the laminar default for LAMINAR_BIND_HTTP is *:8080, which binds on all interfaces # instead of just the loopback device and is almost certainly not what you want if you are using # a reverse proxy. It should be set to 127.0.0.1:8080 at a minimum, or use unix sockets for more # fine-grained control of permissions. 
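        # For example (an illustrative sketch; the socket path is a placeholder), with
        # LAMINAR_BIND_HTTP=unix:/var/run/laminar/http.sock you could use
        # proxy_pass http://unix:/var/run/laminar/http.sock:/; instead.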
# see http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass # and https://laminar.ohwg.net/docs.html#Running-on-a-different-HTTP-port-or-Unix-socket proxy_pass http://127.0.0.1:8080; # required to allow laminar's SSE stream to pass correctly proxy_http_version 1.1; proxy_set_header Connection ""; } # have nginx serve artefacts directly rather than having laminard do it location /archive/ { alias /var/lib/laminar/archive/; } } laminar-1.1/examples/notify-email-pretty000077500000000000000000000033111410235453200204610ustar00rootroot00000000000000#!/bin/bash -e # IMPORTANT: change these to appropriate values, or fetch them, for example # from the environment or from $(git show -s --format='%ae' $rev) TO_EMAIL=engineering@example.com FROM_EMAIL=laminar@example.com LAMINAR_URL=${LAMINAR_BASE_URL:-http://localhost:8080} LAMINAR_TITLE=${LAMINAR_TITLE:-Laminar CI} if [[ $RESULT = "success" ]]; then SVGICON=$(cat <<-EOF EOF ) else SVGICON=$(cat <<-EOF EOF ) fi sendmail -t <
[HTML email body: shows $LAMINAR_TITLE and the run status line "$SVGICON $JOB #$RUN"]
EOF laminar-1.1/examples/notify-email-text-log000077500000000000000000000007131410235453200207000ustar00rootroot00000000000000#!/bin/bash -e # IMPORTANT: change these to appropriate values, or fetch them, for example # from the environment or from $(git show -s --format='%ae' $rev) TO_EMAIL=engineering@example.com FROM_EMAIL=laminar@example.com LAMINAR_URL=${LAMINAR_BASE_URL:-http://localhost:8080} sendmail -t <$JOB #$RUN $RESULT" \ | jq .ok) == true ]] laminar-1.1/examples/plot-build-time-dist000077500000000000000000000032731410235453200205160ustar00rootroot00000000000000#!/usr/bin/env gnuplot # Deeper insights can be obtained by querying Laminar's database directly. # This example uses gnuplot to create a graph of the distribution of the # average run time of jobs. # The following will output a png... set terminal pngcairo size 800,580 enhanced font 'Helvetica,10' set output 'build-time-distribution.png' # ..comment it out to use an interactive widget # plot style set tics font "Helvetica,10" set title font "Helvetica,11" set xtics nomirror set ytics nomirror set border 3 back lt 1 lc rgb "#808080" set grid back lt 0 lc rgb "#d0d0d0" lw 0.5 set style line 1 lt 1 lc rgb "#7483af" lw 2 # Fetch the path to Laminar's sqlite database db = system("echo $LAMINAR_HOME") . '/laminar.sqlite' # Label the axes set xtics ("<30s" 0, "30s-1m" 1, "1m-5m" 2, "5m-10m" 3, "10m-20m" 4, "20m-40m" 5, "40m-60m" 6, ">60m" 7) set ylabel "Number of jobs" set xlabel "Average run time" set title "Distribution of average run times" plot '< sqlite3 -separator $''\n'' ' . db . ' \ "WITH ba AS (SELECT name,AVG(completedAt-startedAt) a FROM builds GROUP BY name) SELECT \ COUNT(CASE WHEN a < 30 THEN 1 END), \ COUNT(CASE WHEN a >= 30 AND a < 60 THEN 1 END), \ COUNT(CASE WHEN a >= 60 AND a < 300 THEN 1 END), \ COUNT(CASE WHEN a >= 300 AND a < 600 THEN 1 END), \ COUNT(CASE WHEN a >= 600 AND a < 1200 THEN 1 END), \ COUNT(CASE WHEN a >= 1200 AND a < 2400 THEN 1 END), \ COUNT(CASE WHEN a >= 2400 AND a < 3600 THEN 1 END), \ COUNT(CASE WHEN a >= 3600 THEN 1 END) FROM ba;"' \ using 0:1 with linespoints title '' ls 1 # uncomment this if using an interactive window #pause mouse close # Release the output set outputlaminar-1.1/pkg/000077500000000000000000000000001410235453200135565ustar00rootroot00000000000000laminar-1.1/pkg/centos7-x86_64.sh000077500000000000000000000043571410235453200164440ustar00rootroot00000000000000#!/bin/bash -e OUTPUT_DIR=$PWD SOURCE_DIR=$(readlink -f $(dirname ${BASH_SOURCE[0]})/..) 
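# Derive the package version from git; hyphens in the 'git describe' output are
# mapped to dots because RPM version fields may not contain hyphens.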
VERSION=$(cd "$SOURCE_DIR" && git describe --tags --abbrev=8 --dirty | tr - .)~upstream_centos7 DOCKER_TAG=$(docker build -q - < laminar.spec Summary: Lightweight Continuous Integration Service Name: laminar Version: $VERSION Release: 1 License: GPL BuildRequires: systemd-units Requires: sqlite zlib %description Lightweight Continuous Integration Service %prep %build cmake3 -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DLAMINAR_VERSION=$VERSION -DSYSTEMD_UNITDIR=%{_unitdir} %{_sourcedir}/laminar-$VERSION pwd make %install %make_install %files %{_bindir}/laminarc %{_sbindir}/laminard %{_unitdir}/laminar.service %config(noreplace) %{_sysconfdir}/laminar.conf %{_datarootdir}/bash-completion/completions/laminarc %{_datarootdir}/zsh/site-functions/_laminarc %{_mandir}/man8/laminard.8.gz %{_mandir}/man1/laminarc.1.gz %post echo Creating laminar user with home in %{_sharedstatedir}/laminar useradd -r -d %{_sharedstatedir}/laminar -s %{_sbindir}/nologin laminar mkdir -p %{_sharedstatedir}/laminar/cfg/{jobs,contexts,scripts} chown -R laminar: %{_sharedstatedir}/laminar EOF rpmbuild -ba laminar.spec mv rpmbuild/RPMS/x86_64/laminar-$VERSION-1.x86_64.rpm /output/ EOS laminar-1.1/pkg/debian10-amd64.sh000077500000000000000000000026241410235453200164150ustar00rootroot00000000000000#!/bin/bash -e OUTPUT_DIR=$PWD SOURCE_DIR=$(readlink -f $(dirname ${BASH_SOURCE[0]})/..) VERSION=$(cd "$SOURCE_DIR" && git describe --tags --abbrev=8 --dirty)-1~upstream-debian10 DOCKER_TAG=$(docker build -q - < laminar/DEBIAN/control Package: laminar Version: $VERSION Section: Priority: optional Architecture: amd64 Maintainer: Oliver Giles Depends: libcapnp-0.7.0, libsqlite3-0, zlib1g Description: Lightweight Continuous Integration Service EOF echo /etc/laminar.conf > laminar/DEBIAN/conffiles cat < laminar/DEBIAN/postinst #!/bin/bash echo Creating laminar user with home in /var/lib/laminar useradd -r -d /var/lib/laminar -s /usr/sbin/nologin laminar mkdir -p /var/lib/laminar/cfg/{jobs,contexts,scripts} chown -R laminar: /var/lib/laminar EOF chmod +x laminar/DEBIAN/postinst dpkg-deb --build laminar mv laminar.deb /output/laminar_${VERSION}_amd64.deb EOS laminar-1.1/pkg/debian10-armhf.sh000077500000000000000000000035701410235453200166000ustar00rootroot00000000000000#!/bin/bash -e OUTPUT_DIR=$PWD SOURCE_DIR=$(readlink -f $(dirname ${BASH_SOURCE[0]})/..) VERSION=$(cd "$SOURCE_DIR" && git describe --tags --abbrev=8 --dirty)-1~upstream-debian10 DOCKER_TAG=$(docker build -q - < toolchain.cmake < laminar/DEBIAN/control Package: laminar Version: $VERSION Section: Priority: optional Architecture: armhf Maintainer: Oliver Giles Depends: libcapnp-0.7.0, libsqlite3-0, zlib1g Description: Lightweight Continuous Integration Service EOF echo /etc/laminar.conf > laminar/DEBIAN/conffiles cat < laminar/DEBIAN/postinst #!/bin/bash echo Creating laminar user with home in /var/lib/laminar useradd -r -d /var/lib/laminar -s /usr/sbin/nologin laminar mkdir -p /var/lib/laminar/cfg/{jobs,contexts,scripts} chown -R laminar: /var/lib/laminar EOF chmod +x laminar/DEBIAN/postinst dpkg-deb --build laminar mv laminar.deb /output/laminar_${VERSION}_armhf.deb EOS laminar-1.1/pkg/rocky8-x86_64.sh000077500000000000000000000041241410235453200162710ustar00rootroot00000000000000#!/bin/bash -e OUTPUT_DIR=$PWD SOURCE_DIR=$(readlink -f $(dirname ${BASH_SOURCE[0]})/..) 
VERSION=$(cd "$SOURCE_DIR" && git describe --tags --abbrev=8 --dirty | tr - .)~upstream_rocky8 DOCKER_TAG=$(docker build -q - < laminar.spec Summary: Lightweight Continuous Integration Service Name: laminar Version: $VERSION Release: 1 License: GPL BuildRequires: systemd-units Requires: sqlite-libs zlib %description Lightweight Continuous Integration Service %prep %build cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DLAMINAR_VERSION=$VERSION -DSYSTEMD_UNITDIR=%{_unitdir} %{_sourcedir}/laminar-$VERSION pwd make %install %make_install %files %{_bindir}/laminarc %{_sbindir}/laminard %{_unitdir}/laminar.service %config(noreplace) %{_sysconfdir}/laminar.conf %{_datarootdir}/bash-completion/completions/laminarc %{_datarootdir}/zsh/site-functions/_laminarc %{_mandir}/man8/laminard.8.gz %{_mandir}/man1/laminarc.1.gz %post echo Creating laminar user with home in %{_sharedstatedir}/laminar useradd -r -d %{_sharedstatedir}/laminar -s %{_sbindir}/nologin laminar mkdir -p %{_sharedstatedir}/laminar/cfg/{jobs,contexts,scripts} chown -R laminar: %{_sharedstatedir}/laminar EOF rpmbuild -ba laminar.spec mv rpmbuild/RPMS/x86_64/laminar-$VERSION-1.x86_64.rpm /output/ EOS laminar-1.1/src/000077500000000000000000000000001410235453200135645ustar00rootroot00000000000000laminar-1.1/src/client.cpp000066400000000000000000000233351410235453200155540ustar00rootroot00000000000000/// /// Copyright 2015-2020 Oliver Giles /// /// This file is part of Laminar /// /// Laminar is free software: you can redistribute it and/or modify /// it under the terms of the GNU General Public License as published by /// the Free Software Foundation, either version 3 of the License, or /// (at your option) any later version. /// /// Laminar is distributed in the hope that it will be useful, /// but WITHOUT ANY WARRANTY; without even the implied warranty of /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the /// GNU General Public License for more details. /// /// You should have received a copy of the GNU General Public License /// along with Laminar. If not, see /// #include "laminar.capnp.h" #include "log.h" #include #include #include #include #include #include #define EXIT_BAD_ARGUMENT 1 #define EXIT_OPERATION_FAILED 2 #define EXIT_RUN_FAILED 3 // Definition needed for musl typedef unsigned int uint; template static int setParams(int argc, char** argv, T& request) { int n = 0; for(int i = 0; i < argc; ++i) { if(strchr(argv[i], '=') == NULL) break; n++; } char* job = getenv("JOB"); char* num = getenv("RUN"); char* reason = getenv("LAMINAR_REASON"); auto params = request.initParams(n + (job&&num?2:0) + (reason?1:0)); for(int i = 0; i < n; ++i) { char* name = argv[i]; char* val = strchr(name, '='); *val++ = '\0'; params[i].setName(name); params[i].setValue(val); } int argsConsumed = n; if(job && num) { params[n].setName("=parentJob"); params[n++].setValue(job); params[n].setName("=parentBuild"); params[n++].setValue(num); } if(reason) { params[n].setName("=reason"); params[n].setValue(reason); } return argsConsumed; } static void printTriggerLink(const char* job, uint run) { if(getenv("__LAMINAR_SETENV_PIPE")) { // use a private ANSI CSI sequence to mark the JOB:NUM so the // frontend can recognise it and generate a hyperlink. printf("\033[{%s:%d\033\\\n", job, run); } else { // not called from within a laminar job, let's not confuse // scripts with ANSI sequences. 
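        // Plain "JOB:NUM" output is also the form that calling scripts capture,
        // e.g. run=$(laminarc queue ...) in examples/git-post-receive-hook-notes.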
printf("%s:%d\n", job, run); } } static void usage(std::ostream& out) { out << "laminarc version " << laminar_version() << "\n"; out << "Usage: laminarc [-h|--help] COMMAND [PARAMETERS...]]\n"; out << " -h|--help show this help message\n"; out << "where COMMAND is:\n"; out << " queue JOB_LIST... queues one or more jobs for execution and returns immediately.\n"; out << " start JOB_LIST... queues one or more jobs for execution and blocks until it starts.\n"; out << " run JOB_LIST... queues one or more jobs for execution and blocks until it finishes.\n"; out << " set PARAMETER_LIST... sets the given parameters as environment variables in the currently\n"; out << " running job. Fails if run outside of a job context.\n"; out << " abort NAME NUMBER aborts the run identified by NAME and NUMBER.\n"; out << " show-jobs lists all known jobs.\n"; out << " show-queued lists currently queued jobs.\n"; out << " show-running lists currently running jobs.\n"; out << "JOB_LIST is of the form:\n"; out << " [JOB_NAME [PARAMETER_LIST...]]...\n"; out << "PARAMETER_LIST is of the form:\n"; out << " [KEY=VALUE]...\n"; out << "Example:\n"; out << " laminarc start \\\n"; out << " nightly-build branch=master type=release \\\n"; out << " nightly-build branch=master type=debug\n"; } int main(int argc, char** argv) { if(argc < 2) return usage(std::cerr), EXIT_BAD_ARGUMENT; else if(strcmp("-h", argv[1]) == 0 || strcmp("--help", argv[1]) == 0) return usage(std::cout), EXIT_SUCCESS; struct: public kj::TaskSet::ErrorHandler { void taskFailed(kj::Exception&& e) override { fprintf(stderr, "%s\n", e.getDescription().cStr()); ret = EXIT_OPERATION_FAILED; } int ret = 0; } errorHandler; kj::TaskSet ts(errorHandler); int& ret = errorHandler.ret; const char* address = getenv("LAMINAR_HOST") ?: getenv("LAMINAR_BIND_RPC") ?: "unix-abstract:laminar"; capnp::EzRpcClient client(address); LaminarCi::Client laminar = client.getMain(); auto& waitScope = client.getWaitScope(); if(strcmp(argv[1], "queue") == 0) { if(argc < 3) { fprintf(stderr, "Usage %s queue \n", argv[0]); return EXIT_BAD_ARGUMENT; } int jobNameIndex = 2; // make a request for each job specified on the commandline do { auto req = laminar.queueRequest(); req.setJobName(argv[jobNameIndex]); int n = setParams(argc - jobNameIndex - 1, &argv[jobNameIndex + 1], req); ts.add(req.send().then([&ret,argv,jobNameIndex](capnp::Response resp){ if(resp.getResult() != LaminarCi::MethodResult::SUCCESS) { fprintf(stderr, "Failed to queue job '%s'\n", argv[jobNameIndex]); ret = EXIT_OPERATION_FAILED; } else printTriggerLink(argv[jobNameIndex], resp.getBuildNum()); })); jobNameIndex += n + 1; } while(jobNameIndex < argc); } else if(strcmp(argv[1], "start") == 0) { if(argc < 3) { fprintf(stderr, "Usage %s queue \n", argv[0]); return EXIT_BAD_ARGUMENT; } kj::Vector> promises; int jobNameIndex = 2; // make a request for each job specified on the commandline do { auto req = laminar.startRequest(); req.setJobName(argv[jobNameIndex]); int n = setParams(argc - jobNameIndex - 1, &argv[jobNameIndex + 1], req); ts.add(req.send().then([&ret,argv,jobNameIndex](capnp::Response resp){ if(resp.getResult() != LaminarCi::MethodResult::SUCCESS) { fprintf(stderr, "Failed to start job '%s'\n", argv[2]); ret = EXIT_OPERATION_FAILED; } else printTriggerLink(argv[jobNameIndex], resp.getBuildNum()); })); jobNameIndex += n + 1; } while(jobNameIndex < argc); } else if(strcmp(argv[1], "run") == 0) { if(argc < 3) { fprintf(stderr, "Usage %s run \n", argv[0]); return EXIT_BAD_ARGUMENT; } int jobNameIndex = 2; // make a 
request for each job specified on the commandline do { auto req = laminar.runRequest(); req.setJobName(argv[jobNameIndex]); int n = setParams(argc - jobNameIndex - 1, &argv[jobNameIndex + 1], req); ts.add(req.send().then([&ret,argv,jobNameIndex](capnp::Response resp){ if(resp.getResult() == LaminarCi::JobResult::UNKNOWN) fprintf(stderr, "Failed to start job '%s'\n", argv[2]); else printTriggerLink(argv[jobNameIndex], resp.getBuildNum()); if(resp.getResult() != LaminarCi::JobResult::SUCCESS) ret = EXIT_RUN_FAILED; })); jobNameIndex += n + 1; } while(jobNameIndex < argc); } else if(strcmp(argv[1], "set") == 0) { if(argc < 3) { fprintf(stderr, "Usage %s set param=value\n", argv[0]); return EXIT_BAD_ARGUMENT; } if(char* pipeNum = getenv("__LAMINAR_SETENV_PIPE")) { LSYSCALL(write(atoi(pipeNum), argv[2], strlen(argv[2]))); } else { fprintf(stderr, "Must be run from within a laminar job\n"); return EXIT_BAD_ARGUMENT; } } else if(strcmp(argv[1], "abort") == 0) { if(argc != 4) { fprintf(stderr, "Usage %s abort \n", argv[0]); return EXIT_BAD_ARGUMENT; } auto req = laminar.abortRequest(); req.getRun().setJob(argv[2]); req.getRun().setBuildNum(atoi(argv[3])); ts.add(req.send().then([&ret](capnp::Response resp){ if(resp.getResult() != LaminarCi::MethodResult::SUCCESS) ret = EXIT_OPERATION_FAILED; })); } else if(strcmp(argv[1], "show-jobs") == 0) { if(argc != 2) { fprintf(stderr, "Usage: %s show-jobs\n", argv[0]); return EXIT_BAD_ARGUMENT; } auto jobs = laminar.listKnownRequest().send().wait(waitScope); for(auto it : jobs.getResult()) { printf("%s\n", it.cStr()); } } else if(strcmp(argv[1], "show-queued") == 0) { if(argc != 2) { fprintf(stderr, "Usage: %s show-queued\n", argv[0]); return EXIT_BAD_ARGUMENT; } auto queued = laminar.listQueuedRequest().send().wait(waitScope); for(auto it : queued.getResult()) { printf("%s\n", it.cStr()); } } else if(strcmp(argv[1], "show-running") == 0) { if(argc != 2) { fprintf(stderr, "Usage: %s show-running\n", argv[0]); return EXIT_BAD_ARGUMENT; } auto running = laminar.listRunningRequest().send().wait(waitScope); for(auto it : running.getResult()) { printf("%s:%d\n", it.getJob().cStr(), it.getBuildNum()); } } else { fprintf(stderr, "Unknown command %s\n", argv[1]); return EXIT_BAD_ARGUMENT; } ts.onEmpty().wait(waitScope); return ret; } laminar-1.1/src/conf.cpp000066400000000000000000000022761410235453200152240ustar00rootroot00000000000000/// /// Copyright 2015 Oliver Giles /// /// This file is part of Laminar /// /// Laminar is free software: you can redistribute it and/or modify /// it under the terms of the GNU General Public License as published by /// the Free Software Foundation, either version 3 of the License, or /// (at your option) any later version. /// /// Laminar is distributed in the hope that it will be useful, /// but WITHOUT ANY WARRANTY; without even the implied warranty of /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the /// GNU General Public License for more details. /// /// You should have received a copy of the GNU General Public License /// along with Laminar. 
If not, see /// #include "conf.h" #include template <> int StringMap::convert(std::string e) { return atoi(e.c_str()); } StringMap parseConfFile(const char* path) { StringMap result; std::ifstream f(path); std::string line; while(std::getline(f, line)) { if(line[0] == '#') continue; size_t p = line.find('='); if(p != std::string::npos) { result.emplace(line.substr(0, p), line.substr(p+1)); } } return result; } laminar-1.1/src/conf.h000066400000000000000000000025731410235453200146710ustar00rootroot00000000000000/// /// Copyright 2015 Oliver Giles /// /// This file is part of Laminar /// /// Laminar is free software: you can redistribute it and/or modify /// it under the terms of the GNU General Public License as published by /// the Free Software Foundation, either version 3 of the License, or /// (at your option) any later version. /// /// Laminar is distributed in the hope that it will be useful, /// but WITHOUT ANY WARRANTY; without even the implied warranty of /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the /// GNU General Public License for more details. /// /// You should have received a copy of the GNU General Public License /// along with Laminar. If not, see /// #ifndef LAMINAR_CONF_H_ #define LAMINAR_CONF_H_ #include #include class StringMap : public std::unordered_map { public: template T get(std::string key, T fallback = T()) { auto it = find(key); return it != end() ? convert(it->second) : fallback; } private: template T convert(std::string e) { return e; } }; template <> int StringMap::convert(std::string e); // Reads a file by line into a list of key/value pairs // separated by the first '=' character. Discards lines // beginning with '#' StringMap parseConfFile(const char* path); #endif // LAMINAR_CONF_H_ laminar-1.1/src/context.h000066400000000000000000000022431410235453200154220ustar00rootroot00000000000000/// /// Copyright 2015-2020 Oliver Giles /// /// This file is part of Laminar /// /// Laminar is free software: you can redistribute it and/or modify /// it under the terms of the GNU General Public License as published by /// the Free Software Foundation, either version 3 of the License, or /// (at your option) any later version. /// /// Laminar is distributed in the hope that it will be useful, /// but WITHOUT ANY WARRANTY; without even the implied warranty of /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the /// GNU General Public License for more details. /// /// You should have received a copy of the GNU General Public License /// along with Laminar. If not, see /// #ifndef LAMINAR_CONTEXT_H_ #define LAMINAR_CONTEXT_H_ #include #include class Run; // Represents a context within which a Run will be executed. Allows applying // a certain environment to a set of Jobs, or setting a limit on the number // of parallel Runs class Context { public: Context() {} std::string name; int numExecutors; int busyExecutors = 0; std::set jobPatterns; }; #endif // LAMINAR_CONTEXT_H_ laminar-1.1/src/database.cpp000066400000000000000000000101201410235453200160260ustar00rootroot00000000000000/// /// Copyright 2015-2018 Oliver Giles /// /// This file is part of Laminar /// /// Laminar is free software: you can redistribute it and/or modify /// it under the terms of the GNU General Public License as published by /// the Free Software Foundation, either version 3 of the License, or /// (at your option) any later version. 
/// /// Laminar is distributed in the hope that it will be useful, /// but WITHOUT ANY WARRANTY; without even the implied warranty of /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the /// GNU General Public License for more details. /// /// You should have received a copy of the GNU General Public License /// along with Laminar. If not, see /// #include "database.h" #include #include #include struct StdevCtx { double mean; double M2; int64_t count; }; static void stdevStep(sqlite3_context *ctx, int, sqlite3_value **args) { StdevCtx* p = static_cast(sqlite3_aggregate_context(ctx, sizeof(StdevCtx))); // Welford's Online Algorithm if(sqlite3_value_numeric_type(args[0]) != SQLITE_NULL) { p->count++; double val = sqlite3_value_double(args[0]); double delta = val - p->mean; p->mean += delta / p->count; p->M2 += delta * (val - p->mean); } } static void stdevFinalize(sqlite3_context *context){ StdevCtx* p = static_cast(sqlite3_aggregate_context(context, 0)); if(p && p->count > 1) sqlite3_result_double(context, sqrt(p->M2 / (p->count-1))); else sqlite3_result_null(context); } Database::Database(const char *path) { sqlite3_open(path, &hdl); int create_func_flags = SQLITE_UTF8; #if SQLITE_VERSION_NUMBER >= 3008003 create_func_flags |= SQLITE_DETERMINISTIC; #endif sqlite3_create_function(hdl, "STDEV", 1, create_func_flags, NULL, NULL, stdevStep, stdevFinalize); } Database::~Database() { sqlite3_close(hdl); } Database::Statement::Statement(sqlite3 *db, const char *query) : stmt(nullptr) { sqlite3_prepare_v2(db, query, -1, &stmt, nullptr); } Database::Statement::~Statement() { sqlite3_finalize(stmt); } bool Database::Statement::exec() { return sqlite3_step(stmt) == SQLITE_DONE; } void Database::Statement::bindValue(int i, int e) { sqlite3_bind_int(stmt, i, e); } void Database::Statement::bindValue(int i, uint e) { sqlite3_bind_int(stmt, i, static_cast(e)); } void Database::Statement::bindValue(int i, long e) { sqlite3_bind_int64(stmt, i, e); } void Database::Statement::bindValue(int i, ulong e) { sqlite3_bind_int64(stmt, i, static_cast(e)); } void Database::Statement::bindValue(int i, const char* e) { sqlite3_bind_text(stmt, i, e, -1, nullptr); } void Database::Statement::bindValue(int i, const std::string& e) { sqlite3_bind_text(stmt, i, e.data(), static_cast(e.size()), nullptr); } template<> std::string Database::Statement::fetchColumn(int col) { uint sz = static_cast(sqlite3_column_bytes(stmt, col)); // according to documentation will never be negative std::string res(sz, '\0'); memcpy(&res[0], sqlite3_column_text(stmt, col), sz); return res; } template<> const char* Database::Statement::fetchColumn(int col) { // while sqlite3_column_text maybe more correctly returns an unsigned const char*, signed const char* is more consistent return reinterpret_cast(sqlite3_column_text(stmt, col)); } template<> int Database::Statement::fetchColumn(int col) { return sqlite3_column_int(stmt, col); } template<> uint Database::Statement::fetchColumn(int col) { return static_cast(sqlite3_column_int(stmt, col)); } template<> long Database::Statement::fetchColumn(int col) { return static_cast(sqlite3_column_int64(stmt, col)); } template<> ulong Database::Statement::fetchColumn(int col) { return static_cast(sqlite3_column_int64(stmt, col)); } template<> double Database::Statement::fetchColumn(int col) { return sqlite3_column_double(stmt, col); } bool Database::Statement::row() { return sqlite3_step(stmt) == SQLITE_ROW; } 
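// Illustrative usage sketch (not part of the build; the query mirrors the one
// used in examples/plot-build-time-dist and the path is the default database
// location under $LAMINAR_HOME):
//
//   Database db("/var/lib/laminar/laminar.sqlite");
//   db.stmt("SELECT name, AVG(completedAt-startedAt) FROM builds GROUP BY name")
//     .fetch<const char*, double>([](const char* job, double avgSeconds) {
//         printf("%s averages %.0f seconds\n", job, avgSeconds);
//     });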
laminar-1.1/src/database.h000066400000000000000000000130361410235453200155040ustar00rootroot00000000000000/// /// Copyright 2015-2018 Oliver Giles /// /// This file is part of Laminar /// /// Laminar is free software: you can redistribute it and/or modify /// it under the terms of the GNU General Public License as published by /// the Free Software Foundation, either version 3 of the License, or /// (at your option) any later version. /// /// Laminar is distributed in the hope that it will be useful, /// but WITHOUT ANY WARRANTY; without even the implied warranty of /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the /// GNU General Public License for more details. /// /// You should have received a copy of the GNU General Public License /// along with Laminar. If not, see /// #ifndef LAMINAR_DATABASE_H_ #define LAMINAR_DATABASE_H_ #include #include // Definition needed for musl typedef unsigned int uint; typedef unsigned long ulong; struct sqlite3; struct sqlite3_stmt; // This is a small sqlite wrapper using some clever template action // to somewhat reduce verbosity. Usage: // db.stmt("SELECT result WHERE name = ?") // .bind(name) // .fetch([](int result) { // // function called for each retrieved row // doSomething(result); // }); class Database { public: Database(const char* path); ~Database(); private: // Represents a database statement. Call Database::stmt() to get // one, then call bind(), fetch() or exec() on the returned object class Statement { private: // Internal template helper that defines the type // in the variadic type array Args at offset N template struct typeindex : typeindex {}; template struct typeindex<0, T, Args...> { typedef T type; }; public: Statement(sqlite3* db, const char* query); Statement(const Statement&) =delete; Statement(Statement&& other) { stmt = other.stmt; other.stmt = nullptr; } ~Statement(); // Bind several parameters in a single call. They are bound // by index in the order passed into this function. Must be // passed by reference because arguments may be std::strings, // which must be passed by reference because sqlite requires // the bound string's lifetime to exist until sqlite3_step template Statement& bind(const Args&...args) { return bindRecursive(1, args...); } // Fetch columns. Supply a callback that will be executed for // each row in the resultset, with arguments matching the // expected column types template void fetch(typename typeindex<0, std::function>::type callback) { FetchMarshaller fm(this, callback); } // execute without fetching any parameters. Intended for // non-SELECT statements; bool exec(); private: // Internal template helper used to unpack arguments into // the fetch callback. 
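        // (rng/genrng below play the role of C++14's std::index_sequence and
        // std::make_index_sequence: they produce the compile-time pack of column
        // indices 0..N-1 used to call fetchColumn once per column.)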
template struct rng { }; // Internal template helper to generate a rng<> object: // genrng<4>::type is rng<0,1,2,3> template struct genrng : genrng {}; template struct genrng<0, N...> { typedef rng type; }; template struct FetchMarshaller { FetchMarshaller(Statement* st, std::function cb){ marshal(st, cb, typename genrng::type()); } template void marshal(Statement* st, std::function cb, rng) { while(st->row()) { cb(st->fetchColumn::type>(N)...); } } }; template friend struct FetchMarshaller; bool row(); template Statement& bindRecursive(int i, const T& v, const Args&...args) { bindValue(i, v); // specialization must exist for T return bindRecursive(i + 1, args...); } // template terminating condition Statement& bindRecursive(int) { return *this; } // Bind value specializations void bindValue(int i, int e); void bindValue(int i, uint e); void bindValue(int i, long e); void bindValue(int i, unsigned long e); void bindValue(int i, const char* e); void bindValue(int i, const std::string& e); // Declaration for fetch column interface, // intentionally missing definition template T fetchColumn(int col); sqlite3_stmt* stmt; }; public: Statement stmt(const char* q) { return Statement(hdl, q); } // shorthand bool exec(const char* q) { return Statement(hdl, q).exec(); } private: sqlite3* hdl; }; // specialization declarations, defined in source file template<> std::string Database::Statement::fetchColumn(int col); template<> const char* Database::Statement::fetchColumn(int col); template<> int Database::Statement::fetchColumn(int col); template<> uint Database::Statement::fetchColumn(int col); template<> long Database::Statement::fetchColumn(int col); template<> ulong Database::Statement::fetchColumn(int col); template<> double Database::Statement::fetchColumn(int col); #endif // LAMINAR_DATABASE_H_ laminar-1.1/src/http.cpp000066400000000000000000000261051410235453200152530ustar00rootroot00000000000000/// /// Copyright 2015-2019 Oliver Giles /// /// This file is part of Laminar /// /// Laminar is free software: you can redistribute it and/or modify /// it under the terms of the GNU General Public License as published by /// the Free Software Foundation, either version 3 of the License, or /// (at your option) any later version. /// /// Laminar is distributed in the hope that it will be useful, /// but WITHOUT ANY WARRANTY; without even the implied warranty of /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the /// GNU General Public License for more details. /// /// You should have received a copy of the GNU General Public License /// along with Laminar. If not, see /// #include "http.h" #include "resources.h" #include "monitorscope.h" #include "log.h" #include "laminar.h" // Helper class which wraps another class with calls to // adding and removing a pointer to itself from a passed // std::set reference. 
Used to keep track of currently // connected clients template struct WithSetRef : public T { WithSetRef(std::set& set, Args&& ...args) : T(std::forward(args)...), _set(set) { _set.insert(this); } ~WithSetRef() { _set.erase(this); } private: std::set& _set; }; struct EventPeer { MonitorScope scope; std::list pendingOutput; kj::Own> fulfiller; }; struct LogWatcher { std::string job; uint run; std::list pendingOutput; kj::Own> fulfiller; }; kj::Maybe fromUrl(std::string resource, char* query) { MonitorScope scope; if(query) { char *sk; for(char* k = strtok_r(query, "&", &sk); k; k = strtok_r(nullptr, "&", &sk)) { if(char* v = strchr(k, '=')) { *v++ = '\0'; if(strcmp(k, "page") == 0) scope.page = atoi(v); else if(strcmp(k, "field") == 0) scope.field = v; else if(strcmp(k, "order") == 0) scope.order_desc = (strcmp(v, "dsc") == 0); } } } if(resource == "/") { scope.type = MonitorScope::HOME; return kj::mv(scope); } if(resource == "/jobs" || resource == "/wallboard") { scope.type = MonitorScope::ALL; return kj::mv(scope); } if(resource.substr(0, 5) != "/jobs") return nullptr; resource = resource.substr(5); size_t split = resource.find('/',1); std::string job = resource.substr(1,split-1); if(job.empty()) return nullptr; scope.job = job; scope.type = MonitorScope::JOB; if(split == std::string::npos) return kj::mv(scope); size_t split2 = resource.find('/', split+1); std::string run = resource.substr(split+1, split2-split); if(run.empty()) return nullptr; scope.num = static_cast(atoi(run.c_str())); scope.type = MonitorScope::RUN; return kj::mv(scope); } // Parses the url of the form /log/NAME/NUMBER, filling in the passed // references and returning true if successful. /log/NAME/latest is // also allowed, in which case the num reference is set to 0 bool Http::parseLogEndpoint(kj::StringPtr url, std::string& name, uint& num) { if(url.startsWith("/log/")) { kj::StringPtr path = url.slice(5); KJ_IF_MAYBE(sep, path.findFirst('/')) { name = path.slice(0, *sep).begin(); kj::StringPtr tail = path.slice(*sep+1); num = static_cast(atoi(tail.begin())); name.erase(*sep); if(tail == "latest") num = laminar.latestRun(name); if(num > 0) return true; } } return false; } kj::Promise Http::cleanupPeers(kj::Timer& timer) { return timer.afterDelay(15 * kj::SECONDS).then([&]{ for(EventPeer* p : eventPeers) { // an empty SSE message is a colon followed by two newlines p->pendingOutput.push_back(":\n\n"); p->fulfiller->fulfill(); } return cleanupPeers(timer); }).eagerlyEvaluate(nullptr); } kj::Promise writeEvents(EventPeer* peer, kj::AsyncOutputStream* stream) { auto paf = kj::newPromiseAndFulfiller(); peer->fulfiller = kj::mv(paf.fulfiller); return paf.promise.then([=]{ kj::Promise p = kj::READY_NOW; std::list chunks = kj::mv(peer->pendingOutput); for(std::string& s : chunks) { p = p.then([=,&s]{ return stream->write(s.data(), s.size()); }); } return p.attach(kj::mv(chunks)).then([=]{ return writeEvents(peer, stream); }); }); } kj::Promise writeLogChunk(LogWatcher* client, kj::AsyncOutputStream* stream) { auto paf = kj::newPromiseAndFulfiller(); client->fulfiller = kj::mv(paf.fulfiller); return paf.promise.then([=](bool done){ kj::Promise p = kj::READY_NOW; std::list chunks = kj::mv(client->pendingOutput); for(std::string& s : chunks) { p = p.then([=,&s]{ return stream->write(s.data(), s.size()); }); } return p.attach(kj::mv(chunks)).then([=]{ return done ? 
kj::Promise(kj::READY_NOW) : writeLogChunk(client, stream); }); }); } kj::Promise Http::request(kj::HttpMethod method, kj::StringPtr url, const kj::HttpHeaders &headers, kj::AsyncInputStream &requestBody, HttpService::Response &response) { const char* start, *end, *content_type; std::string badge; // for log requests std::string name; uint num; kj::HttpHeaders responseHeaders(*headerTable); responseHeaders.clear(); bool is_sse = false; char* queryString = nullptr; // Clients usually expect that http servers will ignore unknown query parameters, // and expect to use this feature to work around browser limitations like there // being no way to programatically force a resource to be reloaded from the server // (without "Cache-Control: no-store", which is overkill). See issue #89. // So first parse any query parameters we *are* interested in, then simply remove // them from the URL, to make comparisions easier. KJ_IF_MAYBE(queryIdx, url.findFirst('?')) { const_cast(url.begin())[*queryIdx] = '\0'; queryString = const_cast(url.begin() + *queryIdx + 1); url = url.begin(); } KJ_IF_MAYBE(accept, headers.get(ACCEPT)) { is_sse = (*accept == "text/event-stream"); } if(is_sse) { KJ_IF_MAYBE(s, fromUrl(url.cStr(), queryString)) { responseHeaders.set(kj::HttpHeaderId::CONTENT_TYPE, "text/event-stream"); // Disables nginx reverse-proxy's buffering. Necessary for streamed events. responseHeaders.add("X-Accel-Buffering", "no"); auto peer = kj::heap>(eventPeers); peer->scope = *s; std::string st = "data: " + laminar.getStatus(peer->scope) + "\n\n"; auto stream = response.send(200, "OK", responseHeaders); return stream->write(st.data(), st.size()).attach(kj::mv(st)).then([=,s=stream.get(),p=peer.get()]{ return writeEvents(p,s); }).attach(kj::mv(stream)).attach(kj::mv(peer)); } } else if(url.startsWith("/archive/")) { KJ_IF_MAYBE(file, laminar.getArtefact(url.slice(strlen("/archive/")))) { auto array = (*file)->mmap(0, (*file)->stat().size); responseHeaders.add("Content-Transfer-Encoding", "binary"); auto stream = response.send(200, "OK", responseHeaders, array.size()); return stream->write(array.begin(), array.size()).attach(kj::mv(array)).attach(kj::mv(file)).attach(kj::mv(stream)); } } else if(parseLogEndpoint(url, name, num)) { bool complete; std::string output; if(laminar.handleLogRequest(name, num, output, complete)) { responseHeaders.set(kj::HttpHeaderId::CONTENT_TYPE, "text/plain; charset=utf-8"); responseHeaders.add("Content-Transfer-Encoding", "binary"); // Disables nginx reverse-proxy's buffering. Necessary for dynamic log output. 
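// (Illustrative aside, not part of the original source: since this branch
// serves the raw build log as streamed plain text, a run's output can be
// followed from a shell with something like
//   curl -s http://localhost:8080/log/my-job/latest
// where the host, port and the job name "my-job" are placeholders that depend
// on the local LAMINAR_BIND_HTTP configuration and job set-up.)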
responseHeaders.add("X-Accel-Buffering", "no"); auto stream = response.send(200, "OK", responseHeaders, nullptr); auto s = stream.get(); auto lw = kj::heap>(logWatchers); lw->job = name; lw->run = num; auto promise = writeLogChunk(lw.get(), stream.get()).attach(kj::mv(stream)).attach(kj::mv(lw)); return s->write(output.data(), output.size()).attach(kj::mv(output)).then([p=kj::mv(promise),complete]() mutable { if(complete) return kj::Promise(kj::READY_NOW); return kj::mv(p); }); } } else if(resources->handleRequest(url.cStr(), &start, &end, &content_type)) { responseHeaders.set(kj::HttpHeaderId::CONTENT_TYPE, content_type); responseHeaders.add("Content-Encoding", "gzip"); responseHeaders.add("Content-Transfer-Encoding", "binary"); auto stream = response.send(200, "OK", responseHeaders, end-start); return stream->write(start, end-start).attach(kj::mv(stream)); } else if(url.startsWith("/badge/") && url.endsWith(".svg") && laminar.handleBadgeRequest(std::string(url.begin()+7, url.size()-11), badge)) { responseHeaders.set(kj::HttpHeaderId::CONTENT_TYPE, "image/svg+xml"); responseHeaders.add("Cache-Control", "no-cache"); auto stream = response.send(200, "OK", responseHeaders, badge.size()); return stream->write(badge.data(), badge.size()).attach(kj::mv(badge)).attach(kj::mv(stream)); } return response.sendError(404, "Not Found", responseHeaders); } Http::Http(Laminar &li) : laminar(li), resources(kj::heap()) { kj::HttpHeaderTable::Builder builder; ACCEPT = builder.add("Accept"); headerTable = builder.build(); } Http::~Http() { LASSERT(logWatchers.size() == 0); LASSERT(eventPeers.size() == 0); } kj::Promise Http::startServer(kj::Timer& timer, kj::Own&& listener) { kj::Own server = kj::heap(timer, *headerTable, *this); return server->listenHttp(*listener).attach(cleanupPeers(timer)).attach(kj::mv(listener)).attach(kj::mv(server)); } void Http::notifyEvent(const char *data, std::string job) { for(EventPeer* c : eventPeers) { if(c->scope.wantsStatus(job)) { c->pendingOutput.push_back("data: " + std::string(data) + "\n\n"); c->fulfiller->fulfill(); } } } void Http::notifyLog(std::string job, uint run, std::string log_chunk, bool eot) { for(LogWatcher* lw : logWatchers) { if(lw->job == job && lw->run == run) { lw->pendingOutput.push_back(log_chunk); lw->fulfiller->fulfill(kj::mv(eot)); } } } void Http::setHtmlTemplate(std::string tmpl) { resources->setHtmlTemplate(tmpl); } laminar-1.1/src/http.h000066400000000000000000000044731410235453200147240ustar00rootroot00000000000000/// /// Copyright 2019-2020 Oliver Giles /// /// This file is part of Laminar /// /// Laminar is free software: you can redistribute it and/or modify /// it under the terms of the GNU General Public License as published by /// the Free Software Foundation, either version 3 of the License, or /// (at your option) any later version. /// /// Laminar is distributed in the hope that it will be useful, /// but WITHOUT ANY WARRANTY; without even the implied warranty of /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the /// GNU General Public License for more details. /// /// You should have received a copy of the GNU General Public License /// along with Laminar. 
If not, see /// #ifndef LAMINAR_HTTP_H_ #define LAMINAR_HTTP_H_ #include #include #include #include // Definition needed for musl typedef unsigned int uint; typedef unsigned long ulong; class Laminar; class Resources; struct LogWatcher; struct EventPeer; class Http : public kj::HttpService { public: Http(Laminar&li); virtual ~Http(); kj::Promise startServer(kj::Timer &timer, kj::Own &&listener); void notifyEvent(const char* data, std::string job = nullptr); void notifyLog(std::string job, uint run, std::string log_chunk, bool eot); // Allows supplying a custom HTML template. Pass an empty string to use the default. void setHtmlTemplate(std::string tmpl = std::string()); private: virtual kj::Promise request(kj::HttpMethod method, kj::StringPtr url, const kj::HttpHeaders& headers, kj::AsyncInputStream& requestBody, Response& response) override; bool parseLogEndpoint(kj::StringPtr url, std::string &name, uint &num); // With SSE, there is no notification if a client disappears. Also, an idle // client must be kept alive if there is no activity in their MonitorScope. // Deal with these by sending a periodic keepalive and reaping the client if // the write fails. kj::Promise cleanupPeers(kj::Timer &timer); Laminar& laminar; std::set eventPeers; kj::Own headerTable; kj::Own resources; std::set logWatchers; kj::HttpHeaderId ACCEPT; }; #endif //LAMINAR_HTTP_H_ laminar-1.1/src/laminar.capnp000066400000000000000000000015261410235453200162360ustar00rootroot00000000000000@0xc2cbd510f16dab57; interface LaminarCi { queue @0 (jobName :Text, params :List(JobParam)) -> (result :MethodResult, buildNum :UInt32); start @1 (jobName :Text, params :List(JobParam)) -> (result :MethodResult, buildNum :UInt32); run @2 (jobName :Text, params :List(JobParam)) -> (result :JobResult, buildNum :UInt32); listQueued @3 () -> (result :List(Text)); listRunning @4 () -> (result :List(Run)); listKnown @5 () -> (result :List(Text)); abort @6 (run :Run) -> (result :MethodResult); struct Run { job @0 :Text; buildNum @1 :UInt32; } struct JobParam { name @0 :Text; value @1 :Text; } enum MethodResult { failed @0; success @1; } enum JobResult { unknown @0; failed @1; aborted @2; success @3; } } laminar-1.1/src/laminar.cpp000066400000000000000000001025341410235453200157200ustar00rootroot00000000000000/// /// Copyright 2015-2020 Oliver Giles /// /// This file is part of Laminar /// /// Laminar is free software: you can redistribute it and/or modify /// it under the terms of the GNU General Public License as published by /// the Free Software Foundation, either version 3 of the License, or /// (at your option) any later version. /// /// Laminar is distributed in the hope that it will be useful, /// but WITHOUT ANY WARRANTY; without even the implied warranty of /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the /// GNU General Public License for more details. /// /// You should have received a copy of the GNU General Public License /// along with Laminar. If not, see /// #include "laminar.h" #include "server.h" #include "conf.h" #include "log.h" #include "http.h" #include "rpc.h" #include #include #include #include #include #include #include #include #define COMPRESS_LOG_MIN_SIZE 1024 #include #include // FNM_EXTMATCH isn't supported under musl #if !defined(FNM_EXTMATCH) #define FNM_EXTMATCH 0 #endif // rapidjson::Writer with a StringBuffer is used a lot in Laminar for // preparing JSON messages to send to HTTP clients. A small wrapper // class here reduces verbosity later for this common use case. 
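// As an illustrative sketch (not found in the original source), a payload such
// as {"type":"status","data":{"name":"my-job"}} could be produced with:
//   Json j;
//   j.set("type", "status");
//   j.startObject("data").set("name", "my-job").EndObject();
//   const char* payload = j.str(); // str() closes the root object
// Here "my-job" is a made-up value; the chained calls work because set(),
// startObject() and startArray() all return a reference to the writer.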
class Json : public rapidjson::Writer { public: Json() : rapidjson::Writer(buf) { StartObject(); } template Json& set(const char* key, T value) { String(key); Int64(value); return *this; } Json& startObject(const char* key) { String(key); StartObject(); return *this; } Json& startArray(const char* key) { String(key); StartArray(); return *this; } const char* str() { EndObject(); return buf.GetString(); } private: rapidjson::StringBuffer buf; }; template<> Json& Json::set(const char* key, double value) { String(key); Double(value); return *this; } template<> Json& Json::set(const char* key, const char* value) { String(key); String(value); return *this; } template<> Json& Json::set(const char* key, std::string value) { String(key); String(value.c_str()); return *this; } // short syntax helpers for kj::Path template inline kj::Path operator/(const kj::Path& p, const T& ext) { return p.append(ext); } template inline kj::Path operator/(const std::string& p, const T& ext) { return kj::Path{p}/ext; } typedef std::string str; Laminar::Laminar(Server &server, Settings settings) : srv(server), homePath(kj::Path::parse(&settings.home[1])), fsHome(kj::newDiskFilesystem()->getRoot().openSubdir(homePath, kj::WriteMode::MODIFY)), http(kj::heap(*this)), rpc(kj::heap(*this)) { LASSERT(settings.home[0] == '/'); if(fsHome->exists(homePath/"cfg"/"nodes")) { LLOG(ERROR, "Found node configuration directory cfg/nodes. Nodes have been deprecated, please migrate to contexts. Laminar will now exit."); exit(EXIT_FAILURE); } archiveUrl = settings.archive_url; if(archiveUrl.back() != '/') archiveUrl.append("/"); numKeepRunDirs = 0; db = new Database((homePath/"laminar.sqlite").toString(true).cStr()); // Prepare database for first use // TODO: error handling db->exec("CREATE TABLE IF NOT EXISTS builds(" "name TEXT, number INT UNSIGNED, node TEXT, queuedAt INT, " "startedAt INT, completedAt INT, result INT, output TEXT, " "outputLen INT, parentJob TEXT, parentBuild INT, reason TEXT, " "PRIMARY KEY (name, number))"); db->exec("CREATE INDEX IF NOT EXISTS idx_completion_time ON builds(" "completedAt DESC)"); // retrieve the last build numbers db->stmt("SELECT name, MAX(number) FROM builds GROUP BY name") .fetch([this](str name, uint build){ buildNums[name] = build; }); srv.watchPaths([this]{ LLOG(INFO, "Reloading configuration"); loadConfiguration(); // config change may allow stuck jobs to dequeue assignNewJobs(); }).addPath((homePath/"cfg"/"contexts").toString(true).cStr()) .addPath((homePath/"cfg"/"jobs").toString(true).cStr()) .addPath((homePath/"cfg").toString(true).cStr()); // for groups.conf loadCustomizations(); srv.watchPaths([this]{ LLOG(INFO, "Reloading customizations"); loadCustomizations(); }).addPath((homePath/"custom").toString(true).cStr()); srv.listenRpc(*rpc, settings.bind_rpc); srv.listenHttp(*http, settings.bind_http); // Load configuration, may be called again in response to an inotify event // that the configuration files have been modified loadConfiguration(); } void Laminar::loadCustomizations() { KJ_IF_MAYBE(templ, fsHome->tryOpenFile(kj::Path{"custom","index.html"})) { http->setHtmlTemplate((*templ)->readAllText().cStr()); } else { http->setHtmlTemplate(); } } uint Laminar::latestRun(std::string job) { if(auto it = buildNums.find(job); it != buildNums.end()) return it->second; return 0; } bool Laminar::handleLogRequest(std::string name, uint num, std::string& output, bool& complete) { if(Run* run = activeRun(name, num)) { output = run->log; complete = false; return true; } else { // it must be 
finished, fetch it from the database db->stmt("SELECT output, outputLen FROM builds WHERE name = ? AND number = ?") .bind(name, num) .fetch([&](str maybeZipped, unsigned long sz) { str log(sz,'\0'); if(sz >= COMPRESS_LOG_MIN_SIZE) { int res = ::uncompress((uint8_t*) log.data(), &sz, (const uint8_t*) maybeZipped.data(), maybeZipped.size()); if(res == Z_OK) std::swap(output, log); else LLOG(ERROR, "Failed to uncompress log", res); } else { std::swap(output, maybeZipped); } }); if(output.size()) { complete = true; return true; } } return false; } bool Laminar::setParam(std::string job, uint buildNum, std::string param, std::string value) { if(Run* run = activeRun(job, buildNum)) { run->params[param] = value; return true; } return false; } const std::list>& Laminar::listQueuedJobs() { return queuedJobs; } const RunSet& Laminar::listRunningJobs() { return activeJobs; } std::list Laminar::listKnownJobs() { std::list res; KJ_IF_MAYBE(dir, fsHome->tryOpenSubdir(kj::Path{"cfg","jobs"})) { for(kj::Directory::Entry& entry : (*dir)->listEntries()) { if(entry.name.endsWith(".run")) { res.emplace_back(entry.name.cStr(), entry.name.findLast('.').orDefault(0)); } } } return res; } void Laminar::populateArtifacts(Json &j, std::string job, uint num, kj::Path subdir) const { kj::Path runArchive{job,std::to_string(num)}; runArchive = runArchive.append(subdir); KJ_IF_MAYBE(dir, fsHome->tryOpenSubdir("archive"/runArchive)) { for(kj::StringPtr file : (*dir)->listNames()) { kj::FsNode::Metadata meta = (*dir)->lstat(kj::Path{file}); if(meta.type == kj::FsNode::Type::FILE) { j.StartObject(); j.set("url", archiveUrl + (runArchive/file).toString().cStr()); j.set("filename", (subdir/file).toString().cStr()); j.set("size", meta.size); j.EndObject(); } else if(meta.type == kj::FsNode::Type::DIRECTORY) { populateArtifacts(j, job, num, subdir/file); } } } } std::string Laminar::getStatus(MonitorScope scope) { Json j; j.set("type", "status"); j.set("title", getenv("LAMINAR_TITLE") ?: "Laminar"); j.set("version", laminar_version()); j.set("time", time(nullptr)); j.startObject("data"); if(scope.type == MonitorScope::RUN) { db->stmt("SELECT queuedAt,startedAt,completedAt,result,reason,parentJob,parentBuild,q.lr IS NOT NULL,q.lr FROM builds " "LEFT JOIN (SELECT name n, MAX(number), completedAt-startedAt lr FROM builds WHERE result IS NOT NULL GROUP BY n) q ON q.n = name " "WHERE name = ? AND number = ?") .bind(scope.job, scope.num) .fetch([&](time_t queued, time_t started, time_t completed, int result, std::string reason, std::string parentJob, uint parentBuild, uint lastRuntimeKnown, uint lastRuntime) { j.set("queued", queued); j.set("started", started); if(completed) j.set("completed", completed); j.set("result", to_string(completed ? RunState(result) : started ? RunState::RUNNING : RunState::QUEUED)); j.set("reason", reason); j.startObject("upstream").set("name", parentJob).set("num", parentBuild).EndObject(2); if(lastRuntimeKnown) j.set("etc", started + lastRuntime); }); if(auto it = buildNums.find(scope.job); it != buildNums.end()) j.set("latestNum", int(it->second)); j.startArray("artifacts"); populateArtifacts(j, scope.job, scope.num); j.EndArray(); } else if(scope.type == MonitorScope::JOB) { const uint runsPerPage = 20; j.startArray("recent"); // ORDER BY param cannot be bound std::string order_by; std::string direction = scope.order_desc ? 
"DESC" : "ASC"; if(scope.field == "number") order_by = "number " + direction; else if(scope.field == "result") order_by = "result " + direction + ", number DESC"; else if(scope.field == "started") order_by = "startedAt " + direction + ", number DESC"; else if(scope.field == "duration") order_by = "(completedAt-startedAt) " + direction + ", number DESC"; else order_by = "number DESC"; std::string stmt = "SELECT number,startedAt,completedAt,result,reason FROM builds " "WHERE name = ? AND result IS NOT NULL ORDER BY " + order_by + " LIMIT ?,?"; db->stmt(stmt.c_str()) .bind(scope.job, scope.page * runsPerPage, runsPerPage) .fetch([&](uint build,time_t started,time_t completed,int result,str reason){ j.StartObject(); j.set("number", build) .set("completed", completed) .set("started", started) .set("result", to_string(RunState(result))) .set("reason", reason) .EndObject(); }); j.EndArray(); db->stmt("SELECT COUNT(*),AVG(completedAt-startedAt) FROM builds WHERE name = ? AND result IS NOT NULL") .bind(scope.job) .fetch([&](uint nRuns, uint averageRuntime){ j.set("averageRuntime", averageRuntime); j.set("pages", (nRuns-1) / runsPerPage + 1); j.startObject("sort"); j.set("page", scope.page) .set("field", scope.field) .set("order", scope.order_desc ? "dsc" : "asc") .EndObject(); }); j.startArray("running"); auto p = activeJobs.byJobName().equal_range(scope.job); for(auto it = p.first; it != p.second; ++it) { const std::shared_ptr run = *it; j.StartObject(); j.set("number", run->build); j.set("context", run->context->name); j.set("started", run->startedAt); j.set("result", to_string(RunState::RUNNING)); j.set("reason", run->reason()); j.EndObject(); } j.EndArray(); int nQueued = 0; for(const auto& run : queuedJobs) { if (run->name == scope.job) { nQueued++; } } j.set("nQueued", nQueued); db->stmt("SELECT number,startedAt FROM builds WHERE name = ? AND result = ? " "ORDER BY completedAt DESC LIMIT 1") .bind(scope.job, int(RunState::SUCCESS)) .fetch([&](int build, time_t started){ j.startObject("lastSuccess"); j.set("number", build).set("started", started); j.EndObject(); }); db->stmt("SELECT number,startedAt FROM builds " "WHERE name = ? AND result <> ? " "ORDER BY completedAt DESC LIMIT 1") .bind(scope.job, int(RunState::SUCCESS)) .fetch([&](int build, time_t started){ j.startObject("lastFailed"); j.set("number", build).set("started", started); j.EndObject(); }); auto desc = jobDescriptions.find(scope.job); j.set("description", desc == jobDescriptions.end() ? 
"" : desc->second); } else if(scope.type == MonitorScope::ALL) { j.startArray("jobs"); db->stmt("SELECT name,number,startedAt,completedAt,result,reason FROM builds b " "JOIN (SELECT name n,MAX(number) latest FROM builds WHERE result IS NOT NULL GROUP BY n) q " "ON b.name = q.n AND b.number = latest") .fetch([&](str name,uint number, time_t started, time_t completed, int result, str reason){ j.StartObject(); j.set("name", name); j.set("number", number); j.set("result", to_string(RunState(result))); j.set("started", started); j.set("completed", completed); j.set("reason", reason); j.EndObject(); }); j.EndArray(); j.startArray("running"); for(const auto& run : activeJobs.byStartedAt()) { j.StartObject(); j.set("name", run->name); j.set("number", run->build); j.set("context", run->context->name); j.set("started", run->startedAt); j.EndObject(); } j.EndArray(); j.startObject("groups"); for(const auto& group : jobGroups) j.set(group.first.c_str(), group.second); j.EndObject(); } else { // Home page j.startArray("recent"); db->stmt("SELECT name,number,node,queuedAt,startedAt,completedAt,result,reason FROM builds WHERE completedAt IS NOT NULL ORDER BY completedAt DESC LIMIT 20") .fetch([&](str name,uint build,str context,time_t queued,time_t started,time_t completed,int result,str reason){ j.StartObject(); j.set("name", name) .set("number", build) .set("context", context) .set("queued", queued) .set("started", started) .set("completed", completed) .set("result", to_string(RunState(result))) .set("reason", reason) .EndObject(); }); j.EndArray(); j.startArray("running"); for(const auto& run : activeJobs.byStartedAt()) { j.StartObject(); j.set("name", run->name); j.set("number", run->build); j.set("context", run->context->name); j.set("started", run->startedAt); db->stmt("SELECT completedAt - startedAt FROM builds " "WHERE completedAt IS NOT NULL AND name = ? " "ORDER BY completedAt DESC LIMIT 1") .bind(run->name) .fetch([&](uint lastRuntime){ j.set("etc", run->startedAt + lastRuntime); }); j.EndObject(); } j.EndArray(); j.startArray("queued"); for(const auto& run : queuedJobs) { j.StartObject(); j.set("name", run->name); j.EndObject(); } j.EndArray(); int execTotal = 0; int execBusy = 0; for(const auto& it : contexts) { const std::shared_ptr& context = it.second; execTotal += context->numExecutors; execBusy += context->busyExecutors; } j.set("executorsTotal", execTotal); j.set("executorsBusy", execBusy); j.startArray("buildsPerDay"); for(int i = 6; i >= 0; --i) { j.StartObject(); db->stmt("SELECT result, COUNT(*) FROM builds WHERE completedAt > ? AND completedAt < ? GROUP BY result") .bind(86400*(time(nullptr)/86400 - i), 86400*(time(nullptr)/86400 - (i-1))) .fetch([&](int result, int num){ j.set(to_string(RunState(result)).c_str(), num); }); j.EndObject(); } j.EndArray(); j.startObject("buildsPerJob"); db->stmt("SELECT name, COUNT(*) c FROM builds WHERE completedAt > ? GROUP BY name ORDER BY c DESC LIMIT 5") .bind(time(nullptr) - 86400) .fetch([&](str job, int count){ j.set(job.c_str(), count); }); j.EndObject(); j.startObject("timePerJob"); db->stmt("SELECT name, AVG(completedAt-startedAt) av FROM builds WHERE completedAt > ? GROUP BY name ORDER BY av DESC LIMIT 8") .bind(time(nullptr) - 7 * 86400) .fetch([&](str job, double time){ j.set(job.c_str(), time); }); j.EndObject(); j.startArray("resultChanged"); db->stmt("SELECT b.name,MAX(b.number) as lastSuccess,lastFailure FROM builds AS b JOIN (SELECT name,MAX(number) AS lastFailure FROM builds WHERE result<>? 
GROUP BY name) AS t ON t.name=b.name WHERE b.result=? GROUP BY b.name ORDER BY lastSuccess>lastFailure, lastFailure-lastSuccess DESC LIMIT 8") .bind(int(RunState::SUCCESS), int(RunState::SUCCESS)) .fetch([&](str job, uint lastSuccess, uint lastFailure){ j.StartObject(); j.set("name", job) .set("lastSuccess", lastSuccess) .set("lastFailure", lastFailure); j.EndObject(); }); j.EndArray(); j.startArray("lowPassRates"); db->stmt("SELECT name,CAST(SUM(result==?) AS FLOAT)/COUNT(*) AS passRate FROM builds GROUP BY name ORDER BY passRate ASC LIMIT 8") .bind(int(RunState::SUCCESS)) .fetch([&](str job, double passRate){ j.StartObject(); j.set("name", job).set("passRate", passRate); j.EndObject(); }); j.EndArray(); j.startArray("buildTimeChanges"); db->stmt("SELECT name,GROUP_CONCAT(number),GROUP_CONCAT(completedAt-startedAt) FROM builds WHERE number > (SELECT MAX(number)-10 FROM builds b WHERE b.name=builds.name) GROUP BY name ORDER BY (MAX(completedAt-startedAt)-MIN(completedAt-startedAt))-STDEV(completedAt-startedAt) DESC LIMIT 8") .fetch([&](str name, str numbers, str durations){ j.StartObject(); j.set("name", name); j.startArray("numbers"); j.RawValue(numbers.data(), numbers.length(), rapidjson::Type::kArrayType); j.EndArray(); j.startArray("durations"); j.RawValue(durations.data(), durations.length(), rapidjson::Type::kArrayType); j.EndArray(); j.EndObject(); }); j.EndArray(); j.startObject("completedCounts"); db->stmt("SELECT name, COUNT(*) FROM builds WHERE result IS NOT NULL GROUP BY name") .fetch([&](str job, uint count){ j.set(job.c_str(), count); }); j.EndObject(); } j.EndObject(); return j.str(); } Laminar::~Laminar() noexcept try { delete db; } catch (std::exception& e) { LLOG(ERROR, e.what()); return; } bool Laminar::loadConfiguration() { if(const char* ndirs = getenv("LAMINAR_KEEP_RUNDIRS")) numKeepRunDirs = static_cast(atoi(ndirs)); std::set knownContexts; KJ_IF_MAYBE(contextsDir, fsHome->tryOpenSubdir(kj::Path{"cfg","contexts"})) { for(kj::Directory::Entry& entry : (*contextsDir)->listEntries()) { if(!entry.name.endsWith(".conf")) continue; StringMap conf = parseConfFile((homePath/"cfg"/"contexts"/entry.name).toString(true).cStr()); std::string name(entry.name.cStr(), entry.name.findLast('.').orDefault(0)); auto existing = contexts.find(name); std::shared_ptr context = existing == contexts.end() ? contexts.emplace(name, std::shared_ptr(new Context)).first->second : existing->second; context->name = name; context->numExecutors = conf.get("EXECUTORS", 6); std::string jobPtns = conf.get("JOBS"); std::set jobPtnsList; if(!jobPtns.empty()) { std::istringstream iss(jobPtns); std::string job; while(std::getline(iss, job, ',')) jobPtnsList.insert(job); } context->jobPatterns.swap(jobPtnsList); knownContexts.insert(name); } } // remove any contexts whose config files disappeared. // if there are no known contexts, take care not to remove and re-add the default context. 
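// (Illustrative aside, not shipped with the source: judging by the parsing in
// this function, a context file cfg/contexts/builders.conf might contain
//   EXECUTORS=4
//   JOBS=compile-*,test-*
// and a job file cfg/jobs/compile-foo.conf might contain
//   CONTEXTS=builders
//   DESCRIPTION=Example compile job
// where "builders", "compile-*", "test-*" and "compile-foo" are made-up names;
// JOBS and CONTEXTS are comma-separated glob patterns matched with fnmatch().)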
for(auto it = contexts.begin(); it != contexts.end();) { if((it->first == "default" && knownContexts.size() == 0) || knownContexts.find(it->first) != knownContexts.end()) it++; else it = contexts.erase(it); } // add a default context if(contexts.empty()) { LLOG(INFO, "Creating a default context with 6 executors"); std::shared_ptr context(new Context); context->name = "default"; context->numExecutors = 6; contexts.emplace("default", context); } KJ_IF_MAYBE(jobsDir, fsHome->tryOpenSubdir(kj::Path{"cfg","jobs"})) { for(kj::Directory::Entry& entry : (*jobsDir)->listEntries()) { if(!entry.name.endsWith(".conf")) continue; StringMap conf = parseConfFile((homePath/"cfg"/"jobs"/entry.name).toString(true).cStr()); std::string jobName(entry.name.cStr(), entry.name.findLast('.').orDefault(0)); std::string ctxPtns = conf.get("CONTEXTS"); std::set ctxPtnList; if(!ctxPtns.empty()) { std::istringstream iss(ctxPtns); std::string ctx; while(std::getline(iss, ctx, ',')) ctxPtnList.insert(ctx); } // Must be present both here and in queueJob because otherwise if a context // were created while a job is already queued, the default context would be // dropped when the set of contexts is updated here. if(ctxPtnList.empty()) ctxPtnList.insert("default"); jobContexts[jobName].swap(ctxPtnList); std::string desc = conf.get("DESCRIPTION"); if(!desc.empty()) { jobDescriptions[jobName] = desc; } } } jobGroups.clear(); KJ_IF_MAYBE(groupsConf, fsHome->tryOpenFile(kj::Path{"cfg","groups.conf"})) jobGroups = parseConfFile((homePath/"cfg"/"groups.conf").toString(true).cStr()); if(jobGroups.empty()) jobGroups["All Jobs"] = ".*"; return true; } std::shared_ptr Laminar::queueJob(std::string name, ParamMap params) { if(!fsHome->exists(kj::Path{"cfg","jobs",name+".run"})) { LLOG(ERROR, "Non-existent job", name); return nullptr; } // jobContexts[name] can be empty if there is no .conf file at all if(jobContexts[name].empty()) jobContexts.at(name).insert("default"); std::shared_ptr run = std::make_shared(name, ++buildNums[name], kj::mv(params), homePath.clone()); queuedJobs.push_back(run); db->stmt("INSERT INTO builds(name,number,queuedAt,parentJob,parentBuild,reason) VALUES(?,?,?,?,?,?)") .bind(run->name, run->build, run->queuedAt, run->parentName, run->parentBuild, run->reason()) .exec(); // notify clients Json j; j.set("type", "job_queued") .startObject("data") .set("name", name) .set("number", run->build) .EndObject(); http->notifyEvent(j.str(), name.c_str()); assignNewJobs(); return run; } bool Laminar::abort(std::string job, uint buildNum) { if(Run* run = activeRun(job, buildNum)) return run->abort(); return false; } void Laminar::abortAll() { for(std::shared_ptr run : activeJobs) { run->abort(); } } bool Laminar::canQueue(const Context& ctx, const Run& run) const { if(ctx.busyExecutors >= ctx.numExecutors) return false; // match may be jobs as defined by the context... for(std::string p : ctx.jobPatterns) { if(fnmatch(p.c_str(), run.name.c_str(), FNM_EXTMATCH) == 0) return true; } // ...or context as defined by the job. for(std::string p : jobContexts.at(run.name)) { if(fnmatch(p.c_str(), ctx.name.c_str(), FNM_EXTMATCH) == 0) return true; } return false; } bool Laminar::tryStartRun(std::shared_ptr run, int queueIndex) { for(auto& sc : contexts) { std::shared_ptr ctx = sc.second; if(canQueue(*ctx, *run)) { RunState lastResult = RunState::UNKNOWN; // set the last known result if exists. 
Runs which haven't started yet should // have completedAt == NULL and thus be at the end of a DESC ordered query db->stmt("SELECT result FROM builds WHERE name = ? ORDER BY completedAt DESC LIMIT 1") .bind(run->name) .fetch([&](int result){ lastResult = RunState(result); }); kj::Promise onRunFinished = run->start(lastResult, ctx, *fsHome,[this](kj::Maybe& pid){return srv.onChildExit(pid);}); db->stmt("UPDATE builds SET node = ?, startedAt = ? WHERE name = ? AND number = ?") .bind(ctx->name, run->startedAt, run->name, run->build) .exec(); ctx->busyExecutors++; kj::Promise exec = srv.readDescriptor(run->output_fd, [this, run](const char*b, size_t n){ // handle log output std::string s(b, n); run->log += s; http->notifyLog(run->name, run->build, s, false); }).then([run, p = kj::mv(onRunFinished)]() mutable { // wait until leader reaped return kj::mv(p); }).then([this, run](RunState){ handleRunFinished(run.get()); }); if(run->timeout > 0) { exec = exec.attach(srv.addTimeout(run->timeout, [r=run.get()](){ r->abort(); })); } srv.addTask(kj::mv(exec)); LLOG(INFO, "Started job", run->name, run->build, ctx->name); // notify clients Json j; j.set("type", "job_started") .startObject("data") .set("queueIndex", queueIndex) .set("name", run->name) .set("queued", run->queuedAt) .set("started", run->startedAt) .set("number", run->build) .set("reason", run->reason()); db->stmt("SELECT completedAt - startedAt FROM builds WHERE name = ? ORDER BY completedAt DESC LIMIT 1") .bind(run->name) .fetch([&](uint etc){ j.set("etc", time(nullptr) + etc); }); j.EndObject(); http->notifyEvent(j.str(), run->name.c_str()); return true; } } return false; } void Laminar::assignNewJobs() { auto it = queuedJobs.begin(); while(it != queuedJobs.end()) { if(tryStartRun(*it, std::distance(it, queuedJobs.begin()))) { activeJobs.insert(*it); it = queuedJobs.erase(it); } else { ++it; } } } void Laminar::handleRunFinished(Run * r) { std::shared_ptr ctx = r->context; ctx->busyExecutors--; LLOG(INFO, "Run completed", r->name, to_string(r->result)); time_t completedAt = time(nullptr); // compress log std::string maybeZipped = r->log; size_t logsize = r->log.length(); if(r->log.length() >= COMPRESS_LOG_MIN_SIZE) { std::string zipped(r->log.size(), '\0'); unsigned long zippedSize = zipped.size(); if(::compress((uint8_t*) zipped.data(), &zippedSize, (const uint8_t*) r->log.data(), logsize) == Z_OK) { zipped.resize(zippedSize); std::swap(maybeZipped, zipped); } } db->stmt("UPDATE builds SET completedAt = ?, result = ?, output = ?, outputLen = ? WHERE name = ? AND number = ?") .bind(completedAt, int(r->result), maybeZipped, logsize, r->name, r->build) .exec(); // notify clients Json j; j.set("type", "job_completed") .startObject("data") .set("name", r->name) .set("number", r->build) .set("queued", r->queuedAt) .set("completed", completedAt) .set("started", r->startedAt) .set("result", to_string(r->result)) .set("reason", r->reason()); j.startArray("artifacts"); populateArtifacts(j, r->name, r->build); j.EndArray(); j.EndObject(); http->notifyEvent(j.str(), r->name); http->notifyLog(r->name, r->build, "", true); // erase reference to run from activeJobs. Since runFinished is called in a // lambda whose context contains a shared_ptr, the run won't be deleted // until the context is destroyed at the end of the lambda execution. 
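// (Illustrative note, not part of the original source: the compression above
// only happens when the log is at least COMPRESS_LOG_MIN_SIZE bytes, and
// handleLogRequest() mirrors that check before calling ::uncompress(). A
// minimal zlib round trip for a buffer `src` would look roughly like
//   uLongf zlen = compressBound(src.size());
//   std::string z(zlen, '\0');
//   ::compress((Bytef*)z.data(), &zlen, (const Bytef*)src.data(), src.size());
//   z.resize(zlen);
// where compressBound() is the usual zlib helper for sizing the output buffer.)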
activeJobs.byRunPtr().erase(r); // remove old run directories // We cannot count back the number of directories to keep from the currently // finishing job because there may well be older, still-running instances of // this job and we don't want to delete their rundirs. So instead, check // whether there are any more active runs of this job, and if so, count back // from the oldest among them. If there are none, count back from the latest // known build number of this job, which may not be that of the run that // finished here. auto it = activeJobs.byJobName().equal_range(r->name); uint oldestActive = (it.first == it.second)? buildNums[r->name] : (*it.first)->build - 1; for(int i = static_cast(oldestActive - numKeepRunDirs); i > 0; i--) { kj::Path d{"run",r->name,std::to_string(i)}; // Once the directory does not exist, it's probably not worth checking // any further. 99% of the time this loop should only ever have 1 iteration // anyway so hence this (admittedly debatable) optimization. if(!fsHome->exists(d)) break; // must use a try/catch because remove will throw if deletion fails. Using // tryRemove does not help because it still throws an exception for some // errors such as EACCES try { fsHome->remove(d); } catch(kj::Exception& e) { LLOG(ERROR, "Could not remove directory", e.getDescription()); } } fsHome->symlink(kj::Path{"archive", r->name, "latest"}, std::to_string(r->build), kj::WriteMode::CREATE|kj::WriteMode::MODIFY); // in case we freed up an executor, check the queue assignNewJobs(); } kj::Maybe> Laminar::getArtefact(std::string path) { return fsHome->openFile(kj::Path("archive").append(kj::Path::parse(path))); } bool Laminar::handleBadgeRequest(std::string job, std::string &badge) { RunState rs = RunState::UNKNOWN; db->stmt("SELECT result FROM builds WHERE name = ? AND result IS NOT NULL ORDER BY number DESC LIMIT 1") .bind(job) .fetch([&](int result){ rs = RunState(result); }); if(rs == RunState::UNKNOWN) return false; std::string status = to_string(rs); // Empirical approximation of pixel width. Not particularly stable. const int jobNameWidth = job.size() * 7 + 10; const int statusWidth = status.size() * 7 + 10; const char* gradient1 = (rs == RunState::SUCCESS) ? "#2aff4d" : "#ff2a2a"; const char* gradient2 = (rs == RunState::SUCCESS) ? "#24b43c" : "#b42424"; char* svg = NULL; if(asprintf(&svg, R"x( %s %s )x", jobNameWidth+statusWidth, jobNameWidth+statusWidth, gradient1, gradient2, jobNameWidth, jobNameWidth/2+1, job.data(), jobNameWidth, statusWidth, jobNameWidth+statusWidth/2, status.data()) < 0) return false; badge = svg; return true; } laminar-1.1/src/laminar.h000066400000000000000000000111141410235453200153560ustar00rootroot00000000000000/// /// Copyright 2015-2020 Oliver Giles /// /// This file is part of Laminar /// /// Laminar is free software: you can redistribute it and/or modify /// it under the terms of the GNU General Public License as published by /// the Free Software Foundation, either version 3 of the License, or /// (at your option) any later version. /// /// Laminar is distributed in the hope that it will be useful, /// but WITHOUT ANY WARRANTY; without even the implied warranty of /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the /// GNU General Public License for more details. /// /// You should have received a copy of the GNU General Public License /// along with Laminar. 
If not, see /// #ifndef LAMINAR_LAMINAR_H_ #define LAMINAR_LAMINAR_H_ #include "run.h" #include "monitorscope.h" #include "context.h" #include "database.h" #include #include #include // Context name to context object map typedef std::unordered_map> ContextMap; class Server; class Json; class Http; class Rpc; struct Settings { const char* home; const char* bind_rpc; const char* bind_http; const char* archive_url; }; // The main class implementing the application's business logic. class Laminar final { public: Laminar(Server& server, Settings settings); ~Laminar() noexcept; // Queues a job, returns immediately. Return value will be nullptr if // the supplied name is not a known job. std::shared_ptr queueJob(std::string name, ParamMap params = ParamMap()); // Return the latest known number of the named job uint latestRun(std::string job); // Given a job name and number, return existence and (via reference params) // its current log output and whether the job is ongoing bool handleLogRequest(std::string name, uint num, std::string& output, bool& complete); // Given a relevant scope, returns a JSON string describing the current // server status. Content differs depending on the page viewed by the user, // which should be provided as part of the scope. std::string getStatus(MonitorScope scope); // Implements the laminarc function of setting arbitrary parameters on a run, // (typically the current run) which will be made available in the environment // of subsequent scripts. bool setParam(std::string job, uint buildNum, std::string param, std::string value); // Gets the list of jobs currently waiting in the execution queue const std::list>& listQueuedJobs(); // Gets the list of currently executing jobs const RunSet& listRunningJobs(); // Gets the list of known jobs - scans cfg/jobs for *.run files std::list listKnownJobs(); // Fetches the content of an artifact given its filename relative to // $LAMINAR_HOME/archive. Ideally, this would instead be served by a // proper web server which handles this url. kj::Maybe> getArtefact(std::string path); // Given the name of a job, populate the provided string reference with // SVG content describing the last known state of the job. Returns false // if the job is unknown. bool handleBadgeRequest(std::string job, std::string& badge); // Aborts a single job bool abort(std::string job, uint buildNum); // Abort all running jobs void abortAll(); private: bool loadConfiguration(); void loadCustomizations(); void assignNewJobs(); bool canQueue(const Context& ctx, const Run& run) const; bool tryStartRun(std::shared_ptr run, int queueIndex); void handleRunFinished(Run*); // expects that Json has started an array void populateArtifacts(Json& out, std::string job, uint num, kj::Path subdir = kj::Path::parse(".")) const; Run* activeRun(const std::string name, uint num) { auto it = activeJobs.byNameNumber().find(boost::make_tuple(name, num)); return it == activeJobs.byNameNumber().end() ? 
nullptr : it->get(); } std::list> queuedJobs; std::unordered_map buildNums; std::unordered_map> jobContexts; std::unordered_map jobDescriptions; std::unordered_map jobGroups; RunSet activeJobs; Database* db; Server& srv; ContextMap contexts; kj::Path homePath; kj::Own fsHome; uint numKeepRunDirs; std::string archiveUrl; kj::Own http; kj::Own rpc; }; #endif // LAMINAR_LAMINAR_H_ laminar-1.1/src/leader.cpp000066400000000000000000000273521410235453200155350ustar00rootroot00000000000000/// /// Copyright 2019-2020 Oliver Giles /// /// This file is part of Laminar /// /// Laminar is free software: you can redistribute it and/or modify /// it under the terms of the GNU General Public License as published by /// the Free Software Foundation, either version 3 of the License, or /// (at your option) any later version. /// /// Laminar is distributed in the hope that it will be useful, /// but WITHOUT ANY WARRANTY; without even the implied warranty of /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the /// GNU General Public License for more details. /// /// You should have received a copy of the GNU General Public License /// along with Laminar. If not, see /// #include "log.h" #include #include #include #include #include #include #include #include #include #include #include "run.h" // short syntax helper for kj::Path template inline kj::Path operator/(const kj::Path& p, const T& ext) { return p.append(ext); } template inline kj::Path operator/(const kj::PathPtr& p, const T& ext) { return p.append(ext); } struct Script { kj::Path path; kj::Path cwd; bool runOnAbort; }; static void aggressive_recursive_kill(pid_t parent) { DIR* proc = opendir("/proc"); if(!proc) return; while(struct dirent* de = readdir(proc)) { if(!isdigit(*de->d_name)) continue; char status_file[640]; sprintf(status_file, "/proc/%s/status", de->d_name); FILE* status_fp = fopen(status_file, "rb"); if(!status_fp) continue; char status_buffer[512]; int n = fread(status_buffer, 1, 512, status_fp); if(char* p = (char*)memmem(status_buffer, n, "PPid:\t", 6)) { pid_t ppid = strtol(p + 6, NULL, 10); if(ppid == parent) { pid_t pid = atoi(de->d_name); aggressive_recursive_kill(pid); fprintf(stderr, "[laminar] sending SIGKILL to pid %d\n", pid); kill(pid, SIGKILL); } } fclose(status_fp); } closedir(proc); } class Leader final : public kj::TaskSet::ErrorHandler { public: Leader(kj::AsyncIoContext& ioContext, kj::Filesystem& fs, const char* jobName, uint runNumber); RunState run(); private: void taskFailed(kj::Exception&& exception) override; kj::Promise step(std::queue
Connecting...
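A minimal sketch (not taken from the source tree) of the /proc walk that leader.cpp's aggressive_recursive_kill() performs above: it scans every numeric entry under /proc, reads the PPid: field from the process's status file, and collects any process whose parent matches. The buffer sizes and error handling here are simplified assumptions.

#include <dirent.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cctype>
#include <vector>
#include <sys/types.h>

// Collect the direct children of `parent` by parsing /proc/<pid>/status.
static std::vector<pid_t> children_of(pid_t parent) {
    std::vector<pid_t> kids;
    DIR* proc = opendir("/proc");
    if(!proc) return kids;
    while(struct dirent* de = readdir(proc)) {
        if(!isdigit((unsigned char)*de->d_name)) continue;
        char path[64];
        snprintf(path, sizeof(path), "/proc/%s/status", de->d_name);
        FILE* fp = fopen(path, "rb");
        if(!fp) continue;
        char line[256];
        while(fgets(line, sizeof(line), fp)) {
            if(strncmp(line, "PPid:", 5) == 0) {
                if((pid_t)strtol(line + 5, nullptr, 10) == parent)
                    kids.push_back((pid_t)atoi(de->d_name));
                break;
            }
        }
        fclose(fp);
    }
    closedir(proc);
    return kids;
}

The real implementation then recurses into each child before sending SIGKILL, so the process tree is terminated bottom-up.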
laminar-1.1/src/resources/js/000077500000000000000000000000001410235453200162125ustar00rootroot00000000000000laminar-1.1/src/resources/js/app.js000066400000000000000000000715011410235453200173340ustar00rootroot00000000000000/* laminar.js * frontend application for Laminar Continuous Integration * https://laminar.ohwg.net */ // A hash function added to String helps generating consistent // colours from job names for use in charts String.prototype.hashCode = function() { for(var r=0, i=0; i { const exp = Math.floor(Math.log(bytes) / Math.log(1024)); return (bytes / Math.pow(1024, exp)).toFixed(1) + ' ' + ['B', 'KiB', 'MiB', 'GiB', 'TiB'][exp]; }); // Mixin for periodically updating a progress bar Vue.mixin({ data: () => ({ jobsRunning: [] }), methods: { updateProgress(o) { if (o.etc) { const p = (Math.floor(Date.now()/1000) + this.$root.clockSkew - o.started) / (o.etc - o.started); if (p > 1.2) o.overtime = true; o.progress = (p >= 1) ? 99 : 100 * p; } } }, beforeDestroy: () => { clearInterval(this.updateTimer); }, watch: { jobsRunning(val) { // this function handles several cases: // - the route has changed to a different run of the same job // - the current job has ended // - the current job has started (practically hard to reach) clearInterval(this.updateTimer); if (val.length) { // set the current progress update first this.jobsRunning.forEach(this.updateProgress); this.$forceUpdate(); // then update with animation every second this.updateTimer = setInterval(() => { this.jobsRunning.forEach(this.updateProgress); this.$forceUpdate(); }, 1000); } } } }); // Utility methods Vue.mixin({ methods: { // Get an svg icon given a run result runIcon: result => (result == 'success') ? /* checkmark */ ` ` : (result == 'failed' || result == 'aborted') ? /* cross */ ` ` : (result == 'queued') ? /* clock */ ` ` : /* spinner */ ` `, // Pretty-print a unix date formatDate: unix => { // TODO: reimplement when toLocaleDateString() accepts formatting options on most browsers const d = new Date(1000 * unix); let m = d.getMinutes(); if (m < 10) m = '0' + m; return d.getHours() + ':' + m + ' on ' + ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'][d.getDay()] + ' ' + d.getDate() + '. ' + ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][d.getMonth()] + ' ' + d.getFullYear(); }, // Pretty-print a duration formatDuration: function(start, end) { if(!end) end = Math.floor(Date.now()/1000) + this.$root.clockSkew; if(end - start > 3600) return Math.floor((end-start)/3600) + ' hours, ' + Math.floor(((end-start)%3600)/60) + ' minutes'; else if(end - start > 60) return Math.floor((end-start)/60) + ' minutes, ' + ((end-start)%60) + ' seconds'; else return (end-start) + ' seconds'; } } }); // Chart factory const Charts = (() => { // TODO usage is broken! const timeScale = max => max > 3600 ? { factor: 1/3600, ticks: v => v.toFixed(1), label:'Hours' } : max > 60 ? { factor: 1/60, ticks: v => v.toFixed(1), label:'Minutes' } : { factor: 1, ticks: v => v, label:'Seconds' }; return { createExecutorUtilizationChart: (id, nBusy, nTotal) => { const c = new Chart(document.getElementById(id), { type: 'pie', data: { labels: [ "Busy", "Idle" ], datasets: [{ data: [ nBusy, nTotal - nBusy ], backgroundColor: [ "#afa674", "#7483af" ] }] }, options: { hover: { mode: null } } }); c.executorBusyChanged = busy => { c.data.datasets[0].data[0] += busy ? 1 : -1; c.data.datasets[0].data[1] -= busy ? 
1 : -1; c.update(); } return c; }, createRunsPerDayChart: (id, data) => { const dayNames = (() => { const res = []; var now = new Date(); for (var i = 6; i >= 0; --i) { var then = new Date(now.getTime() - i * 86400000); res.push({ short: ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"][then.getDay()], long: then.toLocaleDateString()} ); } return res; })(); const c = new Chart(document.getElementById(id), { type: 'line', data: { labels: dayNames.map(e => e.short), datasets: [{ label: 'Failed Builds', backgroundColor: "#883d3d", data: data.map(e => e.failed || 0) },{ label: 'Successful Builds', backgroundColor: "#74af77", data: data.map(e => e.success || 0) }] }, options:{ title: { display: true, text: 'Runs per day' }, tooltips:{callbacks:{title: (tip, data) => dayNames[tip[0].index].long}}, scales:{yAxes:[{ ticks:{userCallback: (label, index, labels) => Number.isInteger(label) ? label: null}, stacked: true }]} } }); c.jobCompleted = success => { c.data.datasets[success ? 1 : 0].data[6]++; c.update(); } return c; }, createRunsPerJobChart: (id, data) => { const c = new Chart(document.getElementById("chartBpj"), { type: 'horizontalBar', data: { labels: Object.keys(data), datasets: [{ label: 'Runs in last 24 hours', backgroundColor: "#7483af", data: Object.keys(data).map(e => data[e]) }] }, options:{ title: { display: true, text: 'Runs per job' }, hover: { mode: null }, scales:{xAxes:[{ticks:{userCallback: (label, index, labels)=> Number.isInteger(label) ? label: null}}]} } }); c.jobCompleted = name => { for (var j = 0; j < c.data.datasets[0].data.length; ++j) { if (c.data.labels[j] == name) { c.data.datasets[0].data[j]++; c.update(); return; } } // if we get here, it's a new/unknown job c.data.labels.push(name); c.data.datasets[0].data.push(1); c.update(); } return c; }, createTimePerJobChart: (id, data, completedCounts) => { const scale = timeScale(Math.max(...Object.values(data))); const c = new Chart(document.getElementById(id), { type: 'horizontalBar', data: { labels: Object.keys(data), datasets: [{ label: 'Mean run time this week', backgroundColor: "#7483af", data: Object.keys(data).map(e => data[e] * scale.factor) }] }, options:{ title: { display: true, text: 'Mean run time this week' }, hover: { mode: null }, scales:{xAxes:[{ ticks:{userCallback: scale.ticks}, scaleLabel: { display: true, labelString: scale.label } }]}, tooltips:{callbacks:{ label: (tip, data) => data.datasets[tip.datasetIndex].label + ': ' + tip.xLabel.toFixed(2) + ' ' + scale.label.toLowerCase() }} } }); c.jobCompleted = (name, time) => { for (var j = 0; j < c.data.datasets[0].data.length; ++j) { if (c.data.labels[j] == name) { c.data.datasets[0].data[j] = ((completedCounts[name]-1) * c.data.datasets[0].data[j] + time * scale.factor) / completedCounts[name]; c.update(); return; } } // if we get here, it's a new/unknown job c.data.labels.push(name); c.data.datasets[0].data.push(time * scale.factor); c.update(); }; return c; }, createRunTimeChangesChart: (id, data) => { const scale = timeScale(Math.max(...data.map(e => Math.max(...e.durations)))); const dataValue = (name, durations) => ({ label: name, data: durations.map(x => x * scale.factor), borderColor: 'hsl('+(name.hashCode() % 360)+', 27%, 57%)', backgroundColor: 'transparent' }); const c = new Chart(document.getElementById(id), { type: 'line', data: { labels: [...Array(10).keys()], datasets: data.map(e => dataValue(e.name, e.durations)) }, options:{ title: { display: true, text: 'Run time changes' }, legend:{ display: true, position: 'bottom' }, scales:{ 
xAxes:[{ticks:{display: false}}], yAxes:[{ ticks:{userCallback: scale.ticks}, scaleLabel: { display: true, labelString: scale.label } }] }, tooltips:{ enabled:false } } }); c.jobCompleted = (name, time) => { for (var j = 0; j < c.data.datasets.length; ++j) { if (c.data.datasets[j].label == name) { if(c.data.datasets[j].data.length == 10) c.data.datasets[j].data.shift(); c.data.datasets[j].data.push(time * scale.factor); c.update(); return; } } // if we get here, it's a new/unknown job c.data.datasets.push(dataValue(name, [time])); c.update(); }; return c; }, createRunTimeChart: (id, jobs, avg) => { const scale = timeScale(Math.max(...jobs.map(v=>v.completed-v.started))); const c = new Chart(document.getElementById(id), { type: 'bar', data: { labels: jobs.map(e => '#' + e.number).reverse(), datasets: [{ label: 'Average', type: 'line', data: [{x:0, y:avg * scale.factor}, {x:1, y:avg * scale.factor}], borderColor: '#7483af', backgroundColor: 'transparent', xAxisID: 'avg', pointRadius: 0, pointHitRadius: 0, pointHoverRadius: 0, },{ label: 'Build time', backgroundColor: jobs.map(e => e.result == 'success' ? '#74af77': '#883d3d').reverse(), data: jobs.map(e => (e.completed - e.started) * scale.factor).reverse() }] }, options: { title: { display: true, text: 'Build time' }, hover: { mode: null }, scales:{ xAxes:[{ categoryPercentage: 0.95, barPercentage: 1.0 },{ id: 'avg', type: 'linear', ticks: { display: false }, gridLines: { display: false, drawBorder: false } }], yAxes:[{ ticks:{userCallback: scale.ticks}, scaleLabel:{display: true, labelString: scale.label} }] }, tooltips:{callbacks:{ label: (tip, data) => scale.ticks(tip.yLabel) + ' ' + scale.label.toLowerCase() }} } }); c.jobCompleted = (num, result, time) => { let avg = c.data.datasets[0].data[0].y / scale.factor; avg = ((avg * (num - 1)) + time) / num; c.data.datasets[0].data[0].y = avg * scale.factor; c.data.datasets[0].data[1].y = avg * scale.factor; if(c.data.datasets[1].data.length == 20) { c.data.labels.shift(); c.data.datasets[1].data.shift(); c.data.datasets[1].backgroundColor.shift(); } c.data.labels.push('#' + num); c.data.datasets[1].data.push(time * scale.factor); c.data.datasets[1].backgroundColor.push(result == 'success' ? 
'#74af77': '#883d3d'); c.update(); }; return c; } }; })(); // For all charts, set miniumum Y to 0 Chart.scaleService.updateScaleDefaults('linear', { ticks: { suggestedMin: 0 } }); // Don't display legend by default Chart.defaults.global.legend.display = false; // Disable tooltip hover animations Chart.defaults.global.hover.animationDuration = 0; // Component for the / endpoint const Home = templateId => { const state = { jobsQueued: [], jobsRecent: [], resultChanged: [], lowPassRates: [], }; let chtUtilization, chtBuildsPerDay, chtBuildsPerJob, chtTimePerJob; let completedCounts; return { template: templateId, data: () => state, methods: { status: function(msg) { state.jobsQueued = msg.queued; state.jobsRunning = msg.running; state.jobsRecent = msg.recent; state.resultChanged = msg.resultChanged; state.lowPassRates = msg.lowPassRates; completedCounts = msg.completedCounts; this.$forceUpdate(); // defer charts to nextTick because they get DOM elements which aren't rendered yet this.$nextTick(() => { chtUtilization = Charts.createExecutorUtilizationChart("chartUtil", msg.executorsBusy, msg.executorsTotal); chtBuildsPerDay = Charts.createRunsPerDayChart("chartBpd", msg.buildsPerDay); chtBuildsPerJob = Charts.createRunsPerJobChart("chartBpj", msg.buildsPerJob); chtTimePerJob = Charts.createTimePerJobChart("chartTpj", msg.timePerJob, completedCounts); chtBuildTimeChanges = Charts.createRunTimeChangesChart("chartBuildTimeChanges", msg.buildTimeChanges); }); }, job_queued: function(data) { state.jobsQueued.splice(0, 0, data); this.$forceUpdate(); }, job_started: function(data) { state.jobsQueued.splice(state.jobsQueued.length - data.queueIndex - 1, 1); state.jobsRunning.splice(0, 0, data); this.$forceUpdate(); chtUtilization.executorBusyChanged(true); }, job_completed: function(data) { if(!(job.name in completedCounts)) completedCounts[job.name] = 0; for(let i = 0; i < state.jobsRunning.length; ++i) { const job = state.jobsRunning[i]; if (job.name == data.name && job.number == data.number) { state.jobsRunning.splice(i, 1); state.jobsRecent.splice(0, 0, data); this.$forceUpdate(); break; } } for(let i = 0; i < state.resultChanged.length; ++i) { const job = state.resultChanged[i]; if(job.name == data.name) { job[data.result === 'success' ? 'lastSuccess' : 'lastFailure'] = data.number; this.$forceUpdate(); break; } } for(let i = 0; i < state.lowPassRates.length; ++i) { const job = state.lowPassRates[i]; if(job.name == data.name) { job.passRate = ((completedCounts[job.name] - 1) * job.passRate + (data.result === 'success' ? 1 : 0)) / completedCounts[job.name]; this.$forceUpdate(); break; } } completedCounts[job.name]++; chtBuildsPerDay.jobCompleted(data.result === 'success') chtUtilization.executorBusyChanged(false); chtBuildsPerJob.jobCompleted(data.name); chtTimePerJob.jobCompleted(data.name, data.completed - data.started); chtBuildTimeChanges.jobCompleted(data.name, data.completed - data.started); } } }; }; // Component for the /jobs and /wallboard endpoints const All = templateId => { const state = { jobs: [], search: '', groups: {}, regexps: {}, group: null, ungrouped: [] }; return { template: templateId, data: () => state, methods: { status: function(msg) { state.jobs = msg.jobs; state.jobsRunning = msg.running; // mix running and completed jobs msg.running.forEach(job => { const idx = state.jobs.findIndex(j => j.name === job.name); if (idx > -1) state.jobs[idx] = job; else { // special case: first run of a job. state.jobs.unshift(j); state.jobs.sort((a, b) => a.name < b.name ? 
-1 : a.name > b.name ? 1 : 0); } }); state.groups = {}; Object.keys(msg.groups).forEach(k => state.regexps[k] = new RegExp(state.groups[k] = msg.groups[k])); state.ungrouped = state.jobs.filter(j => !Object.values(state.regexps).some(r => r.test(j.name))).map(j => j.name); state.group = state.ungrouped.length ? null : Object.keys(state.groups)[0]; }, job_started: function(data) { data.result = 'running'; // for wallboard css // jobsRunning must be maintained for ProgressUpdater let updAt = state.jobsRunning.findIndex(j => j.name === data.name); if (updAt === -1) { state.jobsRunning.unshift(data); } else { state.jobsRunning[updAt] = data; } updAt = state.jobs.findIndex(j => j.name === data.name); if (updAt === -1) { // first execution of new job. TODO insert without resort state.jobs.unshift(data); state.jobs.sort((a, b) => a.name < b.name ? -1 : a.name > b.name ? 1 : 0); if(!Object.values(state.regexps).some(r => r.test(data.name))) state.ungrouped.push(data.name); } else { state.jobs[updAt] = data; } this.$forceUpdate(); }, job_completed: function(data) { let updAt = state.jobs.findIndex(j => j.name === data.name); if (updAt > -1) state.jobs[updAt] = data; updAt = state.jobsRunning.findIndex(j => j.name === data.name); if (updAt > -1) { state.jobsRunning.splice(updAt, 1); this.$forceUpdate(); } }, filteredJobs: function() { let ret = []; if (state.group) ret = state.jobs.filter(job => state.regexps[state.group].test(job.name)); else ret = state.jobs.filter(job => state.ungrouped.includes(job.name)); if (this.search) ret = ret.filter(job => job.name.indexOf(this.search) > -1); return ret; }, wallboardJobs: function() { let ret = []; const expr = (new URLSearchParams(window.location.search)).get('filter'); if (expr) ret = state.jobs.filter(job => (new RegExp(expr)).test(job.name)); else ret = state.jobs; // sort failed before success, newest first ret.sort((a,b) => a.result == b.result ? a.started - b.started : 2*(b.result == 'success')-1); return ret; }, wallboardLink: function() { return 'wallboard' + (state.group ? '?filter=' + state.groups[state.group] : ''); } } }; }; // Component for the /job/:name endpoint const Job = templateId => { const state = { description: '', jobsRunning: [], jobsRecent: [], lastSuccess: null, lastFailed: null, nQueued: 0, pages: 0, sort: {} }; let chtBuildTime = null; return { template: templateId, props: ['route'], data: () => state, methods: { status: function(msg) { state.description = msg.description; state.jobsRunning = msg.running; state.jobsRecent = msg.recent; state.lastSuccess = msg.lastSuccess; state.lastFailed = msg.lastFailed; state.nQueued = msg.nQueued; state.pages = msg.pages; state.sort = msg.sort; // "status" comes again if we change page/sorting. 
Delete the // old chart and recreate it to prevent flickering of old data if(chtBuildTime) chtBuildTime.destroy(); // defer chart to nextTick because they get DOM elements which aren't rendered yet this.$nextTick(() => { chtBuildTime = Charts.createRunTimeChart("chartBt", msg.recent, msg.averageRuntime); }); }, job_queued: function() { state.nQueued++; }, job_started: function(data) { state.nQueued--; state.jobsRunning.splice(0, 0, data); this.$forceUpdate(); }, job_completed: function(data) { const i = state.jobsRunning.findIndex(j => j.number === data.number); if (i > -1) { state.jobsRunning.splice(i, 1); state.jobsRecent.splice(0, 0, data); this.$forceUpdate(); } chtBuildTime.jobCompleted(data.number, data.result, data.completed - data.started); }, page_next: function() { state.sort.page++; this.query(state.sort) }, page_prev: function() { state.sort.page--; this.query(state.sort) }, do_sort: function(field) { if(state.sort.field == field) { state.sort.order = state.sort.order == 'asc' ? 'dsc' : 'asc'; } else { state.sort.order = 'dsc'; state.sort.field = field; } this.query(state.sort) }, query: function(q) { this.$root.$emit('navigate', q); } } }; }; // Component for the /job/:name/:number endpoint const Run = templateId => { const utf8decoder = new TextDecoder('utf-8'); const ansi_up = new AnsiUp; ansi_up.use_classes = true; const state = { job: { artifacts: [], upstream: {} }, latestNum: null, log: '', }; const logFetcher = (vm, name, num) => { const abort = new AbortController(); fetch('log/'+name+'/'+num, {signal:abort.signal}).then(res => { // ATOW pipeThrough not supported in Firefox //const reader = res.body.pipeThrough(new TextDecoderStream).getReader(); const reader = res.body.getReader(); return function pump() { return reader.read().then(({done, value}) => { value = utf8decoder.decode(value); if (done) return; state.log += ansi_up.ansi_to_html( value.replace(/\033\[\{([^:]+):(\d+)\033\\/g, (m, $1, $2) => ''+$1+':'+ '#'+$2+'' ) ); vm.$forceUpdate(); return pump(); }); }(); }).catch(e => {}); return abort; } return { template: templateId, data: () => state, props: ['route'], methods: { status: function(data) { // Check for the /latest endpoint const params = this._props.route.params; if(params.number === 'latest') return this.$router.replace('jobs/' + params.name + '/' + data.latestNum); state.number = parseInt(params.number); state.jobsRunning = []; state.job = data; state.latestNum = data.latestNum; state.jobsRunning = [data]; state.log = ''; if(this.logstream) this.logstream.abort(); if(data.started) this.logstream = logFetcher(this, params.name, params.number); }, job_queued: function(data) { state.latestNum = data.number; this.$forceUpdate(); }, job_started: function(data) { if(data.number === state.number) { state.job = Object.assign(state.job, data); state.job.result = 'running'; if(this.logstream) this.logstream.abort(); this.logstream = logFetcher(this, data.name, data.number); this.$forceUpdate(); } }, job_completed: function(data) { if(data.number === state.number) { state.job = Object.assign(state.job, data); state.jobsRunning = []; this.$forceUpdate(); } }, runComplete: function(run) { return !!run && (run.result === 'aborted' || run.result === 'failed' || run.result === 'success'); }, } }; }; Vue.component('RouterLink', { name: 'router-link', props: { to: { type: String }, tag: { type: String, default: 'a' } }, template: ``, methods: { navigate: function(e) { e.preventDefault(); history.pushState(null, null, this.to); this.$root.$emit('navigate'); } } }); 
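// Illustrative aside (not part of the original app.js): the RouterView below
// dispatches each server-sent message to a method on the active page component
// whose name matches msg.type, so a minimal page component only needs handlers
// named after the event types it cares about, e.g.
//   const Example = {
//     template: '#example',            // '#example' is a made-up template id
//     data: () => ({ jobsRecent: [] }),
//     methods: {
//       status: function(msg)        { this.jobsRecent = msg.recent; },
//       job_completed: function(run) { this.jobsRecent.unshift(run); }
//     }
//   };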
Vue.component('RouterView', (() => { const routes = [ { path: /^$/, component: Home('#home') }, { path: /^jobs$/, component: All('#jobs') }, { path: /^wallboard$/, component: All('#wallboard') }, { path: /^jobs\/(?[^\/]+)$/, component: Job('#job') }, { path: /^jobs\/(?[^\/]+)\/(?\d+)$/, component: Run('#run') } ]; const resolveRoute = path => { for(i in routes) { const r = routes[i].path.exec(path); if(r) return [routes[i].component, r.groups]; } } let eventSource = null; const setupEventSource = (view, query) => { // drop any existing event source if(eventSource) eventSource.close(); const path = (location.origin+location.pathname).substr(document.head.baseURI.length); const search = query ? '?' + Object.entries(query).map(([k,v])=>`${k}=${v}`).join('&') : ''; eventSource = new EventSource(document.head.baseURI + path + search); eventSource.reconnectInterval = 500; eventSource.onmessage = msg => { msg = JSON.parse(msg.data); if(msg.type === 'status') { // Event source is connected. Update static data document.title = view.$root.title = msg.title; view.$root.version = msg.version; // Calculate clock offset (used by ProgressUpdater) view.$root.clockSkew = msg.time - Math.floor((new Date()).getTime()/1000); view.$root.connected = true; [view.currentView, route.params] = resolveRoute(path); // the component won't be instantiated until nextTick view.$nextTick(() => { // component is ready, update it with the data from the eventsource eventSource.comp = view.$children[0]; // and finally run the component handler eventSource.comp[msg.type](msg.data); }); } else { // at this point, the component must be defined if (!eventSource.comp) return console.error("Page component was undefined"); view.$root.connected = true; view.$root.showNotify(msg.type, msg.data); if(typeof eventSource.comp[msg.type] === 'function') eventSource.comp[msg.type](msg.data); } } eventSource.onerror = err => { let ri = eventSource.reconnectInterval; view.$root.connected = false; setTimeout(() => { setupEventSource(view); if(ri < 7500) ri *= 1.5; eventSource.reconnectInterval = ri }, ri); eventSource.close(); } }; let route = {}; return { name: 'router-view', template: ``, data: () => ({ currentView: routes[0].component, // default to home route: route }), created: function() { this.$root.$on('navigate', query => { setupEventSource(this, query); }); window.addEventListener('popstate', () => { this.$root.$emit('navigate'); }); // initial navigation this.$root.$emit('navigate'); } }; })()); new Vue({ el: '#app', data: { title: '', // populated by status message version: '', clockSkew: 0, connected: false, notify: 'localStorage' in window && localStorage.getItem('showNotifications') == 1, route: { path: '', params: {} } }, computed: { supportsNotifications: () => 'Notification' in window && Notification.permission !== 'denied' }, methods: { toggleNotifications: function(en) { if(Notification.permission !== 'granted') Notification.requestPermission(p => this.notify = (p === 'granted')) else this.notify = en; }, showNotify: function(msg, data) { if(this.notify && msg === 'job_completed') new Notification('Job ' + data.result, { body: data.name + ' ' + '#' + data.number + ': ' + data.result }); } }, watch: { notify: e => localStorage.setItem('showNotifications', e ? 
1 : 0) } }); laminar-1.1/src/resources/manifest.webmanifest000066400000000000000000000006251410235453200216350ustar00rootroot00000000000000{ "short_name": "Laminar", "name": "Laminar", "description": "Lightweight Continuous Integration", "icons": [ { "src": "/icon.png", "type": "image/png", "sizes": "36x36" }, { "src": "/favicon-152.png", "type": "image/png", "sizes": "152x152" } ], "start_url": "/", "background_color": "#2F3340", "display": "standalone", "scope": "/" }laminar-1.1/src/resources/style.css000066400000000000000000000221561410235453200174560ustar00rootroot00000000000000/* colour scheme */ :root { --main-bg: #fff; --main-fg: #333; --nav-bg: #2F3340; --nav-bg-darker: #292b33; --nav-fg: #d0d0d0; --nav-fg-light: #fafafa; --icon-enabled: #d8cb83; --success: #74af77; --failure: #883d3d; --running: #4786ab; --warning: #de9a34; --link-fg: #2f4579; --alt-row-bg: #fafafa; --border-grey: #d0d0d0; } /* basic resets */ html { box-sizing: border-box; } *, *:before, *:after { box-sizing: inherit; } body, h1, h2, h3, h4, h5, h6, p, ol, ul { margin: 0; padding: 0; font-weight: normal; } ol, ul { list-style: none; } body, html { height: 100%; } body { font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; font-size: 16px; color: var(--main-fg); } /* main header bar */ #nav-top { background-color: var(--nav-bg); } #nav-top-links { background-color: var(--nav-bg-darker); } #nav-top a { color: var(--nav-fg); } #nav-top a:hover { color: white; text-decoration: none; } .version { align-self: center; font-size: x-small; color: rgba(255,255,255,0.3); } /* navbar svg icons (enable notifications) */ .nav-icon { display: inherit; } .nav-icon svg { fill: var(--nav-fg); stroke: #000; } .nav-icon:hover { cursor: pointer; } .nav-icon:hover svg { fill: var(--nav-fg-light); } .nav-icon.active svg { fill: var(--icon-enabled); } /* anchors */ a { color: var(--link-fg); text-decoration: none; } a:visited { color: var(--link-fg); } a:active { color: var(--link-fg); } a:hover { text-decoration: underline; } /* charts */ canvas { width: 100% !important; max-width: 800px; height: auto !important; } #popup-connecting { position: fixed; background: white; border: 1px solid #ddd; bottom: 10px; right: 10px; padding: 20px; } /* status icons */ .status { display: inline-block; width: 1em; vertical-align: middle; margin-top: -2px; /* pixel-pushing */ } svg.success path { fill: var(--success); } svg.failed path { fill: var(--failure); } svg.running circle { stroke: var(--running); } svg.queued circle { fill: var(--nav-fg); } svg.queued path { stroke: white; } /* sort indicators */ a.sort { position: relative; margin-left: 7px; } a.sort:before, a.sort:after { border: 4px solid transparent; content: ""; position: absolute; display: block; height: 0; width: 0; right: 0; top: 50%; } a.sort:before { border-bottom-color: var(--border-grey); margin-top: -9px; } a.sort:after { border-top-color: var(--border-grey); margin-top: 1px; } a.sort.dsc:after { border-top-color: var(--main-fg); } a.sort.asc:before { border-bottom-color: var(--main-fg); } a.sort:hover { text-decoration: none; cursor:pointer; } a.sort:not(.asc):hover:before { border-bottom-color: var(--main-fg); } a.sort:not(.dsc):hover:after { border-top-color: var(--main-fg); } /* job group tabs */ a.active { color: var(--main-fg); } a.active:hover { text-decoration: none; } /* run console ansi colors (based on base16-default-dark and base16-bright) */ :root { --ansi-black: #181818; --ansi-red: #f8f8f8; --ansi-green: #a1b56c; --ansi-yellow: #f7ca88; --ansi-blue: 
#7cafc2; --ansi-magenta: #ba8baf; --ansi-cyan: #86c1b9; --ansi-white: #d8d8d8; --ansi-brightblack: #000000; --ansi-brightred: #fb0120; --ansi-brightgreen: #a1c659; --ansi-brightyellow: #fda331; --ansi-brightblue: #6fb3d2; --ansi-brightmagenta: #d381c3; --ansi-brightcyan: #76c7b7; --ansi-brightwhite: #e0e0e0; } .ansi-black-fg { color: var(--ansi-black); } .ansi-black-bg { background-color: var(--ansi-black); } .ansi-red-fg { color: var(--ansi-red); } .ansi-red-bg { background-color: var(--ansi-red); } .ansi-green-fg { color: var(--ansi-green); } .ansi-green-bg { background-color: var(--ansi-green); } .ansi-yellow-fg { color: var(--ansi-yellow); } .ansi-yellow-bg { background-color: var(--ansi-yellow); } .ansi-blue-fg { color: var(--ansi-blue); } .ansi-blue-bg { background-color: var(--ansi-blue); } .ansi-magenta-fg { color: var(--ansi-magenta); } .ansi-magenta-bg { background-color: var(--ansi-magenta); } .ansi-cyan-fg { color: var(--ansi-cyan); } .ansi-cyan-bg { background-color: var(--ansi-cyan); } .ansi-white-fg { color: var(--ansi-white); } .ansi-white-bg { background-color: var(--ansi-white); } .ansi-bright-black-fg { color: var(--ansi-brightblack); } .ansi-bright-black-bg { background-color: var(--ansi-brightblack); } .ansi-bright-red-fg { color: var(--ansi-brightred); } .ansi-bright-red-bg { background-color: var(--ansi-brightred); } .ansi-bright-green-fg { color: var(--ansi-brightgreen); } .ansi-bright-green-bg { background-color: var(--ansi-brightgreen); } .ansi-bright-yellow-fg { color: var(--ansi-brightyellow); } .ansi-bright-yellow-bg { background-color: var(--ansi-brightyellow); } .ansi-bright-blue-fg { color: var(--ansi-brightblue); } .ansi-bright-blue-bg { background-color: var(--ansi-brightblue); } .ansi-bright-magenta-fg { color: var(--ansi-brightmagenta); } .ansi-bright-magenta-bg { background-color: var(--ansi-brightmagenta); } .ansi-bright-cyan-fg { color: var(--ansi-brightcyan); } .ansi-bright-cyan-bg { background-color: var(--ansi-brightcyan); } .ansi-bright-white-fg { color: var(--ansi-brightwhite); } .ansi-bright-white-bg { background-color: var(--ansi-brightwhite); } /* run console */ .console-log { padding: 15px; background-color: var(--ansi-black); } .console-log code { white-space: pre-wrap; color: var(--ansi-white); } .console-log a { color: var(--ansi-brightwhite); } /* text input (job filtering) */ input { padding: 5px 8px; } /* description list (run detail) */ dl { display: grid; grid-template-columns: auto 1fr; } dt { text-align: right; font-weight: bold; min-width: 85px; } dt,dd { line-height: 2; } /* tables */ table { border-spacing: 0; width: 100%; } th { text-align: left; border-bottom: 1px solid var(--border-grey); } td, th { padding: 8px; } table.striped td { border-top: 1px solid var(--border-grey); } table.striped tr:nth-child(even) { background-color: var(--alt-row-bg); } td:first-child, th:first-child { padding-left: 15px; } td:last-child, th:last-child { padding-right: 15px; } /* next/prev navigation buttons */ button { border: 1px solid var(--border-grey); background-color: var(--alt-row-bg); padding: 6px; min-width: 29px; } button[disabled] { cursor: not-allowed; color: var(--border-grey); } button:not([disabled]) { cursor: pointer; color: var(--main-fg); } /* progress bar */ .progress { width: 100%; height: 8px; border: 1px solid; border-radius: 4px; overflow: hidden; border-color: var(--border-grey); background-color: var(--alt-row-bg); } .progress-bar { height: 100%; background-color: var(--running); background-image: linear-gradient(45deg, 
transparent 35%, rgba(255,255,255,0.18) 35% 65%, transparent 65%); background-size: 1rem; transition: width .6s linear; } .progress-bar.overtime { background-color: var(--warning); } .progress-bar.indeterminate { animation: animate-stripes 1s linear infinite; } @keyframes animate-stripes { from { background-position: 1rem 0; } to { background-position: 0 0; } } /* wallboard */ .wallboard { display: flex; flex-wrap: wrap-reverse; flex-direction: row-reverse; gap: 20px; padding: 20px; position: fixed; height: 100%; width: 100%; overflow: auto; background-color: #000 } .wallboard > div { padding: 30px; flex-grow: 1; background-color: var(--failure); color: var(--nav-fg-light); } .wallboard > div:hover { cursor: pointer; } .wallboard > div[data-result="running"] { animation: wallboard-bg-fade 2s ease infinite; } @keyframes wallboard-bg-fade { from { background-color: #4786ab; } 50% { background-color: #446597; } to { background-color: #4786ab; } } .wallboard > div[data-result="success"] { background-color: var(--success); color: var(--main-fg); } /* connecting overlay */ #connecting-overlay { position: fixed; top: 0; right: 0; bottom: 0; left: 0; display: grid; align-content: end; justify-content: end; color: var(--nav-fg-light); font-size: 18px; padding: 30px; visibility: hidden; background-color: rgba(0,0,0,0.75); opacity: 0; transition: opacity 0.5s ease, visibility 0s 0.5s; } #connecting-overlay.shown { visibility: visible; opacity: 1; transition: opacity 0.5s ease 2s; } #connecting-overlay > div { opacity: 1; } /* responsive layout */ #page-home-main { display: grid; grid-template-columns: auto 1fr; } @media (max-width: 865px) { #page-home-main { grid-template-columns: 1fr; } .vp-sm-hide { display: none; } } #page-home-stats { display: grid; grid-template-columns: 1fr 1fr 1fr; padding: 15px; gap: 15px; max-width: 1600px; margin: auto; text-align: center; } @media (max-width: 650px) { #page-home-stats { grid-template-columns: 1fr; } } #page-home-plots { display: grid; grid-template-columns: 1fr 1fr; padding: 5px; gap: 5px; max-width: 1600px; margin: auto; } @media (max-width: 1095px) { #page-home-plots { grid-template-columns: 1fr; } } #page-job-main { display: grid; grid-template: auto 1fr / minmax(550px, 1fr) 1fr; } @media (max-width: 965px) { #page-job-main { grid-template: auto auto 1fr / 1fr; } } #page-run-detail { display: grid; grid-template-columns: minmax(400px, auto) 1fr; gap: 5px; } @media (max-width: 780px) { #page-run-detail { grid-template-columns: 1fr; } } laminar-1.1/src/rpc.cpp000066400000000000000000000146501410235453200150620ustar00rootroot00000000000000/// /// Copyright 2015-2019 Oliver Giles /// /// This file is part of Laminar /// /// Laminar is free software: you can redistribute it and/or modify /// it under the terms of the GNU General Public License as published by /// the Free Software Foundation, either version 3 of the License, or /// (at your option) any later version. /// /// Laminar is distributed in the hope that it will be useful, /// but WITHOUT ANY WARRANTY; without even the implied warranty of /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the /// GNU General Public License for more details. /// /// You should have received a copy of the GNU General Public License /// along with Laminar. 
If not, see /// #include "rpc.h" #include "laminar.capnp.h" #include "laminar.h" #include "log.h" namespace { // Used for returning run state to RPC clients LaminarCi::JobResult fromRunState(RunState state) { switch(state) { case RunState::SUCCESS: return LaminarCi::JobResult::SUCCESS; case RunState::FAILED: return LaminarCi::JobResult::FAILED; case RunState::ABORTED: return LaminarCi::JobResult::ABORTED; default: return LaminarCi::JobResult::UNKNOWN; } } } // This is the implementation of the Laminar Cap'n Proto RPC interface. // As such, it implements the pure virtual interface generated from // laminar.capnp with calls to the primary Laminar class class RpcImpl : public LaminarCi::Server { public: RpcImpl(Laminar& l) : LaminarCi::Server(), laminar(l) { } virtual ~RpcImpl() { } // Queue a job, without waiting for it to start kj::Promise queue(QueueContext context) override { std::string jobName = context.getParams().getJobName(); LLOG(INFO, "RPC queue", jobName); std::shared_ptr run = laminar.queueJob(jobName, params(context.getParams().getParams())); if(Run* r = run.get()) { context.getResults().setResult(LaminarCi::MethodResult::SUCCESS); context.getResults().setBuildNum(r->build); } else { context.getResults().setResult(LaminarCi::MethodResult::FAILED); } return kj::READY_NOW; } // Start a job, without waiting for it to finish kj::Promise start(StartContext context) override { std::string jobName = context.getParams().getJobName(); LLOG(INFO, "RPC start", jobName); std::shared_ptr run = laminar.queueJob(jobName, params(context.getParams().getParams())); if(Run* r = run.get()) { return r->whenStarted().then([context,r]() mutable { context.getResults().setResult(LaminarCi::MethodResult::SUCCESS); context.getResults().setBuildNum(r->build); }); } else { context.getResults().setResult(LaminarCi::MethodResult::FAILED); return kj::READY_NOW; } } // Start a job and wait for the result kj::Promise run(RunContext context) override { std::string jobName = context.getParams().getJobName(); LLOG(INFO, "RPC run", jobName); std::shared_ptr run = laminar.queueJob(jobName, params(context.getParams().getParams())); if(run) { return run->whenFinished().then([context,run](RunState state) mutable { context.getResults().setResult(fromRunState(state)); context.getResults().setBuildNum(run->build); }); } else { context.getResults().setResult(LaminarCi::JobResult::UNKNOWN); return kj::READY_NOW; } } // List jobs in queue kj::Promise listQueued(ListQueuedContext context) override { const std::list>& queue = laminar.listQueuedJobs(); auto res = context.getResults().initResult(queue.size()); int i = 0; for(auto it : queue) { res.set(i++, it->name); } return kj::READY_NOW; } // List running jobs kj::Promise listRunning(ListRunningContext context) override { const RunSet& active = laminar.listRunningJobs(); auto res = context.getResults().initResult(active.size()); int i = 0; for(auto it : active) { res[i].setJob(it->name); res[i].setBuildNum(it->build); i++; } return kj::READY_NOW; } // List known jobs kj::Promise listKnown(ListKnownContext context) override { std::list known = laminar.listKnownJobs(); auto res = context.getResults().initResult(known.size()); int i = 0; for(auto it : known) { res.set(i++, it); } return kj::READY_NOW; } kj::Promise abort(AbortContext context) override { std::string jobName = context.getParams().getRun().getJob(); uint buildNum = context.getParams().getRun().getBuildNum(); LLOG(INFO, "RPC abort", jobName, buildNum); LaminarCi::MethodResult result = laminar.abort(jobName, buildNum) 
? LaminarCi::MethodResult::SUCCESS : LaminarCi::MethodResult::FAILED; context.getResults().setResult(result); return kj::READY_NOW; } private: // Helper to convert an RPC parameter list to a hash map ParamMap params(const capnp::List::Reader& paramReader) { ParamMap res; for(auto p : paramReader) { res[p.getName().cStr()] = p.getValue().cStr(); } return res; } Laminar& laminar; std::unordered_map>> runWaiters; }; Rpc::Rpc(Laminar& li) : rpcInterface(kj::heap(li)) {} // Context for an RPC connection struct RpcConnection { RpcConnection(kj::Own&& stream, capnp::Capability::Client bootstrap, capnp::ReaderOptions readerOpts) : stream(kj::mv(stream)), network(*this->stream, capnp::rpc::twoparty::Side::SERVER, readerOpts), rpcSystem(capnp::makeRpcServer(network, bootstrap)) { } kj::Own stream; capnp::TwoPartyVatNetwork network; capnp::RpcSystem rpcSystem; }; kj::Promise Rpc::accept(kj::Own&& connection) { auto server = kj::heap(kj::mv(connection), rpcInterface, capnp::ReaderOptions()); return server->network.onDisconnect().attach(kj::mv(server)); } laminar-1.1/src/rpc.h000066400000000000000000000020501410235453200145160ustar00rootroot00000000000000/// /// Copyright 2019-2020 Oliver Giles /// /// This file is part of Laminar /// /// Laminar is free software: you can redistribute it and/or modify /// it under the terms of the GNU General Public License as published by /// the Free Software Foundation, either version 3 of the License, or /// (at your option) any later version. /// /// Laminar is distributed in the hope that it will be useful, /// but WITHOUT ANY WARRANTY; without even the implied warranty of /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the /// GNU General Public License for more details. /// /// You should have received a copy of the GNU General Public License /// along with Laminar. If not, see /// #ifndef LAMINAR_RPC_H_ #define LAMINAR_RPC_H_ #include #include #include class Laminar; class Rpc { public: Rpc(Laminar&li); kj::Promise accept(kj::Own&& connection); capnp::Capability::Client rpcInterface; }; #endif //LAMINAR_RPC_H_ laminar-1.1/src/run.cpp000066400000000000000000000147131410235453200151020ustar00rootroot00000000000000/// /// Copyright 2015-2020 Oliver Giles /// /// This file is part of Laminar /// /// Laminar is free software: you can redistribute it and/or modify /// it under the terms of the GNU General Public License as published by /// the Free Software Foundation, either version 3 of the License, or /// (at your option) any later version. /// /// Laminar is distributed in the hope that it will be useful, /// but WITHOUT ANY WARRANTY; without even the implied warranty of /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the /// GNU General Public License for more details. /// /// You should have received a copy of the GNU General Public License /// along with Laminar. 
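// [Editorial illustration, not part of the original source] A minimal sketch of
// driving the LaminarCi Cap'n Proto interface implemented in rpc.cpp above from an
// external tool, assuming capnp's EzRpcClient helper; the socket path and job name
// are illustrative only:
//
//   #include <capnp/ez-rpc.h>
//   #include "laminar.capnp.h"
//
//   int main() {
//       capnp::EzRpcClient rpc("unix:/var/run/laminar.sock");
//       LaminarCi::Client laminar = rpc.getMain<LaminarCi>();
//       auto req = laminar.queueRequest();       // enqueue without waiting
//       req.setJobName("example-job");
//       auto resp = req.send().wait(rpc.getWaitScope());
//       return resp.getResult() == LaminarCi::MethodResult::SUCCESS ? 0 : 1;
//   }
//
// The test fixture later in this archive (test/laminar-fixture.h) shows the
// equivalent lower-level TwoPartyVatNetwork/RpcSystem pattern.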
If not, see /// #include "run.h" #include "context.h" #include "conf.h" #include "log.h" #include #include #include // short syntax helper for kj::Path template inline kj::Path operator/(const kj::Path& p, const T& ext) { return p.append(ext); } std::string to_string(const RunState& rs) { switch(rs) { case RunState::QUEUED: return "queued"; case RunState::RUNNING: return "running"; case RunState::ABORTED: return "aborted"; case RunState::FAILED: return "failed"; case RunState::SUCCESS: return "success"; default: return "unknown"; } } Run::Run(std::string name, uint num, ParamMap pm, kj::Path&& rootPath) : result(RunState::SUCCESS), name(name), build(num), params(kj::mv(pm)), queuedAt(time(nullptr)), rootPath(kj::mv(rootPath)), started(kj::newPromiseAndFulfiller()), startedFork(started.promise.fork()), finished(kj::newPromiseAndFulfiller()), finishedFork(finished.promise.fork()) { for(auto it = params.begin(); it != params.end();) { if(it->first[0] == '=') { if(it->first == "=parentJob") { parentName = it->second; } else if(it->first == "=parentBuild") { parentBuild = atoi(it->second.c_str()); } else if(it->first == "=reason") { reasonMsg = it->second; } else { LLOG(ERROR, "Unknown internal job parameter", it->first); } it = params.erase(it); } else ++it; } } Run::~Run() { LLOG(INFO, "Run destroyed"); } static void setEnvFromFile(const kj::Path& rootPath, kj::Path file) { StringMap vars = parseConfFile((rootPath/file).toString(true).cStr()); for(auto& it : vars) { setenv(it.first.c_str(), it.second.c_str(), true); } } kj::Promise Run::start(RunState lastResult, std::shared_ptr ctx, const kj::Directory &fsHome, std::function(kj::Maybe&)> getPromise) { kj::Path cfgDir{"cfg"}; // add job timeout if specified if(fsHome.exists(cfgDir/"jobs"/(name+".conf"))) { timeout = parseConfFile((rootPath/cfgDir/"jobs"/(name+".conf")).toString(true).cStr()).get("TIMEOUT", 0); } int plog[2]; LSYSCALL(pipe(plog)); // Fork a process leader to run all the steps of the job. This gives us a nice // process tree output (job name and number as the process name) and helps // contain any wayward descendent processes. pid_t leader; LSYSCALL(leader = fork()); if(leader == 0) { // All output from this process will be captured in the plog pipe close(plog[0]); dup2(plog[1], STDOUT_FILENO); dup2(plog[1], STDERR_FILENO); close(plog[1]); // All initial/fixed env vars can be set here. Dynamic ones, including // "RESULT" and any set by `laminarc set` have to be handled in the subprocess. 
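// [Editorial illustration, not part of the original source] The files sourced
// below are plain KEY=VALUE lists read by parseConfFile() and exported with
// setenv() via setEnvFromFile() above. A hypothetical cfg/jobs/foo.env might
// contain, for example:
//
//   DEPLOY_TARGET=staging
//   MAKEFLAGS=-j4
//
// after which every step of job "foo" sees DEPLOY_TARGET and MAKEFLAGS in its
// environment, alongside the fixed variables (RUN, JOB, CONTEXT, ...) set
// further down in this block.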
// add environment files if(fsHome.exists(cfgDir/"env")) setEnvFromFile(rootPath, cfgDir/"env"); if(fsHome.exists(cfgDir/"contexts"/(ctx->name+".env"))) setEnvFromFile(rootPath, cfgDir/"contexts"/(ctx->name+".env")); if(fsHome.exists(cfgDir/"jobs"/(name+".env"))) setEnvFromFile(rootPath, cfgDir/"jobs"/(name+".env")); // parameterized vars for(auto& pair : params) { setenv(pair.first.c_str(), pair.second.c_str(), false); } std::string PATH = (rootPath/"cfg"/"scripts").toString(true).cStr(); if(const char* p = getenv("PATH")) { PATH.append(":"); PATH.append(p); } std::string runNumStr = std::to_string(build); setenv("PATH", PATH.c_str(), true); setenv("RUN", runNumStr.c_str(), true); setenv("JOB", name.c_str(), true); setenv("CONTEXT", ctx->name.c_str(), true); setenv("LAST_RESULT", to_string(lastResult).c_str(), true); setenv("WORKSPACE", (rootPath/"run"/name/"workspace").toString(true).cStr(), true); setenv("ARCHIVE", (rootPath/"archive"/name/runNumStr).toString(true).cStr(), true); // RESULT set in leader process // leader process assumes $LAMINAR_HOME as CWD LSYSCALL(chdir(rootPath.toString(true).cStr())); setenv("PWD", rootPath.toString(true).cStr(), 1); // We could just fork/wait over all the steps here directly, but then we // can't set a nice name for the process tree. There is pthread_setname_np, // but it's limited to 16 characters, which most of the time probably isn't // enough. Instead, we'll just exec ourselves and handle that in laminard's // main() by calling leader_main() char* procName; if(asprintf(&procName, "{laminar} %s:%d", name.data(), build) > 0) execl("/proc/self/exe", procName, NULL); // does not return _exit(EXIT_FAILURE); } // All good, we've "started" startedAt = time(nullptr); context = ctx; output_fd = plog[0]; close(plog[1]); pid = leader; // notifies the rpc client if the start command was used started.fulfiller->fulfill(); return getPromise(pid).then([this](int status){ // The leader process passes a RunState through the return value. // Check it didn't die abnormally, then cast to get it back. result = WIFEXITED(status) ? RunState(WEXITSTATUS(status)) : RunState::ABORTED; finished.fulfiller->fulfill(RunState(result)); return result; }); } std::string Run::reason() const { return reasonMsg; } bool Run::abort() { // if the Maybe is empty, wait() was already called on this process KJ_IF_MAYBE(p, pid) { kill(-*p, SIGTERM); return true; } return false; } laminar-1.1/src/run.h000066400000000000000000000120321410235453200145370ustar00rootroot00000000000000/// /// Copyright 2015-2018 Oliver Giles /// /// This file is part of Laminar /// /// Laminar is free software: you can redistribute it and/or modify /// it under the terms of the GNU General Public License as published by /// the Free Software Foundation, either version 3 of the License, or /// (at your option) any later version. /// /// Laminar is distributed in the hope that it will be useful, /// but WITHOUT ANY WARRANTY; without even the implied warranty of /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the /// GNU General Public License for more details. /// /// You should have received a copy of the GNU General Public License /// along with Laminar. 
If not, see /// #ifndef LAMINAR_RUN_H_ #define LAMINAR_RUN_H_ #include #include #include #include #include #include #include #include #include // Definition needed for musl typedef unsigned int uint; enum class RunState { UNKNOWN, QUEUED, RUNNING, ABORTED, FAILED, SUCCESS }; std::string to_string(const RunState& rs); class Context; typedef std::unordered_map ParamMap; // Represents an execution of a job. class Run { public: Run(std::string name, uint num, ParamMap params, kj::Path&& rootPath); ~Run(); // copying this class would be asking for trouble... Run(const Run&) = delete; Run& operator=(const Run&) = delete; kj::Promise start(RunState lastResult, std::shared_ptr ctx, const kj::Directory &fsHome, std::function(kj::Maybe&)> getPromise); // aborts this run bool abort(); std::string reason() const; kj::Promise whenStarted() { return startedFork.addBranch(); } kj::Promise whenFinished() { return finishedFork.addBranch(); } std::shared_ptr context; RunState result; std::string name; std::string parentName; int parentBuild = 0; uint build = 0; std::string log; kj::Maybe pid; int output_fd; std::unordered_map params; int timeout = 0; time_t queuedAt; time_t startedAt; private: // adds a script to the queue of scripts to be executed by this run void addScript(kj::Path scriptPath, kj::Path scriptWorkingDir, bool runOnAbort = false); // adds an environment file that will be sourced before this run void addEnv(kj::Path path); struct Script { kj::Path path; kj::Path cwd; bool runOnAbort; }; kj::Path rootPath; std::string reasonMsg; kj::PromiseFulfillerPair started; kj::ForkedPromise startedFork; kj::PromiseFulfillerPair finished; kj::ForkedPromise finishedFork; }; // All this below is a somewhat overengineered method of keeping track of // currently executing builds (Run objects). This would probably scale // very well, but it's completely gratuitous since we are not likely to // be executing thousands of builds at the same time #include #include #include #include #include #include #include namespace bmi = boost::multi_index; struct _run_same { typedef const Run* result_type; const Run* operator()(const std::shared_ptr& run) const { return run.get(); } }; // A single Run can be fetched by... struct _run_index : bmi::indexed_by< bmi::hashed_unique, // a combination of their job name and build number bmi::member, bmi::member >>, // or a pointer to a Run object. 
bmi::hashed_unique<_run_same>, // A group of Runs can be fetched by the time they started bmi::ordered_non_unique>, // or by their job name bmi::ordered_non_unique> > {}; struct RunSet: public boost::multi_index_container< std::shared_ptr, _run_index > { typename bmi::nth_index::type& byNameNumber() { return get<0>(); } typename bmi::nth_index::type const& byNameNumber() const { return get<0>(); } typename bmi::nth_index::type& byRunPtr() { return get<1>(); } typename bmi::nth_index::type const& byRunPtr() const { return get<1>(); } typename bmi::nth_index::type& byStartedAt() { return get<2>(); } typename bmi::nth_index::type const& byStartedAt() const { return get<2>(); } typename bmi::nth_index::type& byJobName() { return get<3>(); } typename bmi::nth_index::type const& byJobName() const { return get<3>(); } }; #endif // LAMINAR_RUN_H_ laminar-1.1/src/server.cpp000066400000000000000000000133701410235453200156020ustar00rootroot00000000000000/// /// Copyright 2015-2019 Oliver Giles /// /// This file is part of Laminar /// /// Laminar is free software: you can redistribute it and/or modify /// it under the terms of the GNU General Public License as published by /// the Free Software Foundation, either version 3 of the License, or /// (at your option) any later version. /// /// Laminar is distributed in the hope that it will be useful, /// but WITHOUT ANY WARRANTY; without even the implied warranty of /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the /// GNU General Public License for more details. /// /// You should have received a copy of the GNU General Public License /// along with Laminar. If not, see /// #include "server.h" #include "log.h" #include "rpc.h" #include "http.h" #include "laminar.h" #include #include #include #include #include #include #include // Size of buffer used to read from file descriptors. Should be // a multiple of sizeof(struct signalfd_siginfo) == 128 #define PROC_IO_BUFSIZE 4096 Server::Server(kj::AsyncIoContext& io) : ioContext(io), listeners(kj::heap(*this)), childTasks(*this) { } Server::~Server() { } void Server::start() { // The eventfd is used to quit the server later since we need to trigger // a reaction from the event loop efd_quit = eventfd(0, EFD_CLOEXEC|EFD_NONBLOCK); kj::evalLater([this](){ static uint64_t _; auto wakeEvent = ioContext.lowLevelProvider->wrapInputFd(efd_quit); return wakeEvent->read(&_, sizeof(uint64_t)).attach(std::move(wakeEvent)); }).wait(ioContext.waitScope); // Execution arrives here when the eventfd is triggered (in stop()) // Shutdown sequence: // 1. stop accepting new connections listeners = nullptr; // 2. wait for all children to close childTasks.onEmpty().wait(ioContext.waitScope); // TODO not sure the comments below are true // 3. run the loop once more to send any pending output to http clients ioContext.waitScope.poll(); // 4. return: http connections will be destructed when class is deleted } void Server::stop() { // This method is expected to be called in signal context, so an eventfd // is used to get the main loop to react. 
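// [Editorial illustration, not part of the original source] A self-contained
// sketch of the eventfd wake-up pattern used by this method: writing to the
// descriptor increments its counter and completes any pending read, which is
// what allows start() above to return and run the shutdown sequence. The write
// is safe to issue from signal context, which is why it is used here.
//
//   #include <sys/eventfd.h>
//
//   int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
//   // ... an async reader waits on efd (see start() above) ...
//   eventfd_write(efd, 1);          // wake-up, usable from a signal handler
//   eventfd_t n;
//   eventfd_read(efd, &n);          // reader side observes n == 1
//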
See run() eventfd_write(efd_quit, 1); } kj::Promise Server::readDescriptor(int fd, std::function cb) { auto event = this->ioContext.lowLevelProvider->wrapInputFd(fd, kj::LowLevelAsyncIoProvider::TAKE_OWNERSHIP); auto buffer = kj::heapArrayBuilder(PROC_IO_BUFSIZE); return handleFdRead(event, buffer.asPtr().begin(), cb).attach(std::move(event)).attach(std::move(buffer)); } void Server::addTask(kj::Promise&& task) { childTasks.add(kj::mv(task)); } kj::Promise Server::addTimeout(int seconds, std::function cb) { return ioContext.lowLevelProvider->getTimer().afterDelay(seconds * kj::SECONDS).then([cb](){ cb(); }).eagerlyEvaluate(nullptr); } kj::Promise Server::onChildExit(kj::Maybe &pid) { return ioContext.unixEventPort.onChildExit(pid); } Server::PathWatcher& Server::watchPaths(std::function fn) { struct PathWatcherImpl final : public PathWatcher { PathWatcher& addPath(const char* path) override { inotify_add_watch(fd, path, IN_ONLYDIR | IN_CLOSE_WRITE | IN_CREATE | IN_DELETE); return *this; } int fd; }; auto pwi = kj::heap(); PathWatcher* pw = pwi.get(); pwi->fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC); listeners->add(readDescriptor(pwi->fd, [fn](const char*, size_t){ fn(); }).attach(kj::mv(pwi))); return *pw; } void Server::listenRpc(Rpc &rpc, kj::StringPtr rpcBindAddress) { if(rpcBindAddress.startsWith("unix:")) unlink(rpcBindAddress.slice(strlen("unix:")).cStr()); listeners->add(ioContext.provider->getNetwork().parseAddress(rpcBindAddress) .then([this,&rpc](kj::Own&& addr) { return acceptRpcClient(rpc, addr->listen()); })); } void Server::listenHttp(Http &http, kj::StringPtr httpBindAddress) { if(httpBindAddress.startsWith("unix:")) unlink(httpBindAddress.slice(strlen("unix:")).cStr()); listeners->add(ioContext.provider->getNetwork().parseAddress(httpBindAddress) .then([this,&http](kj::Own&& addr) { return http.startServer(ioContext.lowLevelProvider->getTimer(), addr->listen()); })); } kj::Promise Server::acceptRpcClient(Rpc& rpc, kj::Own&& listener) { kj::ConnectionReceiver& cr = *listener.get(); return cr.accept().then(kj::mvCapture(kj::mv(listener), [this, &rpc](kj::Own&& listener, kj::Own&& connection) { addTask(rpc.accept(kj::mv(connection))); return acceptRpcClient(rpc, kj::mv(listener)); })); } // returns a promise which will read a chunk of data from the file descriptor // wrapped by stream and invoke the provided callback with the read data. // Repeats until ::read returns <= 0 kj::Promise Server::handleFdRead(kj::AsyncInputStream* stream, char* buffer, std::function cb) { return stream->tryRead(buffer, 1, PROC_IO_BUFSIZE).then([this,stream,buffer,cb](size_t sz) { if(sz > 0) { cb(buffer, sz); return handleFdRead(stream, kj::mv(buffer), cb); } return kj::Promise(kj::READY_NOW); }); } void Server::taskFailed(kj::Exception &&exception) { //kj::throwFatalException(kj::mv(exception)); fprintf(stderr, "taskFailed: %s\n", exception.getDescription().cStr()); } laminar-1.1/src/server.h000066400000000000000000000045771410235453200152600ustar00rootroot00000000000000/// /// Copyright 2015-2020 Oliver Giles /// /// This file is part of Laminar /// /// Laminar is free software: you can redistribute it and/or modify /// it under the terms of the GNU General Public License as published by /// the Free Software Foundation, either version 3 of the License, or /// (at your option) any later version. /// /// Laminar is distributed in the hope that it will be useful, /// but WITHOUT ANY WARRANTY; without even the implied warranty of /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the /// GNU General Public License for more details. /// /// You should have received a copy of the GNU General Public License /// along with Laminar. If not, see /// #ifndef LAMINAR_SERVER_H_ #define LAMINAR_SERVER_H_ #include #include #include #include #include #include class Laminar; class Http; class Rpc; // This class manages the program's asynchronous event loop class Server final : public kj::TaskSet::ErrorHandler { public: Server(kj::AsyncIoContext& ioContext); ~Server(); void start(); void stop(); // add a file descriptor to be monitored for output. The callback will be // invoked with the read data kj::Promise readDescriptor(int fd, std::function cb); void addTask(kj::Promise &&task); // add a one-shot timer callback kj::Promise addTimeout(int seconds, std::function cb); // get a promise which resolves when a child process exits kj::Promise onChildExit(kj::Maybe& pid); struct PathWatcher { virtual PathWatcher& addPath(const char* path) = 0; }; PathWatcher& watchPaths(std::function); void listenRpc(Rpc& rpc, kj::StringPtr rpcBindAddress); void listenHttp(Http& http, kj::StringPtr httpBindAddress); private: kj::Promise acceptRpcClient(Rpc& rpc, kj::Own&& listener); kj::Promise handleFdRead(kj::AsyncInputStream* stream, char* buffer, std::function cb); void taskFailed(kj::Exception&& exception) override; private: int efd_quit; kj::AsyncIoContext& ioContext; kj::Own listeners; kj::TaskSet childTasks; kj::Maybe> reapWatch; }; #endif // LAMINAR_SERVER_H_ laminar-1.1/src/version.cpp000066400000000000000000000015021410235453200157530ustar00rootroot00000000000000/// /// Copyright 2020 Oliver Giles /// /// This file is part of Laminar /// /// Laminar is free software: you can redistribute it and/or modify /// it under the terms of the GNU General Public License as published by /// the Free Software Foundation, either version 3 of the License, or /// (at your option) any later version. /// /// Laminar is distributed in the hope that it will be useful, /// but WITHOUT ANY WARRANTY; without even the implied warranty of /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the /// GNU General Public License for more details. /// /// You should have received a copy of the GNU General Public License /// along with Laminar. If not, see /// #define str(x) #x #define xstr(x) str(x) const char* laminar_version() { return xstr(LAMINAR_VERSION); } laminar-1.1/test/000077500000000000000000000000001410235453200137545ustar00rootroot00000000000000laminar-1.1/test/eventsource.h000066400000000000000000000051221410235453200164670ustar00rootroot00000000000000/// /// Copyright 2019 Oliver Giles /// /// This file is part of Laminar /// /// Laminar is free software: you can redistribute it and/or modify /// it under the terms of the GNU General Public License as published by /// the Free Software Foundation, either version 3 of the License, or /// (at your option) any later version. /// /// Laminar is distributed in the hope that it will be useful, /// but WITHOUT ANY WARRANTY; without even the implied warranty of /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the /// GNU General Public License for more details. /// /// You should have received a copy of the GNU General Public License /// along with Laminar. 
If not, see /// #ifndef LAMINAR_EVENTSOURCE_H_ #define LAMINAR_EVENTSOURCE_H_ #include #include #include #include class EventSource { public: EventSource(kj::AsyncIoContext& ctx, const char* httpConnectAddr, const char* path) : networkAddress(ctx.provider->getNetwork().parseAddress(httpConnectAddr).wait(ctx.waitScope)), httpClient(kj::newHttpClient(ctx.lowLevelProvider->getTimer(), headerTable, *networkAddress)), headerTable(), headers(headerTable), buffer(kj::heapArrayBuilder(BUFFER_SIZE)) { headers.add("Accept", "text/event-stream"); auto resp = httpClient->request(kj::HttpMethod::GET, path, headers).response.wait(ctx.waitScope); promise = waitForMessages(resp.body.get(), 0).attach(kj::mv(resp)); } const std::vector& messages() { return receivedMessages; } private: kj::Own networkAddress; kj::Own httpClient; kj::HttpHeaderTable headerTable; kj::HttpHeaders headers; kj::ArrayBuilder buffer; kj::Maybe> promise; std::vector receivedMessages; kj::Promise waitForMessages(kj::AsyncInputStream* stream, ulong offset) { return stream->read(buffer.asPtr().begin() + offset, 1, BUFFER_SIZE).then([=](size_t s) { ulong end = offset + s; buffer.asPtr().begin()[end] = '\0'; if(strcmp(&buffer.asPtr().begin()[end - 2], "\n\n") == 0) { rapidjson::Document d; d.Parse(buffer.begin() + strlen("data: ")); receivedMessages.emplace_back(kj::mv(d)); end = 0; } return waitForMessages(stream, end); }); } static const int BUFFER_SIZE = 1024; }; #endif // LAMINAR_EVENTSOURCE_H_ laminar-1.1/test/laminar-fixture.h000066400000000000000000000126311410235453200172370ustar00rootroot00000000000000/// /// Copyright 2019-2020 Oliver Giles /// /// This file is part of Laminar /// /// Laminar is free software: you can redistribute it and/or modify /// it under the terms of the GNU General Public License as published by /// the Free Software Foundation, either version 3 of the License, or /// (at your option) any later version. /// /// Laminar is distributed in the hope that it will be useful, /// but WITHOUT ANY WARRANTY; without even the implied warranty of /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the /// GNU General Public License for more details. /// /// You should have received a copy of the GNU General Public License /// along with Laminar. 
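// [Editorial illustration, not part of the original source] The EventSource test
// helper above accumulates the response body until it sees a blank line and then
// parses the JSON following the "data: " prefix, i.e. it expects the usual
// Server-Sent Events framing. A single status frame on the wire looks roughly
// like this (payload abridged, timestamp illustrative):
//
//   data: {"type":"status","title":"Laminar","time":1620000000,"data":{...}}
//
//   (each frame is terminated by an empty line, i.e. "\n\n")
//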
If not, see /// #ifndef LAMINAR_FIXTURE_H_ #define LAMINAR_FIXTURE_H_ #include "laminar.capnp.h" #include "eventsource.h" #include "tempdir.h" #include "laminar.h" #include "log.h" #include "server.h" #include "conf.h" #include #include class LaminarFixture : public ::testing::Test { public: LaminarFixture() { bind_rpc = std::string("unix:/") + tmp.path.toString(true).cStr() + "/rpc.sock"; bind_http = std::string("unix:/") + tmp.path.toString(true).cStr() + "/http.sock"; home = tmp.path.toString(true).cStr(); tmp.fs->openSubdir(kj::Path{"cfg", "jobs"}, kj::WriteMode::CREATE | kj::WriteMode::CREATE_PARENT); settings.home = home.c_str(); settings.bind_rpc = bind_rpc.c_str(); settings.bind_http = bind_http.c_str(); settings.archive_url = "/test-archive/"; server = new Server(*ioContext); laminar = new Laminar(*server, settings); } ~LaminarFixture() noexcept(true) { delete server; delete laminar; } kj::Own eventSource(const char* path) { return kj::heap(*ioContext, bind_http.c_str(), path); } void defineJob(const char* name, const char* scriptContent, const char* configContent = nullptr) { KJ_IF_MAYBE(f, tmp.fs->tryOpenFile(kj::Path{"cfg", "jobs", std::string(name) + ".run"}, kj::WriteMode::CREATE | kj::WriteMode::CREATE_PARENT | kj::WriteMode::EXECUTABLE)) { (*f)->writeAll(std::string("#!/bin/sh\n") + scriptContent + "\n"); } if(configContent) { KJ_IF_MAYBE(f, tmp.fs->tryOpenFile(kj::Path{"cfg", "jobs", std::string(name) + ".conf"}, kj::WriteMode::CREATE)) { (*f)->writeAll(configContent); } } } struct RunExec { LaminarCi::JobResult result; kj::String log; }; RunExec runJob(const char* name, kj::Maybe params = nullptr) { auto req = client().runRequest(); req.setJobName(name); KJ_IF_MAYBE(p, params) { auto params = req.initParams(p->size()); int i = 0; for(auto kv : *p) { params[i].setName(kv.first); params[i].setValue(kv.second); i++; } } auto res = req.send().wait(ioContext->waitScope); std::string path = std::string{"/log/"} + name + "/" + std::to_string(res.getBuildNum()); kj::HttpHeaderTable headerTable; kj::String log = kj::newHttpClient(ioContext->lowLevelProvider->getTimer(), headerTable, *ioContext->provider->getNetwork().parseAddress(bind_http.c_str()).wait(ioContext->waitScope)) ->request(kj::HttpMethod::GET, path, kj::HttpHeaders(headerTable)).response.wait(ioContext->waitScope).body ->readAllText().wait(ioContext->waitScope); return { res.getResult(), kj::mv(log) }; } kj::String stripLaminarLogLines(const kj::String& str) { auto out = kj::heapString(str.size()); char *o = out.begin(); for(const char *p = str.cStr(), *e = p + str.size(); p < e;) { const char *nl = strchrnul(p, '\n'); if(!kj::StringPtr{p}.startsWith("[laminar]")) { memcpy(o, p, nl - p + 1); o += nl - p + 1; } p = nl + 1; } *o = '\0'; return out; } StringMap parseFromString(kj::StringPtr content) { char tmp[16] = "/tmp/lt.XXXXXX"; int fd = mkstemp(tmp); LSYSCALL(write(fd, content.begin(), content.size())); close(fd); StringMap map = parseConfFile(tmp); unlink(tmp); return map; } LaminarCi::Client client() { if(!rpc) { auto stream = ioContext->provider->getNetwork().parseAddress(bind_rpc).wait(ioContext->waitScope)->connect().wait(ioContext->waitScope); auto net = kj::heap(*stream, capnp::rpc::twoparty::Side::CLIENT); rpc = kj::heap>(*net, nullptr).attach(kj::mv(net), kj::mv(stream)); } static capnp::word scratch[4]; memset(scratch, 0, sizeof(scratch)); auto hostId = capnp::MallocMessageBuilder(scratch).getRoot(); hostId.setSide(capnp::rpc::twoparty::Side::SERVER); return rpc->bootstrap(hostId).castAs(); } kj::Own> 
rpc; TempDir tmp; std::string home, bind_rpc, bind_http; Settings settings; Server* server; Laminar* laminar; static kj::AsyncIoContext* ioContext; }; #endif // LAMINAR_FIXTURE_H_ laminar-1.1/test/laminar-functional.cpp000066400000000000000000000135131410235453200202460ustar00rootroot00000000000000/// /// Copyright 2019 Oliver Giles /// /// This file is part of Laminar /// /// Laminar is free software: you can redistribute it and/or modify /// it under the terms of the GNU General Public License as published by /// the Free Software Foundation, either version 3 of the License, or /// (at your option) any later version. /// /// Laminar is distributed in the hope that it will be useful, /// but WITHOUT ANY WARRANTY; without even the implied warranty of /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the /// GNU General Public License for more details. /// /// You should have received a copy of the GNU General Public License /// along with Laminar. If not, see /// #include #include "laminar-fixture.h" #include "conf.h" // TODO: consider handling this differently kj::AsyncIoContext* LaminarFixture::ioContext; TEST_F(LaminarFixture, EmptyStatusMessageStructure) { auto es = eventSource("/"); ioContext->waitScope.poll(); ASSERT_EQ(1, es->messages().size()); auto json = es->messages().front().GetObject(); EXPECT_STREQ("status", json["type"].GetString()); EXPECT_STREQ("Laminar", json["title"].GetString()); EXPECT_LT(time(nullptr) - json["time"].GetInt64(), 1); auto data = json["data"].GetObject(); EXPECT_TRUE(data.HasMember("recent")); EXPECT_TRUE(data.HasMember("running")); EXPECT_TRUE(data.HasMember("queued")); EXPECT_TRUE(data.HasMember("executorsTotal")); EXPECT_TRUE(data.HasMember("executorsBusy")); EXPECT_TRUE(data.HasMember("buildsPerDay")); EXPECT_TRUE(data.HasMember("buildsPerJob")); EXPECT_TRUE(data.HasMember("timePerJob")); EXPECT_TRUE(data.HasMember("resultChanged")); EXPECT_TRUE(data.HasMember("lowPassRates")); EXPECT_TRUE(data.HasMember("buildTimeChanges")); } TEST_F(LaminarFixture, JobNotifyHomePage) { defineJob("foo", "true"); auto es = eventSource("/"); runJob("foo"); ASSERT_EQ(4, es->messages().size()); auto job_queued = es->messages().at(1).GetObject(); EXPECT_STREQ("job_queued", job_queued["type"].GetString()); EXPECT_STREQ("foo", job_queued["data"]["name"].GetString()); auto job_started = es->messages().at(2).GetObject(); EXPECT_STREQ("job_started", job_started["type"].GetString()); EXPECT_STREQ("foo", job_started["data"]["name"].GetString()); auto job_completed = es->messages().at(3).GetObject(); EXPECT_STREQ("job_completed", job_completed["type"].GetString()); EXPECT_STREQ("foo", job_completed["data"]["name"].GetString()); } TEST_F(LaminarFixture, OnlyRelevantNotifications) { defineJob("job1", "true"); defineJob("job2", "true"); auto esHome = eventSource("/"); auto esJobs = eventSource("/jobs"); auto es1Job = eventSource("/jobs/job1"); auto es2Job = eventSource("/jobs/job2"); auto es1Run = eventSource("/jobs/job1/1"); auto es2Run = eventSource("/jobs/job2/1"); runJob("job1"); runJob("job2"); EXPECT_EQ(7, esHome->messages().size()); EXPECT_EQ(7, esJobs->messages().size()); EXPECT_EQ(4, es1Job->messages().size()); EXPECT_EQ(4, es2Job->messages().size()); EXPECT_EQ(4, es1Run->messages().size()); EXPECT_EQ(4, es2Run->messages().size()); } TEST_F(LaminarFixture, FailedStatus) { defineJob("job1", "false"); auto run = runJob("job1"); ASSERT_EQ(LaminarCi::JobResult::FAILED, run.result); } TEST_F(LaminarFixture, WorkingDirectory) { defineJob("job1", "pwd"); auto run = 
runJob("job1"); ASSERT_EQ(LaminarCi::JobResult::SUCCESS, run.result); std::string cwd{tmp.path.append(kj::Path{"run","job1","1"}).toString(true).cStr()}; EXPECT_EQ(cwd + "\n", stripLaminarLogLines(run.log).cStr()); } TEST_F(LaminarFixture, Environment) { defineJob("foo", "env"); auto run = runJob("foo"); std::string ws{tmp.path.append(kj::Path{"run","foo","workspace"}).toString(true).cStr()}; std::string archive{tmp.path.append(kj::Path{"archive","foo","1"}).toString(true).cStr()}; StringMap map = parseFromString(run.log); EXPECT_EQ("1", map["RUN"]); EXPECT_EQ("foo", map["JOB"]); EXPECT_EQ("success", map["RESULT"]); EXPECT_EQ("unknown", map["LAST_RESULT"]); EXPECT_EQ(ws, map["WORKSPACE"]); EXPECT_EQ(archive, map["ARCHIVE"]); } TEST_F(LaminarFixture, ParamsToEnv) { defineJob("foo", "env"); StringMap params; params["foo"] = "bar"; auto run = runJob("foo", params); StringMap map = parseFromString(run.log); EXPECT_EQ("bar", map["foo"]); } TEST_F(LaminarFixture, Abort) { defineJob("job1", "sleep inf"); auto req = client().runRequest(); req.setJobName("job1"); auto res = req.send(); // There isn't a nice way of knowing when the leader process is ready to // handle SIGTERM. Just wait until it prints something to the log ioContext->waitScope.poll(); kj::HttpHeaderTable headerTable; char _; kj::newHttpClient(ioContext->lowLevelProvider->getTimer(), headerTable, *ioContext->provider->getNetwork().parseAddress(bind_http.c_str()).wait(ioContext->waitScope)) ->request(kj::HttpMethod::GET, "/log/job1/1", kj::HttpHeaders(headerTable)).response.wait(ioContext->waitScope).body ->tryRead(&_, 1, 1).wait(ioContext->waitScope); // now it should be ready to abort ASSERT_TRUE(laminar->abort("job1", 1)); EXPECT_EQ(LaminarCi::JobResult::ABORTED, res.wait(ioContext->waitScope).getResult()); } TEST_F(LaminarFixture, JobDescription) { defineJob("foo", "true", "DESCRIPTION=bar"); auto es = eventSource("/jobs/foo"); ioContext->waitScope.poll(); ASSERT_EQ(1, es->messages().size()); auto json = es->messages().front().GetObject(); ASSERT_TRUE(json.HasMember("data")); auto data = json["data"].GetObject(); ASSERT_TRUE(data.HasMember("description")); EXPECT_STREQ("bar", data["description"].GetString()); } laminar-1.1/test/main.cpp000066400000000000000000000024711410235453200154100ustar00rootroot00000000000000/// /// Copyright 2019 Oliver Giles /// /// This file is part of Laminar /// /// Laminar is free software: you can redistribute it and/or modify /// it under the terms of the GNU General Public License as published by /// the Free Software Foundation, either version 3 of the License, or /// (at your option) any later version. /// /// Laminar is distributed in the hope that it will be useful, /// but WITHOUT ANY WARRANTY; without even the implied warranty of /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the /// GNU General Public License for more details. /// /// You should have received a copy of the GNU General Public License /// along with Laminar. 
If not, see /// #include #include #include #include "laminar-fixture.h" #include "leader.h" // gtest main supplied in order to call captureChildExit and handle process leader int main(int argc, char **argv) { if(argv[0][0] == '{') return leader_main(); // TODO: consider handling this differently auto ioContext = kj::setupAsyncIo(); LaminarFixture::ioContext = &ioContext; kj::UnixEventPort::captureChildExit(); //kj::_::Debug::setLogLevel(kj::_::Debug::Severity::INFO); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } laminar-1.1/test/tempdir.h000066400000000000000000000026041410235453200155730ustar00rootroot00000000000000/// /// Copyright 2018-2020 Oliver Giles /// /// This file is part of Laminar /// /// Laminar is free software: you can redistribute it and/or modify /// it under the terms of the GNU General Public License as published by /// the Free Software Foundation, either version 3 of the License, or /// (at your option) any later version. /// /// Laminar is distributed in the hope that it will be useful, /// but WITHOUT ANY WARRANTY; without even the implied warranty of /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the /// GNU General Public License for more details. /// /// You should have received a copy of the GNU General Public License /// along with Laminar. If not, see /// #ifndef LAMINAR_TEMPDIR_H_ #define LAMINAR_TEMPDIR_H_ #include "log.h" #include #include class TempDir { public: TempDir() : path(mkdtemp()), fs(kj::newDiskFilesystem()->getRoot().openSubdir(path, kj::WriteMode::CREATE|kj::WriteMode::MODIFY)) { } ~TempDir() noexcept { kj::newDiskFilesystem()->getRoot().remove(path); } kj::Path path; kj::Own fs; private: static kj::Path mkdtemp() { char dir[] = "/tmp/laminar-test-XXXXXX"; LASSERT(::mkdtemp(dir) != nullptr, "mkdtemp failed"); return kj::Path::parse(&dir[1]); } }; #endif // LAMINAR_TEMPDIR_H_ laminar-1.1/test/unit-conf.cpp000066400000000000000000000033431410235453200163650ustar00rootroot00000000000000/// /// Copyright 2018-2020 Oliver Giles /// /// This file is part of Laminar /// /// Laminar is free software: you can redistribute it and/or modify /// it under the terms of the GNU General Public License as published by /// the Free Software Foundation, either version 3 of the License, or /// (at your option) any later version. /// /// Laminar is distributed in the hope that it will be useful, /// but WITHOUT ANY WARRANTY; without even the implied warranty of /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the /// GNU General Public License for more details. /// /// You should have received a copy of the GNU General Public License /// along with Laminar. 
If not, see /// #include "conf.h" #include "log.h" #include class ConfTest : public ::testing::Test { protected: void SetUp() override { fd = mkstemp(tmpFile); } void TearDown() override { close(fd); unlink(tmpFile); } void parseConf(std::string conf) { lseek(fd, SEEK_SET, 0); LSYSCALL(write(fd, conf.data(), conf.size())); cfg = parseConfFile(tmpFile); } StringMap cfg; int fd; char tmpFile[32] = "/tmp/lt.XXXXXX"; }; TEST_F(ConfTest, Empty) { EXPECT_TRUE(cfg.empty()); parseConf(""); EXPECT_TRUE(cfg.empty()); } TEST_F(ConfTest, Comments) { parseConf("#"); EXPECT_TRUE(cfg.empty()); parseConf("#foo=bar"); EXPECT_TRUE(cfg.empty()); } TEST_F(ConfTest, Parse) { parseConf("foo=bar\nbar=3"); ASSERT_EQ(2, cfg.size()); EXPECT_EQ("bar", cfg.get("foo", std::string("fallback"))); EXPECT_EQ(3, cfg.get("bar", 0)); } TEST_F(ConfTest, Fallback) { EXPECT_EQ("foo", cfg.get("test", std::string("foo"))); } laminar-1.1/test/unit-database.cpp000066400000000000000000000044251410235453200172060ustar00rootroot00000000000000/// /// Copyright 2018 Oliver Giles /// /// This file is part of Laminar /// /// Laminar is free software: you can redistribute it and/or modify /// it under the terms of the GNU General Public License as published by /// the Free Software Foundation, either version 3 of the License, or /// (at your option) any later version. /// /// Laminar is distributed in the hope that it will be useful, /// but WITHOUT ANY WARRANTY; without even the implied warranty of /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the /// GNU General Public License for more details. /// /// You should have received a copy of the GNU General Public License /// along with Laminar. If not, see /// #include #include "database.h" class DatabaseTest : public ::testing::Test { protected: DatabaseTest() : ::testing::Test(), db(":memory:") {} Database db; }; TEST_F(DatabaseTest, Exec) { EXPECT_FALSE(db.exec("garbage non-sql")); EXPECT_TRUE(db.exec("create temporary table test(id int)")); } TEST_F(DatabaseTest, Fetch) { int n = 0; db.stmt("select 2, 'cat', 4294967299").fetch([&](int i, std::string s, uint64_t ui){ n++; EXPECT_EQ(2, i); EXPECT_EQ("cat", s); EXPECT_EQ(4294967299, ui); }); EXPECT_EQ(1, n); } TEST_F(DatabaseTest, Bind) { int n = 0; db.stmt("select ? * 2").bind(2).fetch([&](int i){ n++; EXPECT_EQ(4, i); }); EXPECT_EQ(1, n); } TEST_F(DatabaseTest, Strings) { std::string res; db.stmt("select ? || ?").bind("a", "b").fetch([&res](std::string s){ EXPECT_TRUE(res.empty()); res = s; }); EXPECT_EQ("ab", res); } TEST_F(DatabaseTest, MultiRow) { ASSERT_TRUE(db.exec("create table test(id int)")); int i = 0; while(i < 10) EXPECT_TRUE(db.stmt("insert into test values(?)").bind(i++).exec()); i = 0; db.stmt("select * from test").fetch([&](int r){ EXPECT_EQ(i++, r); }); EXPECT_EQ(10, i); } TEST_F(DatabaseTest, StdevFunc) { double res = 0; db.stmt("with a (x) as (values (7),(3),(45),(23)) select stdev(x) from a").fetch([&](double r){ res = r; }); EXPECT_FLOAT_EQ(19.0700463205171, res); }
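// [Editorial illustration, not part of the original test suite] A sketch that
// combines the bind() and fetch() helpers exercised above, assuming bind()
// accepts several positional parameters as in the Strings test:
TEST_F(DatabaseTest, BindAndFetchSketch) {
    int rows = 0;
    db.stmt("select ? + ?, 'laminar'").bind(2, 3).fetch([&](int sum, std::string s) {
        rows++;
        EXPECT_EQ(5, sum);           // 2 + 3 bound positionally
        EXPECT_EQ("laminar", s);     // literal column comes back as std::string
    });
    EXPECT_EQ(1, rows);              // the statement yields exactly one row
}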