==> charliecloud-0.9.10/.travis.yml <==
(symlink to test/travis.yml)

==> charliecloud-0.9.10/LICENSE <==

                              Apache License
                        Version 2.0, January 2004
                     http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.

"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. 
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

==> charliecloud-0.9.10/Makefile <==

SHELL=/bin/sh

# Add some good stuff to CFLAGS.
export CFLAGS += -std=c11 -Wall -g

.PHONY: all
all: VERSION.full bin/version.h bin/version.sh
	cd bin && $(MAKE) all
# only descend into test/ if the right Python is available
	if (command -v "$$(head -1 test/make-auto | sed -E 's/^.+ //')"); then \
	   cd test && $(MAKE) all; \
	fi
	cd examples/syscalls && $(MAKE) all

.PHONY: clean
clean:
	cd bin && $(MAKE) clean
	cd doc-src && $(MAKE) clean
	cd test && $(MAKE) clean
	cd examples/syscalls && $(MAKE) clean

# VERSION.full contains the version string reported by executables; see FAQ.
ifeq ($(shell test -d .git && fgrep -q \~ VERSION && echo true),true)
.PHONY: VERSION.full  # depends on git metadata, not a simple file
VERSION.full: VERSION
	(git --version > /dev/null 2>&1) || \
	  (echo "This is a Git working directory but no git found." && false)
	printf '%s+%s%s%s\n' \
	       $$(cat $<) \
	       $$(  git rev-parse --abbrev-ref HEAD \
	          | sed 's/[^A-Za-z0-9]//g' \
	          | sed 's/$$/./g' \
	          | sed 's/master.//g') \
	       $$(git rev-parse --short HEAD) \
	       $$(git diff-index --quiet HEAD || echo '.dirty') \
	       > $@
else
VERSION.full: VERSION
	cp $< $@
endif

bin/version.h: VERSION.full
	echo "#define VERSION \"$$(cat $<)\"" > $@

bin/version.sh: VERSION.full
	echo "version () { echo 1>&2 '$$(cat $<)'; }" > $@

# These targets provide tarballs of HEAD (not the Git working directory) that
# are self-contained, including the source code as well as the man pages
# (both) and Bats (export-bats). To use them in an unclean working directory,
# set $CH_MAKE_EXPORT_UNCLEAN_OK to non-empty.
#
# You must "cd doc-src && make" before they will work. The targets depend on
# the man pages but don't know how to build them.
#
# They are phony because I haven't figured out their real dependencies.
.PHONY: main.tar
main.tar: VERSION.full man/charliecloud.1 doc/index.html
	git diff-index --quiet HEAD || [ -n "$$CH_MAKE_EXPORT_UNCLEAN_OK" ]
	git archive HEAD --prefix=charliecloud-$$(cat VERSION.full)/ \
	            -o main.tar
	tar --xform=s,^,charliecloud-$$(cat VERSION.full)/, \
	    --exclude='.*' \
	    -rf main.tar doc man/*.1 VERSION.full

.PHONY: export
export: main.tar
	gzip -9 main.tar
	mv main.tar.gz charliecloud-$$(cat VERSION.full).tar.gz
	ls -lh charliecloud-$$(cat VERSION.full).tar.gz

.PHONY: export-bats
export-bats: main.tar
	test -d .git -a -f test/bats/.git  # need recursive Git checkout
	cd test/bats && \
	   git archive HEAD \
	       --prefix=charliecloud-$$(cat ../../VERSION.full)/test/bats/ \
	       -o ../../bats.tar
	tar Af main.tar bats.tar
	gzip -9 main.tar
	mv main.tar.gz charliecloud-$$(cat VERSION.full).tar.gz
	rm bats.tar
	ls -lh charliecloud-$$(cat VERSION.full).tar.gz

# PREFIX is the prefix expected at runtime (usually /usr or /usr/local for
# system-wide installations).
# More: https://www.gnu.org/prep/standards/html_node/Directory-Variables.html
#
# DESTDIR is the installation directory used during make install, which
# usually coincides with PREFIX for manual installation but is chosen to be
# a temporary directory in packaging environments. PREFIX needs to be
# appended.
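#
# For example (paths hypothetical), the two flows this supports are:
#
#    make install PREFIX=/usr/local               # direct: files land under /usr/local
#    make install PREFIX=/usr DESTDIR=/tmp/stage  # staged: files land under /tmp/stage/usr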
# More: https://www.gnu.org/prep/standards/html_node/DESTDIR.html
#
# Reasoning here: Users performing manual install *have* to specify PREFIX;
# default is to use that also for DESTDIR. If DESTDIR is provided in
# addition, we use that for installation.
#
# PREFIX can be relative unless DESTDIR is set. Absolute paths are not
# canonicalized.
ifneq ($(PREFIX),)
ifneq ($(shell echo "$(PREFIX)" | cut -c1),/)
ifdef DESTDIR
$(error PREFIX must be absolute if DESTDIR is set)
endif
override PREFIX := $(abspath $(PREFIX))
$(warning Relative PREFIX converted to $(PREFIX))
endif
endif
INSTALL_PREFIX := $(if $(DESTDIR),$(DESTDIR)/$(PREFIX),$(PREFIX))
BIN := $(INSTALL_PREFIX)/bin
DOC := $(INSTALL_PREFIX)/share/doc/charliecloud

# LIBEXEC_DIR is modeled after FHS 3.0 and
# https://www.gnu.org/prep/standards/html_node/Directory-Variables.html. It
# contains any executable helpers that are not needed in PATH. Default is
# libexec/charliecloud, which will be prepended with the PREFIX.
LIBEXEC_DIR ?= libexec/charliecloud
LIBEXEC_INST := $(INSTALL_PREFIX)/$(LIBEXEC_DIR)
LIBEXEC_RUN := $(PREFIX)/$(LIBEXEC_DIR)
TEST := $(LIBEXEC_INST)/test

.PHONY: install
install: all
	@test -n "$(PREFIX)" || \
	  (echo "No PREFIX specified; abandoning all hope." && false)
	@echo Installing in $(INSTALL_PREFIX)
#	binaries
	install -d $(BIN)
	install -pm 755 -t $(BIN) $$(find bin -type f -executable)
#	Modify scripts to point at the new libexec location.
	for scriptfile in $$(find bin -type f -executable -printf "%f\n"); do \
	   sed -i "s#^libexec=.*#libexec=$(LIBEXEC_RUN)#" $(BIN)/$${scriptfile}; \
	done
#	executable helpers
	install -d $(LIBEXEC_INST)
	install -pm 644 -t $(LIBEXEC_INST) bin/base.sh bin/version.sh
	sed -i "s#^libexec=.*#libexec=$(LIBEXEC_RUN)#" $(LIBEXEC_INST)/base.sh
#	man pages if they were built
	if [ -f man/charliecloud.1 ]; then \
	   install -d $(INSTALL_PREFIX)/share/man/man1; \
	   install -pm 644 -t $(INSTALL_PREFIX)/share/man/man1 man/*.1; \
	fi
#	license and readme
	install -d $(DOC)
	install -pm 644 -t $(DOC) LICENSE README.rst
#	html files if they were built
	if [ -f doc/index.html ]; then \
	   cp -r doc $(DOC)/html; \
	   rm -f $(DOC)/html/.nojekyll; \
	   for i in $$(find $(DOC)/html -type d); do \
	      chmod 755 $$i; \
	   done; \
	   for i in $$(find $(DOC)/html -type f); do \
	      chmod 644 $$i; \
	   done; \
	fi
#	examples
	for i in examples/syscalls \
	         examples/serial/* examples/mpi/* examples/other/*; do \
	   install -d $(LIBEXEC_INST)/$$i; \
	   install -pm 644 -t $(LIBEXEC_INST)/$$i $$i/*; \
	done
	chmod 755 $(LIBEXEC_INST)/examples/serial/hello/hello.sh \
	          $(LIBEXEC_INST)/examples/syscalls/pivot_root \
	          $(LIBEXEC_INST)/examples/syscalls/userns \
	          $(LIBEXEC_INST)/examples/*/*/*.sh
	find $(LIBEXEC_INST)/examples -name Build -exec chmod 755 {} \;
#	tests
	install -d $(TEST) $(TEST)/run
	install -pm 644 -t $(TEST) test/*.bats test/common.bash test/Makefile
	install -pm 644 -t $(TEST)/run test/run/*.bats
	install -pm 755 -t $(TEST) test/Build.*
	install -pm 644 -t $(TEST) test/Dockerfile.* test/Docker_Pull.*
	install -pm 644 -t $(TEST) test/*.patch
	install -pm 755 -t $(TEST) test/make-auto test/make-perms-test
	install -d $(TEST)/chtest
	install -pm 644 -t $(TEST)/chtest test/chtest/*
	chmod 755 $(TEST)/chtest/Build \
	          $(TEST)/chtest/*.py \
	          $(TEST)/chtest/printns
	ln -sf ../../../bin $(TEST)/bin
#	shared library tests
	install -d $(TEST)/sotest $(TEST)/sotest/bin $(TEST)/sotest/lib
	install -pm 755 -t $(TEST)/sotest test/sotest/libsotest.so.1.0 \
	                                  test/sotest/sotest
	install -pm 644 -t $(TEST)/sotest test/sotest/files_inferrable.txt \
	                                  test/sotest/sotest.c
	ln -sf ./libsotest.so.1.0 $(TEST)/sotest/libsotest.so
	ln -sf ./libsotest.so.1.0 $(TEST)/sotest/libsotest.so.1
	install -pm 755 -t $(TEST)/sotest/bin test/sotest/bin/sotest
	install -pm 755 -t $(TEST)/sotest/lib test/sotest/lib/libsotest.so.1.0
#	Bats (if embedded)
	if [ -d test/bats/bin ]; then \
	   install -d $(TEST)/bats && \
	   install -pm 644 -t $(TEST)/bats test/bats/CONDUCT.md \
	                                   test/bats/LICENSE \
	                                   test/bats/README.md && \
	   install -d $(TEST)/bats/libexec && \
	   install -pm 755 -t $(TEST)/bats/libexec test/bats/libexec/* && \
	   install -d $(TEST)/bats/bin && \
	   ln -sf ../libexec/bats $(TEST)/bats/bin/bats && \
	   ln -sf bats/bin/bats $(TEST)/bats; \
	fi

.PHONY: deb
deb:
	ln -s packaging/debian
	debuild -d -i -us -uc
	rm -f debian

==> charliecloud-0.9.10/README.rst <==

What is Charliecloud?
---------------------

Charliecloud provides user-defined software stacks (UDSS) for
high-performance computing (HPC) centers. This "bring your own software
stack" functionality addresses needs such as:

* software dependencies that are numerous, complex, unusual, differently
  configured, or simply newer/older than what the center provides;
* build-time requirements unavailable within the center, such as relatively
  unfettered internet access;
* validated software stacks and configuration to meet the standards of a
  particular field of inquiry;
* portability of environments between resources, including workstations and
  other test and development systems not managed by the center;
* consistent environments, even archivally so, that can be easily, reliably,
  and verifiably reproduced in the future; and/or
* usability and comprehensibility.

How does it work?
-----------------

Charliecloud uses Linux user namespaces to run containers with no privileged
operations or daemons and minimal configuration changes on center resources.
This simple approach avoids most security risks while maintaining access to
the performance and functionality already on offer. Container images can be
built using Docker or anything else that can generate a standard Linux
filesystem tree.

How do I learn more?
--------------------

* Documentation: https://hpc.github.io/charliecloud
* GitHub repository: https://github.com/hpc/charliecloud
* We wrote an article for USENIX's magazine *;login:* that explains in more
  detail the motivation for Charliecloud and the technology upon which it is
  based: https://www.usenix.org/publications/login/fall2017/priedhorsky
* A more technical resource is our Supercomputing 2017 paper:
  https://dl.acm.org/citation.cfm?id=3126925

Who is responsible?
-------------------

The core Charliecloud team at Los Alamos is:

* Reid Priedhorsky, co-founder and BDFL
* Tim Randles, co-founder
* Michael Jennings
* Jordan Ogas

Patches (code, documentation, etc.) contributed by:

* Reid Priedhorsky
* Rusty Davis
* Oliver Freyermuth
* Christoph Junghans
* Jordan Ogas
* Matthew Vernon
* Peter Wienemann
* Lowell Wofford

How can I participate?
----------------------

Questions, comments, feature requests, bug reports, etc. can be directed to:

* our mailing list: *charliecloud@groups.io* or
  https://groups.io/g/charliecloud
* issues on GitHub

Patches are much appreciated on the software itself as well as
documentation. Optionally, please include in your first patch a credit for
yourself in the list above.

We are friendly and welcoming of diversity on all dimensions.

Copyright and license
---------------------

Charliecloud is copyright © 2014–2018 Los Alamos National Security, LLC.
This software has been approved for open source release, LA-CC 14-096.

Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this software except in compliance with the License. A copy of the
license is included in file LICENSE.

This material was produced under U.S. Government contract DE-AC52-06NA25396
for Los Alamos National Laboratory (LANL), which is operated by Los Alamos
National Security, LLC for the U.S. Department of Energy. The U.S.
Government has rights to use, reproduce, and distribute this software.
NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY
WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF THIS
SOFTWARE. If software is modified to produce derivative works, such modified
software should be clearly marked, so as not to confuse it with the version
available from LANL.

==> charliecloud-0.9.10/VERSION <==
0.9.10

==> charliecloud-0.9.10/bin/Makefile <==

bin := ch-run ch-ssh
src := $(wildcard *.c)
obj := $(src:.c=.o)

CFLAGS += -pthread
LDLIBS += -pthread -lrt

.PHONY: all
all: $(bin)

$(obj): charliecloud.h version.h Makefile

$(bin): charliecloud.o

.PHONY: clean
clean:
	rm -Rf *.o $(bin)

==> charliecloud-0.9.10/bin/base.sh <==

set -e

# shellcheck disable=SC2034
ch_bin="$(cd "$(dirname "$0")" && pwd)"
libexec="$(cd "$(dirname "$0")" && pwd)"
. "${libexec}/version.sh"

parse_basic_args () {
    for i in "$@"; do
        if [ "$i" = --help ]; then
            usage 0
        fi
        if [ "$i" = --libexec-path ]; then
            echo "$libexec"
            exit 0
        fi
        if [ "$i" = --version ]; then
            version
            exit 0
        fi
    done
}

usage () {
    echo "${usage:?}" 1>&2
    exit "${1:-1}"
}

# Do we need sudo to run docker?
if ( docker info > /dev/null 2>&1 ); then
    docker_ () {
        docker "$@"
    }
else
    docker_ () {
        sudo docker "$@"
    }
fi

# Use parallel gzip if it's available. ("command -v" is POSIX.1-2008.)
if ( command -v pigz >/dev/null 2>&1 ); then
    gzip_ () {
        pigz "$@"
    }
else
    gzip_ () {
        gzip "$@"
    }
fi

# Use pv to show a progress bar, if it's available. (We also don't want a
# progress bar if stdin is not a terminal, but pv takes care of that.)
if ( command -v pv >/dev/null 2>&1 ); then
    pv_ () {
        pv -pteb "$@"
    }
else
    pv_ () {
        # Arguments may be present, but we ignore them.
        cat
    }
fi

==> charliecloud-0.9.10/bin/ch-build <==

#!/bin/sh

libexec="$(cd "$(dirname "$0")" && pwd)"
. "${libexec}/base.sh"

# shellcheck disable=SC2034
usage=$(cat <

==> charliecloud-0.9.10/bin/ch-docker2tar <==

      > "$tar"
docker_ rm "$cid" > /dev/null

# Add the Docker environment variables in ./environment for later consumption
# by "ch-run --set-env".
#
# 1. mktemp(1) isn't POSIX, but it seemed very likely to be installed if
#    Docker is, and I couldn't find a more portable way of securely creating
#    temporary files. (In particular, I would have preferred to pipe in the
#    data rather than creating and deleting a temporary file.)
#
# 2. Blocking factor 1 (-b1) for tar is a bug workaround. Without this
#    switch, tar 1.26, which is in RHEL, corrupts the tarball instead of
#    appending to it. This doesn't happen in 1.29 in Debian Stretch, and
#    building GNU tar from Git source was too hard, so I couldn't bisect a
#    specific commit that fixed the bug to learn what exactly was going on.
#    (See PR #371.)
#
# 3.
This assumes that the tarball from Docker does not have a single # top-level directory (i.e., is a tarbomb). # echo "adding environment" temp=$(mktemp --tmpdir ch-docker2tar.XXXXXX) docker_ inspect "$image" --format='{{range .Config.Env}}{{println .}}{{end}}' \ > "$temp" tar rf "$tar" -b1 -P --xform="s|${temp}|environment|" "$temp" rm "$temp" # Finish up. echo "compressing" cat "$tar" | pv_ -s "$size" | gzip_ -6 > "${tar}.gz" rm "$tar" ls -lh "${tar}.gz" charliecloud-0.9.10/bin/ch-fromhost000077500000000000000000000317201346662313000171750ustar00rootroot00000000000000#!/bin/sh # The basic algorithm here is that we build up a list of file # source:destination pairs separated by newlines, then walk through them and # copy them into the image. We also maintain a list of directories to create # and a list of file globs to remove. # # The colon separator to avoid the difficulty of iterating through a sequence # of pairs with no arrays or structures in POSIX sh. We could avoid it by # taking action immediately upon encountering each file in the argument list, # but that would (a) yield a half-injected image for basic errors like # misspellings on the command line and (b) would require the image to be first # on the command line, which seems awkward. # # The newline separator is for the same reason and also because it's # convenient for input from --cmd and --file. # # Note on looping through the newlines in a variable: The approach in this # script is to set IFS to newline, loop, then restore. This is awkward but # seemed the least bad. Alternatives include: # # 1. Piping echo into "while read -r": This executes the while in a # subshell, so variables don't stick. # # 2. Here document used as input, e.g.: # # while IFS= read -r FILE; do # ... # done <&2 fi } ensure_nonempty () { [ "$2" ] || fatal "$1 must not be empty" } fatal () { printf 'ch-fromhost: %s\n' "$1" 1>&2 exit 1 } info () { printf 'ch-fromhost: %s\n' "$1" 1>&2 } is_bin () { case $1 in */bin*|*/sbin*) return 0 ;; *) return 1 esac } is_so () { case $1 in */lib*) return 0 ;; *.so) return 0 ;; *) return 1 esac } queue_files () { old_ifs="$IFS" IFS="$newline" d="${dest:-$2}" for f in $1; do case $f in *:*) fatal "paths can't contain colon: ${f}" ;; esac if is_so "$f"; then debug "found shared library: ${f}" lib_found=yes fi # This adds a delimiter only for the second and subsequent files. # https://chris-lamb.co.uk/posts/joining-strings-in-posix-shell # # If destination empty, we'll infer it later. inject_files="${inject_files:+$inject_files$newline}$f:$d" done IFS="$old_ifs" } queue_mkdir () { [ "$1" ] inject_mkdirs="${inject_mkdirs:+$inject_mkdirs$newline}$1" } queue_unlink () { [ "$1" ] inject_unlinks="${inject_unlinks:+$inject_unlinks$newline}$1" } parse_basic_args "$@" while [ $# -gt 0 ]; do opt=$1; shift case $opt in -c|--cmd) ensure_nonempty --cmd "$1" out=$($1) || fatal "command failed: $1" queue_files "$out" shift ;; --cray-mpi) # Can't act right away because we need the image path. cray_mpi=yes lib_found=yes ;; -d|--dest) ensure_nonempty --dest "$1" dest=$1 shift ;; -f|--file) ensure_nonempty --file "$1" out=$(cat "$1") || fatal "cannot read file: ${1}" queue_files "$out" shift ;; --lib-path) # Note: If this is specified along with one of the file # specification options, all the file gathering and checking work # will happen, but it will be discarded. 
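# A minimal sketch of the IFS-newline looping pattern described in the
# header comment above (names match the injection loop near the end of this
# script):
#
#    old_ifs="$IFS"
#    IFS="$newline"
#    for file in $inject_files; do
#       f="${file%%:*}"    # source half of the pair
#       d="${file#*:}"     # destination half
#       printf '%s -> %s\n' "$f" "$d"
#    done
#    IFS="$old_ifs"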
lib_found=yes lib_dest_print=yes ;; --no-ldconfig) no_ldconfig=yes ;; --nvidia) out=$(nvidia-container-cli list --binaries --libraries) \ || fatal "nvidia-container-cli failed; does this host have GPUs?" queue_files "$out" ;; -p|--path) ensure_nonempty --path "$1" queue_files "$1" shift ;; -v|--verbose) verbose=yes ;; -*) info "invalid option: ${opt}" usage ;; *) ensure_nonempty "image path" "${opt}" [ -z "$image" ] || fatal "duplicate image: ${opt}" [ -d "$opt" ] || fatal "image not a directory: ${opt}" image="$opt" ;; esac done [ "$image" ] || fatal "no image specified" if [ $cray_mpi ]; then sentinel=/etc/opt/cray/release/cle-release [ -f $sentinel ] || fatal "not found: ${sentinel}: are you on a Cray?" mpi_version=$("${ch_bin}/ch-run" "$image" -- mpirun --version || true) case $mpi_version in *mpich*) cray_mpich=yes ;; *'Open MPI'*) cray_openmpi=yes ;; *) fatal "can't find MPI in image" ;; esac fi if [ $lib_found ]; then # We want to put the libraries in the first directory that ldconfig # searches, so that we can override (or overwrite) any of the same library # that may already be in the image. debug "asking ldconfig for shared library destination" # "ldconfig -Nv" gives some pointless warnings on stderr even if # successful; we don't want to show those to users. However, we don't want # to simply pipe stderr to /dev/null because this hides real errors. Thus, # use the following abomination to pipe stdout and stderr to *separate # grep commands*. See: https://stackoverflow.com/a/31151808 lib_dest=$( { "${ch_bin}/ch-run" "$image" -- /sbin/ldconfig -Nv \ 2>&1 1>&3 3>&- | grep -Ev '(^|dynamic linker, ignoring|given more than once)$' ; } \ 3>&1 1>&2 | grep -E '^/' | cut -d: -f1 | head -1 ) [ -n "$lib_dest" ] || fatal 'empty path from ldconfig' [ -z "${lib_dest%%/*}" ] || fatal "bad path from ldconfig: ${lib_dest}" debug "shared library destination: ${lib_dest}" fi if [ $lib_dest_print ]; then echo "$lib_dest" exit 0 fi if [ $cray_mpich ]; then # Remove open source libmpi.so. # # FIXME: These versions are specific to MPICH 3.2.1. I haven't figured out # how to use glob patterns here (they don't get expanded when I tried # basic things). queue_unlink "$lib_dest/libmpi.so" queue_unlink "$lib_dest/libmpi.so.12" queue_unlink "$lib_dest/libmpi.so.12.1.1" # Directory containing Cray's libmpi.so.12. # shellcheck disable=SC2016 [ "$CRAY_MPICH_DIR" ] \ || fatal '$CRAY_MPICH_DIR not set; is module cray-mpich-abi loaded?' cray_libmpi=$CRAY_MPICH_DIR/lib/libmpi.so.12 [ -f "$cray_libmpi" ] \ || fatal "not found: ${cray_libmpi}; is module cray-mpich-abi loaded?" # Note: Most or all of these filenames are symlinks, and the copy will # convert them to normal files (with the same content). In the # documentation, we say not to do that. However, it seems to work, it's # simpler than resolving them, and we apply greater abuse to libmpi.so.12 # below. # Cray libmpi.so.12. queue_files "$cray_libmpi" # Linked dependencies. queue_files "$( ldd "$cray_libmpi" \ | grep -F /opt \ | sed -E 's/^.+ => (.+) \(0x.+\)$/\1/')" # dlopen(3)'ed dependencies. I don't know how to not hard-code these. queue_files /opt/cray/alps/default/lib64/libalpsutil.so.0.0.0 queue_files /opt/cray/alps/default/lib64/libalpslli.so.0.0.0 queue_files /opt/cray/wlm_detect/default/lib64/libwlm_detect.so.0.0.0 #queue_files /opt/cray/alps/default/lib64/libalps.so.0.0.0 fi if [ $cray_openmpi ]; then # Both MPI_ROOT and MPIHOME are the base of the OpenMPI install tree. # We use MPIHOME because some users unset MPI_ROOT. 
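# For example (image path hypothetical): for an MPICH-based image,
#
#    $ module load cray-mpich-abi
#    $ ch-fromhost --cray-mpi /var/tmp/image
#
# while an OpenMPI-based image instead needs the host OpenMPI module loaded
# so that MPIHOME is set.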
# # Note also that the OpenMPI module name is not standardized. [ "$MPIHOME" ] \ || fatal "MPIHOME not set; is OpenMPI module loaded?" # Inject libmpi from the host host_libmpi=${MPIHOME}/lib/libmpi.so [ -f "$host_libmpi" ] \ || fatal "not found: ${host_libmpi}; is OpenMPI module loaded?" queue_files "$host_libmpi" queue_files "$( ldd "$host_libmpi" \ | grep -E "/usr|/opt" \ | sed -E 's/^.+ => (.+) \(0x.+\)$/\1/')" # This appears to be the only dependency in /lib64 that we can't get from # glibc in the image. queue_files /lib64/liblustreapi.so # Remove libmpi.so* from image. This works around ParaView # dlopen(3)-related errors that we don't understand. image_libmpis=$( "${ch_bin}/ch-run" "$image" -- /sbin/ldconfig -p \ | sed -nr 's|^.* => (/.*/libmpi\.so([0-9.]+)?)$|\1|p') [ "$image_libmpis" ] || fatal "can't find libmpi.so* in image" for f in $image_libmpis; do queue_unlink "$f" queue_unlink "$("${ch_bin}/ch-run" "$image" -- readlink -f "$f")" done fi if [ $cray_mpi ]; then # ALPS libraries require the contents of this directory to be present at # the same path as the host. Create the mount point here, then ch-run # bind-mounts it later. queue_mkdir /var/opt/cray/alps/spool # libwlm_detect.so requires this file to be present. queue_mkdir /etc/opt/cray/wlm_detect queue_files /etc/opt/cray/wlm_detect/active_wlm /etc/opt/cray/wlm_detect # uGNI needs a pile of hugetlbfs filesystems at paths that are arbitrary # but in a specific order in /proc/mounts. ch-run bind-mounts here later. queue_mkdir /var/opt/cray/hugetlbfs fi [ "$inject_files" ] || fatal "empty file list" debug "injecting into image: ${image}" old_ifs="$IFS" IFS="$newline" for u in $inject_unlinks; do debug " rm -f ${image}${u}" rm -f "${image}${u}" done for d in $inject_mkdirs; do debug " mkdir -p ${image}${d}" mkdir -p "${image}${d}" done for file in $inject_files; do f="${file%%:*}" d="${file#*:}" infer= if is_bin "$f" && [ -z "$d" ]; then d=/usr/bin infer=" (inferred)" elif is_so "$f" && [ -z "$d" ]; then d=$lib_dest infer=" (inferred)" fi debug " ${f} -> ${d}${infer}" [ "$d" ] || fatal "no destination for: ${f}" [ -z "${d%%/*}" ] || fatal "not an absolute path: ${d}" [ -d "${image}${d}" ] || fatal "not a directory: ${image}${d}" if [ ! -w "${image}${d}" ]; then # Some images unpack with unwriteable directories; fix. This seems # like a bit of a kludge to me, so I'd like to remove this special # case in the future if possible. (#323) info "${image}${d} not writeable; fixing" chmod u+w "${image}${d}" || fatal "can't chmod u+w: ${image}${d}" fi cp --dereference --preserve=all "$f" "${image}${d}" \ || fatal "cannot inject: ${f}" done IFS="$old_ifs" if [ $cray_mpich ]; then # Restore libmpi.so symlink (it's part of several chains). debug " ln -s libmpi.so.12 ${image}${lib_dest}/libmpi.so" ln -s libmpi.so.12 "${image}${lib_dest}/libmpi.so" # Patch libmpi.so.12 so its soname is "libmpi.so.12" instead of e.g. # "libmpich_gnu_51.so.3". Otherwise, the application won't link without # LD_LIBRARY_PATH, and LD_LIBRARY_PATH is to be avoided. # # Note: This currently requires our patched patchelf (issue #256). 
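# One way to verify the result afterwards (library path hypothetical; it
# depends on where ldconfig put the file):
#
#    $ ch-run "$image" -- readelf -d /usr/lib/libmpi.so.12 | grep SONAME
#     0x000000000000000e (SONAME)             Library soname: [libmpi.so.12]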
debug "fixing soname on libmpi.so.12" "${ch_bin}/ch-run" -w "$image" -- \ patchelf --set-soname libmpi.so.12 "$lib_dest/libmpi.so.12" fi if [ $lib_found ] && [ -z "$no_ldconfig" ]; then debug "running ldconfig" "${ch_bin}/ch-run" -w "$image" -- /sbin/ldconfig else debug "not running ldconfig" fi charliecloud-0.9.10/bin/ch-pull2dir000077500000000000000000000011641346662313000170700ustar00rootroot00000000000000#!/bin/sh libexec="$(cd "$(dirname "$0")" && pwd)" . "${libexec}/base.sh" set -e usage () { cat 1>&2 <&2 < #include #include #include #include #include #include #include "charliecloud.h" /** Constants and macros **/ /* Environment variables used by --join parameters. */ char *JOIN_CT_ENV[] = { "OMPI_COMM_WORLD_LOCAL_SIZE", "SLURM_STEP_TASKS_PER_NODE", "SLURM_CPUS_ON_NODE", NULL }; char *JOIN_TAG_ENV[] = { "SLURM_STEP_ID", NULL }; /** Command line options **/ const char usage[] = "\ \n\ Run a command in a Charliecloud container.\n\ \v\ Example:\n\ \n\ $ ch-run /data/foo -- echo hello\n\ hello\n\ \n\ You cannot use this program to actually change your UID."; const char args_doc[] = "NEWROOT CMD [ARG...]"; const struct argp_option options[] = { { "bind", 'b', "SRC[:DST]", 0, "mount SRC at guest DST (default /mnt/0, /mnt/1, etc.)"}, { "cd", 'c', "DIR", 0, "initial working directory in container"}, { "ch-ssh", -8, 0, 0, "bind ch-ssh into image"}, { "gid", 'g', "GID", 0, "run as GID within container" }, { "join", 'j', 0, 0, "use same container as peer ch-run" }, { "join-pid", -5, "PID", 0, "join a namespace using a PID" }, { "join-ct", -3, "N", 0, "number of ch-run peers (implies --join)" }, { "join-tag", -4, "TAG", 0, "label for peer group (implies --join)" }, { "no-home", -2, 0, 0, "do not bind-mount your home directory"}, { "private-tmp", 't', 0, 0, "use container-private /tmp" }, { "set-env", -6, "FILE", 0, "set environment variables in FILE"}, { "uid", 'u', "UID", 0, "run as UID within container" }, { "unset-env", -7, "GLOB", 0, "unset environment variable(s)" }, { "verbose", 'v', 0, 0, "be more verbose (debug if repeated)" }, { "version", 'V', 0, 0, "print version and exit" }, { "write", 'w', 0, 0, "mount image read-write"}, { 0 } }; /* One possible future here is that fix_environment() ends up in charliecloud.c and we add other actions such as SET, APPEND_PATH, etc. 
*/ enum env_action { END, SET_FILE, UNSET_GLOB }; // END must be zero struct env_delta { enum env_action action; char *arg; }; struct args { struct container c; struct env_delta *env_deltas; char *initial_dir; }; /** Function prototypes **/ void env_delta_append(struct env_delta **ds, enum env_action act, char *arg); void fix_environment(struct args *args); bool get_first_env(char **array, char **name, char **value); int join_ct(int cli_ct); char *join_tag(char *cli_tag); int parse_int(char *s, bool extra_ok, char *error_tag); static error_t parse_opt(int key, char *arg, struct argp_state *state); void privs_verify_invoking(); /** Global variables **/ const struct argp argp = { options, parse_opt, args_doc, usage }; extern char **environ; // see environ(7) /** Main **/ int main(int argc, char *argv[]) { bool argp_help_fmt_set; struct args args; int arg_next; int c_argc; char ** c_argv; privs_verify_invoking(); T_ (args.c.binds = calloc(1, sizeof(struct bind))); args.c.ch_ssh = false; args.c.container_gid = getegid(); args.c.container_uid = geteuid(); args.c.join = false; args.c.join_ct = 0; args.c.join_pid = 0; args.c.join_tag = NULL; args.c.private_home = false; args.c.private_tmp = false; args.c.old_home = getenv("HOME"); args.c.writable = false; T_ (args.env_deltas = calloc(1, sizeof(struct env_delta))); args.initial_dir = NULL; verbose = 1; // in charliecloud.h /* I couldn't find a way to set argp help defaults other than this environment variable. Kludge sets/unsets only if not already set. */ if (getenv("ARGP_HELP_FMT")) argp_help_fmt_set = true; else { argp_help_fmt_set = false; Z_ (setenv("ARGP_HELP_FMT", "opt-doc-col=25,no-dup-args-note", 0)); } Z_ (argp_parse(&argp, argc, argv, 0, &(arg_next), &args)); if (!argp_help_fmt_set) Z_ (unsetenv("ARGP_HELP_FMT")); Te (arg_next < argc - 1, "NEWROOT and/or CMD not specified"); args.c.newroot = realpath(argv[arg_next], NULL); Tf (args.c.newroot != NULL, "can't find image: %s", argv[arg_next]); arg_next++; if (args.c.join) { args.c.join_ct = join_ct(args.c.join_ct); args.c.join_tag = join_tag(args.c.join_tag); } c_argc = argc - arg_next; T_ (c_argv = calloc(c_argc + 1, sizeof(char *))); for (int i = 0; i < c_argc; i++) c_argv[i] = argv[i + arg_next]; INFO("verbosity: %d", verbose); INFO("newroot: %s", args.c.newroot); INFO("container uid: %u", args.c.container_uid); INFO("container gid: %u", args.c.container_gid); INFO("join: %d %d %s %d", args.c.join, args.c.join_ct, args.c.join_tag, args.c.join_pid); INFO("private /tmp: %d", args.c.private_tmp); fix_environment(&args); containerize(&args.c); run_user_command(c_argv, args.initial_dir); // should never return exit(EXIT_FAILURE); } /** Supporting functions **/ /* Append a new env_delta to an existing null-terminated list. */ void env_delta_append(struct env_delta **ds, enum env_action act, char *arg) { int i; for (i = 0; (*ds)[i].action != END; i++) // count existing ; T_ (*ds = realloc(*ds, (i+2) * sizeof(struct env_delta))); (*ds)[i+1].action = END; (*ds)[i].action = act; (*ds)[i].arg = arg; } /* Adjust environment variables. */ void fix_environment(struct args *args) { char *name, *old_value, *new_value; // $HOME: Set to /home/$USER unless --no-home specified. if (!args->c.private_home) { old_value = getenv("USER"); if (old_value == NULL) { WARNING("$USER not set; cannot rewrite $HOME"); } else { T_ (1 <= asprintf(&new_value, "/home/%s", old_value)); Z_ (setenv("HOME", new_value, 1)); } } // $PATH: Append /bin if not already present. 
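   // For example (values hypothetical): "/usr/local/bin:/usr/bin" becomes
   // "/usr/local/bin:/usr/bin:/bin", while "/bin:/usr/bin" and
   // "/usr/bin:/bin" are left alone because /bin is already a component.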
old_value = getenv("PATH"); if (old_value == NULL) { WARNING("$PATH not set"); } else if ( strstr(old_value, "/bin") != old_value && !strstr(old_value, ":/bin")) { T_ (1 <= asprintf(&new_value, "%s:/bin", old_value)); Z_ (setenv("PATH", new_value, 1)); INFO("new $PATH: %s", new_value); } // --set-env and --unset-env. for (int i = 0; args->env_deltas[i].action != END; i++) { char *arg = args->env_deltas[i].arg; if (args->env_deltas[i].action == SET_FILE) { FILE *fp; Tf (fp = fopen(arg, "r"), "--set-env: can't open: %s", arg); for (int j = 1; true; j++) { char *line = NULL; size_t len = 0; errno = 0; if (-1 == getline(&line, &len, fp)) { if (errno == 0) // EOF break; else // error Tf (0, "--set-env: error reading: %s", arg); } if (strlen(line) == 0 || line[0] == '\n') continue; // skip empty line if (line[strlen(line) - 1] == '\n') line[strlen(line) - 1] = 0; // remove newline split(&name, &new_value, line, '='); Te (name != NULL, "--set-env: no delimiter: %s:%d", arg, j); Te (strlen(name) != 0, "--set-env: empty name: %s:%d", arg, j); if ( strlen(new_value) >= 2 && new_value[0] == '\'' && new_value[strlen(new_value) - 1] == '\'') { new_value[strlen(new_value) - 1] = 0; // strip trailing quote new_value++; // strip leading } INFO("environment: %s=%s", name, new_value); Z_ (setenv(name, new_value, 1)); } fclose(fp); } else { T_ (args->env_deltas[i].action == UNSET_GLOB); /* Removing variables from the environment is tricky, because there is no standard library function to iterate through the environment, and the environ global array can be re-ordered after unsetenv(3) [1]. Thus, the only safe way without additional storage is an O(n^2) search until no matches remain. It is legal to assign to environ [2]. We build up a copy, omitting variables that match the glob, which is O(n), and then do so. [1]: https://unix.stackexchange.com/a/302987 [2]: http://man7.org/linux/man-pages/man3/exec.3p.html */ char **new_environ; int old_i, new_i; for (old_i = 0; environ[old_i] != NULL; old_i++) ; T_ (new_environ = calloc(old_i + 1, sizeof(char *))); for (old_i = 0, new_i = 0; environ[old_i] != NULL; old_i++) { int matchp; split(&name, &old_value, environ[old_i], '='); T_ (name != NULL); // env lines should always have equals matchp = fnmatch(arg, name, 0); if (!matchp) { INFO("environment: unset %s", name); } else { T_ (matchp == FNM_NOMATCH); *(old_value - 1) = '='; // rejoin line new_environ[new_i++] = name; } } environ = new_environ; } } } /* Find the first environment variable in array that is set; put its name in *name and its value in *value, and return true. If none are set, return false, and *name and *value are undefined. */ bool get_first_env(char **array, char **name, char **value) { for (int i = 0; array[i] != NULL; i++) { *name = array[i]; *value = getenv(*name); if (*value != NULL) return true; } return false; } /* Find an appropriate join count; assumes --join was specified or implied. Exit with error if no valid value is available. */ int join_ct(int cli_ct) { int j = 0; char *ev_name, *ev_value; if (cli_ct != 0) { INFO("join: peer group size from command line"); j = cli_ct; goto end; } if (get_first_env(JOIN_CT_ENV, &ev_name, &ev_value)) { INFO("join: peer group size from %s", ev_name); j = parse_int(ev_value, true, ev_name); goto end; } end: Te(j > 0, "join: no valid peer group size found"); return j; } /* Find an appropriate join tag; assumes --join was specified or implied. Exit with error if no valid value is found. 
*/ char *join_tag(char *cli_tag) { char *tag; char *ev_name, *ev_value; if (cli_tag != NULL) { INFO("join: peer group tag from command line"); tag = cli_tag; goto end; } if (get_first_env(JOIN_TAG_ENV, &ev_name, &ev_value)) { INFO("join: peer group tag from %s", ev_name); tag = ev_value; goto end; } INFO("join: peer group tag from getppid(2)"); T_ (1 <= asprintf(&tag, "%d", getppid())); end: Te(tag[0] != '\0', "join: peer group tag cannot be empty string"); return tag; } /* Parse an integer string arg and return the result. If an error occurs, print a message prefixed by error_tag and exit. If not extra_ok, additional characters remaining after the integer are an error. */ int parse_int(char *s, bool extra_ok, char *error_tag) { char *end; long l; errno = 0; l = strtol(s, &end, 10); Tf (errno == 0, error_tag); Ze (end == s, "%s: no digits found", error_tag); if (!extra_ok) Te (*end == 0, "%s: extra characters after digits", error_tag); Te (l >= INT_MIN && l <= INT_MAX, "%s: out of range", error_tag); return (int)l; } /* Parse one command line option. Called by argp_parse(). */ static error_t parse_opt(int key, char *arg, struct argp_state *state) { struct args *args = state->input; int i; switch (key) { case -2: // --private-home args->c.private_home = true; break; case -3: // --join-ct args->c.join = true; args->c.join_ct = parse_int(arg, false, "--join-ct"); break; case -4: // --join-tag args->c.join = true; args->c.join_tag = arg; break; case -5: // --join-pid args->c.join_pid = parse_int(arg, false, "--join-pid"); break; case -6: // --set-env env_delta_append(&(args->env_deltas), SET_FILE, arg); break; case -7: // --unset-env Te (strlen(arg) > 0, "--unset-env: GLOB must have non-zero length"); env_delta_append(&(args->env_deltas), UNSET_GLOB, arg); break;; case -8: // --ch-ssh args->c.ch_ssh = true; break; case 'c': args->initial_dir = arg; break; case 'b': for (i = 0; args->c.binds[i].src != NULL; i++) // count existing binds ; T_ (args->c.binds = realloc(args->c.binds, (i+2) * sizeof(struct bind))); args->c.binds[i+1].src = NULL; // terminating zero args->c.binds[i].src = strsep(&arg, ":"); assert(args->c.binds[i].src != NULL); if (arg) args->c.binds[i].dst = arg; else // arg is NULL => no destination specified T_ (1 <= asprintf(&(args->c.binds[i].dst), "/mnt/%d", i)); Te (args->c.binds[i].src[0] != 0, "--bind: no source provided"); Te (args->c.binds[i].dst[0] != 0, "--bind: no destination provided"); break; case 'g': i = parse_int(arg, false, "--gid"); Te (i >= 0, "--gid: must be non-negative"); args->c.container_gid = (gid_t) i; break; case 'j': args->c.join = true; break; case 't': args->c.private_tmp = true; break; case 'u': i = parse_int(arg, false, "--uid"); Te (i >= 0, "--uid: must be non-negative"); args->c.container_uid = (uid_t) i; break; case 'V': version(); exit(EXIT_SUCCESS); break; case 'v': verbose++; Te(verbose <= 3, "--verbose can be specified at most twice"); break; case 'w': args->c.writable = true; break; default: return ARGP_ERR_UNKNOWN; }; return 0; } /* Validate that the UIDs and GIDs are appropriate for program start, and abort if not. Note: If the binary is setuid, then the real UID will be the invoking user and the effective and saved UIDs will be the owner of the binary. Otherwise, all three IDs are that of the invoking user. */ void privs_verify_invoking() { uid_t ruid, euid, suid; gid_t rgid, egid, sgid; Z_ (getresuid(&ruid, &euid, &suid)); Z_ (getresgid(&rgid, &egid, &sgid)); // Calling the program if user is really root is OK. 
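   /* For example (UIDs illustrative): invoked by UID 1000 as an ordinary,
      non-setuid binary, getresuid(2) yields ruid=euid=suid=1000, which the
      checks below accept; the same binary setuid to root yields ruid=1000,
      euid=suid=0, which the euid != 0 check rejects. */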
if ( ruid == 0 && euid == 0 && suid == 0 && rgid == 0 && egid == 0 && sgid == 0) return; // Now that we know user isn't root, no GID privilege is allowed. T_ (egid != 0); // no privilege T_ (egid == rgid && egid == sgid); // no setuid or funny business // No UID privilege allowed either. T_ (euid != 0); // no privilege T_ (euid == ruid && euid == suid); // no setuid or funny business } charliecloud-0.9.10/bin/ch-ssh.c000066400000000000000000000036521346662313000163520ustar00rootroot00000000000000/* Copyright © Los Alamos National Security, LLC, and others. */ #define _GNU_SOURCE #include #include #include #include #include #include "charliecloud.h" const char usage[] = "\ Usage: CH_RUN_ARGS=\"NEWROOT [ARG...]\" ch-ssh [OPTION...] HOST CMD [ARG...]\n\ \n\ Run a remote command in a Charliecloud container.\n\ \n\ Example:\n\ \n\ $ export CH_RUN_ARGS=/data/foo\n\ $ ch-ssh example.com -- echo hello\n\ hello\n\ \n\ Arguments to ch-run, including the image to activate, are specified in the\n\ CH_RUN_ARGS environment variable. Important caveat: Words in CH_RUN_ARGS are\n\ delimited by spaces only; it is not shell syntax. In particular, quotes and\n\ and backslashes are not interpreted.\n"; #define ARGS_MAX 262143 // assume 2MB buffer and length of each argument >= 7 int main(int argc, char *argv[]) { int i, j; char *ch_run_args; char *args[ARGS_MAX+1]; if (argc >= 2 && strcmp(argv[1], "--help") == 0) { fprintf(stderr, usage); return 0; } if (argc >= 2 && strcmp(argv[1], "--version") == 0) { version(); return 0; } memset(args, 0, sizeof(args)); args[0] = "ssh"; // ssh option arguments for (i = 1; i < argc && i < ARGS_MAX && argv[i][0] == '-'; i++) args[i] = argv[i]; // destination host if (i < argc && i < ARGS_MAX) { args[i] = argv[i]; i++; } // insert ch-run command ch_run_args = getenv("CH_RUN_ARGS"); Te (ch_run_args != NULL, "CH_RUN_ARGS not set"); args[i] = "ch-run"; for (j = 1; i + j < ARGS_MAX; j++, ch_run_args = NULL) { args[i+j] = strtok(ch_run_args, " "); if (args[i+j] == NULL) break; } // copy remaining arguments for ( ; i < argc && i + j < ARGS_MAX; i++) args[i+j] = argv[i]; //for (i = 0; args[i] != NULL; i++) // printf("%d: %s\n", i, args[i]); execvp("ssh", args); Tf (0, "can't execute ssh"); } charliecloud-0.9.10/bin/ch-tar2dir000077500000000000000000000071151346662313000167040ustar00rootroot00000000000000#!/bin/sh set -e libexec="$(cd "$(dirname "$0")" && pwd)" . "${libexec}/base.sh" # shellcheck disable=SC2034 usage=$(cat <&2 exit 1 fi if [ ! -d "${2}" ]; then echo "can't unpack: ${2} is not a directory" 1>&2 exit 1 fi # Figure out the real tarball name. If the provided $1 already has a tar # extension, just test that name; if not, also append the plausible extensions # and try those too. for ext in '' .tar.gz .tar.xz .tgz; do c=${1}${ext} if [ ! -f "$c" ] || [ ! -r "$c" ]; then echo "can't read: ${c}" 1>&2 case $1 in *.tar.*|*.tgz) break ;; *) continue ;; esac fi tarball=$c if [ -n "$ext" ]; then echo "found: ${tarball}" 1>&2 fi # Infer decompression argument because GNU tar is unable to do so if input # is a pipe, and we want to keep PV. See: # https://www.gnu.org/software/tar/manual/html_section/tar_68.html case $tarball in *.tar.gz) newroot=${2}/$(basename "${tarball%.tar.gz}") decompress=z ;; *.tar.xz) newroot=${2}/$(basename "${tarball%.tar.xz}") decompress=J ;; *.tgz) newroot=${2}/$(basename "${tarball%.tgz}") decompress=z ;; *) echo "unknown extension: ${tarball}" 1>&2 exit 1 ;; esac break done if [ -z "$tarball" ]; then echo "no input found" 1>&2 exit 1 fi if [ ! 
-d "$newroot" ]; then echo "creating new image ${newroot}" else if [ -f "${newroot}/${sentinel}" ] \ && [ -d "${newroot}/bin" ] \ && [ -d "${newroot}/lib" ] \ && [ -d "${newroot}/usr" ]; then echo "replacing existing image ${newroot}" 1>&2 rm -Rf --one-file-system "${newroot}" else echo "${newroot} exists but does not appear to be an image" 1>&2 exit 1 fi fi mkdir "$newroot" # Use a pipe because PV ignores arguments if it's cat rather than PV. # # See FAQ on /dev exclusion. --no-wildcards-match-slash is needed to prevent * # matching multiple directories; the tar default differs from sh behavior. size=$(stat -c%s "$tarball") pv_ -s "$size" < "$tarball" \ | tar x$decompress -C "$newroot" -f - \ --anchored --no-wildcards-match-slash \ --exclude='dev/*' --exclude='*/dev/*' # Make all directories writeable so we can delete image later (hello, Red Hat). find "$newroot" -type d -a ! -perm /200 -exec chmod u+w {} + # If tarball had a single containing directory, move the contents up a level # and remove the containing directory. It is non-trivial in POSIX sh to deal # with hidden files; see https://unix.stackexchange.com/a/6397. files=$(ls -Aq "$newroot") if [ "$(echo "$files" | wc -l)" -eq 1 ]; then ( cd "${newroot}/${files}" for f in * .[!.]* ..?*; do if [ -e "$f" ]; then mv -- "$f" ..; fi done ) rmdir "${newroot}/${files}" fi # Ensure directories that ch-run needs exist. echo 'This directory is a Charliecloud image.' > "${newroot}/${sentinel}" mkdir -p "${newroot}/dev" "${newroot}/etc" "${newroot}/proc" "${newroot}/sys" touch "${newroot}/etc/hosts" "${newroot}/etc/resolv.conf" for i in $(seq 0 9); do mkdir -p "${newroot}/mnt/${i}"; done echo "${newroot} unpacked ok" charliecloud-0.9.10/bin/charliecloud.c000066400000000000000000000452071346662313000176250ustar00rootroot00000000000000/* Copyright © Los Alamos National Security, LLC, and others. */ #define _GNU_SOURCE #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "charliecloud.h" #include "version.h" /** Macros **/ /* Log the current UIDs. */ #define LOG_IDS log_ids(__func__, __LINE__) /* Timeout in seconds for waiting for join semaphore. */ #define JOIN_TIMEOUT 30 /* Maximum length of paths we're willing to deal with. (Note that system-defined PATH_MAX isn't reliable.) */ #define PATH_CHARS 4096 /* Number of supplemental GIDs we can deal with. */ #define SUPP_GIDS_MAX 128 /** Constants **/ /* Names of verbosity levels. */ const char *VERBOSE_LEVELS[] = { "error", "warning", "info", "debug" }; /* Default bind-mounts. */ struct bind BINDS_REQUIRED[] = { { "/dev", "/dev" }, { "/etc/hosts", "/etc/hosts" }, { "/etc/resolv.conf", "/etc/resolv.conf" }, { "/proc", "/proc" }, { "/sys", "/sys" }, { NULL, NULL } }; struct bind BINDS_OPTIONAL[] = { { "/var/opt/cray/alps/spool", "/var/opt/cray/alps/spool" }, { "/var/lib/hugetlbfs", "/var/opt/cray/hugetlbfs" }, { NULL, NULL } }; /** External variables **/ /* Level of chatter on stderr desired (0-3). */ int verbose; /** Global variables **/ /* Variables for coordinating --join. 
*/ struct { bool winner_p; char *sem_name; sem_t *sem; char *shm_name; struct { pid_t winner_pid; // access anytime after initialization (write-once) int proc_left_ct; // access only while serial } *shared; } join; /** Function prototypes **/ void bind_mount(char *src, char *dst, char *newroot, enum bind_dep dep, unsigned long flags); void bind_mounts(struct bind *binds, char *newroot, enum bind_dep dep, unsigned long flags); char *cat(char *a, char *b); void enter_udss(struct container *c); void join_begin(int join_ct, char *join_tag); void join_namespace(pid_t pid, char *ns); void join_namespaces(pid_t pid); void join_end(); void log_ids(const char *func, int line); bool path_exists(char *path); unsigned long path_mount_flags(char *path); void path_split(char *path, char **dir, char **base); void sem_timedwait_relative(sem_t *sem, int timeout); void setup_namespaces(struct container *c); void setup_passwd(struct container *c); void tmpfs_mount(char *dst, char *newroot, char *data); /** Functions **/ /* Bind-mount the given path into the container image. */ void bind_mount(char *src, char *dst, char *newroot, enum bind_dep dep, unsigned long flags) { char *dst_full = cat(newroot, dst); if (!path_exists(src)) { Te (dep == BD_OPTIONAL, "can't bind: not found: %s", src); return; } if (!path_exists(dst_full)) { Te (dep == BD_OPTIONAL, "can't bind: not found: %s", dst_full); return; } Zf (mount(src, dst_full, NULL, MS_REC|MS_BIND|flags, NULL), "can't bind %s to %s", src, dst_full); } /* Bind-mount a null-terminated array of struct bind objects. */ void bind_mounts(struct bind *binds, char * newroot, enum bind_dep dep, unsigned long flags) { for (int i = 0; binds[i].src != NULL; i++) bind_mount(binds[i].src, binds[i].dst, newroot, dep, flags); } /* Concatenate strings a and b, then return the result. */ char *cat(char *a, char *b) { char *ret; T_ (1 <= asprintf(&ret, "%s%s", a, b)); return ret; } /* Set up new namespaces or join existing namespaces. */ void containerize(struct container *c) { if (c->join_pid) { join_namespaces(c->join_pid); return; } if (c->join) join_begin(c->join_ct, c->join_tag); if (!c->join || join.winner_p) { setup_namespaces(c); enter_udss(c); } else join_namespaces(join.shared->winner_pid); if (c->join) join_end(); } /* Enter the UDSS. After this, we are inside the UDSS. Note that pivot_root(2) requires a complex dance to work, i.e., to avoid multiple undocumented error conditions. This dance is explained in detail in examples/syscalls/pivot_root.c. */ void enter_udss(struct container *c) { char *newroot_parent, *newroot_base; LOG_IDS; path_split(c->newroot, &newroot_parent, &newroot_base); // Claim new root for this namespace. We do need both calls to avoid // pivot_root(2) failing with EBUSY later. bind_mount(c->newroot, c->newroot, "", BD_REQUIRED, MS_PRIVATE); bind_mount(newroot_parent, newroot_parent, "", BD_REQUIRED, MS_PRIVATE); // Bind-mount default files and directories. bind_mounts(BINDS_REQUIRED, c->newroot, BD_REQUIRED, MS_RDONLY); bind_mounts(BINDS_OPTIONAL, c->newroot, BD_OPTIONAL, MS_RDONLY); // /etc/passwd and /etc/group. setup_passwd(c); // Container /tmp. if (c->private_tmp) { tmpfs_mount("/tmp", c->newroot, NULL); } else { bind_mount("/tmp", "/tmp", c->newroot, BD_REQUIRED, 0); } // Container /home. if (!c->private_home) { char *newhome; // Mount tmpfs on guest /home because guest root is read-only tmpfs_mount("/home", c->newroot, "size=4m"); // Bind-mount user's home directory at /home/$USER. The main use case is // dotfiles. 
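      /* For example (username hypothetical): for host user "alice", the
         guest sees a 4 MiB tmpfs at /home containing only /home/alice,
         a bind-mount of the host home directory; fix_environment() has
         already pointed $HOME there. --no-home skips all of this. */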
Tf (c->old_home != NULL, "cannot find home directory: is $HOME set?");
      newhome = cat("/home/", getenv("USER"));
      Z_ (mkdir(cat(c->newroot, newhome), 0755));
      bind_mount(c->old_home, newhome, c->newroot, BD_REQUIRED, 0);
   }
   // Container /usr/bin/ch-ssh.
   if (c->ch_ssh) {
      char chrun_file[PATH_CHARS];
      int len = readlink("/proc/self/exe", chrun_file, PATH_CHARS);
      T_ (len >= 0);
      Te (path_exists(cat(c->newroot, "/usr/bin/ch-ssh")),
          "--ch-ssh: /usr/bin/ch-ssh not in image");
      chrun_file[len < PATH_CHARS ? len : PATH_CHARS - 1] = 0;
      bind_mount(cat(dirname(chrun_file), "/ch-ssh"), "/usr/bin/ch-ssh",
                 c->newroot, BD_REQUIRED, 0);
   }
   // Bind-mount user-specified directories at guest DST and/or /mnt/i,
   // which must exist.
   bind_mounts(c->binds, c->newroot, BD_REQUIRED, 0);
   // Overmount / to avoid EINVAL if it's a rootfs.
   Z_ (chdir(newroot_parent));
   Z_ (mount(newroot_parent, "/", NULL, MS_MOVE, NULL));
   Z_ (chroot("."));
   c->newroot = cat("/", newroot_base);
   // Re-mount new root read-only unless --write or already read-only.
   if (!c->writable && !(access(c->newroot, W_OK) == -1 && errno == EROFS)) {
      unsigned long flags =   path_mount_flags(c->newroot)
                            | MS_REMOUNT  // Re-mount ...
                            | MS_BIND     // only this mount point ...
                            | MS_RDONLY;  // read-only.
      Zf (mount(NULL, c->newroot, NULL, flags, NULL),
          "can't re-mount image read-only (is it on NFS?)");
   }
   // Pivot into the new root. Use /dev because it's available even in
   // extremely minimal images.
   Zf (chdir(c->newroot), "can't chdir into new root");
   Zf (syscall(SYS_pivot_root, c->newroot, cat(c->newroot, "/dev")),
       "can't pivot_root(2)");
   Zf (chroot("."), "can't chroot(2) into new root");
   Zf (umount2("/dev", MNT_DETACH), "can't umount old root");
}

/* Begin coordinated section of namespace joining. */
void join_begin(int join_ct, char *join_tag)
{
   int fd;

   join.sem_name = cat("/ch-run_", join_tag);
   join.shm_name = cat("/ch-run_", join_tag);

   // Serialize.
   join.sem = sem_open(join.sem_name, O_CREAT, 0600, 1);
   T_ (join.sem != SEM_FAILED);
   sem_timedwait_relative(join.sem, JOIN_TIMEOUT);

   // Am I the winner?
   fd = shm_open(join.shm_name, O_CREAT|O_EXCL|O_RDWR, 0600);
   if (fd > 0) {
      INFO("join: I won");
      join.winner_p = true;
      Z_ (ftruncate(fd, sizeof(*join.shared)));
   } else if (errno == EEXIST) {
      join.winner_p = false;
      fd = shm_open(join.shm_name, O_RDWR, 0);
      T_ (fd > 0);
   } else {
      T_ (0);
   }

   join.shared = mmap(NULL, sizeof(*join.shared), PROT_READ|PROT_WRITE,
                      MAP_SHARED, fd, 0);
   T_ (join.shared != MAP_FAILED);  // mmap(2) reports failure as MAP_FAILED,
                                    // not NULL
   Z_ (close(fd));

   if (join.winner_p) {
      join.shared->winner_pid = getpid();
      join.shared->proc_left_ct = join_ct;
      // Keep lock; winner still serialized.
   } else {
      INFO("join: winner pid: %d", join.shared->winner_pid);
      Z_ (sem_post(join.sem));
      // Losers run in parallel (winner will be done by now).
   }
}

/* End coordinated section of namespace joining. */
void join_end()
{
   // Serialize (winner never released lock).
   if (!join.winner_p)
      sem_timedwait_relative(join.sem, JOIN_TIMEOUT);

   join.shared->proc_left_ct--;
   INFO("join: %d peers left excluding myself", join.shared->proc_left_ct);

   if (join.shared->proc_left_ct <= 0) {
      INFO("join: cleaning up IPC resources");
      Te (join.shared->proc_left_ct == 0,
          "expected 0 peers left but found %d", join.shared->proc_left_ct);
      Zf (sem_unlink(join.sem_name), "can't unlink sem: %s", join.sem_name);
      Zf (shm_unlink(join.shm_name), "can't unlink shm: %s", join.shm_name);
   }

   // Parallelize.
   Z_ (sem_post(join.sem));

   Z_ (munmap(join.shared, sizeof(*join.shared)));
   Z_ (sem_close(join.sem));

   INFO("join: done");
}

/* Join a specific namespace.
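   The file descriptor comes from /proc/<pid>/ns/<ns>; handing it to
   setns(2) moves the calling process into that namespace. Joining a user
   namespace requires CAP_SYS_ADMIN in the *target* namespace; that holds
   here because the joining ch-run runs as the same user that created the
   namespace, and a user has all capabilities in user namespaces it owns.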
*/
void join_namespace(pid_t pid, char *ns)
{
   char *path;
   int fd;

   T_ (1 <= asprintf(&path, "/proc/%d/ns/%s", pid, ns));
   fd = open(path, O_RDONLY);
   if (fd == -1) {
      if (errno == ENOENT) {
         Te (0, "join: no PID %d: %s not found", pid, path);
      } else {
         Tf (0, "join: can't open %s", path);
      }
   }
   Zf (setns(fd, 0), "can't join %s namespace of pid %d", ns, pid);
}

/* Join the existing namespaces created by the join winner. */
void join_namespaces(pid_t pid)
{
   INFO("joining namespaces of pid %d", pid);
   join_namespace(pid, "user");
   join_namespace(pid, "mnt");
}

/* If verbose, print uids and gids on stderr prefixed with where. */
void log_ids(const char *func, int line)
{
   uid_t ruid, euid, suid;
   gid_t rgid, egid, sgid;
   gid_t supp_gids[SUPP_GIDS_MAX];
   int supp_gid_ct;

   if (verbose >= 3) {
      Z_ (getresuid(&ruid, &euid, &suid));
      Z_ (getresgid(&rgid, &egid, &sgid));
      fprintf(stderr, "%s %d: uids=%d,%d,%d, gids=%d,%d,%d + ", func, line,
              ruid, euid, suid, rgid, egid, sgid);
      supp_gid_ct = getgroups(SUPP_GIDS_MAX, supp_gids);
      if (supp_gid_ct == -1) {
         T_ (errno == EINVAL);
         Te (0, "more than %d groups", SUPP_GIDS_MAX);
      }
      for (int i = 0; i < supp_gid_ct; i++) {
         if (i > 0)
            fprintf(stderr, ",");
         fprintf(stderr, "%d", supp_gids[i]);
      }
      fprintf(stderr, "\n");
   }
}

/* Print a formatted message on stderr if the level warrants it. Levels:

     0 : "error"   : always print; exit unsuccessfully afterwards
     1 : "warning" : always print
     2 : "info"    : print if verbose >= 2
     3 : "debug"   : print if verbose >= 3 */
void msg(int level, char *file, int line, int errno_, char *fmt, ...)
{
   va_list ap;

   if (level > verbose)
      return;
   fprintf(stderr, "%s[%d]: ", program_invocation_short_name, getpid());
   if (fmt == NULL)
      fputs(VERBOSE_LEVELS[level], stderr);
   else {
      va_start(ap, fmt);
      vfprintf(stderr, fmt, ap);
      va_end(ap);
   }
   if (errno_)
      fprintf(stderr, ": %s (%s:%d %d)\n",
              strerror(errno_), file, line, errno_);
   else
      fprintf(stderr, " (%s:%d)\n", file, line);
   if (level == 0)
      exit(EXIT_FAILURE);
}

/* Return true if the given path exists, false otherwise. On error, exit. */
bool path_exists(char *path)
{
   struct stat sb;

   if (stat(path, &sb) == 0)
      return true;
   Tf (errno == ENOENT, "can't stat: %s", path);
   return false;
}

/* Return the mount flags of the file system containing path, suitable for
   passing to mount(2).

   This is messy because the flags we get from statvfs(3) are ST_* while the
   flags needed by mount(2) are MS_*. My glibc has a comment in
   bits/statvfs.h that the ST_* "should be kept in sync with" the MS_*
   flags, and the values do seem to match, but there are additional
   undocumented flags in there. Also, the kernel contains a test
   "unprivileged-remount-test.c" that manually translates the flags. Thus, I
   wasn't comfortable simply passing the output of statvfs(3) to mount(2). */
unsigned long path_mount_flags(char *path)
{
   struct statvfs sv;
   unsigned long known_flags =   ST_MANDLOCK | ST_NOATIME | ST_NODEV
                               | ST_NODIRATIME | ST_NOEXEC | ST_NOSUID
                               | ST_RDONLY | ST_RELATIME | ST_SYNCHRONOUS;

   Z_ (statvfs(path, &sv));
   Ze (sv.f_flag & ~known_flags, "unknown mount flags: 0x%lx %s",
       sv.f_flag & ~known_flags, path);

   return   (sv.f_flag & ST_MANDLOCK    ? MS_MANDLOCK    : 0)
          | (sv.f_flag & ST_NOATIME     ? MS_NOATIME     : 0)
          | (sv.f_flag & ST_NODEV       ? MS_NODEV       : 0)
          | (sv.f_flag & ST_NODIRATIME  ? MS_NODIRATIME  : 0)
          | (sv.f_flag & ST_NOEXEC      ? MS_NOEXEC      : 0)
          | (sv.f_flag & ST_NOSUID      ? MS_NOSUID      : 0)
          | (sv.f_flag & ST_RDONLY      ? MS_RDONLY      : 0)
          | (sv.f_flag & ST_RELATIME    ? MS_RELATIME    : 0)
          | (sv.f_flag & ST_SYNCHRONOUS ? MS_SYNCHRONOUS : 0);
}

/* Split path into dirname and basename.
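   Two separate strdup(3) calls are needed because POSIX dirname(3) and
   basename(3) may modify the string they are given and may return pointers
   into it, so neither the input nor a single copy can safely serve both
   calls.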
*/
void path_split(char *path, char **dir, char **base)
{
   char *path2;

   T_ (path2 = strdup(path));
   *dir = dirname(path2);
   T_ (path2 = strdup(path));
   *base = basename(path2);
}

/* Replace the current process with user command and arguments. */
void run_user_command(char *argv[], char *initial_dir)
{
   LOG_IDS;

   if (initial_dir != NULL)
      Zf (chdir(initial_dir), "can't cd to %s", initial_dir);

   if (verbose >= 3) {
      fprintf(stderr, "argv:");
      for (int i = 0; argv[i] != NULL; i++)
         fprintf(stderr, " \"%s\"", argv[i]);
      fprintf(stderr, "\n");
   }

   Zf (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0), "can't set no_new_privs");
   execvp(argv[0], argv);  // only returns if error
   Tf (0, "can't execve(2): %s", argv[0]);
}

/* Wait for semaphore sem for up to timeout seconds. If timeout or an error,
   exit unsuccessfully. */
void sem_timedwait_relative(sem_t *sem, int timeout)
{
   struct timespec deadline;

   // sem_timedwait() requires a deadline rather than a timeout.
   Z_ (clock_gettime(CLOCK_REALTIME, &deadline));
   deadline.tv_sec += timeout;

   if (sem_timedwait(sem, &deadline)) {
      Ze (errno == ETIMEDOUT, "timeout waiting for join lock");
      Tf (0, "failure waiting for join lock");
   }
}

/* Activate the desired isolation namespaces. */
void setup_namespaces(struct container *c)
{
   int fd;
   uid_t euid = -1;
   gid_t egid = -1;

   euid = geteuid();
   egid = getegid();

   LOG_IDS;
   Zf (unshare(CLONE_NEWNS|CLONE_NEWUSER),
       "can't init user+mount namespaces");
   LOG_IDS;

   /* Write UID map. What we are allowed to put here is quite limited.
      Because we do not have CAP_SETUID in the *parent* user namespace, we
      can map exactly one UID: an arbitrary container UID to our EUID in the
      parent namespace.

      This is sufficient to change our UID within the container; no
      setuid(2) or similar required. This is because the EUID of the process
      in the parent namespace is unchanged, so the kernel uses our new
      1-to-1 map to convert that EUID into the container UID for most
      (maybe all) purposes. */
   T_ (-1 != (fd = open("/proc/self/uid_map", O_WRONLY)));
   T_ (1 <= dprintf(fd, "%d %d 1\n", c->container_uid, euid));
   Z_ (close(fd));
   LOG_IDS;

   T_ (-1 != (fd = open("/proc/self/setgroups", O_WRONLY)));
   T_ (1 <= dprintf(fd, "deny\n"));
   Z_ (close(fd));
   T_ (-1 != (fd = open("/proc/self/gid_map", O_WRONLY)));
   T_ (1 <= dprintf(fd, "%d %d 1\n", c->container_gid, egid));
   Z_ (close(fd));
   LOG_IDS;
}

/* Build /etc/passwd and /etc/group files and bind-mount them into newroot.

   We do it this way so that we capture the relevant host username and group
   name mappings regardless of where they come from. (We used to simply
   bind-mount the host's /etc/passwd and /etc/group, but this fails for LDAP
   at least; see issue #212.)

   After bind-mounting, we remove them on the host side; they'll persist
   inside the container and then disappear completely when the latter exits.
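   For example, for container UID/GID 1000/1000 and host user "charlie"
   (values illustrative), the generated files read:

     passwd:  root:x:0:0:root:/root:/bin/sh
              nobody:x:65534:65534:nobody:/:/bin/false
              charlie:x:1000:1000:Charlie:/home/charlie:/bin/sh

     group:   root:x:0:
              nogroup:x:65534:
              charlie:x:1000: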
*/
void setup_passwd(struct container *c)
{
   int fd;
   char *path;
   struct group *g;
   struct passwd *p;

   // /etc/passwd
   T_ (path = strdup("/tmp/ch-run_passwd.XXXXXX"));
   T_ (-1 != (fd = mkstemp(path)));
   if (c->container_uid != 0)
      T_ (1 <= dprintf(fd, "root:x:0:0:root:/root:/bin/sh\n"));
   if (c->container_uid != 65534)
      T_ (1 <= dprintf(fd, "nobody:x:65534:65534:nobody:/:/bin/false\n"));
   T_ (p = getpwuid(c->container_uid));
   T_ (1 <= dprintf(fd, "%s:x:%u:%u:%s:/home/%s:/bin/sh\n", p->pw_name,
                    c->container_uid, c->container_gid, p->pw_gecos,
                    getenv("USER")));
   Z_ (close(fd));
   bind_mount(path, "/etc/passwd", c->newroot, BD_REQUIRED, 0);
   Z_ (unlink(path));

   // /etc/group
   T_ (path = strdup("/tmp/ch-run_group.XXXXXX"));
   T_ (-1 != (fd = mkstemp(path)));
   if (c->container_gid != 0)
      T_ (1 <= dprintf(fd, "root:x:0:\n"));
   if (c->container_gid != 65534)
      T_ (1 <= dprintf(fd, "nogroup:x:65534:\n"));
   T_ (g = getgrgid(c->container_gid));
   T_ (1 <= dprintf(fd, "%s:x:%u:\n", g->gr_name, c->container_gid));
   Z_ (close(fd));
   bind_mount(path, "/etc/group", c->newroot, BD_REQUIRED, 0);
   Z_ (unlink(path));
}

/* Split string str at first instance of delimiter del. Set *a to the part
   before del, and *b to the part after. Both can be empty; if no token is
   present, set both to NULL. Unlike strsep(3), str is unchanged; *a and *b
   point into a new buffer allocated with malloc(3). This has two
   implications: (1) the caller must free(3) *a but not *b, and (2) the
   parts can be rejoined by setting *(*b-1) to del. The point here is to
   provide an easier wrapper for strsep(3). */
void split(char **a, char **b, char *str, char del)
{
   char delstr[2] = { del, 0 };

   T_ (str != NULL);
   str = strdup(str);
   *b = str;
   *a = strsep(b, delstr);
   if (*b == NULL)
      *a = NULL;
}

/* Mount a tmpfs at the given path. */
void tmpfs_mount(char *dst, char *newroot, char *data)
{
   char *dst_full = cat(newroot, dst);

   Zf (mount(NULL, dst_full, "tmpfs", 0, data),
       "can't mount tmpfs at %s", dst_full);
}

/* Report the version number. */
void version(void)
{
   fprintf(stderr, "%s\n", VERSION);
}
charliecloud-0.9.10/bin/charliecloud.h000066400000000000000000000063701346662313000176300ustar00rootroot00000000000000/* Copyright © Los Alamos National Security, LLC, and others. */

#define _GNU_SOURCE
#include <stdbool.h>  // header name lost in extraction; stdbool.h inferred
                      // from the bool fields and prototypes below

/* Test some value, and if it's not what we expect, exit with an error.
   These are macros so we have access to the file and line number.

   verify x is true (non-zero); otherwise print then exit:

     T_ (x)            default error message including file, line, errno
     Tf (x, fmt, ...)  printf-style message followed by file, line, errno
     Te (x, fmt, ...)  same without errno

   verify x is zero (false); otherwise print as above & exit

     Z_ (x)
     Zf (x, fmt, ...)
     Ze (x, fmt, ...)

   errno is omitted if it's zero.

   Examples:

     Z_ (chdir("/does/not/exist"));
       -> ch-run: error: No such file or directory (ch-run.c:138 2)
     Zf (chdir("/does/not/exist"), "foo");
       -> ch-run: foo: No such file or directory (ch-run.c:138 2)
     Ze (chdir("/does/not/exist"), "foo");
       -> ch-run: foo (ch-run.c:138)
     errno = 0;
     Zf (0, "foo");
       -> ch-run: foo (ch-run.c:138)

   Typically, Z_ and Zf are used to check system and standard library calls,
   while T_ and Tf are used to assert developer-specified conditions.

   errno is not altered by these macros unless they exit the program.

   FIXME: It would be nice if we could collapse these to fewer macros.
   However, when looking into that I ended up in preprocessor black magic
   (e.g. https://stackoverflow.com/a/2308651) that I didn't understand.
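   One possible consolidation is sketched below with a hypothetical CHECK_
   macro (an assumption for illustration, not part of the Charliecloud
   API): */

/* Hypothetical sketch: fold the six macros into one that takes the
   condition that must hold and whether to report errno. T_(x) would become
   CHECK_(x, 1, NULL), Zf(x, ...) would become CHECK_(!(x), 1, __VA_ARGS__),
   Ze(x, ...) would become CHECK_(!(x), 0, __VA_ARGS__), and so on.
   Untested sketch only. */
#define CHECK_(cond, with_errno, ...) \
   if (!(cond)) \
      msg(0, __FILE__, __LINE__, (with_errno) ? errno : 0, __VA_ARGS__)

/* The actual macros: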
*/
#define T_(x)      if (!(x)) msg(0, __FILE__, __LINE__, errno, NULL)
#define Tf(x, ...) if (!(x)) msg(0, __FILE__, __LINE__, errno, __VA_ARGS__)
#define Te(x, ...) if (!(x)) msg(0, __FILE__, __LINE__, 0, __VA_ARGS__)
#define Z_(x)      if (x) msg(0, __FILE__, __LINE__, errno, NULL)
#define Zf(x, ...) if (x) msg(0, __FILE__, __LINE__, errno, __VA_ARGS__)
#define Ze(x, ...) if (x) msg(0, __FILE__, __LINE__, 0, __VA_ARGS__)

#define FATAL(...)   msg(0, __FILE__, __LINE__, 0, __VA_ARGS__);
#define WARNING(...) msg(1, __FILE__, __LINE__, 0, __VA_ARGS__);
#define INFO(...)    msg(2, __FILE__, __LINE__, 0, __VA_ARGS__);
#define DEBUG(...)   msg(3, __FILE__, __LINE__, 0, __VA_ARGS__);

/** Types **/

struct bind {
   char *src;
   char *dst;
};
enum bind_dep {
   BD_REQUIRED,  // both source and destination must exist
   BD_OPTIONAL   // if either source or destination missing, do nothing
};
struct container {
   struct bind *binds;
   bool ch_ssh;          // bind /usr/bin/ch-ssh?
   gid_t container_gid;
   uid_t container_uid;
   char *newroot;
   bool join;            // is this a synchronized join?
   int join_ct;          // number of peers in a synchronized join
   pid_t join_pid;       // process in existing namespace to join
   char *join_tag;       // identifier for synchronized join
   bool private_home;
   bool private_tmp;
   char *old_home;       // host path to user's home directory (i.e. $HOME)
   bool writable;
};

/** External variables from charliecloud.c **/

extern int verbose;

/** Function prototypes from charliecloud.c **/

void containerize(struct container *c);
void msg(int level, char *file, int line, int errno_, char *fmt, ...);
void run_user_command(char *argv[], char *initial_dir);
void split(char **a, char **b, char *str, char del);
void version(void);
charliecloud-0.9.10/doc-src/000077500000000000000000000000001346662313000155755ustar00rootroot00000000000000charliecloud-0.9.10/doc-src/Makefile000066400000000000000000000156211346662313000172420ustar00rootroot00000000000000# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
PAPER         =
BUILDDIR      = _build

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
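# Example usage (illustrative): extra sphinx-build options can be passed on
# the command line via SPHINXOPTS, e.g. "make html SPHINXOPTS=-q" for
# quieter output; ALLSPHINXOPTS above picks them up automatically.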
.PHONY: help clean html web dirhtml singlehtml pickle json htmlhelp qthelp \
        devhelp epub latex latexpdf text man changes linkcheck doctest gettext

all: html man

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  text       to make text files"
	@echo "  man        to make manual pages"
	@echo "  texinfo    to make Texinfo files"
	@echo "  info       to make Texinfo files and run them through makeinfo"
	@echo "  gettext    to make PO message catalogs"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"

clean:
	rm -rf $(BUILDDIR)/*
	rm -rf ../doc/* ../doc/.buildinfo ../doc/.nojekyll
	rm -rf ../man/*.1

# This target works around a race condition in Sphinx that's triggered when
# two instances (e.g., for html and man targets) try to mkdir(2)
# _build/doctrees simultaneously. It is temporary and can be removed when
# Sphinx >= 1.6.6 is an appropriate dependency. See issue #115.
mkdir_issue115:
	mkdir -p $(BUILDDIR)/doctrees

html: mkdir_issue115
	$(SPHINXBUILD) -W -b html -D release="$$(cat ../VERSION)" -D version="$$(git rev-parse --short HEAD)" $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
	@echo
	rsync -a --delete --exclude-from RSYNC_EXCLUDE $(BUILDDIR)/html/ ../doc
	touch ../doc/.nojekyll
	@echo
	@echo "HTML pages copied to doc/."

# Upload HTML documentation to the web via GitHub's Pages service [1].
# Prevent uploading if there are any uncommitted changes [2], to avoid
# placing docs for work-in-progress in public.
#
# [1]: https://help.github.com/categories/github-pages-basics
# [2]: http://stackoverflow.com/a/2659808
web: html
	@echo
	@echo Making sure there are no uncommitted changes
	git diff-index --quiet --cached HEAD
	git diff-files --quiet
	@echo Can we talk to GitHub?
	cd ../doc && git ls-remote > /dev/null
	@echo Publishing new docs
	cd ../doc && git add --all
	cd ../doc && git commit -a -m "docs for commit $$(cd .. && git rev-parse --short HEAD)"
	cd ../doc && git push origin gh-pages

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."
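# A possible convenience target (sketch, not part of the original Makefile):
# preview the built HTML locally before publishing with "make web".
# Requires Python 3.7+ for --directory.
#
#serve: html
#	python3 -m http.server --directory $(BUILDDIR)/html 8000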
qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/QUAC.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/QUAC.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/QUAC" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/QUAC" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: mkdir_issue115 $(SPHINXBUILD) -b man -D release="$$(cat ../VERSION)" -D version="$$(git rev-parse --short HEAD)" $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." @echo cp $(BUILDDIR)/man/*.1 ../man @echo "Man pages copied to man/." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." charliecloud-0.9.10/doc-src/RSYNC_EXCLUDE000066400000000000000000000000601346662313000175630ustar00rootroot00000000000000.git .buildinfo _sources objects.inv ch-*.html charliecloud-0.9.10/doc-src/bugs.rst000066400000000000000000000003251346662313000172670ustar00rootroot00000000000000Reporting bugs ============== If Charliecloud was obtained from your Linux distribution, use your distribution's bug reporting procedures. Otherwise, report bugs to: charliecloud-0.9.10/doc-src/ch-build.rst000066400000000000000000000002431346662313000200150ustar00rootroot00000000000000:orphan: ch-build man page +++++++++++++++++ .. include:: ./ch-build_desc.rst .. include:: ./bugs.rst .. include:: ./see_also.rst .. 
include:: ./docker_tips.rst charliecloud-0.9.10/doc-src/ch-build2dir.rst000066400000000000000000000002571346662313000206030ustar00rootroot00000000000000:orphan: ch-build2dir man page +++++++++++++++++++++ .. include:: ./ch-build2dir_desc.rst .. include:: ./bugs.rst .. include:: ./see_also.rst .. include:: ./docker_tips.rst charliecloud-0.9.10/doc-src/ch-build2dir_desc.rst000066400000000000000000000017351346662313000216030ustar00rootroot00000000000000Synopsis ======== :: $ ch-build2dir -t TAG [ARGS ...] CONTEXT OUTDIR Description =========== Build a Docker image named :code:`TAG` described by a Dockerfile (default :code:`./Dockerfile`) and unpack it into :code:`OUTDIR/TAG`. This is a wrapper for :code:`ch-build`, :code:`ch-docker2tar`, and :code:`ch-tar2dir`; see also those man pages. Arguments: :code:`ARGS` additional arguments passed to :code:`ch-build` :code:`CONTEXT` Docker context directory :code:`OUTDIR` directory in which to place image directory (named :code:`TAG`) and temporary tarball :code:`-t TAG` name (tag) of Docker image to build :code:`--help` print help and exit :code:`--version` print version and exit Examples ======== To build using :code:`./Dockerfile` and create image directory :code:`/var/tmp/foo`:: $ ch-build2dir -t foo . /var/tmp Same as above, but build with a different Dockerfile:: $ ch-build2dir -t foo -f ./Dockerfile.foo . /var/tmp charliecloud-0.9.10/doc-src/ch-build_desc.rst000066400000000000000000000034311346662313000210150ustar00rootroot00000000000000Synopsis ======== :: $ ch-build -t TAG [ARGS ...] CONTEXT Description =========== Build a Docker image named :code:`TAG` described by Dockerfile :code:`./Dockerfile` or as specified. This is a wrapper for :code:`docker build` with various enhancements. Sudo privileges are required to run the :code:`docker` command. Arguments: :code:`--file` Dockerfile to use (default: :code:`./Dockerfile`) :code:`-t` name (tag) of Docker image to build :code:`--help` print help and exit :code:`--version` print version and exit Additional arguments are accepted and passed unchanged to :code:`docker build`. Improvements over plain :code:`docker build` ============================================ :code:`ch-build` adds the following features to :code:`docker build`: * If there is a file :code:`Dockerfile` in the current working directory and :code:`-f` is not already specified, add :code:`-f $PWD/Dockerfile`. * Pass the HTTP proxy environment variables through with :code:`--build-arg`. .. note:: The suffix :code:`:latest` is somewhat misleading, as neither :code:`ch-build` nor bare :code:`docker build` will notice if the base :code:`FROM` image has been updated. Use :code:`--no-cache` to make sure you have the latest base image, at the cost of rebuilding every layer. Examples ======== Create a Docker image tagged :code:`foo` and specified by the file :code:`Dockerfile` located in the current working directory. Use :code:`/bar` as the Docker context directory:: $ ch-build -t foo /bar Equivalent to above:: $ ch-build -t foo --file=./Dockerfile /bar Instead, use the Dockerfile :code:`/baz/qux.docker`:: $ ch-build -t foo --file=/baz/qux.docker /bar Note that calling your Dockerfile anything other than :code:`Dockerfile` will confuse people. charliecloud-0.9.10/doc-src/ch-docker2tar.rst000066400000000000000000000002621346662313000207570ustar00rootroot00000000000000:orphan: ch-docker2tar man page ++++++++++++++++++++++ .. include:: ./ch-docker2tar_desc.rst .. include:: ./bugs.rst .. include:: ./see_also.rst .. 
include:: ./docker_tips.rst charliecloud-0.9.10/doc-src/ch-docker2tar_desc.rst000066400000000000000000000012561346662313000217610ustar00rootroot00000000000000Synopsis ======== :: $ ch-docker2tar IMAGE OUTDIR Description =========== Flattens the Docker image tagged :code:`IMAGE` into a Charliecloud tarball in directory :code:`OUTDIR`. The Docker environment (e.g., :code:`ENV` statements) is placed in a file in the tarball at :code:`./environment`, in a form suitable for :code:`ch-run --set-env`. Sudo privileges are required to run :code:`docker export`. Additional arguments: :code:`--help` print help and exit :code:`--version` print version and exit Example ======= :: $ ch-docker2tar hello /var/tmp 57M /var/tmp/hello.tar.gz $ ls -lh /var/tmp -rw-r----- 1 reidpr reidpr 57M Feb 13 16:14 hello.tar.gz charliecloud-0.9.10/doc-src/ch-fromhost.rst000066400000000000000000000002151346662313000205560ustar00rootroot00000000000000:orphan: ch-fromhost man page ++++++++++++++++++++ .. include:: ./ch-fromhost_desc.rst .. include:: ./bugs.rst .. include:: ./see_also.rst charliecloud-0.9.10/doc-src/ch-fromhost_desc.rst000066400000000000000000000156171346662313000215700ustar00rootroot00000000000000Synopsis ======== :: $ ch-fromhost [OPTION ...] [FILE_OPTION ...] IMGDIR Description =========== .. note:: This command is experimental. Features may be incomplete and/or buggy. Please report any issues you find, so we can fix them! Inject files from the host into the Charliecloud image directory :code:`IMGDIR`. The purpose of this command is to provide host-specific files, such as GPU libraries, to a container. It should be run after :code:`ch-tar2dir` and before :code:`ch-run`. After invocation, the image is no longer portable to other hosts. Injection is not atomic; if an error occurs partway through injection, the image is left in an undefined state. Injection is currently implemented using a simple file copy, but that may change in the future. By default, file paths that contain the strings :code:`/bin` or :code:`/sbin` are assumed to be executables and placed in :code:`/usr/bin` within the container. File paths that contain the strings :code:`/lib` or :code:`.so` are assumed to be shared libraries and are placed in the first-priority directory reported by :code:`ldconfig` (see :code:`--lib-path` below). Other files are placed in the directory specified by :code:`--dest`. If any shared libraries are injected, run :code:`ldconfig` inside the container (using :code:`ch-run -w`) after injection. Options ======= To specify which files to inject -------------------------------- :code:`-c`, :code:`--cmd CMD` Inject files listed in the standard output of command :code:`CMD`. :code:`-f`, :code:`--file FILE` Inject files listed in the file :code:`FILE`. :code:`-p`, :code:`--path PATH` Inject the file at :code:`PATH`. :code:`--cray-mpi` Cray-enable an MPICH installed inside the image. See important details below. :code:`--nvidia` Use :code:`nvidia-container-cli list` (from :code:`libnvidia-container`) to find executables and libraries to inject. These can be repeated, and at least one must be specified. To specify the destination within the image ------------------------------------------- :code:`-d`, :code:`--dest DST` Place files specified later in directory :code:`IMGDIR/DST`, overriding the inferred destination, if any. If a file's destination cannot be inferred and :code:`--dest` has not been specified, exit with an error. This can be repeated to place files in varying destinations. 
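For example (a hypothetical invocation, assuming an image at
:code:`/var/tmp/img`), the library below lands in the inferred library
directory, while the configuration file following :code:`--dest` goes to
:code:`/etc`::

  $ ch-fromhost --path /usr/lib64/libfoo.so \
                --dest /etc --path /etc/foo.conf /var/tmp/img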
Additional arguments
--------------------

:code:`--lib-path`
  Print the guest destination path for shared libraries inferred as
  described above.

:code:`--no-ldconfig`
  Don't run :code:`ldconfig` even if we appear to have injected shared
  libraries.

:code:`-h`, :code:`--help`
  Print help and exit.

:code:`-v`, :code:`--verbose`
  List the injected files.

:code:`--version`
  Print version and exit.

:code:`--cray-mpi` dependencies and quirks
==========================================

The implementation of :code:`--cray-mpi` for MPICH is messy, foul smelling,
and brittle. It replaces or overrides the open source MPICH libraries
installed in the container. Users should be aware of the following.

1. Containers must have the following software installed:

   a. Open source `MPICH <https://www.mpich.org/>`_.

   b. `PatchELF with our patches <https://github.com/hpc/patchelf>`_. Use
      the :code:`shrink-soname` branch.

   c. :code:`libgfortran.so.3`, because Cray's :code:`libmpi.so.12` links
      to it.

2. Applications must be linked to :code:`libmpi.so.12` (not e.g.
   :code:`libmpich.so.12`). How to configure MPICH to accomplish this is not
   yet clear to us; :code:`test/Dockerfile.mpich` does it, while the Debian
   packages do not.

3. One of the :code:`cray-mpich-abi` modules must be loaded when
   :code:`ch-fromhost` is invoked.

4. Tested only for C programs compiled with GCC; it probably won't work
   otherwise. If you'd like to use another compiler or another programming
   language, please get in touch so we can implement the necessary support.

Please file a bug if we missed anything above or if you know how to make the
code better.

Notes
=====

Symbolic links are dereferenced, i.e., the files pointed to are injected,
not the links themselves.

As a corollary, do not include symlinks to shared libraries. These will be
re-created by :code:`ldconfig`.

There are two alternate approaches for nVidia GPU libraries:

1. Link :code:`libnvidia-containers` into :code:`ch-run` and call the
   library functions directly. However, this would mean that Charliecloud
   would either (a) need to be compiled differently on machines with and
   without nVidia GPUs or (b) have :code:`libnvidia-containers` available
   even on machines without nVidia GPUs. Neither of these is consistent
   with Charliecloud's philosophies of simplicity and minimal dependencies.

2. Use :code:`nvidia-container-cli configure` to do the injecting. This
   would require that containers have a half-started state, where the
   namespaces are active and everything is mounted but
   :code:`pivot_root(2)` has not been performed. This is not feasible
   because Charliecloud has no notion of a half-started container.

Further, while these alternate approaches would simplify or eliminate this
script for nVidia GPUs, they would not solve the problem for other
situations.

Bugs
====

File paths may not contain colons or newlines.

Examples
========

Place the shared library :code:`/usr/lib64/libfoo.so` at path
:code:`/usr/lib/libfoo.so` (assuming :code:`/usr/lib` is the first directory
searched by the dynamic loader in the image) and the executable
:code:`/bin/bar` at path :code:`/usr/bin/bar`, both within the image
:code:`/var/tmp/baz`. Then, create appropriate symlinks to :code:`libfoo`
and update the :code:`ld.so` cache.
:: $ cat qux.txt /bin/bar /usr/lib64/libfoo.so $ ch-fromhost --file qux.txt /var/tmp/baz Same as above:: $ ch-fromhost --cmd 'cat qux.txt' /var/tmp/baz Same as above:: $ ch-fromhost --path /bin/bar --path /usr/lib64/libfoo.so /var/tmp/baz Same as above, but place the files into :code:`/corge` instead (and the shared library will not be found by :code:`ldconfig`):: $ ch-fromhost --dest /corge --file qux.txt /var/tmp/baz Same as above, and also place file :code:`/etc/quux` at :code:`/etc/quux` within the container:: $ ch-fromhost --file qux.txt --dest /etc --path /etc/quux /var/tmp/baz Inject the executables and libraries recommended by nVidia into the image, and then run :code:`ldconfig`:: $ ch-fromhost --nvidia /var/tmp/baz Acknowledgements ================ This command was inspired by the similar `Shifter `_ feature that allows Shifter containers to use the Cray Aires network. We particularly appreciate the help provided by Shane Canon and Doug Jacobsen during our implementation of :code:`--cray-mpi`. We appreciate the advice of Ryan Olson at nVidia on implementing :code:`--nvidia`. .. LocalWords: libmpi libmpich nvidia charliecloud-0.9.10/doc-src/ch-pull2dir.rst000066400000000000000000000002541346662313000204550ustar00rootroot00000000000000:orphan: ch-pull2dir man page ++++++++++++++++++++ .. include:: ./ch-pull2dir_desc.rst .. include:: ./bugs.rst .. include:: ./see_also.rst .. include:: ./docker_tips.rst charliecloud-0.9.10/doc-src/ch-pull2dir_desc.rst000066400000000000000000000026411346662313000214550ustar00rootroot00000000000000Synopsis ======== :: $ ch-pull2dir IMAGE[:TAG] DIR Description =========== Pull Docker image named :code:`IMAGE[:TAG]` from Docker Hub and extract it into a subdirectory of :code:`DIR`. A temporary tarball is stored in :code:`DIR`. Sudo privileges are required to run the :code:`docker pull` command. This runs the following command sequence: :code:`ch-pull2tar`, :code:`ch-tar2dir`. See warning in the documentation for :code:`ch-tar2dir`. Additional arguments: :code:`--help` print help and exit :code:`--version` print version and exit Examples ======== :: $ ch-pull2dir alpine /var/tmp Using default tag: latest latest: Pulling from library/alpine Digest: sha256:621c2f39f8133acb8e64023a94dbdf0d5ca81896102b9e57c0dc184cadaf5528 Status: Image is up to date for alpine:latest -rw-r--r--. 1 charlie charlie 2.1M Oct 5 19:52 /var/tmp/alpine.tar.gz creating new image /var/tmp/alpine /var/tmp/alpine unpacked ok removed '/var/tmp/alpine.tar.gz' Same as above, except optional :code:`TAG` is specified: :: $ ch-pull2dir alpine:3.6 /var/tmp 3.6: Pulling from library/alpine Digest: sha256:cc24af836d1377e092ecb4e8f0a4324c3b1aa2b5295c2239edcc7bbc86a9cbc6 Status: Image is up to date for alpine:3.6 -rw-r--r--. 1 charlie charlie 2.1M Oct 5 19:54 /var/tmp/alpine:3.6.tar.gz creating new image /var/tmp/alpine:3.6 /var/tmp/alpine:3.6 unpacked ok removed '/var/tmp/alpine:3.6.tar.gz' charliecloud-0.9.10/doc-src/ch-pull2tar.rst000066400000000000000000000002541346662313000204650ustar00rootroot00000000000000:orphan: ch-pull2tar man page ++++++++++++++++++++ .. include:: ./ch-pull2tar_desc.rst .. include:: ./bugs.rst .. include:: ./see_also.rst .. 
include:: ./docker_tips.rst charliecloud-0.9.10/doc-src/ch-pull2tar_desc.rst000066400000000000000000000022421346662313000214620ustar00rootroot00000000000000Synopsis ======== :: $ ch-pull2tar IMAGE[:TAG] OUTDIR Description =========== Pull a Docker image named :code:`IMAGE[:TAG]` from Docker Hub and flatten it into a Charliecloud tarball in directory :code:`OUTDIR`. This runs the following command sequence: :code:`docker pull`, :code:`ch-docker2tar` but provides less flexibility than the individual commands. Sudo privileges are required for :code:`docker pull`. Additional arguments: :code:`--help` print help and exit :code:`--version` print version and exit Examples ======== :: $ ch-pull2tar alpine /var/tmp Using default tag: latest latest: Pulling from library/alpine Digest: sha256:621c2f39f8133acb8e64023a94dbdf0d5ca81896102b9e57c0dc184cadaf5528 Status: Image is up to date for alpine:latest -rw-r--r--. 1 charlie charlie 2.1M Oct 5 19:52 /var/tmp/alpine.tar.gz Same as above, except optional :code:`TAG` is specified: :: $ ch-pull2tar alpine:3.6 3.6: Pulling from library/alpine Digest: sha256:cc24af836d1377e092ecb4e8f0a4324c3b1aa2b5295c2239edcc7bbc86a9cbc6 Status: Image is up to date for alpine:3.6 -rw-r--r--. 1 charlie charlie 2.1M Oct 5 19:54 /var/tmp/alpine:3.6.tar.gz charliecloud-0.9.10/doc-src/ch-run.rst000066400000000000000000000001761346662313000175270ustar00rootroot00000000000000:orphan: ch-run man page +++++++++++++++ .. include:: ./ch-run_desc.rst .. include:: ./bugs.rst .. include:: ./see_also.rst charliecloud-0.9.10/doc-src/ch-run_desc.rst000066400000000000000000000315331346662313000205260ustar00rootroot00000000000000Synopsis ======== :: $ ch-run [OPTION...] NEWROOT CMD [ARG...] Description =========== Run command :code:`CMD` in a Charliecloud container using the flattened and unpacked image directory located at :code:`NEWROOT`. :code:`-b`, :code:`--bind=SRC[:DST]` mount :code:`SRC` at guest :code:`DST` (default :code:`/mnt/0`, :code:`/mnt/1`, etc.) 
:code:`-c`, :code:`--cd=DIR` initial working directory in container :code:`--ch-ssh` bind :code:`ch-ssh(1)` into container at :code:`/usr/bin/ch-ssh` :code:`-g`, :code:`--gid=GID` run as group :code:`GID` within container :code:`-j`, :code:`--join` use the same container (namespaces) as peer :code:`ch-run` invocations :code:`--join-pid=PID` join the namespaces of an existing process :code:`--join-ct=N` number of :code:`ch-run` peers (implies :code:`--join`; default: see below) :code:`--join-tag=TAG` label for :code:`ch-run` peer group (implies :code:`--join`; default: see below) :code:`--no-home` do not bind-mount your home directory (by default, your home directory is mounted at :code:`/home/$USER` in the container) :code:`-t`, :code:`--private-tmp` use container-private :code:`/tmp` (by default, :code:`/tmp` is shared with the host) :code:`--set-env=FILE` set environment variables as specified in host path :code:`FILE` :code:`-u`, :code:`--uid=UID` run as user :code:`UID` within container :code:`--unset-env=GLOB` unset environment variables whose names match :code:`GLOB` :code:`-v`, :code:`--verbose` be more verbose (debug if repeated) :code:`-w`, :code:`--write` mount image read-write (by default, the image is mounted read-only) :code:`-?`, :code:`--help` print help and exit :code:`--usage` print a short usage message and exit :code:`-V`, :code:`--version` print version and exit Host files and directories available in container via bind mounts ================================================================= In addition to any directories specified by the user with :code:`--bind`, :code:`ch-run` has standard host files and directories that are bind-mounted in as well. The following host files and directories are bind-mounted at the same location in the container. These cannot be disabled. * :code:`/dev` * :code:`/etc/passwd` * :code:`/etc/group` * :code:`/etc/hosts` * :code:`/etc/resolv.conf` * :code:`/proc` * :code:`/sys` Three additional bind mounts can be disabled by the user: * Your home directory (i.e., :code:`$HOME`) is mounted at guest :code:`/home/$USER` by default. This is accomplished by mounting a new :code:`tmpfs` at :code:`/home`, which hides any image content under that path. If :code:`--no-home` is specified, neither of these things happens and the image's :code:`/home` is exposed unaltered. * :code:`/tmp` is shared with the host by default. If :code:`--private-tmp` is specified, a new :code:`tmpfs` is mounted on the guest's :code:`/tmp` instead. * If file :code:`/usr/bin/ch-ssh` is present in the image, it is over-mounted with the :code:`ch-ssh` binary in the same directory as :code:`ch-run`. Multiple processes in the same container with :code:`--join` ============================================================= By default, different :code:`ch-run` invocations use different user and mount namespaces (i.e., different containers). While this has no impact on sharing most resources between invocations, there are a few important exceptions. These include: 1. :code:`ptrace(2)`, used by debuggers and related tools. One can attach a debugger to processes in descendant namespaces, but not sibling namespaces. The practical effect of this is that (without :code:`--join`), you can't run a command with :code:`ch-run` and then attach to it with a debugger also run with :code:`ch-run`. 2. *Cross-memory attach* (CMA) is used by cooperating processes to communicate by simply reading and writing one another's memory. This is also not permitted between sibling namespaces. 
This affects various MPI implementations that use CMA to pass messages between ranks on the same node, because it’s faster than traditional shared memory. :code:`--join` is designed to address this by placing related :code:`ch-run` commands (the “peer group”) in the same container. This is done by one of the peers creating the namespaces with :code:`unshare(2)` and the others joining with :code:`setns(2)`. To do so, we need to know the number of peers and a name for the group. These are specified by additional arguments that can (hopefully) be left at default values in most cases: * :code:`--join-ct` sets the number of peers. The default is the value of the first of the following environment variables that is defined: :code:`OMPI_COMM_WORLD_LOCAL_SIZE`, :code:`SLURM_STEP_TASKS_PER_NODE`, :code:`SLURM_CPUS_ON_NODE`. * :code:`--join-tag` sets the tag that names the peer group. The default is environment variable :code:`SLURM_STEP_ID`, if defined; otherwise, the PID of :code:`ch-run`'s parent. Tags can be re-used for peer groups that start at different times, i.e., once all peer :code:`ch-run` have replaced themselves with the user command, the tag can be re-used. Caveats: * One cannot currently add peers after the fact, for example, if one decides to start a debugger after the fact. (This is only required for code with bugs and is thus an unusual use case.) * :code:`ch-run` instances race. The winner of this race sets up the namespaces, and the other peers use the winner to find the namespaces to join. Therefore, if the user command of the winner exits, any remaining peers will not be able to join the namespaces, even if they are still active. There is currently no general way to specify which :code:`ch-run` should be the winner. * If :code:`--join-ct` is too high, the winning :code:`ch-run`'s user command exits before all peers join, or :code:`ch-run` itself crashes, IPC resources such as semaphores and shared memory segments will be leaked. These appear as files in :code:`/dev/shm/` and can be removed with :code:`rm(1)`. * Many of the arguments given to the race losers, such as the image path and :code:`--bind`, will be ignored in favor of what was given to the winner. Environment variables ===================== :code:`ch-run` leaves environment variables unchanged, i.e. the host environment is passed through unaltered, except: * limited tweaks to avoid significant guest breakage; * user-set variables via :code:`--set-env`; and * user-unset variables via :code:`--unset-env`. This section describes these features. The default tweaks happen first, and then :code:`--set-env` and :code:`--unset-env` in the order specified on the command line. The latter two can be repeated arbitrarily many times, e.g. to add/remove multiple variable sets or add only some variables in a file. Default behavior ---------------- By default, :code:`ch-run` makes the following environment variable changes: * :code:`$HOME`: If the path to your home directory is not :code:`/home/$USER` on the host, then an inherited :code:`$HOME` will be incorrect inside the guest. This confuses some software, such as Spack. Thus, we change :code:`$HOME` to :code:`/home/$USER`, unless :code:`--no-home` is specified, in which case it is left unchanged. * :code:`$PATH`: Newer Linux distributions replace some root-level directories, such as :code:`/bin`, with symlinks to their counterparts in :code:`/usr`. Some of these distributions (e.g., Fedora 24) have also dropped :code:`/bin` from the default :code:`$PATH`. 
This is a problem when the guest OS does *not* have a merged :code:`/usr` (e.g., Debian 8 “Jessie”). Thus, we add :code:`/bin` to :code:`$PATH` if it's not already present. Further reading: * `The case for the /usr Merge `_ * `Fedora `_ * `Debian `_ Setting variables with :code:`--set-env` ---------------------------------------- The purpose of :code:`--set-env=FILE` is to set environment variables that cannot be inherited from the host shell, e.g. Dockerfile :code:`ENV` directives or other build-time configuration. :code:`FILE` is a host path to provide the greatest flexibility; guest paths can be specified by prepending the image path. :code:`ch-docker2tar(1)` lists variables specified at build time in Dockerfiles in the image in file :code:`/environment`. To set these variables: :code:`--set-env=$IMG/environment`. Variable values in :code:`FILE` replace any already set. If a variable is repeated, the last value wins. The syntax of :code:`FILE` is key-value pairs separated by the first equals character (:code:`=`, ASCII 61), one per line, with optional single straight quotes (:code:`'`, ASCII 39) around the value. Empty lines are ignored. Newlines (ASCII 10) are not permitted in either key or value. No variable expansion, comments, etc. are provided. The value may be empty, but not the key. (This syntax is designed to accept the output of :code:`printenv` and be easily produced by other simple mechanisms.) Examples of valid lines: .. list-table:: :header-rows: 1 * - Line - Key - Value * - :code:`FOO=bar` - :code:`FOO` - :code:`bar` * - :code:`FOO=bar=baz` - :code:`FOO` - :code:`bar=baz` * - :code:`FLAGS=-march=foo -mtune=bar` - :code:`FLAGS` - :code:`-march=foo -mtune=bar` * - :code:`FLAGS='-march=foo -mtune=bar'` - :code:`FLAGS` - :code:`-march=foo -mtune=bar` * - :code:`FOO=` - :code:`FOO` - (empty string) * - :code:`FOO=''` - :code:`FOO` - (empty string) * - :code:`FOO=''''` - :code:`FOO` - :code:`''` (two single quotes) Example invalid lines: .. list-table:: :header-rows: 1 * - Line - Problem * - :code:`FOO bar` - no separator * - :code:`=bar` - key cannot be empty Example valid lines that are probably not what you want: .. Note: Plain leading space screws up ReST parser. We use ZERO WIDTH SPACE U+200B, then plain space. This will copy and paste incorrectly, but that seems unlikely. .. list-table:: :header-rows: 1 * - Line - Key - Value - Problem * - :code:`FOO="bar"` - :code:`FOO` - :code:`"bar"` - double quotes aren't stripped * - :code:`FOO=bar # baz` - :code:`FOO` - :code:`bar # baz` - comments not supported * - :code:`PATH=$PATH:/opt/bin` - :code:`PATH` - :code:`$PATH:/opt/bin` - variables not expanded * - :code:`​ FOO=bar` - :code:`​ FOO` - :code:`bar` - leading space in key * - :code:`FOO= bar` - :code:`FOO` - :code:`​ bar` - leading space in value Removing variables with :code:`--unset-env` ------------------------------------------- The purpose of :code:`--unset-env=GLOB` is to remove unwanted environment variables. The argument :code:`GLOB` is a glob pattern (`dialect `_ :code:`fnmatch(3)` with no flags); all variables with matching names are removed from the environment. .. warning:: Because the shell also interprets glob patterns, if any wildcard characters are in :code:`GLOB`, it is important to put it in single quotes to avoid surprises. :code:`GLOB` must be a non-empty string. 
Example 1: Remove the single environment variable :code:`FOO`:: $ export FOO=bar $ env | fgrep FOO FOO=bar $ ch-run --unset-env=FOO $CH_TEST_IMGDIR/chtest -- env | fgrep FOO $ Example 2: Hide from a container the fact that it's running in a Slurm allocation, by removing all variables beginning with :code:`SLURM`. You might want to do this to test an MPI program with one rank and no launcher:: $ salloc -N1 $ env | egrep '^SLURM' | wc 44 44 1092 $ ch-run $CH_TEST_IMGDIR/mpihello-openmpi -- /hello/hello [... long error message ...] $ ch-run --unset-env='SLURM*' $CH_TEST_IMGDIR/mpihello-openmpi -- /hello/hello 0: MPI version: Open MPI v3.1.3, package: Open MPI root@c897a83f6f92 Distribution, ident: 3.1.3, repo rev: v3.1.3, Oct 29, 2018 0: init ok cn001.localdomain, 1 ranks, userns 4026532530 0: send/receive ok 0: finalize ok Example 3: Clear the environment completely (remove all variables):: $ ch-run --unset-env='*' $CH_TEST_IMGDIR/chtest -- env $ Note that some programs, such as shells, set some environment variables even if started with no init files:: $ ch-run --unset-env='*' $CH_TEST_IMGDIR/debian9 -- bash --noprofile --norc -c env SHLVL=1 PWD=/ _=/usr/bin/env $ Examples ======== Run the command :code:`echo hello` inside a Charliecloud container using the unpacked image at :code:`/data/foo`:: $ ch-run /data/foo -- echo hello hello Run an MPI job that can use CMA to communicate:: $ srun ch-run --join /data/foo -- bar .. LocalWords: mtune charliecloud-0.9.10/doc-src/ch-ssh.rst000066400000000000000000000001761346662313000175200ustar00rootroot00000000000000:orphan: ch-ssh man page +++++++++++++++ .. include:: ./ch-ssh_desc.rst .. include:: ./bugs.rst .. include:: ./see_also.rst charliecloud-0.9.10/doc-src/ch-ssh_desc.rst000066400000000000000000000013301346662313000205070ustar00rootroot00000000000000Synopsis ======== :: $ CH_RUN_ARGS="NEWROOT [ARG...]" $ ch-ssh [OPTION...] HOST CMD [ARG...] Description =========== Runs command :code:`CMD` in a Charliecloud container on remote host :code:`HOST`. Use the content of environment variable :code:`CH_RUN_ARGS` as the arguments to :code:`ch-run` on the remote host. .. note:: Words in :code:`CH_RUN_ARGS` are delimited by spaces only; it is not shell syntax. Example ======= On host bar.example.com, run the command :code:`echo hello` inside a Charliecloud container using the unpacked image at :code:`/data/foo` with starting directory :code:`/baz`:: $ hostname foo $ export CH_RUN_ARGS='--cd /baz /data/foo' $ ch-ssh bar.example.com -- hostname bar charliecloud-0.9.10/doc-src/ch-tar2dir.rst000066400000000000000000000002121346662313000202610ustar00rootroot00000000000000:orphan: ch-tar2dir man page +++++++++++++++++++ .. include:: ./ch-tar2dir_desc.rst .. include:: ./bugs.rst .. include:: ./see_also.rst charliecloud-0.9.10/doc-src/ch-tar2dir_desc.rst000066400000000000000000000027571346662313000212770ustar00rootroot00000000000000Synopsis ======== :: $ ch-tar2dir TARBALL DIR Description =========== Extract the tarball :code:`TARBALL` into a subdirectory of :code:`DIR`. :code:`TARBALL` must contain a Linux filesystem image, e.g. as created by :code:`ch-docker2tar`, and be compressed with :code:`gzip` or :code:`xz`. If :code:`TARBALL` has no extension, try appending :code:`.tar.gz` and :code:`.tar.xz`. Inside :code:`DIR`, a subdirectory will be created whose name corresponds to the name of the tarball with :code:`.tar.gz` or other suffix removed. If such a directory exists already and appears to be a Charliecloud container image, it is removed and replaced. 
If the existing directory doesn't appear to be a container image, the script aborts with an error. Additional arguments: :code:`--help` print help and exit :code:`--version` print version and exit .. warning:: Placing :code:`DIR` on a shared file system can cause significant metadata load on the file system servers. This can result in poor performance for you and all your colleagues who use the same file system. Please consult your site admin for a suitable location. Example ======= :: $ ls -lh /var/tmp total 57M -rw-r----- 1 reidpr reidpr 57M Feb 13 16:14 hello.tar.gz $ ch-tar2dir /var/tmp/hello.tar.gz /var/tmp creating new image /var/tmp/hello /var/tmp/hello unpacked ok $ ls -lh /var/tmp total 57M drwxr-x--- 22 reidpr reidpr 4.0K Feb 13 16:29 hello -rw-r----- 1 reidpr reidpr 57M Feb 13 16:14 hello.tar.gz charliecloud-0.9.10/doc-src/charliecloud.rst000066400000000000000000000006151346662313000207670ustar00rootroot00000000000000:orphan: charliecloud man page +++++++++++++++++++++ .. include:: ../README.rst .. include:: ./bugs.rst See also -------- ch-build(1), ch-build2dir(1), ch-docker2tar(1), ch-pull2dir(1), ch-pull2tar(1), ch-run(1), ch-ssh(1), ch-tar2dir(1) Full documentation at: Note ---- These man pages are for Charliecloud version |release| (Git commit |version|). charliecloud-0.9.10/doc-src/command-usage.rst000066400000000000000000000033011346662313000210440ustar00rootroot00000000000000Charliecloud command reference ****************************** This section is a comprehensive description of the usage and arguments of the Charliecloud commands. Its content is identical to the commands' man pages. .. contents:: :depth: 1 :local: .. WARNING: The one-line summaries below are duplicated in list man_pages in conf.py. Any updates need to be made there also. .. Note the unusual heading level. This is so the man page .rst files can still use double underscores as their top-level headers, which in turn lets us do things like include docker_tips.rst. You will also find this in the man page .rst files. ch-build ++++++++ Wrapper for :code:`docker build` that works around some of its annoying behaviors. .. include:: ./ch-build_desc.rst ch-build2dir ++++++++++++ Build a Charliecloud image and unpack it into a directory in one command. .. include:: ./ch-build2dir_desc.rst ch-docker2tar +++++++++++++ Flatten a Docker image into a Charliecloud image tarball. .. include:: ./ch-docker2tar_desc.rst ch-fromhost +++++++++++ Inject files from the host into an image directory. .. include:: ./ch-fromhost_desc.rst .. _man_ch-run: ch-pull2dir +++++++++++ Download image via :code:`docker pull` and unpack it into directory. .. include:: ./ch-pull2dir_desc.rst ch-pull2tar +++++++++++ Download image via :code:`docker pull` and flatten it to tarball. .. include:: ./ch-pull2tar_desc.rst ch-run ++++++ Run a command in a Charliecloud container. .. include:: ./ch-run_desc.rst ch-ssh ++++++ Run a remote command in a Charliecloud container. .. include:: ./ch-ssh_desc.rst ch-tar2dir ++++++++++ Unpack an image tarball into a directory. .. include:: ./ch-tar2dir_desc.rst charliecloud-0.9.10/doc-src/conf.py000066400000000000000000000227731346662313000171070ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # QUAC documentation build configuration file, created by # sphinx-quickstart on Wed Feb 20 12:04:35 2013. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. 
# # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.4.9' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.mathjax', 'sphinx.ext.viewcode'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Charliecloud' copyright = u'2014–2018, Los Alamos National Security, LLC' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. #version = '0.1' # The full version, including alpha/beta/rc tags. #release = '0.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. today_fmt = '%Y-%m-%d %H:%M %Z' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. #pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] highlight_language = 'console' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {'bodyfont': 'serif', # for agogo # 'pagewidth': '60em', # 'documentwidth': '43em', # 'sidebarwidth': '17em', # 'textalign':'left'} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. 
#html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = "logo-sidebar.png" # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = "favicon.ico" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. html_domain_indices = False # If false, no index is generated. html_use_index = False # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. html_show_sphinx = False # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'charliedoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'charlie.tex', u'Charliecloud Documentation', u'Reid Priedhorsky, Tim Randles, and others', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). # # WARNING: Any updates also need to be made in command-usage.rst. 
# man_pages = [ ('charliecloud', 'charliecloud', u'Lightweight user-defined software stacks for high-performance computing', [u'Reid Priedhorsky, Tim Randles, and others'], 1), ('ch-build', 'ch-build', u'Wrapper for "docker build" that works around some of its annoying behaviors', [u'Reid Priedhorsky, Tim Randles, and others'], 1), ('ch-build2dir', 'ch-build2dir', u'Build a Charliecloud image from Dockerfile and unpack it', [u'Reid Priedhorsky, Tim Randles, and others'], 1), ('ch-docker2tar', 'ch-docker2tar', u'Flatten a Docker image into a Charliecloud image tarball', [u'Reid Priedhorsky, Tim Randles, and others'], 1), ('ch-fromhost', 'ch-fromhost', u'Inject files from the host into an image directory', [u'Reid Priedhorsky, Tim Randles, and others'], 1), ('ch-pull2dir', 'ch-pull2dir', u'Pull image from Docker Hub, flatten and unpack it', [u'Reid Priedhorsky, Tim Randles, and others'], 1), ('ch-pull2tar', 'ch-pull2tar', u'Pull image from Docker Hub and flatten into tarball', [u'Reid Priedhorsky, Tim Randles, and others'], 1), ('ch-run', 'ch-run', u'Run a command in a Charliecloud container', [u'Reid Priedhorsky, Tim Randles, and others'], 1), ('ch-ssh', 'ch-ssh', u'Run a remote command in a Charliecloud container', [u'Reid Priedhorsky, Tim Randles, and others'], 1), ('ch-tar2dir', 'ch-tar2dir', u'Unpack an image tarball into a directory', [u'Reid Priedhorsky, Tim Randles, and others'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'Charliecloud', u'Charliecloud Documentation', u'Reid Priedhorsky, Tim Randles, and others', 'Charliecloud', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' charliecloud-0.9.10/doc-src/dev.rst000066400000000000000000000654121346662313000171150ustar00rootroot00000000000000Contributor's guide ******************* This section is notes on contributing to Charliecloud development. Currently, it is messy and incomplete. Patches welcome! It documents public stuff only. If you are on the core team at LANL, also consult the internal documentation and other resources. .. contents:: :depth: 2 :local: .. note:: We're interested in and will consider all good-faith contributions. While it does make things easier and faster if you follow the guidelines here, they are not required. We'll either clean it up for you or walk you through any necessary changes. Workflow ======== We try to keep procedures and the Git branching model simple. Right now, we're pretty similar to Scott Chacon's “`GitHub Flow `_”: Master is stable; work on short-lived topic branches; use pull requests to ask for merging; keep issues organized with tags and milestones. The standard workflow is: 1. Propose a change in an issue. 2. Tag the issue with its kind (bug, enhancement, question). 3. Get consensus on what to do and how to do it, with key information recorded in the issue. 4. Submit a PR that refers to the issue. 5. Assign the issue to a milestone. 6. Review/iterate. 7. Project lead merges. 
Core team members may deliberate in public on GitHub or internally, whichever they are comfortable with, making sure to follow LANL policy and taking into account the probable desires of the recipient as well.

Milestones
----------

We use milestones to organize what we plan to do next and what happened in a given release. There are two groups of milestones:

* :code:`next` contains the issues that we plan to complete soon but have not yet landed on a specific release. Generally, we avoid putting PRs in here because of their ticking clocks.

* Each release has a milestone. These are dated with the target date for that release. We put an issue in when it has actually landed in that release or we are willing to delay that release until it does. We put a PR in when we think it's reasonably likely to be merged for that release.

If an issue is assigned to a person, that means they are actively leading the work on it or will do so in the near future. Typically this happens when the issue ends up in :code:`next`. Issues in a status of "I'll get to this later" should not be assigned to a person.

Peer review
-----------

**Issues and pull requests.** The standard workflow is to introduce a change in an issue, get consensus on what to do, and then create a `pull request `_ (PR) for the implementation. The issue, not the PR, should be tagged and milestoned so a given change shows up only once in the various views. If consensus is obtained through other means (e.g., in-person discussion), then open a PR directly. In this case, the PR should be tagged and milestoned, since there is no issue. Trivial changes (e.g., fix Travis, fix a regression within a release, code formatting) can be done without an issue or PR.

**Address a single concern.** When possible, issues and PRs should completely address one self-contained change. If there are multiple concerns, make separate issues and/or PRs. For example, PRs should not tidy unrelated code, and non-essential complications should be split into a follow-on issue.

**Documentation and tests first.** The best practice for significant changes is to draft documentation and/or tests first, get feedback on that, and then implement the code. Reviews of the form "you need a completely different approach" are no fun.

**Tests must pass.** PRs will not be merged until they pass the tests. While this most saliently includes Travis, the tests should also pass on your development box as well as all relevant clusters (if appropriate for the changes).

**No close keywords in PRs.** While GitHub will interpret issue-closing keywords (variations on `"closes", "fixes", and "resolves" `_) in PR descriptions, don't use this feature, because often the specific issues a PR closes change over time, and we don't want to have to edit the description to deal with that. We also want this information in only one place (the commit log). Instead, use "addresses", and we'll edit the keywords into the commit message(s) at merge time if needed.

**PR review procedure.** When your PR is ready for review — which may or may not be when you want it considered for merging — do one or both of:

* Request review from the person(s) you want to look at it. If you think it may be ready for merge, that should include the project lead. The purpose of requesting review is so the person is notified you need their help.

* If you think it may be ready to merge (even if you're not sure), then also tag the PR :code:`ready to merge`.
  The purpose of this is so the project lead can see which PRs are ready to consider for merging. If the project lead decides it's ready, they will merge; otherwise, they'll untag.

In both cases, the person from whom you requested review now owns the branch, and you should stop work on it unless and until you get it back. Do not hesitate to pester your reviewer if you haven't heard back promptly.

*Special case 1:* Often, the review consists of code changes, and the reviewer will want you to assess those changes. GitHub doesn't let you request review from the PR submitter, so this must be done with a comment, either online or offline.

*Special case 2:* GitHub will not let you request review from external people, so this needs to be done with a comment too. Generally you should ask the original bug reporter to review, to make sure it solves their problem.

**Use multi-comment reviews.** Review comments should all be packaged up into a single review; click *Start a review* rather than *Add single comment*. Then the PR author gets only a single notification instead of one for every comment you make, and it's clear when the branch is theirs again.

Branching and merging
---------------------

**Don't commit directly to master.** Even the project lead doesn't do this. While it may appear that some trivial fixes are being committed to master directly, what's really happening is that these are prototyped on a branch and then fast-forward merged after the tests pass.

**Merging to master.** Only the project lead should do this.

**Branch merge procedure.** Generally, branches are merged in the GitHub web interface with the *Squash and merge* button, which is :code:`git merge --squash` under the hood. This squashes the branch into a single commit on master. Commit message example::

  PR #268 from @j-ogas: remove ch-docker-run (closes #258)

If the branch closes multiple issues and it's reasonable to separate those issues into independent commits, then the branch is rebased, interactively squashed, and force-pushed into a tidy history with close instructions, then merged in the web interface with *Create a merge commit*. Example history and commit messages::

  *   18aa2b8 merge PR #254 from @j-ogas and me: Dockerfile.openmpi: use snapshot
  |\
  | * 79fa89a upgrade to ibverbs 20.0-1 (closes #250)
  | * 385ce16 Dockerfile.debian9: use snapshot.debian.org (closes #249)
  |/
  * 322df2f ...

The reason to prefer merging via the web interface is that GitHub often doesn't notice merges done on the command line. After merge, the branch is deleted via the web interface.

**Branch history tidiness.** Commit frequently at semantically relevant times, and keep in mind that this history will probably be squashed per above. It is not necessary to rebase or squash to keep branch history tidy. But, don't go crazy. Commit messages like "try 2" and "fix Travis again" are a bad sign; so are carefully proofread ones. Commit messages that are brief, technically relevant, and quick to write are what you want on feature branches.

**Keep branches up to date.** Merge master into your branch, rather than rebasing. This lets you resolve conflicts once rather than multiple times as rebase works through a stack of commits. Note that PRs with merge conflicts will generally not be merged. Resolve conflicts before asking for merge.

**Remove obsolete branches.** Keep your repo free of old branches with :code:`git branch -d` (or :code:`-D`) and :code:`git fetch --prune --all`.
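For example, a minimal sketch of this update-and-cleanup routine (the branch names are illustrative)::

  $ git checkout my-topic-branch
  $ git merge master              # resolve any conflicts once, here
  $ git branch -d finished-topic  # refuses if not fully merged; -D forces
  $ git fetch --prune --all       # drop remote-tracking refs to deleted branches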
Miscellaneous issue and pull request notes
------------------------------------------

**Acknowledging issues.** Issues and PRs submitted from outside should be acknowledged promptly, including adding or correcting tags.

**Closing issues.** We close issues when we've taken the requested action, decided not to take action, resolved the question, or actively determined an issue is obsolete. It is OK for "stale" issues to sit around indefinitely awaiting this. Unlike many projects, we do not automatically close issues just because they're old.

**Closing PRs.** Stale PRs, on the other hand, are to be avoided due to bit rot. We try to either merge or reject PRs in a timely manner.

**Re-opening issues.** Closed issues can be re-opened if new information arises, for example a :code:`worksforme` issue with new reproduction steps.

Continuous integration testing
------------------------------

**Quality of testing.** Tagged versions currently get more testing for various reasons. We are working to improve testing for normal commits on master, but full parity is probably unlikely.

**Travis budget.** Because we're on the free tier, we only get 5 Travis jobs running at a time. Currently, each job takes about ten minutes, there are seven of them per tested commit, and PRs double this (once on the branch and once with a test merge commit). The resource is there for your use, so take advantage of it, but be mindful of the cost, since your fellow developers might be trying to get in too. Things you can do include testing locally first, cancelling jobs you know will fail or that won't give you additional information, and not pushing every commit (Travis tests only the most recent commit in a pushed group).

**Iterating with Travis.** When trying to make Travis happy, use a throwaway branch that you force-push or squash-merge. Don't submit a PR with half a dozen "fix Travis" commits.

**Purging Docker cache.** :code:`test/docker-clean.sh` can be used to purge your Docker cache, either by removing all tags or deleting all containers and images. The former is generally preferred, as it lets you update only those base images that have actually changed (the ones that haven't will be re-tagged).

GitHub tags
-----------

What kind of issue is it?
~~~~~~~~~~~~~~~~~~~~~~~~~

:code:`bug`
  Problem of some kind that needs to be fixed; i.e., something doesn't work. This includes usability and documentation problems. Should have steps to reproduce with expected and actual behavior.

:code:`enhancement`
  Things work, but it would be better if something was different. For example, a new feature proposal or refactoring. Should have steps to reproduce with desired and actual behavior.

:code:`help wanted`
  The core team does not plan to address this issue, perhaps because we don't know how, but we think it would be good to address it. We hope someone from the community will volunteer.

:code:`key issue`
  A particularly important or notable issue.

:code:`question`
  Support request that does not report a problem or ask for a change. Close these after the question is answered or after several days with no activity.

What do we plan to do about it?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

For all of these, leave other tags in place, e.g. :code:`bug`.

:code:`deferred`
  No plans to do this, but not rejected. These issues stay open, because we do not consider the deferred state resolved. Submitting PRs on these issues is risky; you probably want to argue successfully that it should be done before starting work on it.
:code:`duplicate`
  Same as some other previously reported issue. In addition to this tag, duplicates should refer to the other issue and be closed.

:code:`obsolete`
  No longer relevant, moot, etc. Close.

:code:`erroneous`
  Not a Charliecloud issue; close. *Use caution when blaming a problem on user error. Often (or usually) there is a documentation or usability bug that caused the "user error".*

:code:`ready to merge`
  PRs only. Adding this tag speculates that the PR is complete and requests it be considered for merging to master. If the project lead requests changes, they'll remove the tag. Re-add it when you're ready to try again. Lead removes tag after merging.

:code:`wontfix`
  We are not going to do this, and we won't merge PRs. Close issue after tagging, though sometimes you'll want to leave a few days to allow for further discussion to catch mistaken tags.

:code:`worksforme`
  We cannot reproduce the issue. Typical workflow is to tag, then wait a few days for clarification before closing.

.. _doc-build:

Documentation
=============

This documentation is built using Sphinx with the sphinx-rtd-theme. It lives in :code:`doc-src`.

Dependencies
------------

* Python 3.4+
* Sphinx 1.4.9+
* docutils 0.13.1+
* sphinx-rtd-theme 0.2.4+

Older versions may work but are untested.

To build the HTML
-----------------

Install the dependencies::

  $ pip3 install sphinx sphinx-rtd-theme

Then::

  $ cd doc-src
  $ make

The HTML files are copied to :code:`doc` with :code:`rsync`. Anything to not copy is listed in :code:`RSYNC_EXCLUDE`. There is also a :code:`make clean` target that removes all the derived files as well as everything in :code:`doc`.

.. note::

   If you're on Debian Stretch or some version of Ubuntu, this will silently install into :code:`~/.local`, leaving the :code:`sphinx-build` binary in :code:`~/.local/bin`, which is often not on your path. One workaround (untested) is to run :code:`pip3` as root, which violates the principle of least privilege. A better workaround, assuming you can write to :code:`/usr/local`, is to add the undocumented and non-standard :code:`--system` argument to install in :code:`/usr/local` instead. (This matches previous :code:`pip` behavior.) See Debian bugs `725848 `_ and `820856 `_.

Publishing to the web
---------------------

If you have write access to the repository, you can update the web documentation (i.e., http://hpc.github.io/charliecloud).

Normally, :code:`doc` is a normal directory ignored by Git. To publish to the web, that directory needs to contain a Git checkout of the :code:`gh-pages` branch (not a submodule). To set that up::

  $ rm -Rf doc
  $ git clone git@github.com:hpc/charliecloud.git doc
  $ cd doc
  $ git checkout gh-pages

To publish::

  $ make web

It sometimes takes a few minutes for the web pages to update.

Test suite
==========

Timing the tests
----------------

The :code:`ts` utility from :code:`moreutils` is quite handy. The following prepends each line with the elapsed time since the previous line::

  $ CH_TEST_SCOPE=quick make test | ts -i '%M:%.S'

Note: a skipped test isn't free; I see ~0.15 seconds to do a skip.

Writing a test image using the standard workflow
------------------------------------------------

The Charliecloud test suite has a workflow that can build images by three methods:

1. From a Dockerfile, using :code:`ch-build`.
2. By pulling a Docker image, with :code:`docker pull`.
3. By running a custom script.
To create an image that will be built, unpacked, and basic tests run within, create a file in :code:`test/` called :code:`{Dockerfile,Docker_Pull,Build}.foo`. This will create an image tagged :code:`foo`.

To create an image with its own tests, documentation, etc., create a directory in :code:`examples/*`. In this directory, place :code:`{Dockerfile,Docker_Pull,Build}[.foo]` to build the image and :code:`test.bats` with your tests. For example, the file :code:`examples/mpi/foo/Dockerfile` will create an image tagged :code:`foo`, and :code:`examples/mpi/foo/Dockerfile.bar` tagged :code:`foo-bar`. These images also get the basic tests.

Image tags in the test suite must be unique.

Each of these image build files must specify its scope for building and running, which must be greater than or equal to the scope of all tests in the corresponding :code:`test.bats`. Exactly one of the following strings must be in each file:

.. code-block:: none

  ch-test-scope: quick
  ch-test-scope: standard
  ch-test-scope: full

Other stuff on the line (e.g., comment syntax) is ignored. Similarly, you can exclude an architecture with e.g.:

.. code-block:: none

  ch-test-arch-exclude: aarch64  # ARM not supported upstream

Additional subdirectories can be symlinked into :code:`examples/` and will be integrated into the test suite. This allows you to create a site-specific test suite.

:code:`Dockerfile`:

* It's a Dockerfile.

:code:`Docker_Pull`:

* First line states the address to pull from Docker Hub.
* Second line is a scope expression as described above.
* Examples (these refer to the same image as of this writing):

  .. code-block:: none

    alpine:3.6
    alpine@sha256:f006ecbb824d87947d0b51ab8488634bf69fe4094959d935c0c103f4820a417d

:code:`Build`:

* Script or program that builds the image.

* Arguments:

  * :code:`$1`: Absolute path to directory containing :code:`Build`.
  * :code:`$2`: Absolute path and name of output archive, without extension. The script should use an archive format compatible with :code:`ch-tar2dir` and append the appropriate extension (e.g., :code:`.tar.gz`).
  * :code:`$3`: Absolute path to appropriate temporary directory.

* The script must not write anything in the current directory.

* Temporary directory can be used for whatever and need not be cleaned up. It will be deleted by the test harness.

* The first entry in :code:`$PATH` will be the Charliecloud under test, i.e., bare :code:`ch-*` commands will be the right ones.

* The tarball must not contain leading directory components; top-level filesystem directories such as bin and usr must be at the root of the tarball with no leading path (:code:`./` is acceptable).

* Any programming language is permitted. To be included in the Charliecloud source code, a language already in the test suite dependencies is required.

* The script must test for its dependencies and fail with appropriate error message and exit code if something is missing. To be included in the Charliecloud source code, all dependencies must be something we are willing to install and test.

* Exit codes:

  * 0: Image tarball successfully created.
  * 65: One or more dependencies were not met.
  * 126 or 127: No interpreter available for script language (the shell takes care of this).
  * else: An error occurred.

A sketch of such a script appears below.
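For illustration only, here is a minimal hypothetical :code:`Build` script following the interface above. It is not part of the test suite; the image content (a lone :code:`bin/true`, copied from the host) and the file layout are made up to keep the sketch short.

.. code-block:: sh

  #!/bin/sh
  # Hypothetical Build script. Arguments per the interface above:
  # $1 = directory containing this script, $2 = output archive path
  # without extension, $3 = temporary directory.
  set -e
  out=$2.tar.gz
  tmp=$3

  # Test for dependencies; exit 65 if one is missing.
  command -v tar > /dev/null 2>&1 || exit 65

  # Build a trivial root filesystem in the temporary directory. A real
  # script would bootstrap a distribution or compile something here.
  mkdir "$tmp/bin"
  cp /bin/true "$tmp/bin"

  # Archive it with no leading directory components ("./" is acceptable).
  cd "$tmp"
  tar czf "$out" .

Note that the script writes only to the temporary directory and the given archive path, never the current directory, as the rules above require; :code:`set -e` makes any failure surface as a nonzero exit code.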
Building RPMs
=============

We maintain :code:`.spec` files and infrastructure for building RPMs in the Charliecloud source code. This is for two purposes:

1. We maintain our own Fedora RPMs (see `packaging guidelines `_).
2. We want to be able to build an RPM of any commit.

Item 2 is tested; i.e., if you break the RPM build, the test suite will fail. This section describes how to build the RPMs and the pain we've hopefully abstracted away.

Dependencies
------------

* Python 2.7 or 3.4+
* Either:

  * RPM-based system of roughly RHEL/CentOS 7 vintage or newer, with RPM build tools installed
  * System that can run Charliecloud containers

:code:`rpmbuild` wrapper script
-------------------------------

While building the Charliecloud RPMs is not too weird, we provide a script to streamline it. The purpose is to (a) make it easy to build versions not matching the working directory, (b) use an arbitrary :code:`rpmbuild` directory, and (c) build in a Charliecloud container for non-RPM-based environments.

The script must be run from the root of a Charliecloud Git working directory. Usage::

  $ packaging/fedora/build [OPTIONS] VERSION

Options:

* :code:`--image=DIR` : Build in Charliecloud image directory :code:`DIR`.
* :code:`--install` : Install the RPMs after building into the build environment.
* :code:`--rpmbuild=DIR` : Use RPM build directory root :code:`DIR` (default: :code:`~/rpmbuild`).

For example, to build a version 0.9.7 RPM on an RPM system and leave the results in :code:`~/rpmbuild/RPMS`::

  $ packaging/fedora/build 0.9.7-1

To build a pre-release RPM of Git HEAD using the CentOS 7 image provided with the test suite (note that the test suite would also build the necessary image directory)::

  $ bin/ch-build -t centos7 -f test/Dockerfile.centos7 test
  $ bin/ch-docker2tar centos7 $CH_TEST_TARDIR
  $ bin/ch-tar2dir $CH_TEST_TARDIR/centos7.tar.gz $CH_TEST_IMGDIR
  $ packaging/fedora/build --image $CH_TEST_IMGDIR/centos7 HEAD

Gotchas and quirks
------------------

RPM versions and releases
~~~~~~~~~~~~~~~~~~~~~~~~~

If :code:`VERSION` is :code:`HEAD`, then the RPM version will be the content of :code:`VERSION.full` for that commit, including Git gobbledygook, and the RPM release will be :code:`0`. Note that such RPMs cannot be reliably upgraded because their version numbers are unordered.

Otherwise, :code:`VERSION` should be a released Charliecloud version followed by a hyphen and the desired RPM release, e.g. :code:`0.9.7-3`.

Other values of :code:`VERSION` (e.g., a branch name) may work but are not supported.

Packaged source code and RPM build config come from different commits
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The spec file, :code:`build` script, :code:`.rpmlintrc`, etc. come from the working directory, but the package source is from the specified commit. This is what enables us to make additional RPM releases for a given Charliecloud release (e.g. 0.9.7-2). Corollaries of this policy are that RPM build configuration can be any or no commit, and it's not possible to create an RPM of uncommitted source code.

Changelog maintenance
~~~~~~~~~~~~~~~~~~~~~

The spec file contains a manually maintained changelog. Add a new entry for each new RPM release; do not include the Charliecloud release notes.

For released versions, :code:`build` verifies that the most recent changelog entry matches the given :code:`VERSION` argument. The timestamp is not automatically verified.

For other Charliecloud versions, :code:`build` adds a generic changelog entry with the appropriate version stating that it's a pre-release RPM.

Coding style
============

We haven't written down a comprehensive style guide. Generally, follow the style of the surrounding code, think in rectangles rather than lines of code or text, and avoid CamelCase.
Note that Reid is very picky about style, so don’t feel singled out if he complains (or even updates this section based on your patch!). He tries to be nice about it. Writing English --------------- * When describing what something does (e.g., your PR or a command), use the `imperative mood `_, i.e., write the orders you are giving rather than describe what the thing does. For example, do: | Inject files from the host into an image directory. | Add :code:`--join-pid` option to :code:`ch-run`. Do not (indicative mood): | Injects files from the host into an image directory. | Adds :code:`--join-pid` option to :code:`ch-run`. * Use sentence case for titles, not title case. * If it's not a sentence, start with a lower-case character. * Use spell check. Keep your personal dictionary updated so your editor is not filled with false positives. .. _dependency-policy: Dependency policy ----------------- Specific dependencies (prerequisites) are stated elsewhere in the documentation. This section describes our policy on which dependencies are acceptable. Generally ~~~~~~~~~ All dependencies must be stated and justified in the documentation. We want Charliecloud to run on as many systems as practical, so we work hard to keep dependencies minimal. However, because Charliecloud depends on new-ish kernel features, we do depend on standards of similar vintage. Core functionality should be available even on small systems with basic Linux distributions, so dependencies for run-time and build-time are only the bare essentials. Exceptions, to be used judiciously: * Features that add convenience rather than functionality may have additional dependencies that are reasonably expected on most systems where the convenience would be used. * Features that only work if some other software is present (example: the Docker wrapper scripts) can add dependencies of that other software. The test suite is tricky, because we need a test framework and to set up complex test fixtures. We have not yet figured out how to do this at reasonable expense with dependencies as tight as run- and build-time, so there are systems that do support Charliecloud but cannot run the test suite. Building the documentation needs Sphinx features that have not made their way into common distributions (i.e., RHEL), so we use recent versions of Sphinx and provide a source distribution with pre-built documentation. Building the RPMs should work on RPM-based distributions with a kernel new enough to support Charliecloud. You might need to install additional packages (but not from third-party repositories). :code:`curl` vs. :code:`wget` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For URL downloading in shell code, including Dockerfiles, use :code:`wget -nv`. Both work fine for our purposes, and we need to use one or the other consistently. According to Debian's popularity contest, 99.88% of reporting systems have :code:`wget` installed, vs. about 44% for :code:`curl`. On the other hand, :code:`curl` is in the minimal install of CentOS 7 while :code:`wget` is not. For now, Reid just picked :code:`wget` because he likes it better. Variable conventions in shell scripts and :code:`.bats` files ------------------------------------------------------------- * Separate words with underscores. * User-configured environment variables: all uppercase, :code:`CH_TEST_` prefix. Do not use in individual :code:`.bats` files; instead, provide an intermediate variable. * Variables local to a given file: lower case, no prefix. 
* Bats: set in :code:`common.bash` and then used in :code:`.bats` files: lower case, :code:`ch_` prefix. * Surround lower-case variables expanded in strings with curly braces, unless they're the only thing in the string. E.g.: .. code-block:: none "${foo}/bar" # yes "$foo" # yes "$foo/bar" # no "${foo}" # no * Quote the entire string instead of just the variable when practical: .. code-block:: none "${foo}/bar" # yes "${foo}"/bar # no "$foo"/bar # no * Don't quote variable assignments or other places where not needed (e.g., case statements). E.g.: .. code-block:: none foo=${bar}/baz # yes foo="${bar}/baz" # no .. LocalWords: milestoned gh nv cht Chacon's scottchacon charliecloud-0.9.10/doc-src/docker_tips.rst000066400000000000000000000070331346662313000206400ustar00rootroot00000000000000Docker tips =========== Docker is a convenient way to build Charliecloud images. While installing Docker is beyond the scope of this documentation, here are a few tips. Understand the security implications of Docker ---------------------------------------------- Because Docker (a) makes installing random crap from the internet really easy and (b) is easy to deploy insecurely, you should take care. Some of the implications are below. This list should not be considered comprehensive nor a substitute for appropriate expertise; adhere to your moral and institutional responsibilities. :code:`docker` equals root ~~~~~~~~~~~~~~~~~~~~~~~~~~ Anyone who can run the :code:`docker` command or interact with the Docker daemon can `trivially escalate to root `_. This is considered a feature. For this reason, don't create the :code:`docker` group, as this will allow passwordless, unlogged escalation for anyone in the group. Images can contain bad stuff ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Standard hygiene for "installing stuff from the internet" applies. Only work with images you trust. The official Docker Hub repositories can help. Containers run as root ~~~~~~~~~~~~~~~~~~~~~~ By default, Docker runs container processes as root. In addition to being poor hygiene, this can be an escalation path, e.g. if you bind-mount host directories. Docker alters your network configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To see what it did:: $ ifconfig # note docker0 interface $ brctl show # note docker0 bridge $ route -n Docker installs services ~~~~~~~~~~~~~~~~~~~~~~~~ If you don't want the service starting automatically at boot, e.g.:: $ systemctl is-enabled docker enabled $ systemctl disable docker $ systemctl is-enabled docker disabled Configuring for a proxy ----------------------- By default, Docker does not work if you have a proxy, and it fails in two different ways. The first problem is that Docker itself must be told to use a proxy. This manifests as:: $ sudo docker run hello-world Unable to find image 'hello-world:latest' locally Pulling repository hello-world Get https://index.docker.io/v1/repositories/library/hello-world/images: dial tcp 54.152.161.54:443: connection refused If you have a systemd system, the `Docker documentation `_ explains how to configure this. If you don't have a systemd system, then :code:`/etc/default/docker` might be the place to go? The second problem is that Docker containers need to know about the proxy as well. This manifests as images failing to build because they can't download stuff from the internet. 
The fix is to set the proxy variables in your environment, e.g.::

  export HTTP_PROXY=http://proxy.example.com:8088
  export http_proxy=$HTTP_PROXY
  export HTTPS_PROXY=$HTTP_PROXY
  export https_proxy=$HTTP_PROXY
  export ALL_PROXY=$HTTP_PROXY
  export all_proxy=$HTTP_PROXY
  export NO_PROXY='localhost,127.0.0.1,.example.com'
  export no_proxy=$NO_PROXY

You also need to teach :code:`sudo` to retain them. Add the following to :code:`/etc/sudoers`::

  Defaults env_keep+="HTTP_PROXY http_proxy HTTPS_PROXY https_proxy ALL_PROXY all_proxy NO_PROXY no_proxy"

Because different programs use different subsets of these variables, and to avoid a situation where some things work and others don't, the Charliecloud test suite (see below) includes a test that fails if some but not all of the above variables are set.

charliecloud-0.9.10/doc-src/faq.rst

Frequently asked questions (FAQ)
********************************

.. contents::
   :depth: 3
   :local:

About the project
=================

Where did the name Charliecloud come from?
------------------------------------------

*Charlie* — Charles F. McMillan was director of Los Alamos National Laboratory from June 2011 until December 2017, i.e., at the time Charliecloud was started in early 2014. He is universally referred to as “Charlie” here.

*cloud* — Charliecloud provides cloud-like flexibility for HPC systems.

How do you spell Charliecloud?
------------------------------

We try to be consistent with *Charliecloud* — one word, no camel case. That is, *Charlie Cloud* and *CharlieCloud* are both incorrect.

Errors
======

How do I read the :code:`ch-run` error messages?
------------------------------------------------

:code:`ch-run` error messages look like this::

  $ ch-run foo -- echo hello
  ch-run[25750]: can't find image: foo: No such file or directory (ch-run.c:107 2)

There is a lot of information here, and it comes in this order:

1. Name of the executable; always :code:`ch-run`.

2. Process ID in square brackets; here :code:`25750`. This is useful when debugging parallel :code:`ch-run` invocations.

3. Colon.

4. Main error message; here :code:`can't find image: foo`. This should be informative as to what went wrong, and if it's not, please file an issue, because you may have found a usability bug. Note that in some cases you may encounter the default message :code:`error`; if this happens and you're not doing something very strange, that's also a usability bug.

5. Colon (but note that the main error itself can contain colons too), if and only if the next item is present.

6. Operating system's description of the value of :code:`errno`; here :code:`No such file or directory`. Omitted if not applicable.

7. Open parenthesis.

8. Name of the source file where the error occurred; here :code:`ch-run.c`. This and the following item tell developers exactly where :code:`ch-run` became confused, which greatly improves our ability to provide help and/or debug.

9. Source line where the error occurred.

10. Value of :code:`errno` (see `C error codes in Linux `_ for the full list of possibilities).

11. Close parenthesis.

*Note:* Despite the structured format, the error messages are not guaranteed to be machine-readable.

Tarball build fails with “No command specified”
-----------------------------------------------

The full error from :code:`ch-docker2tar` or :code:`ch-build2dir` is::

  docker: Error response from daemon: No command specified.

You will also see it with various plain Docker commands.
This happens when there is no default command specified in the Dockerfile or any of its ancestors. Some base images specify one (e.g., Debian) and others don't (e.g., Alpine). Docker requires this even for commands that don't seem like they should need it, such as :code:`docker create` (which is what trips up Charliecloud). The solution is to add a default command to your Dockerfile, such as :code:`CMD ["true"]`.

:code:`ch-run` fails with “can't re-mount image read-only”
-----------------------------------------------------------

Normally, :code:`ch-run` re-mounts the image directory read-only within the container. This fails if the image resides on certain filesystems, such as NFS (see `issue #9 `_). There are two solutions:

1. Unpack the image into a different filesystem, such as :code:`tmpfs` or local disk. Consult your local admins for a recommendation. Note that Lustre is probably not a good idea because it can give poor performance for you and also everyone else on the system.

2. Use the :code:`-w` switch to leave the image mounted read-write. This may have an impact on reproducibility (because the application can change the image between runs) and/or stability (if there are multiple application processes and one writes a file in the image that another is reading or writing).

Unexpected behavior
===================

What do the version numbers mean?
---------------------------------

Released versions of Charliecloud have a pretty standard version number, e.g. 0.9.7. Work leading up to a released version also has version numbers, to satisfy tools that require them and to give the executables something useful to report on :code:`--version`, but these can be quite messy. We refer to such versions informally as *pre-releases*, but Charliecloud does not have formal pre-releases such as alpha, beta, or release candidate.

*Pre-release version numbers are not in order*, because this work is in a DAG rather than linear, except that they precede the version we are working towards. If you're dealing with these versions, use Git.

Pre-release version numbers are the version we are working towards, followed by: :code:`~pre`, the branch name if not :code:`master` with non-alphanumerics removed, the commit hash, and finally :code:`dirty` if the working directory had uncommitted changes. Examples:

* :code:`0.2.0` : Version 0.2.0. Released versions don't include Git information, even if built in a Git working directory.

* :code:`0.2.1~pre` : Some snapshot of work leading up to 0.2.1, built from source code where the Git information has been lost, e.g. the tarballs GitHub provides. This should make you wary because you don't have any provenance. It might even be uncommitted work or an abandoned branch.

* :code:`0.2.1~pre.1a99f42` : Master branch commit 1a99f42, built from a clean working directory (i.e., no changes since that commit).

* :code:`0.2.1~pre.foo1.0729a78` : Commit 0729a78 on branch :code:`foo-1`, :code:`foo_1`, etc., built from a clean working directory.

* :code:`0.2.1~pre.foo1.0729a78.dirty` : Commit 0729a78 on one of those branches, plus uncommitted changes.

:code:`--uid 0` lets me read files I can't otherwise!
-----------------------------------------------------

Some permission bits can give a surprising result with a container UID of 0.
For example::

  $ whoami
  reidpr
  $ echo surprise > ~/cantreadme
  $ chmod 000 ~/cantreadme
  $ ls -l ~/cantreadme
  ---------- 1 reidpr reidpr 9 Oct 3 15:03 /home/reidpr/cantreadme
  $ cat ~/cantreadme
  cat: /home/reidpr/cantreadme: Permission denied
  $ ch-run /var/tmp/hello cat ~/cantreadme
  cat: /home/reidpr/cantreadme: Permission denied
  $ ch-run --uid 0 /var/tmp/hello cat ~/cantreadme
  surprise

At first glance, it seems that we've found an escalation -- we were able to read a file inside a container that we could not read on the host! That seems bad. However, what is really going on here is more prosaic but complicated:

1. After :code:`unshare(CLONE_NEWUSER)`, :code:`ch-run` gains all capabilities inside the namespace. (Outside, capabilities are unchanged.)

2. This includes :code:`CAP_DAC_OVERRIDE`, which enables a process to read/write/execute a file or directory mostly regardless of its permission bits. (This is why root isn't limited by permissions.)

3. Within the container, :code:`exec(2)` capability rules are followed. Normally, this basically means that all capabilities are dropped when :code:`ch-run` replaces itself with the user command. However, if EUID is 0, which it is inside the namespace given :code:`--uid 0`, then the subprocess keeps all its capabilities. (This makes sense: if root creates a new process, it stays root.)

4. :code:`CAP_DAC_OVERRIDE` within a user namespace is honored for a file or directory only if its UID and GID are both mapped. In this case, :code:`ch-run` maps :code:`reidpr` to container :code:`root` and group :code:`reidpr` to itself.

5. Thus, files and directories owned by the host EUID and EGID (here :code:`reidpr:reidpr`) are available for all access with :code:`ch-run --uid 0`.

This is not an escalation. The quirk applies only to files owned by the invoking user, because :code:`ch-run` is unprivileged outside the namespace, and thus he or she could simply :code:`chmod` the file to read it. Access inside and outside the container remains equivalent.

References:

* http://man7.org/linux/man-pages/man7/capabilities.7.html
* http://lxr.free-electrons.com/source/kernel/capability.c?v=4.2#L442
* http://lxr.free-electrons.com/source/fs/namei.c?v=4.2#L328

Why does :code:`ping` not work?
-------------------------------

:code:`ping` fails with “permission denied” or similar under Charliecloud, even if you're UID 0 inside the container::

  $ ch-run $IMG -- ping 8.8.8.8
  PING 8.8.8.8 (8.8.8.8): 56 data bytes
  ping: permission denied (are you root?)
  $ ch-run --uid=0 $IMG -- ping 8.8.8.8
  PING 8.8.8.8 (8.8.8.8): 56 data bytes
  ping: permission denied (are you root?)

This is because :code:`ping` needs a raw socket to construct the needed :code:`ICMP ECHO` packets, which requires capability :code:`CAP_NET_RAW` or root. Unprivileged users can normally use :code:`ping` because it's a setuid or setcap binary: it raises privilege using the filesystem bits on the executable to obtain a raw socket.

Under Charliecloud, there are multiple reasons :code:`ping` can't get a raw socket. First, images are unpacked without privilege, meaning that setuid and setcap bits are lost. But even if you do get privilege in the container (e.g., with :code:`--uid=0`), this only applies in the container. Charliecloud uses the host's network namespace, where your unprivileged host identity applies and :code:`ping` still can't get a raw socket.

The recommended alternative is to simply try the thing you want to do, without testing connectivity using :code:`ping` first.
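For example, a quick way to check connectivity without :code:`ping`, assuming the image contains :code:`wget` (the image path and URL here are illustrative)::

  $ ch-run /var/tmp/hello -- wget -nv -O /dev/null http://example.com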
Why is MATLAB trying and failing to change the group of :code:`/dev/pts/0`?
---------------------------------------------------------------------------

MATLAB and some other programs want pseudo-TTY (PTY) files to be group-owned by :code:`tty`. If it's not, MATLAB will attempt to :code:`chown(2)` the file, which fails inside a container.

The scenario in more detail is this. Assume you're user :code:`charlie` (UID=1000), your primary group is :code:`nerds` (GID=1001), :code:`/dev/pts/0` is the PTY file in question, and its ownership is :code:`charlie:tty` (:code:`1000:5`), as it should be. What happens in the container by default is:

1. MATLAB :code:`stat(2)`\ s :code:`/dev/pts/0` and checks the GID.

2. This GID is :code:`nogroup` (65534) because :code:`tty` (5) is not mapped on the host side (and cannot be, because only one's EGID can be mapped in an unprivileged user namespace).

3. MATLAB concludes this is bad.

4. MATLAB executes :code:`chown("/dev/pts/0", 1000, 5)`.

5. This fails because GID 5 is not mapped on the guest side.

6. MATLAB pukes.

The workaround is to map your EGID of 1001 to 5 inside the container (instead of the default 1001:1001), i.e. :code:`--gid=5`. Then, step 4 succeeds because the call is mapped to :code:`chown("/dev/pts/0", 1000, 1001)` and MATLAB is happy.

.. _faq_docker2tar-size:

:code:`ch-docker2tar` gives incorrect image sizes
-------------------------------------------------

:code:`ch-docker2tar` often finishes before the progress bar is complete. For example::

  $ ch-docker2tar mpihello /var/tmp
  373MiB 0:00:21 [============================>                ] 65%
  146M /var/tmp/mpihello.tar.gz

In this case, the :code:`.tar.gz` contains 392 MB uncompressed::

  $ zcat /var/tmp/mpihello.tar.gz | wc
  2740966 14631550 392145408

But Docker thinks the image is 597 MB::

  $ sudo docker image inspect mpihello | fgrep -i size
          "Size": 596952928,
          "VirtualSize": 596952928,

We've also seen cases where the Docker-reported size is an *under*\ estimate::

  $ ch-docker2tar spack /var/tmp
  423MiB 0:00:22 [============================================>] 102%
  162M /var/tmp/spack.tar.gz
  $ zcat /var/tmp/spack.tar.gz | wc
  4181186 20317858 444212736
  $ sudo docker image inspect spack | fgrep -i size
          "Size": 433812403,
          "VirtualSize": 433812403,

We think that this is because Docker is computing size based on the size of the layers rather than the unpacked image. We do not currently have a fix; see `issue #165 `_.

My second-level directory :code:`dev` is empty
----------------------------------------------

Some image tarballs, such as official Ubuntu Docker images, put device files in :code:`/dev`. These files prevent unpacking the tarball, because unprivileged users cannot create device files. Further, these files are not needed because :code:`ch-run` overmounts :code:`/dev` anyway.

We cannot reliably prevent device files from being included in the tar, because often that is outside our control, e.g. :code:`docker export` produces a tarball. Thus, we must exclude them at unpacking time.

An additional complication is that :code:`ch-tar2dir` can handle tarballs both with a single top-level directory and without, i.e., “tarbombs”. For example, best practice use of :code:`tar` on the command line produces the former, while :code:`docker export` (perhaps via :code:`ch-docker2tar`) produces a tarbomb.

Thus, :code:`ch-tar2dir` uses :code:`tar --exclude` to exclude from unpacking everything under :code:`./dev` and :code:`*/dev`, i.e., a directory :code:`dev` appearing at either the first or second level is forced to be empty.
This yields false positives if you have a tarbomb image with a directory :code:`dev` at the second level containing stuff you care about. Hopefully this is rare, but please let us know if it is your use case.

My password that contains digits doesn't work in VirtualBox console
-------------------------------------------------------------------

VirtualBox has confusing Num Lock behavior. Thus, you may be typing arrows, page up/down, etc. instead of digits, without noticing because console password fields give no feedback, not even whether a character has been typed.

Try using the number row instead, toggling the Num Lock key, or SSHing into the virtual machine.

How do I ...
============

My app needs to write to :code:`/var/log`, :code:`/run`, etc.
-------------------------------------------------------------

Because the image is mounted read-only by default, log files, caches, and other stuff cannot be written anywhere in the image. You have three options:

1. Configure the application to use a different directory. :code:`/tmp` is often a good choice, because it's shared with the host and fast.

2. Use :code:`RUN` commands in your Dockerfile to create symlinks that point somewhere writeable, e.g. :code:`/tmp`, or :code:`/mnt/0` with :code:`ch-run --bind`.

3. Run the image read-write with :code:`ch-run -w`. Be careful that multiple containers do not try to write to the same files.

Which specific :code:`sudo` commands are needed?
------------------------------------------------

For running images, :code:`sudo` is not needed at all.

For building images, it depends on what you would like to support. For example, do you want to let users build images with Docker? Do you want to let them run the build tests?

We do not maintain specific lists, but you can search the source code and documentation for uses of :code:`sudo` and :code:`$DOCKER` and evaluate them on a case-by-case basis. (The latter includes :code:`sudo` if needed to invoke :code:`docker` in your environment.) For example::

  $ find . \(   -type f -executable \
             -o -name Makefile \
             -o -name '*.bats' \
             -o -name '*.rst' \
             -o -name '*.sh' \) \
           -exec egrep -H '(sudo|\$DOCKER)' {} \;

OpenMPI Charliecloud jobs don't work
------------------------------------

MPI can be finicky. This section documents some of the problems we've seen.

:code:`mpirun` can't launch jobs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

For example, you might see::

  $ mpirun -np 1 ch-run /var/tmp/mpihello -- /hello/hello
  App launch reported: 2 (out of 2) daemons - 0 (out of 1) procs
  [cn001:27101] PMIX ERROR: BAD-PARAM in file src/dstore/pmix_esh.c at line 996

We're not yet sure why this happens — it may be a mismatch between the OpenMPI builds inside and outside the container — but in our experience launching with :code:`srun` often works when :code:`mpirun` doesn't, so try that.

Communication between ranks on the same node fails
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

OpenMPI has many ways to transfer messages between ranks. If the ranks are on the same node, it is faster to do these transfers using shared memory rather than involving the network stack. There are two ways to use shared memory.

The first and older method is to use POSIX or SysV shared memory segments. This approach uses two copies: one from Rank A to shared memory, and a second from shared memory to Rank B. For example, the :code:`sm` *byte transport layer* (BTL) does this.
The second and newer method is to use the :code:`process_vm_readv(2)` and/or :code:`process_vm_writev(2)` system calls to transfer messages directly from Rank A's virtual memory to Rank B's. This approach is known as *cross-memory attach* (CMA). It gives significant performance improvements in `benchmarks `_, though of course the real-world impact depends on the application. For example, the :code:`vader` BTL (enabled by default in OpenMPI 2.0) and the :code:`psm2` *matching transport layer* (MTL) do this.

The problem in Charliecloud is that the second approach does not work by default. We can demonstrate the problem with the LAMMPS molecular dynamics application::

  $ srun --cpus-per-task 1 ch-run /var/tmp/lammps_mpi -- \
    lmp_mpi -log none -in /lammps/examples/melt/in.melt
  [cn002:21512] Read -1, expected 6144, errno = 1
  [cn001:23947] Read -1, expected 6144, errno = 1
  [cn002:21517] Read -1, expected 9792, errno = 1
  [... repeat thousands of times ...]

With :code:`strace(1)`, one can isolate the problem to the system call noted above::

  process_vm_readv(...) = -1 EPERM (Operation not permitted)
  write(33, "[cn001:27673] Read -1, expected 6"..., 48) = 48

The `man page `_ reveals that these system calls require that the processes have permission to :code:`ptrace(2)` one another, but sibling user namespaces `do not `_. (You *can* :code:`ptrace(2)` into a child namespace, which is why :code:`gdb` doesn't require anything special in Charliecloud.)

This problem is not specific to containers; for example, many settings of kernels with `YAMA `_ enabled will similarly disallow this access.

So what can you do? There are a few options:

* We recommend simply using the :code:`--join` family of arguments to :code:`ch-run`. This puts a group of :code:`ch-run` peers in the same namespaces; then, the system calls work. See the :ref:`man_ch-run` man page for details.

* You can also sometimes turn off single-copy. For example, for :code:`vader`, set the MCA variable :code:`btl_vader_single_copy_mechanism` to :code:`none`, e.g. with an environment variable::

    $ export OMPI_MCA_btl_vader_single_copy_mechanism=none

  :code:`psm2` does not let you turn off CMA, but it does fall back to two-copy if CMA doesn't work. However, this fallback crashed when we tried it.

* The kernel module `XPMEM `_ enables a different single-copy approach. We have not yet tried this, and the module needs to be evaluated for user namespace safety, but it's quite a bit faster than CMA on benchmarks.

.. Images by URL only works in Sphinx 1.6+. Debian Stretch has 1.4.9, so
   remove it for now.

   .. image:: https://media.giphy.com/media/1mNBTj3g4jRCg/giphy.gif
      :alt: Darth Vader bowling a strike with the help of the Force
      :align: center

I get a bunch of independent rank-0 processes when launching with :code:`srun`
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

For example, you might be seeing this::

  $ srun ch-run /var/tmp/mpihello -- /hello/hello
  0: init ok cn036.localdomain, 1 ranks, userns 4026554634
  0: send/receive ok
  0: finalize ok
  0: init ok cn035.localdomain, 1 ranks, userns 4026554634
  0: send/receive ok
  0: finalize ok

We were expecting a two-rank MPI job, but instead we got two independent one-rank jobs that did not coordinate.

MPI ranks start as normal, independent processes that must find one another somehow in order to sync up and begin the coupled parallel program; this happens in :code:`MPI_Init()`. There are lots of ways to do this coordination.
Because we are launching with the host's Slurm, we need it to provide something the containerized processes can use for such coordination. OpenMPI must be compiled to use what that Slurm has to offer, and Slurm must be told to offer it. What works for us is something called "PMI2". You can see if your Slurm supports it with::

  $ srun --mpi=list
  srun: MPI types are...
  srun: mpi/pmi2
  srun: mpi/openmpi
  srun: mpi/mpich1_shmem
  srun: mpi/mpich1_p4
  srun: mpi/lam
  srun: mpi/none
  srun: mpi/mvapich
  srun: mpi/mpichmx
  srun: mpi/mpichgm

If :code:`pmi2` is not in the list, you must ask your admins to enable Slurm's PMI2 support. If it is in the list, but you're seeing this problem, that means it is not the default, and you need to tell Slurm you want it. Try::

  $ export SLURM_MPI_TYPE=pmi2
  $ srun ch-run /var/tmp/mpihello -- /hello/hello
  0: init ok wc035.localdomain, 2 ranks, userns 4026554634
  1: init ok wc036.localdomain, 2 ranks, userns 4026554634
  0: send/receive ok
  0: finalize ok

How do I run X11 apps?
----------------------

X11 applications should “just work”. For example, try this Dockerfile:

.. code-block:: docker

  FROM debian:stretch
  RUN apt-get update \
      && apt-get install -y xterm

Build it and unpack it to :code:`/var/tmp`. Then::

  $ ch-run /scratch/ch/xterm -- xterm

should pop an xterm.

If your X11 application doesn't work, please file an issue so we can figure out why.

charliecloud-0.9.10/doc-src/favicon.ico [binary icon data omitted]

charliecloud-0.9.10/doc-src/index.rst

Overview
********

.. image:: rd100-winner.png
   :align: right
   :alt: R&D 100 2018 winner logo
   :width: 128px
   :target: https://www.lanl.gov/discover/news-release-archive/2018/November/1119-rd-100-awards.php

.. include:: ../README.rst

.. note::

   This documentation is for Charliecloud version |release| (Git commit |version|) and was built |today|.

.. toctree::
   :numbered:
   :hidden:

   install
   test
   tutorial
   command-usage
   vm
   faq
   dev

charliecloud-0.9.10/doc-src/install.rst

Installation
************

This section describes how to build and install Charliecloud. For some distributions, this can be done using your package manager; otherwise, both normal users and admins can build and install it manually.

.. contents::
   :depth: 2
   :local:

Dependencies
============

Charliecloud is a simple system with limited dependencies. If your system meets these prerequisites but Charliecloud doesn't work, please report that as a bug.

Supported architectures
-----------------------

Charliecloud should work on any architecture supported by the Linux kernel, and we have run Charliecloud containers on x86-64, ARM, and Power. However, it is currently tested only on x86-64 and ARM.

Most container build software is also fairly portable; e.g., see `Docker's supported platforms `_.

Run time
--------

Systems used for running images need:

* Recent Linux kernel with user namespaces enabled. We recommend version 4.4 or higher.
* C11 compiler and standard library
* POSIX.1-2017 shell and utilities

Some distributions need configuration changes to enable user namespaces. For example:

* Debian Stretch `needs sysctl `_ :code:`kernel.unprivileged_userns_clone=1`.
* RHEL/CentOS 7.4 and 7.5 need both a `kernel command line option and a sysctl `_.
  *Important note:* Docker does not work with user namespaces, so skip step 4
  of the Red Hat instructions, i.e., don't add :code:`--userns-remap` to the
  Docker configuration (see `issue #97 `_).

Build time
----------

Systems used for building images need the run-time dependencies, plus
something to actually build the images. A common choice is `Docker `_, along
with internet access or configuration for a local Docker repository.

Our wrapper scripts for Docker expect to run the :code:`docker` command under
:code:`sudo` and need Docker 17.03+ and :code:`mktemp(1)`. (Older versions of
Docker may work but are untested. We know that 1.7.1 does not work.)

Optional build-time dependencies:

* Bash 4.1+, for :code:`ch-build2dir`

Test suite
----------

To run the test suite, you also need:

* `Bats `_ 0.4.0
* Bash 4.1+, for Bats and to make programming the tests tractable
* Python 2.7 or 3.4+, for building some of the tests
* Wget, to download stuff for some of the test images
* root access via :code:`sudo` (optional), to test filesystem permissions
  enforcement

Image building software tested, with varying levels of thoroughness:

* Shell scripts with various manual bootstrap and :code:`ch-run`
* Docker
* `Buildah `_
* `skopeo `_ and `umoci `_

Bats can be installed at the system level or embedded in the Charliecloud
source code. If it's in both places, the latter is used. To embed Bats,
either:

* Download Charliecloud using :code:`git clone --recursive`, which will check
  out Bats as a submodule in :code:`test/bats`.

* Unpack the Bats zip file or tarball in :code:`test/bats`.

To check an embedded Bats::

  $ test/bats/bin/bats --version
  Bats 0.4.0

Package manager install
=======================

Charliecloud is available in some distribution package repositories, and
packages can be built for additional distributions. (Note, however, that
system-wide installation is not required — Charliecloud works fine when built
by any user and run from one's home directory or similar.)

This section describes how to obtain packages for the distributions we know
about, and where to go for support on them. If you'd like to build one of the
packages, or if you're a package maintainer, see :code:`packaging/README` and
:code:`packaging/*/README` for additional documentation. Pull requests and
other collaboration to improve the packaging situation are particularly
welcome!

Debian
------

Charliecloud has been proposed for inclusion in Debian; see `issue 95 `_.

.. list-table::
   :widths: auto

   * - Distribution versions
     - proposed for *Buster* and *Stretch backports*
   * - Maintainers
     - Lucas Nussbaum (:code:`lucas@debian.org`) and Peter Wienemann
       (:code:`wienemann@physik.uni-bonn.de`)
   * - Bug reports to
     - Charliecloud's GitHub issue tracker
   * - Packaging source code
     - in Charliecloud: :code:`packaging/debian`

Gentoo
------

A native package for Gentoo is available.
.. list-table::
   :widths: auto

   * - Package name
     - `sys-cluster/charliecloud `_
   * - Maintainer
     - Oliver Freyermuth (:code:`o.freyermuth@googlemail.com`)
   * - Bug reports to
     - `Gentoo Bugzilla `_
   * - Packaging source code
     - `Gentoo ebuild repository `_

To install::

  $ emerge sys-cluster/charliecloud

It may be necessary to accept keywords first, e.g.::

  $ echo "=sys-cluster/charliecloud-0.2.3_pre20171121 ~amd64" >> /etc/portage/package.accept_keywords

A live ebuild is also available and can be keyworded via::

  $ echo "~sys-cluster/charliecloud-9999 **" >> /etc/portage/package.accept_keywords

RPM-based distributions
-----------------------

An RPM :code:`.spec` file is provided in the Charliecloud source code. We are
actively seeking distribution packagers to adapt this into official packages!

.. list-table::
   :widths: auto

   * - Repositories
     - none yet
   * - Maintainer
     - Oliver Freyermuth (:code:`o.freyermuth@googlemail.com`)
   * - Bug reports to
     - Charliecloud's GitHub issue tracker
   * - Packaging source code
     - in Charliecloud: :code:`packaging/redhat`

Manual build and install
========================

Download
--------

See our GitHub project: https://github.com/hpc/charliecloud

The recommended download method is :code:`git clone --recursive`.

Build
-----

To build, simply::

  $ make

To build the documentation, see :ref:`the contributor's guide `.

Install (optional)
------------------

You can run Charliecloud from the source directory, and it's recommended you
at least run the test suite before installation to establish that your system
will work.

To install (FHS-compliant)::

  $ make install PREFIX=/foo/bar

Note that :code:`PREFIX` is required. It does not default to
:code:`/usr/local` like many packages.

.. include:: ./docker_tips.rst
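If you do install, a quick sanity check is to put the installed :code:`bin`
directory on your :code:`PATH` and ask any Charliecloud executable for its
version. This is a sketch only; substitute whatever :code:`PREFIX` you chose
above, and expect your own version string::

  $ export PATH=/foo/bar/bin:$PATH
  $ ch-run --version
  0.9.10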
charliecloud-0.9.10/doc-src/logo-sidebar.png  [binary PNG image; sidebar logo; data omitted]

charliecloud-0.9.10/doc-src/rd100-winner.png  [binary PNG image; R&D 100 2018 winner logo; data omitted]

charliecloud-0.9.10/doc-src/see_also.rst

See also
========

charliecloud(1)

Full documentation at:

charliecloud-0.9.10/doc-src/test.rst

.. _install_test-charliecloud:

Testing
*******

Charliecloud comes with a fairly comprehensive Bats test suite. This section
explains how the tests work and how to run them.

.. contents::
   :depth: 2
   :local:

Getting started
===============

Charliecloud's tests are based in the directory :code:`test`, which is either
at the top level of the source code or installed at
:code:`$PREFIX/libexec/charliecloud`. To run them, go there::

  $ cd test

If you have :code:`sudo`, the tests will make use of it by default. To skip
the tests that use :code:`sudo` even if you have privileges, set
:code:`CH_TEST_DONT_SUDO` to a non-empty string.

The tests use a framework called `Bats `_ (Bash Automated Testing System). To
check the location and version of Bats used by the tests::

  $ make where-bats
  which bats
  /usr/bin/bats
  bats --version
  Bats 0.4.0

Just like for normal use, the Charliecloud test suite is split into build and
run phases, and there is a third phase that runs the examples' test suites.
These phases can be tested independently on different systems.

Testing is coordinated by :code:`make`. The test targets run one or more test
suites. If any test suite has a failure, testing stops with an error message.

The tests need three work directories with a dozen or so GB of available
space, in order to store image tarballs, unpacked image directories, and
permission test fixtures.
These are configured with environment variables; for example::

  $ export CH_TEST_TARDIR=/var/tmp/tarballs
  $ export CH_TEST_IMGDIR=/var/tmp/images
  $ export CH_TEST_PERMDIRS='/var/tmp /tmp'

:code:`CH_TEST_PERMDIRS` can be set to :code:`skip` in order to skip the file
permissions tests.

(Strictly speaking, the build phase needs only the first, and the example
test phase does not need the last one. However, for simplicity, the tests
will demand all three for all phases.)

.. note::

   Bats will wait until all descendant processes finish before exiting, so if
   you get into a failure mode where a test suite doesn't clean up all its
   processes, Bats will hang.

Phases
======

To run all three phases::

  $ make test

We recommend that a build box pass all phases so it can be used to run
containers for testing and development.

Build
-----

In this phase, image building and associated functionality is tested. ::

  $ make test-build
  bats build.bats build_auto.bats build_post.bats
   ✓ create tarball directory if needed
   ✓ documentations build
   ✓ executables seem sane
  [...]
   ✓ ch-build obspy
   ✓ ch-docker2tar obspy
   ✓ docker pull dockerpull
   ✓ ch-docker2tar dockerpull
   ✓ nothing unexpected in tarball directory

  41 tests, 0 failures

Note that this phase is much faster with a hot Docker cache. To refresh the
images, you sometimes need to clear the Docker cache. You can do this with
:code:`make clean-docker`. This requires sudo privileges and deletes all
Docker containers and images, whether or not they are related to the
Charliecloud test suite.

Run
---

The run tests require the contents of :code:`$CH_TEST_TARDIR` produced by a
successful build test. Copy this directory to the run system. Additionally,
the user running the tests needs to be a member of at least 2 groups.

File permission enforcement is tested against specially constructed fixture
directories. These should include every meaningful mounted filesystem, and
they cannot be shared between different users. To create them::

  $ for d in $CH_TEST_PERMDIRS; do sudo ./make-perms-test $d $USER nobody; done

To skip testing file permissions (e.g., if you don't have root), set
:code:`$CH_TEST_PERMDIRS` to :code:`skip`.

To run these tests::

  $ make test-run

Examples
--------

Some of the examples include test suites of their own. Charliecloud runs
those test suites, using a Slurm allocation if one is available or a single
node (localhost) if not. These require that the run tests have been completed
successfully.

Note that single tests from the Charliecloud perspective can include entire
test suites from the example's perspective, so be patient.

To run these tests::

  $ make test-test

Scope (speed vs. thoroughness)
==============================

Generally
---------

The test suite can be abbreviated or extended by setting the environment
variable :code:`CH_TEST_SCOPE`. The valid values are:

:code:`quick`
  This tests the most important subset of Charliecloud functionality. With a
  hot Docker cache, :code:`make test` should finish in under 30 seconds. It's
  handy for development.

  **Note:** The :code:`quick` scope uses the results of a prior successful
  completion of the :code:`standard` scope.

:code:`standard`
  This adds testing of the remaining Charliecloud functionality and a
  selection of the more important examples. It should finish in 5–10 minutes.
  This is the default if :code:`CH_TEST_SCOPE` is unset.

:code:`full`
  Run all available tests. It can take 30–60 minutes or more.
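For instance, a common pattern is to export the variable once for the whole
shell session instead of setting it per command (a sketch; any of the three
values works the same way)::

  $ export CH_TEST_SCOPE=full
  $ make test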
For example, to run the build tests in quick mode, say::

  $ CH_TEST_SCOPE=quick make test-build

Running a single test group
---------------------------

For focused testing, you can run a single :code:`.bats` file directly with
Bats. These are found at the following locations::

  test
  test/run
  examples/*/*/test.bats

First, check which :code:`bats` executable the test suite is using::

  $ make where-bats
  which bats
  /usr/local/src/charliecloud/test/bats/bin/bats
  bats --version
  Bats 0.4.0

Then, use that :code:`bats` to run the file you're interested in. For
example, you can test the :code:`mpihello` example with::

  $ cd examples/mpi/mpihello
  $ /usr/local/src/charliecloud/test/bats/bin/bats test.bats
   ✓ mpihello/serial
   ✓ mpihello/guest starts ranks
   ✓ mpihello/host starts ranks

  3 tests, 0 failures

You will typically need to first make the image available in the appropriate
location, either with successful :code:`build` and :code:`run` tests or by
manually building and unpacking it.

charliecloud-0.9.10/doc-src/tutorial.rst

Tutorial
********

This tutorial will teach you how to create and run Charliecloud images, using
both examples included with the source code as well as new ones you create
from scratch.

This tutorial assumes that: (a) the Charliecloud executables are in your
path, (b) Docker is installed on the build system, and (c) the Charliecloud
source code is available at :code:`/usr/local/src/charliecloud`.

.. contents::
   :depth: 2
   :local:

.. note::

   Shell sessions throughout this documentation will use the prompt
   :code:`$` to indicate commands executed natively on the host and
   :code:`>` for commands executed in a container.

90 seconds to Charliecloud
==========================

This section is for the impatient. It shows you how to quickly build and run
a "hello world" Charliecloud container. If you like what you see, then
proceed with the rest of the tutorial to understand what is happening and how
to use Charliecloud for your own applications. ::

  $ cd /usr/local/src/charliecloud/examples/serial/hello
  $ ch-build -t hello .
  Sending build context to Docker daemon  5.632kB
  [...]
  Successfully built 1136de7d4c0a
  $ ch-docker2tar hello /var/tmp
  114MiB 0:00:03 [=============================================] 103%
  -rw-r----- 1 reidpr reidpr 49M Nov 21 14:05 /var/tmp/hello.tar.gz
  $ ch-tar2dir /var/tmp/hello.tar.gz /var/tmp
  creating new image /var/tmp/hello
  /var/tmp/hello unpacked ok
  $ ch-run /var/tmp/hello -- echo "I'm in a container"
  I'm in a container

(See the :ref:`FAQ ` for why the progress bar goes over 100%.)

Getting help
============

All the executables have decent help and can tell you what version of
Charliecloud you have (if not, please report a bug). For example::

  $ ch-run --help
  Usage: ch-run [OPTION...] NEWROOT CMD [ARG...]

  Run a command in a Charliecloud container.
  [...]
  $ ch-run --version
  0.2.0+4836ac1

A description of all commands is also collected later in this documentation;
see :doc:`command-usage`. In addition, each executable has a man page.

Your first user-defined software stack
======================================

In this section, we will create and run a simple "hello, world" image. This
uses the :code:`hello` example in the Charliecloud source code. Start with::

  $ cd examples/serial/hello

Defining your UDSS
------------------

You must first write a Dockerfile that describes the image you would like;
consult the `Dockerfile documentation `_ for details on how to do this.
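For a taste of the format before consulting the reference documentation, here
is a rough sketch of such a minimal Dockerfile (illustrative only; the actual
file ships with the source as :code:`examples/serial/hello/Dockerfile`,
included below, and may differ in detail):

.. code-block:: docker

   # Start from the official Debian Stretch base image.
   FROM debian:stretch
   # Install an SSH client on top of it.
   RUN apt-get update && apt-get install -y openssh-client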
Note that run-time functionality such as :code:`ENTRYPOINT` is not supported.

We will use the following very simple Dockerfile:

.. literalinclude:: ../examples/serial/hello/Dockerfile
   :language: docker

This creates a minimal Debian Stretch image with :code:`ssh` installed. We
will encounter more complex Dockerfiles later in this tutorial.

.. note::

   Docker does not update the base image unless asked to. Specific images can
   be updated manually; in this case::

     $ sudo docker pull debian:stretch

   There are various resources and scripts online to help automate this
   process, as well as :code:`test/docker-clean.sh`.

Build Docker image
------------------

Charliecloud provides a convenience wrapper :code:`ch-build` around
:code:`docker build` that mitigates some of the latter's more irritating
characteristics. In particular, it passes through any HTTP proxy variables,
and by default it uses the Dockerfile in the current directory, rather than
at the root of the Docker context directory. (We will address the context
directory in more detail later.)

The two arguments here are a tag for the Docker image and the context
directory, which in this case is the current directory. ::

  $ ch-build -t hello .
  Sending build context to Docker daemon  5.632kB
  Step 1/4 : FROM debian:stretch
   ---> be2868bebaba
  [...]
  Step 4/4 : RUN touch /usr/bin/ch-ssh
   ---> e5920427a8f2
  Successfully built e5920427a8f2
  Successfully tagged hello:latest

Note that Docker prints each step of the Dockerfile as it's executed.

:code:`ch-build` and many other Charliecloud commands wrap various privileged
:code:`docker` commands. Thus, you will be prompted for a password to
escalate as needed. Note however that most configurations of :code:`sudo`
don't require a password on every invocation, so privileged commands may be
running even if you're not prompted for a password.

Share image and other standard Docker stuff
-------------------------------------------

If needed, the Docker image can be manipulated with standard Docker commands.
In particular, image sharing using a public or private Docker Hub repository
can be very useful. ::

  $ sudo docker images
  REPOSITORY  TAG      IMAGE ID      CREATED      SIZE
  debian      stretch  1742affe03b5  10 days ago  125.1 MB
  hello       latest   1742affe03b5  10 days ago  139.7 MB
  $ sudo docker push  # FIXME

Running the image with Docker is not generally useful, because Docker's
run-time environment is significantly different than Charliecloud's, but it
can have value when debugging Charliecloud. ::

  $ sudo docker run -it hello /bin/bash
  # ls /
  bin   dev  hello  lib    media  opt   root  sbin  sys  usr
  boot  etc  home   lib64  mnt    proc  run   srv   tmp  var
  # exit
  exit

Flatten image
-------------

Next, we flatten the Docker image into a tarball, which is then a plain file
amenable to standard file manipulation commands. This tarball is placed in an
arbitrary directory, here :code:`/var/tmp`. ::

  $ ch-docker2tar hello /var/tmp
  57M /var/tmp/hello.tar.gz

Distribute tarball
------------------

Thus far, the workflow has taken place on the build system. The next step is
to copy the tarball to the run system. This can use any appropriate method
for moving files: :code:`scp`, :code:`rsync`, something integrated with the
scheduler, etc.

If the build and run systems are the same, then no copy is needed. This is a
typical use case for development and testing.

Unpack tarball
--------------

Charliecloud runs out of a normal directory rather than a filesystem image.
In order to create this directory, we unpack the image tarball.
This will replace the image directory if it already exists. ::

  $ ch-tar2dir /var/tmp/hello.tar.gz /var/tmp
  creating new image /var/tmp/hello
  /var/tmp/hello unpacked ok

Generally, you should avoid unpacking into shared filesystems such as NFS and
Lustre, in favor of local storage such as :code:`tmpfs` and local hard disks.
This will yield better performance for you and anyone else on the shared
filesystem.

.. One potential gotcha is the tarball including special files such as
   devices. Because :code:`tar` is running unprivileged, these will not be
   unpacked, and they can cause the extraction to fail. The fix is to delete
   them in the Dockerfile.

.. note::

   You can run perfectly well out of :code:`/tmp`, but because it is
   bind-mounted automatically, the image root will then appear in multiple
   locations in the container's filesystem tree. This can cause confusion for
   both users and programs.

Activate image
--------------

We are now ready to run programs inside a Charliecloud container. This is
done with the :code:`ch-run` command::

  $ ch-run /var/tmp/hello -- echo hello
  hello

Symbolic links in :code:`/proc` tell us the current namespaces, which are
identified by long ID numbers::

  $ ls -l /proc/self/ns
  total 0
  lrwxrwxrwx 1 reidpr reidpr 0 Sep 28 11:24 ipc -> ipc:[4026531839]
  lrwxrwxrwx 1 reidpr reidpr 0 Sep 28 11:24 mnt -> mnt:[4026531840]
  lrwxrwxrwx 1 reidpr reidpr 0 Sep 28 11:24 net -> net:[4026531969]
  lrwxrwxrwx 1 reidpr reidpr 0 Sep 28 11:24 pid -> pid:[4026531836]
  lrwxrwxrwx 1 reidpr reidpr 0 Sep 28 11:24 user -> user:[4026531837]
  lrwxrwxrwx 1 reidpr reidpr 0 Sep 28 11:24 uts -> uts:[4026531838]
  $ ch-run /var/tmp/hello -- ls -l /proc/self/ns
  total 0
  lrwxrwxrwx 1 reidpr reidpr 0 Sep 28 17:34 ipc -> ipc:[4026531839]
  lrwxrwxrwx 1 reidpr reidpr 0 Sep 28 17:34 mnt -> mnt:[4026532257]
  lrwxrwxrwx 1 reidpr reidpr 0 Sep 28 17:34 net -> net:[4026531969]
  lrwxrwxrwx 1 reidpr reidpr 0 Sep 28 17:34 pid -> pid:[4026531836]
  lrwxrwxrwx 1 reidpr reidpr 0 Sep 28 17:34 user -> user:[4026532256]
  lrwxrwxrwx 1 reidpr reidpr 0 Sep 28 17:34 uts -> uts:[4026531838]

Notice that the container has different mount (:code:`mnt`) and user
(:code:`user`) namespaces, but the rest of the namespaces are shared with the
host. This highlights Charliecloud's focus on functionality (make your UDSS
run), rather than isolation (protect the host from your UDSS).

Each invocation of :code:`ch-run` creates a new container, so if you have
multiple simultaneous invocations, they will not share containers. However,
container overhead is minimal, and containers communicate without hassle, so
this is generally of peripheral interest.

.. note::

   The :code:`--` in the :code:`ch-run` command line is a standard argument
   that separates options from non-option arguments. Without it,
   :code:`ch-run` would try (and fail) to interpret :code:`ls`’s :code:`-l`
   argument.

These IDs are available both in the symlink target as well as its inode
number::

  $ stat -L --format='%i' /proc/self/ns/user
  4026531837
  $ ch-run /var/tmp/hello -- stat -L --format='%i' /proc/self/ns/user
  4026532256

You can also run interactive commands, such as a shell::

  $ ch-run /var/tmp/hello -- /bin/bash
  > stat -L --format='%i' /proc/self/ns/user
  4026532256
  > exit

Be aware that wildcards in the :code:`ch-run` command are interpreted by the
host, not the container, unless protected. One workaround is to use a
sub-shell.
For example:: $ ls /usr/bin/oldfind ls: cannot access '/usr/bin/oldfind': No such file or directory $ ch-run /var/tmp/hello -- ls /usr/bin/oldfind /usr/bin/oldfind $ ls /usr/bin/oldf* ls: cannot access '/usr/bin/oldf*': No such file or directory $ ch-run /var/tmp/hello -- ls /usr/bin/oldf* ls: cannot access /usr/bin/oldf*: No such file or directory $ ch-run /var/tmp/hello -- sh -c 'ls /usr/bin/oldf*' /usr/bin/oldfind You have now successfully run commands within a single-node Charliecloud container. Next, we explore how Charliecloud accesses host resources. Interacting with the host ========================= Charliecloud is not an isolation layer, so containers have full access to host resources, with a few quirks. This section demonstrates how this works. Filesystems ----------- Charliecloud makes host directories available inside the container using bind mounts, which is somewhat like a hard link in that it causes a file or directory to appear in multiple places in the filesystem tree, but it is a property of the running kernel rather than the filesystem. Several host directories are always bind-mounted into the container. These include system directories such as :code:`/dev`, :code:`/proc`, and :code:`/sys`; :code:`/tmp`; Charliecloud's :code:`ch-ssh` command in :code:`/usr/bin`; and the invoking user's home directory (for dotfiles), unless :code:`--no-home` is specified. Charliecloud uses recursive bind mounts, so for example if the host has a variety of sub-filesystems under :code:`/sys`, as Ubuntu does, these will be available in the container as well. In addition to the default bind mounts, arbitrary user-specified directories can be added using the :code:`--bind` or :code:`-b` switch. By default, :code:`/mnt/0`, :code:`/mnt/1`, etc., are used for the destination in the guest:: $ mkdir /var/tmp/foo0 $ echo hello > /var/tmp/foo0/bar $ mkdir /var/tmp/foo1 $ echo world > /var/tmp/foo1/bar $ ch-run -b /var/tmp/foo0 -b /var/tmp/foo1 /var/tmp/hello -- bash > ls /mnt 0 1 2 3 4 5 6 7 8 9 > cat /mnt/0/bar hello > cat /mnt/1/bar world Explicit destinations are also possible:: $ ch-run -b /var/tmp/foo0:/mnt /var/tmp/hello -- bash > ls /mnt bar > cat /mnt/bar hello Network ------- Charliecloud containers share the host's network namespace, so most network things should be the same. However, SSH is not aware of Charliecloud containers. If you SSH to a node where Charliecloud is installed, you will get a shell on the host, not in a container, even if :code:`ssh` was initiated from a container:: $ stat -L --format='%i' /proc/self/ns/user 4026531837 $ ssh localhost stat -L --format='%i' /proc/self/ns/user 4026531837 $ ch-run /var/tmp/hello -- /bin/bash > stat -L --format='%i' /proc/self/ns/user 4026532256 > ssh localhost stat -L --format='%i' /proc/self/ns/user 4026531837 There are several ways to SSH to a remote node and run commands inside a container. The simplest is to manually invoke :code:`ch-run` in the :code:`ssh` command:: $ ssh localhost ch-run /var/tmp/hello -- stat -L --format='%i' /proc/self/ns/user 4026532256 .. note:: Recall that each :code:`ch-run` invocation creates a new container. That is, the :code:`ssh` command above has not entered an existing user namespace :code:`’2256`; rather, it has re-used the namespace ID :code:`’2256`. Another is to use the :code:`ch-ssh` wrapper program, which adds :code:`ch-run` to the :code:`ssh` command implicitly. 
It takes the :code:`ch-run` arguments from the environment variable :code:`CH_RUN_ARGS`, making it mostly a drop-in replacement for :code:`ssh`. For example::

  $ export CH_RUN_ARGS="/var/tmp/hello --"
  $ ch-ssh localhost stat -L --format='%i' /proc/self/ns/user
  4026532256
  $ ch-ssh -t localhost /bin/bash
  > stat -L --format='%i' /proc/self/ns/user
  4026532256
  > exit

:code:`ch-ssh` is available inside containers as well (in :code:`/usr/bin` via bind-mount)::

  $ export CH_RUN_ARGS="/var/tmp/hello --"
  $ ch-run /var/tmp/hello -- /bin/bash
  > stat -L --format='%i' /proc/self/ns/user
  4026532256
  > ch-ssh localhost stat -L --format='%i' /proc/self/ns/user
  4026532258

This also demonstrates that :code:`ch-run` does not alter your environment variables.

.. warning::

   1. :code:`CH_RUN_ARGS` is interpreted very simply; the sole delimiter is spaces. It is not shell syntax. In particular, quotes and backslashes are not interpreted.

   2. Argument :code:`-t` is required for SSH to allocate a pseudo-TTY and thus convince your shell to be interactive. Otherwise, in the case of Bash, you'll get a shell that accepts commands but doesn't print prompts, among other issues. (`Issue #2 `_.)

A third approach is to edit one's shell initialization scripts to check the command line and :code:`exec(1)` :code:`ch-run` if appropriate. This is brittle but avoids wrapping :code:`ssh` or altering its command line.

User and group IDs
------------------

Unlike Docker and some other container systems, Charliecloud tries to make the container's users and groups look the same as the host's. (This is accomplished by bind-mounting :code:`/etc/passwd` and :code:`/etc/group` into the container.) For example::

  $ id -u
  901
  $ whoami
  reidpr
  $ ch-run /var/tmp/hello -- bash
  > id -u
  901
  > whoami
  reidpr

More specifically, the user namespace, when created without privileges as Charliecloud does, lets you map any container UID to your host UID. :code:`ch-run` implements this with the :code:`--uid` switch. So, for example, you can tell Charliecloud you want to be root, and it will tell you that you're root::

  $ ch-run --uid 0 /var/tmp/hello -- bash
  > id -u
  0
  > whoami
  root

But this doesn't get you anything useful, because the container UID is mapped back to your UID on the host before permission checks are applied::

  > dd if=/dev/mem of=/tmp/pwned
  dd: failed to open '/dev/mem': Permission denied

This mapping also affects how users are displayed. For example, if a file is owned by you, your host UID will be mapped to your container UID, which is then looked up in :code:`/etc/passwd` to determine the display name. In typical usage without :code:`--uid`, this mapping is a no-op, so everything looks normal::

  $ ls -nd ~
  drwxr-xr-x 87 901 901 4096 Sep 28 12:12 /home/reidpr
  $ ls -ld ~
  drwxr-xr-x 87 reidpr reidpr 4096 Sep 28 12:12 /home/reidpr
  $ ch-run /var/tmp/hello -- bash
  > ls -nd ~
  drwxr-xr-x 87 901 901 4096 Sep 28 18:12 /home/reidpr
  > ls -ld ~
  drwxr-xr-x 87 reidpr reidpr 4096 Sep 28 18:12 /home/reidpr

But if :code:`--uid` is provided, things can seem odd. For example::

  $ ch-run --uid 0 /var/tmp/hello -- bash
  > ls -nd /home/reidpr
  drwxr-xr-x 87 0 901 4096 Sep 28 18:12 /home/reidpr
  > ls -ld /home/reidpr
  drwxr-xr-x 87 root reidpr 4096 Sep 28 18:12 /home/reidpr

This UID mapping can contain only one pair: an arbitrary container UID to your effective UID on the host.
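You can inspect this mapping directly: :code:`/proc/self/uid_map` holds one entry per mapped range, listing the container UID, the host UID, and the length of the range (a sketch; your IDs will differ)::

  $ ch-run --uid 0 /var/tmp/hello -- cat /proc/self/uid_map
           0        901          1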
Thus, all other users are unmapped, and they show up as :code:`nobody`::

  $ ls -n /tmp/foo
  -rw-rw---- 1 902 902 0 Sep 28 15:40 /tmp/foo
  $ ls -l /tmp/foo
  -rw-rw---- 1 sig sig 0 Sep 28 15:40 /tmp/foo
  $ ch-run /var/tmp/hello -- bash
  > ls -n /tmp/foo
  -rw-rw---- 1 65534 65534 843 Sep 28 21:40 /tmp/foo
  > ls -l /tmp/foo
  -rw-rw---- 1 nobody nogroup 843 Sep 28 21:40 /tmp/foo

User namespaces have a similar mapping for GIDs, with the same limitation: exactly one arbitrary container GID maps to your effective *primary* GID. This can lead to some strange-looking results, because only one of your GIDs can be mapped in any given container. All the rest become :code:`nogroup`::

  $ id
  uid=901(reidpr) gid=901(reidpr) groups=901(reidpr),903(nerds),904(losers)
  $ ch-run /var/tmp/hello -- id
  uid=901(reidpr) gid=901(reidpr) groups=901(reidpr),65534(nogroup)
  $ ch-run --gid 903 /var/tmp/hello -- id
  uid=901(reidpr) gid=903(nerds) groups=903(nerds),65534(nogroup)

However, this doesn't affect access. The container process retains the same GIDs from the host perspective, and as always, the host IDs are what control access::

  $ ls -l /tmp/primary /tmp/supplemental
  -rw-rw---- 1 sig reidpr 0 Sep 28 15:47 /tmp/primary
  -rw-rw---- 1 sig nerds 0 Sep 28 15:48 /tmp/supplemental
  $ ch-run /var/tmp/hello -- bash
  > cat /tmp/primary > /dev/null
  > cat /tmp/supplemental > /dev/null

One area where functionality *is* reduced is that :code:`chgrp(1)` becomes useless. Using an unmapped group or :code:`nogroup` fails, and using a mapped group is a no-op because it's mapped back to the host GID::

  $ ls -l /tmp/bar
  -rw-rw---- 1 reidpr reidpr 0 Sep 28 16:12 /tmp/bar
  $ ch-run /var/tmp/hello -- chgrp nerds /tmp/bar
  chgrp: changing group of '/tmp/bar': Invalid argument
  $ ch-run /var/tmp/hello -- chgrp nogroup /tmp/bar
  chgrp: changing group of '/tmp/bar': Invalid argument
  $ ch-run --gid 903 /var/tmp/hello -- chgrp nerds /tmp/bar
  $ ls -l /tmp/bar
  -rw-rw---- 1 reidpr reidpr 0 Sep 28 16:12 /tmp/bar

Workarounds include :code:`chgrp(1)` on the host or fastidious use of setgid directories::

  $ mkdir /tmp/baz
  $ chgrp nerds /tmp/baz
  $ chmod 2770 /tmp/baz
  $ ls -ld /tmp/baz
  drwxrws--- 2 reidpr nerds 40 Sep 28 16:19 /tmp/baz
  $ ch-run /var/tmp/hello -- touch /tmp/baz/foo
  $ ls -l /tmp/baz/foo
  -rw-rw---- 1 reidpr nerds 0 Sep 28 16:21 /tmp/baz/foo

This concludes our discussion of how a Charliecloud container interacts with its host, along with the principal Charliecloud quirks. We next move on to installing software.

Installing your own software
============================

This section covers four situations for making software available inside a Charliecloud container:

1. Third-party software installed into the image using a package manager.
2. Third-party software compiled from source into the image.
3. Your software installed into the image.
4. Your software stored on the host but compiled in the container.

Many of Docker's `Best practices for writing Dockerfiles `_ apply to Charliecloud images as well, so you should be familiar with that document.

.. note::

   Maybe you don't have to install the software at all. Is there already a trustworthy image on Docker Hub you can use as a base?

Third-party software via package manager
----------------------------------------

This approach is the simplest and fastest way to install stuff in your image. The :code:`examples/hello` Dockerfile also seen above does this to install the package :code:`openssh-client`:

..
literalinclude:: ../examples/serial/hello/Dockerfile :language: docker :lines: 2-6 You can use distribution package managers such as :code:`apt-get`, as demonstrated above, or others, such as :code:`pip` for Python packages. Be aware that the software will be downloaded anew each time you build the image, unless you add an HTTP cache, which is out of scope of this tutorial. Third-party software compiled from source ----------------------------------------- Under this method, one uses :code:`RUN` commands to fetch the desired software using :code:`curl` or :code:`wget`, compile it, and install. Our example does this with two chained Dockerfiles. First, we build a basic Debian image (:code:`test/Dockerfile.debian9`): .. literalinclude:: ../test/Dockerfile.debian9 :language: docker :lines: 2- Then, we add OpenMPI with :code:`test/Dockerfile.openmpi`. This is a complex Dockerfile that compiles several dependencies in addition to OpenMPI. For the purposes of this tutorial, you can skip most of it, but we felt it would be useful to show a real example. .. literalinclude:: ../test/Dockerfile.openmpi :language: docker :lines: 2- So what is going on here? 1. Use the latest Debian, Stretch, as the base image. 2. Install a basic build system using the OS package manager. 3. For a few dependencies and then OpenMPI itself: 1. Download and untar. Note the use of variables to make adjusting the URL and versions easier, as well as the explanation of why we're not using :code:`apt-get`, given that several of these packages are included in Debian. 2. Build and install OpenMPI. Note the :code:`getconf` trick to guess at an appropriate parallel build. 4. Clean up, in order to reduce the size of layers as well as the resulting Charliecloud tarball (:code:`rm -Rf`). .. Finally, because it's a container image, you can be less tidy than you might be on a normal system. For example, the above downloads and builds in :code:`/` rather than :code:`/usr/local/src`, and it installs MPI into :code:`/usr` rather than :code:`/usr/local`. Your software stored in the image --------------------------------- This method covers software provided by you that is included in the image. This is recommended when your software is relatively stable or is not easily available to users of your image, for example a library rather than simulation code under active development. The general approach is the same as installing third-party software from source, but you use the :code:`COPY` instruction to transfer files from the host filesystem (rather than the network via HTTP) to the image. For example, :code:`examples/mpi/mpihello/Dockerfile.openmpi` uses this approach: .. literalinclude:: ../examples/mpi/mpihello/Dockerfile.openmpi :language: docker These Dockerfile instructions: 1. Copy the host directory :code:`examples/mpi/mpihello` to the image at path :code:`/hello`. The host path is relative to the *context directory*, which is tarred up and sent to the Docker daemon. Docker builds have no access to the host filesystem outside the context directory. (Unlike the HPC custom, Docker comes from a world without network filesystems. This tar-based approach lets the Docker daemon run on a different node from the client without needing any shared filesystems.) The convention for Charliecloud tests and examples is that the context is the directory containing the Dockerfile in question, and a common pattern, used here, is to copy in the entire context. 2. :code:`cd` to :code:`/hello`. 3. Compile our example. 
We include :code:`make clean` to remove any leftover build files, since they would be inappropriate inside the container. Once the image is built, we can see the results. (Install the image into :code:`/var/tmp` as outlined above, if you haven't already.)

::

  $ ch-run /var/tmp/mpihello -- ls -lh /hello
  total 32K
  -rw-rw---- 1 reidpr reidpr  908 Oct  4 15:52 Dockerfile
  -rw-rw---- 1 reidpr reidpr  157 Aug  5 22:37 Makefile
  -rw-rw---- 1 reidpr reidpr 1.2K Aug  5 22:37 README
  -rwxr-x--- 1 reidpr reidpr 9.5K Oct  4 15:58 hello
  -rw-rw---- 1 reidpr reidpr 1.4K Aug  5 22:37 hello.c
  -rwxrwx--- 1 reidpr reidpr  441 Aug  5 22:37 test.sh

We will revisit this image later.

Your software stored on the host
--------------------------------

This method leaves your software on the host but compiles it in the container. This is recommended when your software is volatile or each image user needs a different version, for example a simulation code under active development.

The general approach is to bind-mount the appropriate directory and then run the build inside the container. We can re-use the :code:`mpihello` image to demonstrate this.

::

  $ cd examples/mpi/mpihello
  $ ls -l
  total 20
  -rw-rw---- 1 reidpr reidpr  908 Oct  4 09:52 Dockerfile
  -rw-rw---- 1 reidpr reidpr 1431 Aug  5 16:37 hello.c
  -rw-rw---- 1 reidpr reidpr  157 Aug  5 16:37 Makefile
  -rw-rw---- 1 reidpr reidpr 1172 Aug  5 16:37 README
  $ ch-run -b . --cd /mnt/0 /var/tmp/mpihello -- make
  mpicc -std=gnu11 -Wall hello.c -o hello
  $ ls -l
  total 32
  -rw-rw---- 1 reidpr reidpr  908 Oct  4 09:52 Dockerfile
  -rwxrwx--- 1 reidpr reidpr 9632 Oct  4 10:43 hello
  -rw-rw---- 1 reidpr reidpr 1431 Aug  5 16:37 hello.c
  -rw-rw---- 1 reidpr reidpr  157 Aug  5 16:37 Makefile
  -rw-rw---- 1 reidpr reidpr 1172 Aug  5 16:37 README

A common use case is to leave a container shell open in one terminal for building, and then run using a separate container invoked from a different terminal.

Your first single-node, multi-process jobs
==========================================

This is an important use case even for large-scale codes, when testing and development happen at small scale but need to use an environment comparable to large-scale runs.

This tutorial covers three approaches:

1. Processes are coordinated by the host, i.e., one process per container.

2. Processes are coordinated by the container, i.e., one container with multiple processes, using configuration files from the container.

3. Processes are coordinated by the container using configuration files from the host.

In order to test approach 1, you must install OpenMPI 2.1.5 on the host. In our experience, we have had success compiling from source with the same options as in the Dockerfile, but there is probably more nuance to the match than we've discovered.
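A quick sanity check is to compare what the host and the image each report (a sketch; your version numbers and install paths will differ)::

  $ mpirun --version | head -1
  mpirun (Open MPI) 2.1.5
  $ ch-run /var/tmp/mpihello -- mpirun --version | head -1
  mpirun (Open MPI) 2.1.5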
Processes coordinated by host
-----------------------------

This approach does the forking and process coordination on the host. Each process is spawned in its own container, and because Charliecloud introduces minimal isolation, they can communicate as if they were running directly on the host. For example, using Slurm :code:`srun` and the :code:`mpihello` example above::

  $ stat -L --format='%i' /proc/self/ns/user
  4026531837
  $ ch-run /var/tmp/mpihello -- mpirun --version
  mpirun (Open MPI) 2.1.5
  $ srun -n4 ch-run /var/tmp/mpihello -- /hello/hello
  0: init ok cn001, 4 ranks, userns 4026554650
  1: init ok cn001, 4 ranks, userns 4026554652
  3: init ok cn002, 4 ranks, userns 4026554652
  2: init ok cn002, 4 ranks, userns 4026554650
  0: send/receive ok
  0: finalize ok

We recommend this approach because it lets you take advantage of difficult things already done by your site admins, such as configuring Slurm.

If you don't have Slurm, you can use :code:`mpirun -np 4` instead of :code:`srun -n4`. However, this requires that a compatible version of OpenMPI be installed on the host. Which versions are compatible seems to be a moving target, but having the same versions inside and outside the container *usually* works.

Processes coordinated by container
----------------------------------

This approach starts a single container process, which then forks and coordinates the parallel work. The advantage is that this approach is completely independent of the host for dependency configuration and installation; the disadvantage is that it cannot take advantage of host things such as Slurm configuration.

For example::

  $ ch-run /var/tmp/mpihello -- mpirun -np 4 /hello/hello
  0: init ok cn001, 4 ranks, userns 4026532256
  1: init ok cn001, 4 ranks, userns 4026532256
  2: init ok cn001, 4 ranks, userns 4026532256
  3: init ok cn001, 4 ranks, userns 4026532256
  0: send/receive ok
  0: finalize ok

Note that in this case, we use :code:`mpirun` rather than :code:`srun` because the Slurm client programs are not installed inside the container, and we don't want the host's Slurm coordinating processes anyway.

Your first multi-node jobs
==========================

This section assumes that you are using a Slurm cluster and some type of node-local storage. A :code:`tmpfs` will suffice, and we use :code:`/var/tmp` for this tutorial. (Using :code:`/tmp` often works but can cause confusion because it's shared by the container and host, yielding cycles in the directory tree.)

We cover four cases:

1. The MPI hello world example above, run interactively, with the host coordinating.
2. Same, non-interactive.
3. An Apache Spark example, run interactively.
4. Same, non-interactive.

We think that container-coordinated MPI jobs will also work, but we haven't worked out how to do this yet. (See `issue #5 `_.)

.. note::

   The image directory is mounted read-only by default so it can be shared by multiple Charliecloud containers in the same or different jobs. It can be mounted read-write with :code:`ch-run -w`.

.. warning::

   The image can reside on most filesystems, but be aware of metadata impact. A non-trivial Charliecloud job may overwhelm a network filesystem, earning you the ire of your sysadmins and colleagues. NFS sometimes does not work for read-only images; see `issue #9 `_.

Interactive MPI hello world
---------------------------

First, obtain an interactive allocation of nodes. This tutorial assumes an allocation of 4 nodes (but any number should work) and an interactive shell on one of those nodes. For example::

  $ salloc -N4

The next step is to distribute the image tarball to the compute nodes.
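If the tarball already lives on a filesystem visible to the compute nodes (e.g., your home directory), no copy is needed. Otherwise, use whatever your site provides; for example, Slurm's :code:`sbcast` copies a file to node-local storage on every node of the allocation (a sketch; paths will vary)::

  $ sbcast mpihello.tar.gz /var/tmp/mpihello.tar.gz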
Then, run one instance of :code:`ch-tar2dir` on each node to unpack the image::

  $ srun ch-tar2dir mpihello.tar.gz /var/tmp
  creating new image /var/tmp/mpihello
  creating new image /var/tmp/mpihello
  creating new image /var/tmp/mpihello
  creating new image /var/tmp/mpihello
  /var/tmp/mpihello unpacked ok
  /var/tmp/mpihello unpacked ok
  /var/tmp/mpihello unpacked ok
  /var/tmp/mpihello unpacked ok

We can now activate the image and run our program::

  $ srun --cpus-per-task=1 ch-run /var/tmp/mpihello -- /hello/hello
  2: init ok cn001, 64 ranks, userns 4026532567
  4: init ok cn001, 64 ranks, userns 4026532571
  8: init ok cn001, 64 ranks, userns 4026532579
  [...]
  45: init ok cn003, 64 ranks, userns 4026532589
  17: init ok cn002, 64 ranks, userns 4026532565
  55: init ok cn004, 64 ranks, userns 4026532577
  0: send/receive ok
  0: finalize ok

Success!

Non-interactive MPI hello world
-------------------------------

Production jobs are normally run non-interactively, via submission of a job script that runs when resources are available, placing output into a file. The MPI hello world example includes such a script, :code:`examples/mpi/mpihello/slurm.sh`:

.. literalinclude:: ../examples/mpi/mpihello/slurm.sh
   :language: bash

Note that this script both unpacks the image and runs it. Submit it with something like::

  $ sbatch -N4 slurm.sh ~/mpihello.tar.gz /var/tmp
  207745

When the job is complete, look at the output::

  $ cat slurm-207745.out
  tarball: /home/reidpr/mpihello.tar.gz
  image: /var/tmp/mpihello
  creating new image /var/tmp/mpihello
  creating new image /var/tmp/mpihello
  [...]
  /var/tmp/mpihello unpacked ok
  /var/tmp/mpihello unpacked ok
  container: mpirun (Open MPI) 2.1.5
  0: init ok cn001.localdomain, 144 ranks, userns 4026554766
  37: init ok cn002.localdomain, 144 ranks, userns 4026554800
  [...]
  96: init ok cn003.localdomain, 144 ranks, userns 4026554803
  86: init ok cn003.localdomain, 144 ranks, userns 4026554793
  0: send/receive ok
  0: finalize ok

Success!

Interactive Apache Spark
------------------------

This example is in :code:`examples/other/spark`. Build a tarball and upload it to your cluster.

Once you have an interactive job, unpack the tarball.

::

  $ srun ch-tar2dir spark.tar.gz /var/tmp
  creating new image /var/tmp/spark
  creating new image /var/tmp/spark
  [...]
  /var/tmp/spark unpacked ok
  /var/tmp/spark unpacked ok

We first need to create a basic configuration for Spark, as the defaults in the Dockerfile are insufficient. (For real jobs, you'll also want to configure performance parameters such as memory use; see `the documentation `_.) First::

  $ mkdir -p ~/sparkconf
  $ chmod 700 ~/sparkconf

We'll want to use the cluster's high-speed network. For this example, we'll find the Spark master's IP manually::

  $ ip -o -f inet addr show | cut -d/ -f1
  1: lo    inet 127.0.0.1
  2: eth0  inet 192.168.8.3
  8: eth1  inet 10.8.8.3

Your site support can tell you which to use. In this case, we'll use 10.8.8.3.

Create some configuration files. Replace :code:`[MYSECRET]` with a string only you know.
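If you need inspiration for the secret, something like this works (a sketch; any hard-to-guess string will do)::

  $ openssl rand -hex 16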
Edit to match your system; in particular, use local disks instead of :code:`/tmp` if you have them:: $ cat > ~/sparkconf/spark-env.sh SPARK_LOCAL_DIRS=/tmp/spark SPARK_LOG_DIR=/tmp/spark/log SPARK_WORKER_DIR=/tmp/spark SPARK_LOCAL_IP=127.0.0.1 SPARK_MASTER_HOST=10.8.8.3 $ cat > ~/sparkconf/spark-defaults.conf spark.authenticate true spark.authenticate.secret [MYSECRET] We can now start the Spark master:: $ ch-run -b ~/sparkconf /var/tmp/spark -- /spark/sbin/start-master.sh Look at the log in :code:`/tmp/spark/log` to see that the master started correctly:: $ tail -7 /tmp/spark/log/*master*.out 17/02/24 22:37:21 INFO Master: Starting Spark master at spark://10.8.8.3:7077 17/02/24 22:37:21 INFO Master: Running Spark version 2.0.2 17/02/24 22:37:22 INFO Utils: Successfully started service 'MasterUI' on port 8080. 17/02/24 22:37:22 INFO MasterWebUI: Bound MasterWebUI to 127.0.0.1, and started at http://127.0.0.1:8080 17/02/24 22:37:22 INFO Utils: Successfully started service on port 6066. 17/02/24 22:37:22 INFO StandaloneRestServer: Started REST server for submitting applications on port 6066 17/02/24 22:37:22 INFO Master: I have been elected leader! New state: ALIVE If you can run a web browser on the node, browse to :code:`http://localhost:8080` for the Spark master web interface. Because this capability varies, the tutorial does not depend on it, but it can be informative. Refresh after each key step below. The Spark workers need to know how to reach the master. This is via a URL; you can get it from the log excerpt above, or consult the web interface. For example:: $ MASTER_URL=spark://10.8.8.3:7077 Next, start one worker on each compute node. In this tutorial, we start the workers using :code:`srun` in a way that prevents any subsequent :code:`srun` invocations from running until the Spark workers exit. For our purposes here, that's OK, but it's a big limitation for some jobs. (See `issue #230 `_.) Alternatives include :code:`pdsh`, which is the approach we use for the Spark tests (:code:`examples/other/spark/test.bats`), or a simple for loop of :code:`ssh` calls. Both of these are also quite clunky and do not scale well. :: $ srun sh -c " ch-run -b ~/sparkconf /var/tmp/spark -- \ spark/sbin/start-slave.sh $MASTER_URL \ && sleep infinity" & One of the advantages of Spark is that it's resilient: if a worker becomes unavailable, the computation simply proceeds without it. However, this can mask issues as well. For example, this example will run perfectly fine with just one worker, or all four workers on the same node, which aren't what we want. Check the master log to see that the right number of workers registered:: $ fgrep worker /tmp/spark/log/*master*.out 17/02/24 22:52:24 INFO Master: Registering worker 127.0.0.1:39890 with 16 cores, 187.8 GB RAM 17/02/24 22:52:24 INFO Master: Registering worker 127.0.0.1:44735 with 16 cores, 187.8 GB RAM 17/02/24 22:52:24 INFO Master: Registering worker 127.0.0.1:22445 with 16 cores, 187.8 GB RAM 17/02/24 22:52:24 INFO Master: Registering worker 127.0.0.1:29473 with 16 cores, 187.8 GB RAM Despite the workers calling themselves 127.0.0.1, they really are running across the allocation. (The confusion happens because of our :code:`$SPARK_LOCAL_IP` setting above.) This can be verified by examining logs on each compute node. For example (note single quotes):: $ ssh 10.8.8.4 -- tail -3 '/tmp/spark/log/*worker*.out' 17/02/24 22:52:24 INFO Worker: Connecting to master 10.8.8.3:7077... 
17/02/24 22:52:24 INFO TransportClientFactory: Successfully created connection to /10.8.8.3:7077 after 263 ms (216 ms spent in bootstraps) 17/02/24 22:52:24 INFO Worker: Successfully registered with master spark://10.8.8.3:7077 We can now start an interactive shell to do some Spark computing:: $ ch-run -b ~/sparkconf /var/tmp/spark -- /spark/bin/pyspark --master $MASTER_URL Let's use this shell to estimate 𝜋 (this is adapted from one of the Spark `examples `_): .. code-block:: pycon >>> import operator >>> import random >>> >>> def sample(p): ... (x, y) = (random.random(), random.random()) ... return 1 if x*x + y*y < 1 else 0 ... >>> SAMPLE_CT = int(2e8) >>> ct = sc.parallelize(xrange(0, SAMPLE_CT)) \ ... .map(sample) \ ... .reduce(operator.add) >>> 4.0*ct/SAMPLE_CT 3.14109824 (Type Control-D to exit.) We can also submit jobs to the Spark cluster. This one runs the same example as included with the Spark source code. (The voluminous logging output is omitted.) :: $ ch-run -b ~/sparkconf /var/tmp/spark -- \ /spark/bin/spark-submit --master $MASTER_URL \ /spark/examples/src/main/python/pi.py 1024 [...] Pi is roughly 3.141211 [...] Exit your allocation. Slurm will clean up the Spark daemons. Success! Next, we'll run a similar job non-interactively. Non-interactive Apache Spark ---------------------------- We'll re-use much of the above to run the same computation non-interactively. For brevity, the Slurm script at :code:`examples/other/spark/slurm.sh` is not reproduced here. Submit it as follows. It requires three arguments: the tarball, the image directory to unpack into, and the high-speed network interface. Again, consult your site administrators for the latter. :: $ sbatch -N4 slurm.sh spark.tar.gz /var/tmp ib0 Submitted batch job 86754 Output:: $ fgrep 'Pi is' slurm-86754.out Pi is roughly 3.141393 Success! (to four significant digits) charliecloud-0.9.10/doc-src/vm.rst000066400000000000000000000237641346662313000167650ustar00rootroot00000000000000.. _virtualbox_build: Pre-installed virtual machine ***************************** This page explains how to create and use a single-node virtual machine with Charliecloud and Docker pre-installed. This lets you: * use Charliecloud on Macs and Windows * quickly try out Charliecloud without installing anything You can use this CentOS 7 VM either with `Vagrant `_ or with `VirtualBox `_ alone. Various settings are specified, but in most cases we have not done any particular tuning, so use your judgement, and feedback is welcome. .. contents:: :depth: 2 :local: .. warning:: These instructions provide for an SSH server in the virtual machine guest that is accessible to anyone logged into the host. It is your responsibility to ensure this is safe and compliant with your organization's policies, or modify the procedure accordingly. Import and use an :code:`ova` appliance file with plain VirtualBox =================================================================== This procedure imports a :code:`.ova` file (created using the instructions below) into VirtualBox and walks you through logging in and running a brief Hello World in Charliecloud. You will act as user :code:`charlie`, who has passwordless :code:`sudo`. The Charliecloud developers do not distribute a :code:`.ova` file. You will need to get it from your site, a third party, or build it yourself with Vagrant using the instructions below. Prerequisite: Installed and working VirtualBox. (You do not need Vagrant.) Configure VirtualBox -------------------- 1. 
Set *Preferences* → *Proxy* if needed at your site. Import the appliance -------------------- 1. Download the :code:`charliecloud_centos7.ova` file (or whatever your site has called it). 2. *File* → *Import appliance*. Choose :code:`charliecloud_centos7.ova` and click *Continue*. 3. Review the settings. * CPU should match the number of cores in your system. * RAM should be reasonable. Anywhere from 2GiB to half your system RAM will probably work. * Check *Reinitialize the MAC address of all network cards*. 4. Click *Import*. 5. Verify that the appliance's port forwarding is acceptable to you and your site: *Details* → *Network* → *Adapter 1* → *Advanced* → *Port Forwarding*. Log in and try Charliecloud --------------------------- 1. Start the VM by clicking the green arrow. 2. Wait for it to boot. 3. Click on the console window, where user :code:`charlie` is logged in. (If the VM "captures" your mouse pointer, type the key combination listed in the lower-right corner of the window to release it.) 4. Change your password. (You must use :code:`sudo` because you have passwordless :code:`sudo` but don't know your password.) :: $ sudo passwd charlie 5. SSH (from terminal on the host) into the VM using the password you just set. (Accessing the VM using SSH rather than the console is generally more pleasant, because you have a nice terminal with native copy-and-paste, etc.) :: $ ssh -p 2222 charlie@localhost 6. Build and run a container: :: $ ch-build -t hello -f /usr/local/src/charliecloud/examples/serial/hello \ /usr/local/src/charliecloud $ ch-docker2tar hello /var/tmp 57M /var/tmp/hello.tar.gz $ ch-tar2dir /var/tmp/hello.tar.gz /var/tmp creating new image /var/tmp/hello /var/tmp/hello unpacked ok $ cat /etc/redhat-release CentOS Linux release 7.3.1611 (Core) $ ch-run /var/tmp/hello -- /bin/bash > cat /etc/debian_version 8.9 > exit Congratulations! You've successfully used Charliecloud. Now all of your wildest dreams will come true. Shut down the VM at your leisure. Possible next steps: * Follow the :doc:`tutorial `. * Run the :ref:`test suite ` in :code:`/usr/share/doc/charliecloud/test`. (Note that the environment variables are already configured for you in this appliance.) * Configure :code:`/var/tmp` to be a :code:`tmpfs`, if you have enough RAM, for better performance. Build and use the VM with Vagrant ================================= This procedure builds and provisions an idiomatic Vagrant virtual machine. You should also read the Vagrantfile in :code:`packaging/vagrant` before proceeding. This contains the specific details on build and provisioning, which are not repeated here. Prerequisite: You already know how to use Vagrant. Caveats and gotchas ------------------- In no particular order: * While Vagrant supports a wide variety of host and virtual machine providers, this procedure is tested only on VirtualBox on a Mac. Current Vagrant versions should work, but we don't track specifically which ones. (Anyone who wants to help us broaden this support, please get in touch.) * Switching between proxy and no-proxy environments is not currently supported. If you have a mixed environment (e.g. laptops that travel between a corporate network and the wild), you may want to provide two separate images. * Provisioning is not idempotent. Running the provisioners again will have undefined results. * The documentation is not built. Use the web documentation instead of man pages. 
Install Vagrant and plugins --------------------------- You can install VirtualBox and Vagrant either manually using website downloads or with Homebrew:: $ brew cask install virtualbox virtualbox-extension-pack vagrant Sanity check:: $ vagrant version Installed Version: 2.1.2 Latest Version: 2.1.2 You're running an up-to-date version of Vagrant! Then, install the needed plugins:: $ vagrant plugin install vagrant-disksize \ vagrant-proxyconf \ vagrant-reload \ vagrant-vbguest Build and provision ------------------- To build the VM and install Docker, Charliecloud, etc.:: $ cd packaging/vagrant $ CH_VERSION=v0.9.1 vagrant up This takes less than 5 minutes. If you want the head of the master branch, omit :code:`CH_VERSION`. Then, optionally run the Charliecloud tests:: $ vagrant provision --provision-with=test This runs the full Charliecloud test suite, which takes quite a while (maybe 1–2 hours). Go have lunch, and then second lunch, and then third lunch. Note that the test output does not have a TTY, so you will not have the tidy checkmarks. The last test printed is the last one that completed, not the one currently running. If the tests don't pass, that's a bug. Please report it! Now you can :code:`vagrant ssh` and do all the usual Vagrant stuff. Build :code:`.ova` appliance file with Vagrant and VirtualBox ============================================================= This section uses Vagrant and the VirtualBox GUI to create a :code:`.ova` file that you can provide to end users as described above. You should read the above section on using the VM with Vagrant as well. Remove old virtual machine -------------------------- .. warning:: If you are using a Vagrant virtual machine for your own use, make sure you're not removing it here, unless you are sure it's disposable. Each time we create a new image to distribute, we start from scratch rather than updating the old image. Therefore, we must remove the old image. 1. Destroy the old virtual machine:: $ cd packaging/vagrant $ vagrant destroy 2. Remove deleted disk images from the VirtualBox media manager: *File* → *Virtual Media Manager*. Right click on and remove any :code:`.vmdk` with a red exclamation icon next to them. Build and provision ------------------- The most important differences with this build procedure have to do with login. A second user :code:`charlie` is created and endowed with passwordless :code:`sudo`; SSH will allow login with password; and the console will automatically log in :code:`charlie`. You need to reboot for the latter to take effect (which is done in the next step). :: $ CH_VERSION=v0.9.1 vagrant up $ vagrant provision --provision-with=ova Snapshot for distribution ------------------------- We want to distribute a small appliance file, but one that passes the tests. Running the tests greatly bloats the appliance. Therefore, we'll take a snapshot of the powered-off VM named :code:`exportme`, run the tests, and then roll back to the snapshot before exporting. :: $ vagrant halt $ VBoxManage modifyvm charliebox --defaultfrontend default $ vagrant snapshot save exportme .. note:: If you wish to use the appliance yourself, and you prefer to use plain VirtualBox instead of Vagrant, now is a good time to clone the VM and use the clone. This will protect your VM from Vagrant's attentions later. Test Charliecloud ----------------- Restart and test:: $ vagrant up --provision-with=test You might also show the console in the VirtualBox GUI and make sure :code:`charlie` is logged in. 
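You can also verify that the :code:`exportme` snapshot survived the test run (a sketch; :code:`charliebox` is the VM name used by the Vagrantfile, and the output format varies by VirtualBox version)::

  $ VBoxManage snapshot charliebox list
  Name: exportme (UUID: [...])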
Export appliance :code:`.ova` file
----------------------------------

This creates a :code:`.ova` file, which is a standard way to package a virtual machine image with metadata. Someone else can then import it into their own VirtualBox, as described above. (In principle, other virtual machine emulators should work as well, but we haven't tried.)

These steps are done in the VirtualBox GUI because I haven't figured out a way to produce a :code:`.ova` in Vagrant, only Vagrant "boxes".

#. Shut down the VM (you can just power it off).

#. Restore the snapshot *exportme*. (Don't use :code:`vagrant snapshot restore` because it boots the snapshot and runs the provisioners again.)

#. *File* → *Export appliance*.

#. Select your VM, *charliebox*. Click *Continue*.

#. Configure the export:

   * *Format*: OVF 2.0. (Note: Changing this menu resets the filename.)
   * *File*: Directory and filename you want. (The install procedure above uses :code:`charliecloud_centos7.ova`.)
   * *Write manifest file*: unchecked

#. Click *Continue*.

#. Check the descriptive information and click *Export*. (For example, maybe you want to put the Charliecloud version in the *Version* field.)

#. Distribute the resulting file, which should be about 800–900MiB.

charliecloud-0.9.10/examples/000077500000000000000000000000001346662313000160615ustar00rootroot00000000000000
charliecloud-0.9.10/examples/mpi/000077500000000000000000000000001346662313000166465ustar00rootroot00000000000000
charliecloud-0.9.10/examples/mpi/lammps/000077500000000000000000000000001346662313000201375ustar00rootroot00000000000000
charliecloud-0.9.10/examples/mpi/lammps/Dockerfile000066400000000000000000000016711346662313000221360ustar00rootroot00000000000000
# ch-test-scope: full
# ch-test-arch-exclude: aarch64  # issue #392
FROM openmpi
WORKDIR /

# Packages for building.
RUN apt-get install -qy --no-install-recommends \
    git \
    patch \
    python-dev

# Build LAMMPS.
ENV LAMMPS_VERSION 17Nov16
ENV LAMMPS_DIR lammps-$LAMMPS_VERSION
ENV LAMMPS_TAR $LAMMPS_DIR.tar.gz
RUN wget -nv http://lammps.sandia.gov/tars/$LAMMPS_TAR
RUN tar xf $LAMMPS_TAR
RUN cd $LAMMPS_DIR/src \
    && python Make.py -j $(getconf _NPROCESSORS_ONLN) -p none \
       std no-lib reax meam poems python reaxc orig -a lib-all mpi
RUN mv $LAMMPS_DIR/src/lmp_mpi /usr/bin \
    && ln -s ./$LAMMPS_DIR /lammps
RUN cd $LAMMPS_DIR/python \
    && python2.7 install.py

# Make another test input file.
# Patch instead of including in.large.melt in full because LAMMPS is GPL.
COPY melt.patch /lammps/examples/melt
RUN patch -p1 -o /lammps/examples/melt/in.large.melt \
    < /lammps/examples/melt/melt.patch

charliecloud-0.9.10/examples/mpi/lammps/melt.patch000066400000000000000000000004741346662313000221260ustar00rootroot00000000000000
--- a/lammps/examples/melt/in.melt	2014-01-07 14:43:31.000000000 -0700
+++ b/lammps/examples/melt/in.melt	2018-03-16 14:37:02.000000000 -0600
@@ -6,3 +6,3 @@
 lattice fcc 0.8442
-region box block 0 10 0 10 0 10
+region box block 0 120 0 120 0 120
 create_box 1 box
@@ -32,2 +32,2 @@
 thermo 50
-run 250
+run 3

charliecloud-0.9.10/examples/mpi/lammps/test.bats000066400000000000000000000063371346662313000220020ustar00rootroot00000000000000
load ../../../test/common

# LAMMPS does have a test suite, but we do not use it, because it seems too
# fiddly to get it running properly.
#
# 1. Running the command listed in LAMMPS' Jenkins tests [2] fails with a
#    strange error:
#
#      $ python run_tests.py tests/test_commands.py tests/test_examples.py
#      Loading tests from tests/test_commands.py...
# Traceback (most recent call last): # File "run_tests.py", line 81, in # tests += load_tests(f) # File "run_tests.py", line 22, in load_tests # for testname in list(tc): # TypeError: 'Test' object is not iterable # # Looking in run_tests.py, this sure looks like a bug (it's expecting a # list of Tests, I think, but getting a single Test). But it works in # Jenkins. Who knows. # # 2. The files test/test_*.py say that the tests can be run with # "nosetests", which they can, after setting several environment # variables. But some of the tests fail for me. I didn't diagnose. # # Instead, we simply run some of the example problems in a loop and see if # they exit with return code zero. We don't check output. # # Note that a lot of the other examples crash. I haven't diagnosed or figured # out if we care. # # We are open to patches if anyone knows how to fix this situation reliably. # # [1]: https://github.com/lammps/lammps-testing # [2]: https://ci.lammps.org/job/lammps/job/master/job/testing/lastSuccessfulBuild/console setup () { scope full arch_exclude aarch64 # issue #392 prerequisites_ok "$ch_tag" multiprocess_ok } lammps_try () { # These examples cd because some (not all) of the LAMMPS tests expect to # find things based on $CWD. infiles=$(ch-run --cd "/lammps/examples/${1}" "$ch_img" -- \ bash -c "ls in.*") for i in $infiles; do printf '\n\n%s\n' "$i" # shellcheck disable=SC2086 $ch_mpirun_core ch-run --join --cd /lammps/examples/$1 "$ch_img" -- \ lmp_mpi -log none -in "$i" done } @test "${ch_tag}/crayify image" { crayify_mpi_or_skip "$ch_img" } @test "${ch_tag}/using all cores" { # shellcheck disable=SC2086 run $ch_mpirun_core ch-run --join "$ch_img" -- \ lmp_mpi -log none -in /lammps/examples/melt/in.melt echo "$output" [[ $status -eq 0 ]] ranks_found=$( echo "$output" \ | grep -F 'MPI tasks' \ | tail -1 \ | sed -r 's/^.+with ([0-9]+) MPI tasks.+$/\1/') echo "ranks expected: ${ch_cores_total}" echo "ranks found: ${ranks_found}" [[ $ranks_found -eq "$ch_cores_total" ]] } @test "${ch_tag}/crack" { lammps_try crack; } @test "${ch_tag}/dipole" { lammps_try dipole; } @test "${ch_tag}/flow" { lammps_try flow; } @test "${ch_tag}/friction" { lammps_try friction; } @test "${ch_tag}/melt" { lammps_try melt; } # This test busy-hangs after several: # # FOO error: local variable 'foo' referenced before assignment # Inside simple function # # Perhaps related to --join? 
# @test "${ch_tag}/python" { skip 'incompatible with --join' lammps_try python; } @test "${ch_tag}/revert image" { unpack_img_all_nodes "$ch_cray" } charliecloud-0.9.10/examples/mpi/mpibench/000077500000000000000000000000001346662313000204335ustar00rootroot00000000000000charliecloud-0.9.10/examples/mpi/mpibench/Dockerfile.mpich000066400000000000000000000005241346662313000235250ustar00rootroot00000000000000# ch-test-scope: full FROM mpich RUN apt-get install -y git # Compile the Intel MPI benchmark WORKDIR /usr/local/src ENV IMB_VERSION 2018.1 RUN git clone --branch v$IMB_VERSION --depth 1 \ https://github.com/intel/mpi-benchmarks RUN cd mpi-benchmarks/src \ && make CC=mpicc -j$(getconf _NPROCESSORS_ONLN) -f make_ict charliecloud-0.9.10/examples/mpi/mpibench/Dockerfile.openmpi000066400000000000000000000005261346662313000240760ustar00rootroot00000000000000# ch-test-scope: full FROM openmpi RUN apt-get install -y git # Compile the Intel MPI benchmark WORKDIR /usr/local/src ENV IMB_VERSION 2018.1 RUN git clone --branch v$IMB_VERSION --depth 1 \ https://github.com/intel/mpi-benchmarks RUN cd mpi-benchmarks/src \ && make CC=mpicc -j$(getconf _NPROCESSORS_ONLN) -f make_ict charliecloud-0.9.10/examples/mpi/mpibench/test.bats000066400000000000000000000065501346662313000222730ustar00rootroot00000000000000load ../../../test/common setup () { scope full prerequisites_ok "$ch_tag" # - One iteration because we just care about correctness, not performance. # (If we let the benchmark choose, there is an overwhelming number of # errors when MPI calls start failing, e.g. if CMA isn't working, and # this makes the test take really long.) # # - Large -npmin because we only want to test all cores. # imb_mpi1=/usr/local/src/mpi-benchmarks/src/IMB-MPI1 imb_args="-iter 1 -npmin 1000000000" } check_errors () { [[ ! 
"$1" =~ 'errno =' ]] } check_finalized () { [[ "$1" =~ 'All processes entering MPI_Finalize' ]] } check_process_ct () { ranks_expected="$1" echo "ranks expected: ${ranks_expected}" ranks_found=$( echo "$output" \ | grep -F '#processes =' \ | sed -r 's/^.+#processes = ([0-9]+)\s+$/\1/') echo "ranks found: ${ranks_found}" [[ $ranks_found -eq "$ranks_expected" ]] } # one from "Single Transfer Benchmarks" @test "${ch_tag}/pingpong (guest launch)" { # shellcheck disable=SC2086 run ch-run $ch_unslurm "$ch_img" -- \ mpirun $ch_mpirun_np "$imb_mpi1" $imb_args PingPong echo "$output" [[ $status -eq 0 ]] check_errors "$output" check_process_ct 2 "$output" check_finalized "$output" } # one from "Parallel Transfer Benchmarks" @test "${ch_tag}/sendrecv (guest launch)" { # shellcheck disable=SC2086 run ch-run $ch_unslurm "$ch_img" -- \ mpirun $ch_mpirun_np "$imb_mpi1" $imb_args Sendrecv echo "$output" [[ $status -eq 0 ]] check_errors "$output" check_process_ct "$ch_cores_node" "$output" check_finalized "$output" } # one from "Collective Benchmarks" @test "${ch_tag}/allreduce (guest launch)" { # shellcheck disable=SC2086 run ch-run $ch_unslurm "$ch_img" -- \ mpirun $ch_mpirun_np "$imb_mpi1" $imb_args Allreduce echo "$output" [[ $status -eq 0 ]] check_errors "$output" check_process_ct "$ch_cores_node" "$output" check_finalized "$output" } @test "${ch_tag}/crayify image" { crayify_mpi_or_skip "$ch_img" } @test "${ch_tag}/pingpong (host launch)" { arch_exclude aarch64 # issue 392 multiprocess_ok # shellcheck disable=SC2086 run $ch_mpirun_core ch-run --join "$ch_img" -- \ "$imb_mpi1" $imb_args PingPong echo "$output" [[ $status -eq 0 ]] check_errors "$output" check_process_ct 2 "$output" check_finalized "$output" } @test "${ch_tag}/sendrecv (host launch)" { arch_exclude aarch64 # issue 392 multiprocess_ok # shellcheck disable=SC2086 run $ch_mpirun_core ch-run --join "$ch_img" -- \ "$imb_mpi1" $imb_args Sendrecv echo "$output" [[ $status -eq 0 ]] check_errors "$output" check_process_ct "$ch_cores_total" "$output" check_finalized "$output" } @test "${ch_tag}/allreduce (host launch)" { arch_exclude aarch64 # issue 392 multiprocess_ok # shellcheck disable=SC2086 run $ch_mpirun_core ch-run --join "$ch_img" -- \ "$imb_mpi1" $imb_args Allreduce echo "$output" [[ $status -eq 0 ]] check_errors "$output" check_process_ct "$ch_cores_total" "$output" check_finalized "$output" } @test "${ch_tag}/revert image" { unpack_img_all_nodes "$ch_cray" } charliecloud-0.9.10/examples/mpi/mpibench/test.sh000077500000000000000000000013301346662313000217460ustar00rootroot00000000000000#!/bin/bash set -e cd "$(dirname "$0")" chbase=$(dirname "$0")/../.. chbin=${chbase}/bin outdir=/tmp outtag=$(date -u +'%Y%m%dT%H%M%SZ') imb=/usr/local/src/imb/src/IMB-MPI1 if [[ "$1" == build ]]; then shift "${chbin}/ch-build" -t "${USER}/mpibench" "$chbase" "${chbin}/ch-docker2tar" "${USER}/mpibench" /tmp "${chbin}/ch-tar2dir" "/tmp/${USER}.mpibench.tar.gz" /tmp/mpibench fi if [[ -n "$1" ]]; then echo "testing on host" time mpirun -n "$1" "$imb" \ > "${outdir}/mpibench.host.${outtag}.txt" echo "testing in container" time mpirun -n "$1" "${chbin}/ch-run" /tmp/mpibench -- "$imb" \ > "${outdir}/mpibench.guest.${outtag}.txt" echo "done; output in ${outdir}" fi charliecloud-0.9.10/examples/mpi/mpihello/000077500000000000000000000000001346662313000204575ustar00rootroot00000000000000charliecloud-0.9.10/examples/mpi/mpihello/Dockerfile.mpich000066400000000000000000000001261346662313000235470ustar00rootroot00000000000000# ch-test-scope: full FROM mpich COPY . 
/hello
WORKDIR /hello
RUN make clean && make

charliecloud-0.9.10/examples/mpi/mpihello/Dockerfile.openmpi000066400000000000000000000001471346662313000241210ustar00rootroot00000000000000
# ch-test-scope: full
FROM openmpi
# This example
COPY . /hello
WORKDIR /hello
RUN make clean && make

charliecloud-0.9.10/examples/mpi/mpihello/Makefile000066400000000000000000000002351346662313000221170ustar00rootroot00000000000000
BINS := hello
CFLAGS := -std=gnu11 -Wall

.PHONY: all
all: $(BINS)

.PHONY: clean
clean:
	rm -f $(BINS)

$(BINS): Makefile

%: %.c
	mpicc $(CFLAGS) $< -o $@

charliecloud-0.9.10/examples/mpi/mpihello/hello.c000066400000000000000000000042251346662313000217310ustar00rootroot00000000000000
/* MPI test program. Reports user namespace and rank, then sends and receives
   some simple messages.

   Patterned after:
   http://en.wikipedia.org/wiki/Message_Passing_Interface#Example_program */

#define _GNU_SOURCE
#include <limits.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <mpi.h>

#define TAG 0
#define MSG_OUT 8675309

void fatal(char * fmt, ...);
int op(int rank, int i);

int rank, rank_ct;

int main(int argc, char ** argv)
{
   char hostname[HOST_NAME_MAX+1];
   char mpi_version[MPI_MAX_LIBRARY_VERSION_STRING];
   int mpi_version_len;
   int msg;
   MPI_Status mstat;
   struct stat st;

   stat("/proc/self/ns/user", &st);
   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &rank_ct);
   MPI_Comm_rank(MPI_COMM_WORLD, &rank);

   if (rank == 0) {
      MPI_Get_library_version(mpi_version, &mpi_version_len);
      printf("%d: MPI version:\n%s\n", rank, mpi_version);
   }

   gethostname(hostname, HOST_NAME_MAX+1);
   printf("%d: init ok %s, %d ranks, userns %lu\n",
          rank, hostname, rank_ct, st.st_ino);
   fflush(stdout);

   if (rank == 0) {
      for (int i = 1; i < rank_ct; i++) {
         msg = MSG_OUT;
         MPI_Send(&msg, 1, MPI_INT, i, TAG, MPI_COMM_WORLD);
         msg = 0;
         MPI_Recv(&msg, 1, MPI_INT, i, TAG, MPI_COMM_WORLD, &mstat);
         if (msg != op(i, MSG_OUT))
            fatal("0: expected %d back but got %d", op(i, MSG_OUT), msg);
      }
   } else {
      msg = 0;
      MPI_Recv(&msg, 1, MPI_INT, 0, TAG, MPI_COMM_WORLD, &mstat);
      if (msg != MSG_OUT)
         fatal("%d: expected %d but got %d", rank, MSG_OUT, msg);
      msg = op(rank, msg);
      MPI_Send(&msg, 1, MPI_INT, 0, TAG, MPI_COMM_WORLD);
   }

   if (rank == 0)
      printf("%d: send/receive ok\n", rank);
   MPI_Finalize();
   if (rank == 0)
      printf("%d: finalize ok\n", rank);
   return 0;
}

void fatal(char * fmt, ...)
{
   va_list ap;

   fprintf(stderr, "rank %d:", rank);
   va_start(ap, fmt);
   vfprintf(stderr, fmt, ap);
   va_end(ap);
   fprintf(stderr, "\n");
   exit(EXIT_FAILURE);
}

int op(int rank, int i)
{
   return i * rank;
}

charliecloud-0.9.10/examples/mpi/mpihello/slurm.sh000077500000000000000000000013261346662313000221620ustar00rootroot00000000000000
#!/bin/bash
#SBATCH --time=0:10:00

# Arguments: Path to tarball, path to image parent directory.

set -e

tar=$1
imgdir=$2
img=${2}/$(basename "${tar%.tar.gz}")

if [[ -z $tar ]]; then
    echo 'no tarball specified' 1>&2
    exit 1
fi
printf 'tarball: %s\n' "$tar"

if [[ -z $imgdir ]]; then
    echo 'no image directory specified' 1>&2
    exit 1
fi
printf 'image: %s\n' "$img"

# Make Charliecloud available (varies by site).
module purge
module load friendly-testing
module load charliecloud

# Unpack image.
srun ch-tar2dir "$tar" "$imgdir"

# MPI version in container.
printf 'container: '
ch-run "$img" -- mpirun --version | grep -E '^mpirun'

# Run the app.
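# With --cpus-per-task=1 and no explicit task count, srun typically starts
# one rank per allocated core.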
srun --cpus-per-task=1 ch-run "$img" -- /hello/hello charliecloud-0.9.10/examples/mpi/mpihello/test.bats000066400000000000000000000045611346662313000223170ustar00rootroot00000000000000load ../../../test/common setup () { scope full prerequisites_ok "$ch_tag" } count_ranks () { echo "$1" \ | grep -E '^0: init ok' \ | tail -1 \ | sed -r 's/^.+ ([0-9]+) ranks.+$/\1/' } @test "${ch_tag}/guest starts ranks" { # shellcheck disable=SC2086 run ch-run $ch_unslurm "$ch_img" -- mpirun $ch_mpirun_np /hello/hello echo "$output" [[ $status -eq 0 ]] rank_ct=$(count_ranks "$output") echo "found ${rank_ct} ranks, expected ${ch_cores_node}" [[ $rank_ct -eq "$ch_cores_node" ]] [[ $output = *'0: send/receive ok'* ]] [[ $output = *'0: finalize ok'* ]] } @test "${ch_tag}/crayify image" { crayify_mpi_or_skip "$ch_img" } @test "${ch_tag}/MPI version" { # shellcheck disable=SC2086 run ch-run $ch_unslurm "$ch_img" -- /hello/hello echo "$output" [[ $status -eq 0 ]] if [[ $ch_mpi = openmpi ]]; then [[ $output = *'Open MPI'* ]] else [[ $ch_mpi = mpich ]] if [[ $ch_cray ]]; then [[ $output = *'CRAY MPICH'* ]] else [[ $output = *'MPICH Version:'* ]] fi fi } @test "${ch_tag}/serial" { # This seems to start up the MPI infrastructure (daemons, etc.) within the # guest even though there's no mpirun. # shellcheck disable=SC2086 run ch-run $ch_unslurm "$ch_img" -- /hello/hello echo "$output" [[ $status -eq 0 ]] [[ $output = *' 1 ranks'* ]] [[ $output = *'0: send/receive ok'* ]] [[ $output = *'0: finalize ok'* ]] } @test "${ch_tag}/host starts ranks" { arch_exclude aarch64 # issue 392 multiprocess_ok echo "starting ranks with: ${mpirun_core}" guest_mpi=$(ch-run "$ch_img" -- mpirun --version | head -1) echo "guest MPI: ${guest_mpi}" # shellcheck disable=SC2086 run $ch_mpirun_core ch-run --join "$ch_img" -- /hello/hello echo "$output" [[ $status -eq 0 ]] rank_ct=$(count_ranks "$output") echo "found ${rank_ct} ranks, expected ${ch_cores_total}" [[ $rank_ct -eq "$ch_cores_total" ]] [[ $output = *'0: send/receive ok'* ]] [[ $output = *'0: finalize ok'* ]] } @test "${ch_tag}/Cray bind mounts" { [[ $ch_cray ]] || skip 'host is not a Cray' ch-run "$ch_img" -- mount | grep -F /var/opt/cray/alps/spool ch-run "$ch_img" -- mount | grep -F /var/opt/cray/hugetlbfs } @test "${ch_tag}/revert image" { unpack_img_all_nodes "$ch_cray" } charliecloud-0.9.10/examples/mpi/paraview/000077500000000000000000000000001346662313000204645ustar00rootroot00000000000000charliecloud-0.9.10/examples/mpi/paraview/Dockerfile000066400000000000000000000062521346662313000224630ustar00rootroot00000000000000# ch-test-scope: full # ch-test-arch-exclude: aarch64 # issue #415 FROM openmpi WORKDIR /usr/src RUN apt-get install -y \ python-dev \ python-pip RUN pip install \ cython \ mpi4py RUN apt-get install -y \ cmake \ git \ python-mako \ libpthread-stubs0-dev \ libqt4-dev \ libxt-dev \ pkg-config \ qt4-qmake \ zlib1g-dev # LLVM. Mesa's "swr" states a dependency of 3.9.0+. Mesa build fails on 5.0.1 # with thousands of "undefined reference" errors while linking "libOSMesa.la". 
ENV LLVM_VERSION 3.9.1 RUN wget -nv http://releases.llvm.org/${LLVM_VERSION}/llvm-${LLVM_VERSION}.src.tar.xz RUN tar xf llvm-${LLVM_VERSION}.src.tar.xz RUN mkdir llvm-${LLVM_VERSION}.build RUN cd llvm-${LLVM_VERSION}.build \ && cmake -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_INSTALL_PREFIX=/usr \ -DLLVM_BUILD_LLVM_DYLIB=ON \ -DLLVM_ENABLE_RTTI=ON \ -DLLVM_INSTALL_UTILS=ON \ -DLLVM_TARGETS_TO_BUILD=X86 \ ../llvm-${LLVM_VERSION}.src \ && make -j $(getconf _NPROCESSORS_ONLN) install RUN rm -Rf llvm-${LLVM_VERSION}* # Mesa. We need a version newer than Debian provides. ENV MESA_VERSION 18.0.5 RUN wget -nv https://mesa.freedesktop.org/archive/mesa-${MESA_VERSION}.tar.xz RUN tar xf mesa-${MESA_VERSION}.tar.xz RUN cd mesa-${MESA_VERSION} \ && ./configure --prefix=/usr \ --enable-opengl \ --disable-gles1 \ --disable-gles2 \ --disable-va \ --disable-xvmc \ --disable-vdpau \ --disable-dri \ --disable-texture-float \ --disable-egl \ --disable-gbm \ --disable-glx \ --disable-osmesa \ --enable-shared-glapi \ --enable-llvm \ --enable-llvm-shared-libs \ --enable-gallium-osmesa \ --with-gallium-drivers=swrast,swr \ --with-dri-drivers= \ --with-platforms= \ && make -j$(getconf _NPROCESSORS_ONLN) install RUN rm -Rf mesa-${MESA_VERSION}* # ParaView. ENV PARAVIEW_MAJORMINOR 5.5 ENV PARAVIEW_VERSION 5.5.2 RUN wget -nv -O ParaView-v${PARAVIEW_VERSION}.tar.gz "https://www.paraview.org/paraview-downloads/download.php?submit=Download&version=v${PARAVIEW_MAJORMINOR}&type=binary&os=Sources&downloadFile=ParaView-v${PARAVIEW_VERSION}.tar.gz" RUN tar xf ParaView-v${PARAVIEW_VERSION}.tar.gz RUN mkdir ParaView-v${PARAVIEW_VERSION}.build RUN cd ParaView-v${PARAVIEW_VERSION}.build \ && cmake -DCMAKE_INSTALL_PREFIX=/usr \ -DBUILD_TESTING=OFF \ -DBUILD_SHARED_LIBS=ON \ -DPARAVIEW_ENABLE_PYTHON=ON \ -DPARAVIEW_BUILD_QT_GUI=OFF \ -DVTK_USE_X=OFF \ -DOPENGL_INCLUDE_DIR=IGNORE \ -DOPENGL_gl_LIBRARY=IGNORE \ -DVTK_OPENGL_HAS_OSMESA=ON \ -DVTK_USE_OFFSCREEN=OFF \ -DPARAVIEW_USE_MPI=ON \ ../ParaView-v${PARAVIEW_VERSION} \ && make -j $(getconf _NPROCESSORS_ONLN) install RUN rm -Rf ParaView-v${PARAVIEW_VERSION}* charliecloud-0.9.10/examples/mpi/paraview/cone.2ranks.vtk000066400000000000000000000005771346662313000233460ustar00rootroot00000000000000# vtk DataFile Version 4.2 vtk output ASCII DATASET POLYDATA POINTS 12 float 0.5 0 0 -0.5 0.5 0 -0.5 0.25 0.433013 -0.5 -0.25 0.433013 -0.5 -0.5 6.12323e-17 -0.5 -0.25 -0.433013 -0.5 0.25 -0.433013 0.5 0 0 -0.5 -0.5 6.12323e-17 -0.5 -0.25 -0.433013 -0.5 0.25 -0.433013 -0.5 0.5 -1.22465e-16 POLYGONS 7 31 6 6 5 4 3 2 1 3 0 1 2 3 0 2 3 3 0 3 4 3 7 8 9 3 7 9 10 3 7 10 11 charliecloud-0.9.10/examples/mpi/paraview/cone.nranks.vtk000066400000000000000000000010451346662313000234310ustar00rootroot00000000000000# vtk DataFile Version 4.2 vtk output ASCII DATASET POLYDATA POINTS 22 float 0.5 0 0 -0.5 0.5 0 -0.5 0.25 0.433013 -0.5 -0.25 0.433013 -0.5 -0.5 6.12323e-17 -0.5 -0.25 -0.433013 -0.5 0.25 -0.433013 0.5 0 0 -0.5 0.25 0.433013 -0.5 -0.25 0.433013 0.5 0 0 -0.5 -0.25 0.433013 -0.5 -0.5 6.12323e-17 0.5 0 0 -0.5 -0.5 6.12323e-17 -0.5 -0.25 -0.433013 0.5 0 0 -0.5 -0.25 -0.433013 -0.5 0.25 -0.433013 0.5 0 0 -0.5 0.25 -0.433013 -0.5 0.5 -1.22465e-16 POLYGONS 7 31 6 6 5 4 3 2 1 3 0 1 2 3 7 8 9 3 10 11 12 3 13 14 15 3 16 17 18 3 19 20 21 charliecloud-0.9.10/examples/mpi/paraview/cone.png000066400000000000000000000074701346662313000221260ustar00rootroot00000000000000PNG  IHDRݡIDATx^ݏ]y 
[remainder of cone.png binary PNG data omitted]
charliecloud-0.9.10/examples/mpi/paraview/cone.py000066400000000000000000000024611346662313000217650ustar00rootroot00000000000000# Draw a cone and write it out to sys.argv[1] in a few different ways. All
# output files should be bit-for-bit reproducible, i.e., no embedded
# timestamps, hostnames, floating point error, etc.

from __future__ import print_function

import os
import platform
import sys

import mpi4py.MPI
import paraview.simple as pv

# Version information.
print("ParaView %d.%d.%d on Python %s"
      % (pv.paraview.servermanager.vtkSMProxyManager.GetVersionMajor(),
         pv.paraview.servermanager.vtkSMProxyManager.GetVersionMinor(),
         pv.paraview.servermanager.vtkSMProxyManager.GetVersionPatch(),
         platform.python_version()))

# Even if you start multiple pvbatch using MPI, this script is only
# executed by rank 0. Check this assumption.
assert mpi4py.MPI.COMM_WORLD.rank == 0

# Output directory provided on command line.
outdir = sys.argv[1]

# Render a cone.
pv.Cone()
pv.Show()
pv.Render()
print("rendered")

# PNG image (serial).
filename = "%s/cone.png" % outdir
pv.SaveScreenshot(filename)
print(filename)

# Legacy VTK file (ASCII, serial).
filename = "%s/cone.vtk" % outdir
pv.SaveData(filename, FileType="Ascii")
print(filename)

# XML VTK files (parallel).
filename = "%s/cone.pvtp" % outdir
writer = pv.XMLPPolyDataWriter(FileName=filename)
writer.UpdatePipeline()
print(filename)

# Done.
print("done") charliecloud-0.9.10/examples/mpi/paraview/cone.serial.vtk000066400000000000000000000006671346662313000234250ustar00rootroot00000000000000# vtk DataFile Version 4.2 vtk output ASCII DATASET POLYDATA POINTS 7 float 0.5 0 0 -0.5 0.5 0 -0.5 0.25 0.433013 -0.5 -0.25 0.433013 -0.5 -0.5 6.12323e-17 -0.5 -0.25 -0.433013 -0.5 0.25 -0.433013 METADATA INFORMATION 2 NAME L2_NORM_RANGE LOCATION vtkDataArray DATA 2 0.5 0.707107 NAME L2_NORM_FINITE_RANGE LOCATION vtkDataArray DATA 2 0.5 0.707107 POLYGONS 7 31 6 6 5 4 3 2 1 3 0 1 2 3 0 2 3 3 0 3 4 3 0 4 5 3 0 5 6 3 0 6 1 charliecloud-0.9.10/examples/mpi/paraview/test.bats000066400000000000000000000044561346662313000223270ustar00rootroot00000000000000load ../../../test/common setup () { scope full arch_exclude aarch64 # issue #415 prerequisites_ok paraview indir=$BATS_TEST_DIRNAME outdir=$BATS_TMPDIR if [[ $ch_multinode ]]; then # Bats only creates $BATS_TMPDIR on the first node. # shellcheck disable=SC2086 $ch_mpirun_node mkdir -p "$BATS_TMPDIR" fi } # The first two tests demonstrate ParaView as an "executable" to process a # non-containerized input deck (cone.py) and produce non-containerized output. # # .png: In previous versions, PNG output is antialiased with a single rank # and not with multiple ranks depending on the execution environment. # This is no longer the case as of version 5.5.4 but may change with # a new version of Paraview. # # .vtk: The number of extra and/or duplicate points and indexing of these # points into polygons varied by rank count on my VM, but not on the # cluster. The resulting VTK file is dependent on whether an image was # rendered serially or using 2 or n processes. # # We do not check .pvtp (and its companion .vtp) output because it's a # collection of XML files containing binary data and it seems too hairy to me. @test "${ch_tag}/crayify image" { crayify_mpi_or_skip "$ch_img" } @test "${ch_tag}/cone serial" { # shellcheck disable=SC2086 ch-run $ch_unslurm -b "$indir" -b "$outdir" "$ch_img" -- \ pvbatch /mnt/0/cone.py /mnt/1 ls -l "$outdir"/cone* diff -u "${indir}/cone.serial.vtk" "${outdir}/cone.vtk" cmp "${indir}/cone.png" "${outdir}/cone.png" } @test "${ch_tag}/cone ranks=2" { multiprocess_ok # shellcheck disable=SC2086 $ch_mpirun_2 ch-run --join -b "$indir" -b "$outdir" "$ch_img" -- \ pvbatch /mnt/0/cone.py /mnt/1 ls -l "$outdir"/cone* diff -u "${indir}/cone.2ranks.vtk" "${outdir}/cone.vtk" cmp "${indir}/cone.png" "${outdir}/cone.png" } @test "${ch_tag}/cone ranks=N" { multiprocess_ok # shellcheck disable=SC2086 $ch_mpirun_core ch-run --join -b "$indir" -b "$outdir" "$ch_img" -- \ pvbatch /mnt/0/cone.py /mnt/1 ls -l "$outdir"/cone* diff -u "${indir}/cone.nranks.vtk" "${outdir}/cone.vtk" cmp "${indir}/cone.png" "${outdir}/cone.png" } @test "${ch_tag}/revert image" { unpack_img_all_nodes "$ch_cray" } charliecloud-0.9.10/examples/other/000077500000000000000000000000001346662313000172025ustar00rootroot00000000000000charliecloud-0.9.10/examples/other/spark/000077500000000000000000000000001346662313000203225ustar00rootroot00000000000000charliecloud-0.9.10/examples/other/spark/Dockerfile000066400000000000000000000027441346662313000223230ustar00rootroot00000000000000# ch-test-scope: standard FROM debian:stretch # Install needed OS packages. RUN apt-get update \ && apt-get install -y less openjdk-8-jre-headless procps python wget \ && rm -rf /var/lib/apt/lists/* # We want ch-ssh RUN touch /usr/bin/ch-ssh # Download and install Spark. 
# # We're staying on Spark 2.0 because 2.1.0 introduces Hive for metadata # handling somehow [1]. Data for this goes in $CWD by default, which is / and # not writeable in Charliecloud containers. So you get thousands of lines of # stack trace from pyspark. Workarounds exist, including cd to /tmp first or # configure hive-site.xml [2], but I'm not willing to put up with that crap # for demo purposes. Maybe it will be fixed in a 2.1 point release. # # [1]: http://spark.apache.org/docs/latest/sql-programming-guide.html#upgrading-from-spark-sql-20-to-21 # [2]: https://community.cloudera.com/t5/Advanced-Analytics-Apache-Spark/Spark-displays-SQLException-when-Hive-not-installed/td-p/37954 ENV URLPATH http://d3kbcqa49mib13.cloudfront.net ENV DIR spark-2.0.2-bin-hadoop2.7 ENV TAR $DIR.tgz RUN wget -nv $URLPATH/$TAR RUN tar xf $TAR && mv $DIR spark && rm $TAR # Very basic default configuration, to make it run and not do anything stupid. RUN printf '\ SPARK_LOCAL_IP=127.0.0.1\n\ SPARK_LOCAL_DIRS=/tmp\n\ SPARK_LOG_DIR=/tmp\n\ SPARK_WORKER_DIR=/tmp\n\ ' > /spark/conf/spark-env.sh # Move config to /mnt/0 so we can provide a different config if we want RUN mv /spark/conf /mnt/0 \ && ln -s /mnt/0 /spark/conf charliecloud-0.9.10/examples/other/spark/slurm.sh000077500000000000000000000042641346662313000220310ustar00rootroot00000000000000#!/bin/bash #SBATCH --time=0:10:00 # Run an example non-interactive Spark computation. Requires three arguments: # # 1. Image tarball # 2. Directory in which to unpack tarball # 3. High-speed network interface name # # Example: # # $ sbatch slurm.sh /scratch/spark.tar.gz /var/tmp ib0 # # Spark configuration will be generated in ~/slurm-$SLURM_JOB_ID.spark; any # configuration already there will be clobbered. set -e if [[ -z $SLURM_JOB_ID ]]; then echo "not running under Slurm" 1>&2 exit 1 fi tar=$1 img=$2 img=${img}/spark dev=$3 conf=${HOME}/slurm-${SLURM_JOB_ID}.spark # Make Charliecloud available (varies by site) module purge module load friendly-testing module load charliecloud # What IP address to use for master? 
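# The pipeline below expects the one-line-per-address "ip -o" format,
# roughly like this (illustrative only; exact fields vary by distro and
# device):
#
#   4: ib0    inet 10.10.1.5/16 brd 10.10.255.255 scope global ib0
#
# from which the sed expression keeps only the dotted-quad address.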
if [[ -z $dev ]]; then echo "no high-speed network device specified" exit 1 fi master_ip=$( ip -o -f inet addr show dev "$dev" \ | sed -r 's/^.+inet ([0-9.]+).+/\1/') master_url=spark://${master_ip}:7077 if [[ -n $master_ip ]]; then echo "Spark master IP: ${master_ip}" else echo "no IP address for ${dev} found" exit 1 fi # Unpack image srun ch-tar2dir "$tar" "$img" # Make Spark configuration mkdir "$conf" chmod 700 "$conf" cat < "${conf}/spark-env.sh" SPARK_LOCAL_DIRS=/tmp/spark SPARK_LOG_DIR=/tmp/spark/log SPARK_WORKER_DIR=/tmp/spark SPARK_LOCAL_IP=127.0.0.1 SPARK_MASTER_HOST=${master_ip} EOF mysecret=$(cat /dev/urandom | tr -dc '0-9a-f' | head -c 48) cat < "${conf}/spark-defaults.sh" spark.authenticate true spark.authenticate.secret $mysecret EOF chmod 600 "${conf}/spark-defaults.sh" # Start the Spark master ch-run -b "$conf" "$img" -- /spark/sbin/start-master.sh sleep 10 tail -7 /tmp/spark/log/*master*.out grep -Fq 'New state: ALIVE' /tmp/spark/log/*master*.out # Start the Spark workers srun sh -c " ch-run -b '${conf}' '${img}' -- \ /spark/sbin/start-slave.sh ${master_url} \ && sleep infinity" & sleep 10 grep -F worker /tmp/spark/log/*master*.out tail -3 /tmp/spark/log/*worker*.out # Compute pi ch-run -b "$conf" "$img" -- \ /spark/bin/spark-submit --master "$master_url" \ /spark/examples/src/main/python/pi.py 1024 # Let Slurm kill the workers and master charliecloud-0.9.10/examples/other/spark/test.bats000066400000000000000000000105471346662313000221630ustar00rootroot00000000000000load ../../../test/common # Note: If you get output like the following (piping through cat turns off # BATS terminal magic): # # $ ./bats ../examples/spark/test.bats | cat # 1..5 # ok 1 spark/configure # ok 2 spark/start # [...]/test/bats.src/libexec/bats-exec-test: line 329: /tmp/bats.92406.src: No such file or directory # [...]/test/bats.src/libexec/bats-exec-test: line 329: /tmp/bats.92406.src: No such file or directory # [...]/test/bats.src/libexec/bats-exec-test: line 329: /tmp/bats.92406.src: No such file or directory # # that means that mpirun is starting too many processes per node (you want 1). # One solution is to export OMPI_MCA_rmaps_base_mapping_policy= (i.e., set but # empty). setup () { scope standard prerequisites_ok spark umask 0077 spark_dir=~/ch-spark-test.tmp # runs before each test, so no mktemp spark_config=$spark_dir spark_log=/tmp/sparklog if [[ $ch_multinode ]]; then # Use the last non-loopback IP address. This is a barely educated # guess and shouldn't be relied on for real code, but hopefully it # works for testing. master_ip=$( ip -o -f inet addr show \ | grep -F 'scope global' \ | tail -1 \ | sed -r 's/^.+inet ([0-9.]+).+/\1/') # Start Spark workers using pdsh. We would really prefer to do this # using srun, but that doesn't work; see issue #230. 
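        # With $pernode set below, worker startup later in this file expands
        # to roughly (hypothetical node list and master address):
        #
        #   pdsh -R ssh -w cn[01-04] -- PATH=... ch-run -b $spark_config \
        #     $ch_img -- /spark/sbin/start-slave.sh spark://10.10.1.5:7077
        #
        # i.e., one SSH-launched worker per allocated node.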
command -v pdsh >/dev/null 2>&1 || skip "pdsh not in path" pernode="pdsh -R ssh -w ${SLURM_NODELIST} -- PATH='${PATH}'" else master_ip=127.0.0.1 pernode= fi master_url="spark://${master_ip}:7077" master_log="${spark_log}/*master.Master*.out" } @test "${ch_tag}/configure" { # check for restrictive umask run umask -S echo "$output" [[ $status -eq 0 ]] [[ $output = 'u=rwx,g=,o=' ]] # create config mkdir -p "$spark_config" tee < "${spark_config}/spark-env.sh" SPARK_LOCAL_DIRS=/tmp/spark SPARK_LOG_DIR=$spark_log SPARK_WORKER_DIR=/tmp/spark SPARK_LOCAL_IP=127.0.0.1 SPARK_MASTER_HOST=${master_ip} EOF my_secret=$(cat /dev/urandom | tr -dc '0-9a-f' | head -c 48) tee < "${spark_config}/spark-defaults.conf" spark.authenticate.true spark.authenticate.secret ${my_secret} EOF } @test "${ch_tag}/start" { # remove old master logs so new one has predictable name rm -Rf --one-file-system "$spark_log" # start the master ch-run -b "$spark_config" "$ch_img" -- /spark/sbin/start-master.sh sleep 7 # shellcheck disable=SC2086 cat $master_log # shellcheck disable=SC2086 grep -Fq 'New state: ALIVE' $master_log # start the workers # shellcheck disable=SC2086 $pernode ch-run -b "$spark_config" "$ch_img" -- \ /spark/sbin/start-slave.sh "$master_url" sleep 7 } @test "${ch_tag}/worker count" { # Note that in the log, each worker shows up as 127.0.0.1, which might # lead you to believe that all the workers started on the same (master) # node. However, I believe this string is self-reported by the workers and # is an artifact of SPARK_LOCAL_IP=127.0.0.1 above, which AFAICT just # tells the workers to put their web interfaces on localhost. They still # connect to the master and get work OK. [[ -z $ch_multinode ]] && SLURM_NNODES=1 # shellcheck disable=SC2086 worker_ct=$(grep -Fc 'Registering worker' $master_log || true) echo "node count: $SLURM_NNODES; worker count: ${worker_ct}" [[ $worker_ct -eq "$SLURM_NNODES" ]] } @test "${ch_tag}/pi" { run ch-run -b "$spark_config" "$ch_img" -- \ /spark/bin/spark-submit --master "$master_url" \ /spark/examples/src/main/python/pi.py 64 echo "$output" [[ $status -eq 0 ]] # This computation converges quite slowly, so we only ask for two correct # digits of pi. [[ $output = *'Pi is roughly 3.1'* ]] } @test "${ch_tag}/stop" { $pernode ch-run -b "$spark_config" "$ch_img" -- /spark/sbin/stop-slave.sh ch-run -b "$spark_config" "$ch_img" -- /spark/sbin/stop-master.sh sleep 2 # Any Spark processes left? # (Use egrep instead of fgrep so we don't match the grep process.) # shellcheck disable=SC2086 $pernode ps aux | ( ! grep -E '[o]rg\.apache\.spark\.deploy' ) } @test "${ch_tag}/hang" { # If there are any test processes remaining, this test will hang. true } charliecloud-0.9.10/examples/serial/000077500000000000000000000000001346662313000173405ustar00rootroot00000000000000charliecloud-0.9.10/examples/serial/hello/000077500000000000000000000000001346662313000204435ustar00rootroot00000000000000charliecloud-0.9.10/examples/serial/hello/Dockerfile000066400000000000000000000002761346662313000224420ustar00rootroot00000000000000# ch-test-scope: standard FROM debian:stretch RUN apt-get update \ && apt-get install -y openssh-client \ && rm -rf /var/lib/apt/lists/* COPY . hello RUN touch /usr/bin/ch-ssh charliecloud-0.9.10/examples/serial/hello/README000066400000000000000000000004041346662313000213210ustar00rootroot00000000000000This example is a hello world Charliecloud container. It demonstrates running a command on the host from inside a container. 
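For example, once the image has been built and unpacked (paths are
illustrative; any unpacked image directory works):

  $ ch-run /var/tmp/hello -- /hello/hello.sh
  hello world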
A script test.sh is provided to demonstrate the build and run procedure. Detailed instructions are in the Charliecloud documentation. charliecloud-0.9.10/examples/serial/hello/hello.sh000077500000000000000000000000461346662313000221050ustar00rootroot00000000000000#!/bin/sh set -e echo 'hello world' charliecloud-0.9.10/examples/serial/hello/test.bats000066400000000000000000000011751346662313000223010ustar00rootroot00000000000000load ../../../test/common setup () { scope standard prerequisites_ok hello } @test "${ch_tag}/hello" { run ch-run "$ch_img" -- /hello/hello.sh echo "$output" [[ $status -eq 0 ]] [[ $output = 'hello world' ]] } @test "${ch_tag}/distribution sanity" { # Try various simple things that should work in a basic Debian # distribution. (This does not test anything Charliecloud manipulates.) ch-run "$ch_img" -- /bin/bash -c true ch-run "$ch_img" -- /bin/true ch-run "$ch_img" -- find /etc -name 'a*' ch-run "$ch_img" -- sh -c 'echo foo | /bin/grep -E foo' ch-run "$ch_img" -- nice true } charliecloud-0.9.10/examples/serial/obspy/000077500000000000000000000000001346662313000204745ustar00rootroot00000000000000charliecloud-0.9.10/examples/serial/obspy/Dockerfile000066400000000000000000000017611346662313000224730ustar00rootroot00000000000000# ch-test-scope: skip (issue #64) FROM debian:stretch RUN apt-get update \ && apt-get install -y \ bzip2 \ wget \ && rm -rf /var/lib/apt/lists/* # Install Miniconda into /usr. Some of the instructions [1] warn against # putting conda in $PATH; others don't. We are going to play fast and loose. # # [1]: http://conda.pydata.org/docs/help/silent.html WORKDIR /usr/src ENV MC_VERSION 4.2.12 ENV MC_FILE Miniconda3-$MC_VERSION-Linux-x86_64.sh RUN wget -nv https://repo.continuum.io/miniconda/$MC_FILE RUN bash $MC_FILE -bf -p /usr RUN rm -Rf $MC_FILE # Disable automatic conda upgrades for predictable versioning. RUN conda config --set auto_update_conda False # Install obspy. (Matplotlib 2.0 -- the default as of 2016-01-24 and what # obspy depends on -- with ObsPy 1.0.2 causes lots of test failures.) RUN conda config --add channels conda-forge RUN conda install --yes obspy=1.0.2 \ matplotlib=1.5.3 \ basemap-data-hires=1.0.8.dev0 charliecloud-0.9.10/examples/serial/obspy/README000066400000000000000000000003451346662313000213560ustar00rootroot00000000000000This started failing Travis on roughly 10/31/2017 with: pkg_resources.DistributionNotFound: The 'pytz' distribution was not found and is required by matplotlib I'm not sure how to fix it, so disabling for now. See issue #64. charliecloud-0.9.10/examples/serial/obspy/test.bats000066400000000000000000000011421346662313000223240ustar00rootroot00000000000000load ../../../test/common setup () { scope skip # issue #64 prerequisites_ok obspy } @test "${ch_tag}/runtests" { # Some tests try to use the network even when they're not supposed to. I # reported this as a bug on ObsPy [1]. In the meantime, exclude the # modules with those tests. This is pretty heavy handed, as only three # tests in these two modules have this problem, but I couldn't find a # finer-grained exclusion mechanism. # # [1]: https://github.com/obspy/obspy/issues/1660 ch-run "$ch_img" -- bash -c '. 
activate && obspy-runtests -d -x core -x signal' } charliecloud-0.9.10/examples/serial/spack/000077500000000000000000000000001346662313000204415ustar00rootroot00000000000000charliecloud-0.9.10/examples/serial/spack/Dockerfile000066400000000000000000000032761346662313000224430ustar00rootroot00000000000000# ch-test-scope: full FROM debian9 # Note: Spack is a bit of an odd duck testing wise. Because it's a package # manager, the key tests we want are to install stuff (this includes the Spack # test suite), and those don't make sense at run time. Thus, most of what we # care about is here in the Dockerfile, and test.bats just has a few # trivialities. # Spack needs curl, git, make, and unzip to install. # The other packages are needed for Spack unit tests. RUN apt-get install -y \ curl \ g++ \ git \ make \ patch \ procps \ python \ python-pkg-resources \ unzip # Install Spack. This follows the documented procedure to run it out of the # source directory. There apparently is no "make install" type operation to # place it at a standard path ("spack clone" simply clones another working # directory to a new path). ENV SPACK_REPO https://github.com/spack/spack ENV SPACK_VERSION 0.11.2 RUN git clone --depth 1 $SPACK_REPO #RUN git clone --branch v$SPACK_VERSION --depth 1 $SPACK_REPO # Set up environment to use Spack. (We can't use setup-env.sh because the # Dockerfile shell is sh, not Bash.) ENV PATH /spack/bin:$PATH RUN spack compiler find --scope system # Test: Some basic commands. RUN which spack RUN spack --version RUN spack compiler list RUN spack compiler list --scope=system RUN spack compiler list --scope=user RUN spack compilers RUN spack spec netcdf # Test: Install a small package. RUN spack spec charliecloud RUN spack install charliecloud # Test: Run Spack test suite. # FIXME: Commented out because the suite fails. It's inconsistent; the number # of failures seems to vary between about 1 and 3 inclusive. #RUN spack test # Clean up. RUN spack clean --all charliecloud-0.9.10/examples/serial/spack/test.bats000066400000000000000000000012751346662313000223000ustar00rootroot00000000000000load ../../../test/common setup() { scope full [[ -z $ch_cray ]] || skip 'issue #193 and Spack issue #8618' prerequisites_ok spack export PATH=/spack/bin:$PATH } @test "${ch_tag}/version" { ch-run "$ch_img" -- spack --version } @test "${ch_tag}/compilers" { echo "spack compiler list" ch-run "$ch_img" -- spack compiler list echo "spack compiler list --scope=system" ch-run "$ch_img" -- spack compiler list --scope=system echo "spack compiler list --scope=user" ch-run "$ch_img" -- spack compiler list --scope=user echo "spack compilers" ch-run "$ch_img" -- spack compilers } @test "${ch_tag}/spec" { ch-run "$ch_img" -- spack spec netcdf } charliecloud-0.9.10/examples/syscalls/000077500000000000000000000000001346662313000177165ustar00rootroot00000000000000charliecloud-0.9.10/examples/syscalls/Makefile000066400000000000000000000002341346662313000213550ustar00rootroot00000000000000BINS := $(patsubst %.c,%,$(wildcard *.c)) .PHONY: all all: $(BINS) .PHONY: clean clean: rm -f $(BINS) $(BINS): Makefile %: %.c gcc $(CFLAGS) $< -o $@ charliecloud-0.9.10/examples/syscalls/pivot_root.c000066400000000000000000000123641346662313000222740ustar00rootroot00000000000000/* This example program walks through the complete namespace / pivot_root(2) dance to enter a Charliecloud container, with each step documented. If you can compile it and run it without error as a normal user, ch-run will work too (if not, that's a bug). 
If not, this will hopefully help you understand more clearly what went wrong. pivot_root(2) has a large number of error conditions resulting in EINVAL that are not documented in the man page [1]. The ones we ran into are: 1. The new root cannot be shared [2] outside the mount namespace. This makes sense, as we as an unprivileged user inside our namespace should not be able to change privileged things owned by other namespaces. This condition arises on systemd systems, which mount everything shared by default. 2. The new root must not have been mounted before unshare(2), and/or it must be a mount point. The man page says "new_root does not have to be a mount point", but the source code comment says "[i]t must be a mount point" [3]. (I haven't isolated which was our problem.) In either case, this is a very common situation. 3. The old root is a "rootfs" [4]. This is documented in a source code comment [3] but not the man page. This is an unusual situation for most contexts, because the rootfs is typically the initramfs overmounted during boot. However, some cluster provisioning systems, e.g. Perceus, use the original rootfs directly. Regarding overlayfs: It's very attractive to union-mount a tmpfs over the read-only image; then all programs can write to their hearts' desire, and the image does not change. This also simplifies the code. Unfortunately, overlayfs + userns is not allowed as of 4.4.23. See: https://lwn.net/Articles/671774/ [1]: http://man7.org/linux/man-pages/man2/pivot_root.2.html [2]: https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt [3]: http://lxr.free-electrons.com/source/fs/namespace.c?v=4.4#L2952 [4]: https://www.kernel.org/doc/Documentation/filesystems/ramfs-rootfs-initramfs.txt */ #define _GNU_SOURCE #include #include #include #include #include #include #include #include #define TRY(x) if (x) fatal_errno(__LINE__) void fatal_errno(int line) { printf("error at line %d, errno=%d\n", line, errno); exit(1); } int main(void) { /* Ensure that our image directory exists. It doesn't really matter what's in it. */ if (mkdir("/tmp/newroot", 0755) && errno != EEXIST) TRY (errno); /* Enter the mount and user namespaces. Note that in some cases (e.g., RHEL 6.8), this will succeed even though the userns is not created. In that case, the following mount(2) will fail with EPERM. */ TRY (unshare(CLONE_NEWNS|CLONE_NEWUSER)); /* Claim the image for our namespace by recursively bind-mounting it over itself. This standard trick avoids conditions 1 and 2. */ TRY (mount("/tmp/newroot", "/tmp/newroot", NULL, MS_REC | MS_BIND | MS_PRIVATE, NULL)); /* The next few calls deal with condition 3. The solution is to overmount the root filesystem with literally anything else. We use the parent of the image, /tmp. This doesn't hurt if / is not a rootfs, so we always do it for simplicity. */ /* Claim /tmp for our namespace. You would think that because /tmp contains /tmp/newroot and it's a recursive bind mount, we could claim both in the same call. But, this causes pivot_root(2) to fail later with EBUSY. */ TRY (mount("/tmp", "/tmp", NULL, MS_REC | MS_BIND | MS_PRIVATE, NULL)); /* chdir to /tmp. This moves the process' special "." pointer to the soon-to-be root filesystem. Otherwise, it will keep pointing to the overmounted root. See the e-mail at the end of: https://git.busybox.net/busybox/tree/util-linux/switch_root.c?h=1_24_2 */ TRY (chdir("/tmp")); /* Move /tmp to /. (One could use this to directly enter the image, avoiding pivot_root(2) altogether. 
However, there are ways to remove all active references to the root filesystem. Then, the image could be unmounted, exposing the old root filesystem underneath. While Charliecloud does not claim a strong isolation boundary, we do want to make activating the UDSS irreversible.) */ TRY (mount("/tmp", "/", NULL, MS_MOVE, NULL)); /* Move the "/" special pointer to the new root filesystem, for the reasons above. (Similar reasoning applies for why we don't use chroot(2) to directly activate the UDSS.) */ TRY (chroot(".")); /* Make a place for the old (intermediate) root filesystem to land. */ if (mkdir("/newroot/oldroot", 0755) && errno != EEXIST) TRY (errno); /* Re-mount the image read-only. */ TRY (mount(NULL, "/newroot", NULL, MS_REMOUNT | MS_BIND | MS_RDONLY, NULL)); /* Finally, make our "real" newroot into the root filesystem. */ TRY (chdir("/newroot")); TRY (syscall(SYS_pivot_root, "/newroot", "/newroot/oldroot")); TRY (chroot(".")); /* Unmount the old filesystem and it's gone for good. */ TRY (umount2("/oldroot", MNT_DETACH)); /* Report success. */ printf("ok\n"); } charliecloud-0.9.10/examples/syscalls/userns.c000066400000000000000000000007651346662313000214110ustar00rootroot00000000000000/* This is a simple hello-world implementation of user namespaces. */ #define _GNU_SOURCE #include #include #include #include #include int main(void) { uid_t euid = geteuid(); int fd; printf("outside userns, uid=%d\n", euid); unshare(CLONE_NEWUSER); fd = open("/proc/self/uid_map", O_WRONLY); dprintf(fd, "0 %d 1\n", euid); close(fd); printf("in userns, uid=%d\n", geteuid()); execlp("/bin/bash", "bash", NULL); } charliecloud-0.9.10/man/000077500000000000000000000000001346662313000150165ustar00rootroot00000000000000charliecloud-0.9.10/man/README000066400000000000000000000001361346662313000156760ustar00rootroot00000000000000This directory contains the compiled man pages. You can read them with: $ man -l man/foo.1 charliecloud-0.9.10/packaging/000077500000000000000000000000001346662313000161675ustar00rootroot00000000000000charliecloud-0.9.10/packaging/README000066400000000000000000000023631346662313000170530ustar00rootroot00000000000000This directory contains the code for distribution packages of Charliecloud. To build packages (end users generally do not need to do this), consult the README in the subdirectory of interest. Note that packaging code is not supported by the Charliecloud developers but rather by downstream maintainers. The Charliecloud team defers to these maintainers for non-trivial pull requests in this directory. However, package maintainers are requested to follow these guidelines: 1. Web documentation (install.rst) should cover: a. which packages are available b. how to install them, given prior installation experience for that distro c. who's responsible for a given distro's packages d. where to go for questions and bug reports on the package 2. Documentation in this directory should cover: a. how to build the packages, given prior experience building for that distro (i.e., teaching someone how to build packages for a given distro is out of scope) 3. Packaging code should: a. follow distro best practices and style b. build a reasonably-versioned package for arbitrary Git versions (e.g., not use the top of the package changelog for random later commits) c. 
provide package build tests for Travis when practical
charliecloud-0.9.10/packaging/debian/000077500000000000000000000000001346662313000174115ustar00rootroot00000000000000charliecloud-0.9.10/packaging/debian/README000066400000000000000000000003121346662313000202650ustar00rootroot00000000000000Use debuild at the root of the Charliecloud source code, e.g.:

  $ make deb
  $ debuild   # arguments unknown; please send a patch!

See travis.sh for an example of a build under Travis' Ubuntu image.
charliecloud-0.9.10/packaging/debian/changelog000066400000000000000000000002421346662313000212610ustar00rootroot00000000000000charliecloud (0.2.3) unstable; urgency=high

  * Initial packaged version.

 -- Oliver Freyermuth  Wed, 8 Nov 2017 19:00:00 +0200
charliecloud-0.9.10/packaging/debian/charliecloud.examples000066400000000000000000000000131346662313000236010ustar00rootroot00000000000000examples/*
charliecloud-0.9.10/packaging/debian/charliecloud.install000066400000000000000000000000471346662313000234400ustar00rootroot00000000000000debian/tmp/usr/bin
debian/tmp/usr/lib
charliecloud-0.9.10/packaging/debian/compat000066400000000000000000000000021346662313000206070ustar000000000000000000000000009
charliecloud-0.9.10/packaging/debian/control000066400000000000000000000044201346662313000210140ustar00rootroot00000000000000Source: charliecloud
Priority: optional
Maintainer: Oliver Freyermuth
Build-Depends: debhelper (>= 9),
 python,
 python-sphinx,
 python-sphinx-rtd-theme,
 rsync,
Standards-Version: 3.9.8
Homepage: https://hpc.github.io/charliecloud/

Package: charliecloud
Architecture: any
Section: admin
Depends: ${misc:Depends}, ${shlibs:Depends}
Description: Lightweight user-defined software stacks for high-performance computing.
 Charliecloud provides user-defined software stacks (UDSS) for
 high-performance computing (HPC) centers. This "bring your own software
 stack" functionality addresses needs such as: software dependencies that
 are numerous, complex, unusual, differently configured, or simply
 newer/older than what the center provides; build-time requirements
 unavailable within the center, such as relatively unfettered internet
 access; validated software stacks and configuration to meet the standards
 of a particular field of inquiry; portability of environments between
 resources, including workstations and other test and development systems
 not managed by the center; consistent environments, even archivally so,
 that can be easily, reliably, and verifiably reproduced in the future;
 and/or usability and comprehensibility.

Package: charliecloud-doc
Architecture: all
Section: doc
Depends: libjs-jquery, libjs-underscore, libjs-mathjax
Description: Lightweight user-defined software stacks for high-performance computing, documentation package.
 Charliecloud provides user-defined software stacks (UDSS) for
 high-performance computing (HPC) centers.
This "bring your own software stack" functionality addresses needs such as: software dependencies that are numerous, complex, unusual, diferently configured, or simply newer/older than what the center provides; build-time requirements unavailable within the center, such as relatively unfettered internet access; validated software stacks and configuration to meet the standards of a particular field of inquiry; portability of environments between resources, including workstations and other test and development system not managed by the center; consistent environments, even archivally so, that can be easily, reliabily, and verifiably reproduced in the future; and/or usability and comprehensibility. charliecloud-0.9.10/packaging/debian/copyright000066400000000000000000000010771346662313000213510ustar00rootroot00000000000000Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: charliecloud Upstream-Contact: Reid Priedhorsky Source: https://github.com/hpc/charliecloud Files-Excluded: debian Files: * Copyright: 2014–2018 Los Alamos National Security, LLC License: Apache-2.0 Files: debian/* Copyright: 2017–2018, Oliver Freyermuth License: Apache-2.0 License: Apache-2.0 On Debian systems, the full text of the Apache License version 2 can be found in the file `/usr/share/common-licenses/Apache-2.0'. charliecloud-0.9.10/packaging/debian/docs000066400000000000000000000000131346662313000202560ustar00rootroot00000000000000README.rst charliecloud-0.9.10/packaging/debian/format000066400000000000000000000000041346662313000206160ustar00rootroot000000000000001.0 charliecloud-0.9.10/packaging/debian/rules000077500000000000000000000023041346662313000204700ustar00rootroot00000000000000#!/usr/bin/make -f srcpkg = $(shell LC_ALL=C dpkg-parsechangelog | grep '^Source:' | cut -d ' ' -f 2,2) debver = $(shell LC_ALL=C dpkg-parsechangelog | grep '^Version:' | cut -d ' ' -f 2,2 ) uver = $(shell echo $(debver) | cut -d '-' -f 1,1 ) #DH_VERBOSE=1 export DEB_BUILD_MAINT_OPTIONS = hardening=+all BUILDDIR := debian/tmp DESTDIR := ${CURDIR}/${BUILDDIR} PKGDIR=debian/charliecloud %: dh $@ override_dh_auto_configure: dh_auto_configure #echo ${debver} > VERSION.full override_dh_auto_build: dh_auto_build cd doc-src && $(MAKE) mv doc html override_dh_auto_install: # PREFIX needed for installation (there's no configure). dh_auto_install -- PREFIX=/usr LIBEXEC_DIR=lib/charliecloud rm -rf ${DESTDIR}/usr/share/doc/charliecloud/test # Patch out bundled jquery.js rm html/_static/jquery.js ln -s /usr/share/javascript/jquery/jquery.js html/_static/jquery.js # Patch out bundled underscore.js rm html/_static/underscore.js ln -s /usr/share/javascript/underscore/underscore.js html/_static/underscore.js # Patch out usage of MathJax CDN find html/*.html -type f -print0 | xargs -0 sed -i 's#https://cdn.mathjax.org/mathjax/latest/#/usr/share/javascript/mathjax/#g' override_dh_auto_test: charliecloud-0.9.10/packaging/debian/travis.sh000077500000000000000000000016701346662313000212640ustar00rootroot00000000000000#!/bin/bash # Build a .deb from the current source code directory. $PWD must be the root # of the Charliecloud source code. # # FIXME: This uses the latest debian/changelog entry to choose the package # version, which is wrong most of the time. I'd really like to just be able to # say "debuild" at the source code root, without needing e.g. sed beforehand. # What is the best practice here? This must not be the only package with this # problem. set -e set -x # Need these packages to build. 
sudo apt-get install devscripts build-essential lintian debhelper # debuild needs Sphinx in the sanitized path # We install sphinx and the rtd-theme via pip in travis.yml. sudo ln -f -s /usr/local/bin/sphinx-build /usr/bin/sphinx-build # Need -d because dependencies can not be satisfied on trusty. # Need -fno-builtin -fPIC to hack around the broken 14.04 travis image. ln -s packaging/debian . DEB_CFLAGS_SET="-fno-builtin -fPIC" debuild -d -i -us -uc charliecloud-0.9.10/packaging/debian/watch000066400000000000000000000002461346662313000204440ustar00rootroot00000000000000version=3 opts="filenamemangle=s/(?:.*)?v?(\d[\d\.]*)\.tar\.gz/charliecloud-$1.tar.gz/" \ https://github.com/hpc/charliecloud/tags (?:.*/)?v?(\d[\d\.]*)\.tar\.gz charliecloud-0.9.10/packaging/fedora/000077500000000000000000000000001346662313000174275ustar00rootroot00000000000000charliecloud-0.9.10/packaging/fedora/build000077500000000000000000000163311346662313000204600ustar00rootroot00000000000000#!/usr/bin/env python2.7 # See contributors' guide for documentation of this script. from __future__ import print_function import argparse import errno import os import pipes import platform import pwd import re import shutil import socket import subprocess import sys import time CH_BASE = os.path.abspath(os.path.dirname(__file__) + "/../..") CH_RUN = [CH_BASE + "/bin/ch-run"] PACKAGES = ["charliecloud", "charliecloud-debuginfo", "charliecloud-test"] ARCH = platform.machine() def main(): # Parse arguments. ap = argparse.ArgumentParser() ap.add_argument("version") ap.add_argument("--image", metavar="DIR") ap.add_argument("--install", action="store_true") ap.add_argument("--rpmbuild", metavar="DIR", default="%s/rpmbuild" % os.getenv("HOME")) args = ap.parse_args() print("# Charliecloud root: %s" % CH_BASE) print("""\ # version: %(version)s # image: %(image)s # install: %(install)s # rpmbuild root: %(rpmbuild)s""" % args.__dict__) # What's the real Git version? if (args.version == "HEAD"): try: # If we're on a branch, we want to build on that branch so the branch # name shows up in the version name. commit = subprocess.check_output(["git", "symbolic-ref", "-q", "--short", "HEAD"])[:-1] except subprocess.CalledProcessError as x: if (x.returncode != 1): raise # Detached HEAD (e.g. Travis) is also fine; use commit hash. commit = subprocess.check_output(["git", "rev-parse", "--verify", "HEAD"])[:-1] rpm_release = "0" else: m = re.search(r"([0-9.]+)-([0-9]+)", args.version) commit = "v" + m.group(1) rpm_release = m.group(2) # Create rpmbuild root rpm_sources = args.rpmbuild + '/SOURCES' rpm_specs = args.rpmbuild + '/SPECS' for d in (rpm_sources, rpm_specs): print("# mkdir -p %s" % d) try: os.makedirs(d) except OSError as x: if (x.errno != errno.EEXIST): raise # Get a clean Git checkout of the desired version. We do this by making a # temporary clone so as not to mess up the WD. git_tmp = rpm_sources + '/charliecloud' print("# cloning into %s and checking out commit %s" % (git_tmp, commit)) cmd("git", "clone", '.', git_tmp) cmd("git", "checkout", commit, cwd=git_tmp) # Build tarball. print("# building docs") cmd("make", "-j2", cwd=git_tmp+"/doc-src") print("# building source tarball") cmd("make", "export", cwd=git_tmp) ch_version = open(git_tmp + "/VERSION.full").read()[:-1] ch_tarball = "charliecloud-%s.tar.gz" % ch_version print("# Charliecloud version: %s" % ch_version) print("# source tarball: %s" % ch_tarball) os.rename("%s/%s" % (git_tmp, ch_tarball), "%s/%s" % (rpm_sources, ch_tarball)) # Copy lint configuration. 
# FIXME: Put version into destination sometime? shutil.copy("%s/packaging/fedora/charliecloud.rpmlintrc" % CH_BASE, "%s/charliecloud.rpmlintrc" % rpm_specs) # Remove temporary Git directory. print("# rm -rf %s" % git_tmp) shutil.rmtree(git_tmp) # Copy and patch spec file. rpm_vr = "%s-%s" % (ch_version, rpm_release) # Fedora requires no version in spec file. Add a version for pre-release # specs to make it hard to upload one to Fedora by mistake. if ("~pre" not in ch_version): spec = "charliecloud.spec" else: spec = "charliecloud-%s.spec" % rpm_vr with open("%s/packaging/fedora/charliecloud.spec" % CH_BASE, "rt") as in_, \ open("%s/%s" % (rpm_specs, spec), "wt") as out: print("# writing %s" % out.name) t = in_.read() t = t.replace("@VERSION@", ch_version) t = t.replace("@RELEASE@", rpm_release) if ("~pre" in ch_version): # Add dummy changelog entry. timestamp = time.strftime("%a %b %d %Y") # required format name = pwd.getpwuid(os.geteuid()).pw_gecos.split(",")[0] moniker = pwd.getpwuid(os.geteuid()).pw_name domain = re.sub(r"^[^.]+.", "", socket.getfqdn()) t = t.replace("%changelog\n", """\ %%changelog * %s %s <%s@%s> %s - Pre-release package. See Git history for what is going on. """ % (timestamp, name, moniker, domain, rpm_vr)) else: # Verify requested version matches changelog. m = re.search(r"%changelog\n.+?([0-9.-]+)\n", t) if (m.group(1) != rpm_vr): print("requested version %s != changelog %s" % (rpm_vr, m.group(1))) sys.exit(1) out.write(t) # Prepare build and rpmlint arguments. container = [] rpmbuild_args = [] rpmlint_args = [] if (not args.image): rpms = "%s/RPMS/%s" % (args.rpmbuild, ARCH) rpmbuild_args += ["--define", "_topdir " + args.rpmbuild] rpmlint_args += ["--file", "%s/charliecloud.rpmlintrc" % rpm_specs] else: # Use /usr/local/src because rpmbuild fails with "%{_topdir}/BUILD" # shorter than "/usr/src/debug" (yes, really!) [1,2]. # # [1]: https://access.redhat.com/solutions/1426113 # [2]: https://gbenson.net/?p=367 rpms = "/usr/local/src/RPMS/%s" % ARCH rpm_specs = "/usr/local/src/SPECS" rpm_sources = "/usr/local/src/SOURCES" rpmbuild_args += ["--define", "_topdir /usr/local/src"] rpmlint_args += ["--file", "%s/charliecloud.rpmlintrc" % rpm_specs] container += [CH_BASE + "/bin/ch-run", "-w", "-b", "%s:/usr/local/src" % args.rpmbuild, args.image, "--"] # Build RPMs. cmd(container, "rpmbuild", rpmbuild_args, "--version") cmd(container, "rpmbuild", rpmbuild_args, "-ba", "%s/%s" % (rpm_specs, spec)) cmd(container, "ls", "-lh", rpms) # Install RPMs. if (args.install): print("# uninstalling (most errors can be ignored)") cmd_ok(container, "rpm", "--erase", PACKAGES) print("# installing") for p in PACKAGES: cmd(container, "rpm", "--install", "%s/%s-%s.*.rpm" % (rpms, p, rpm_vr)) cmd(container, "rpm", "-qa", "charliecloud*") # Lint RPMs and spec file. Last so problems that don't result in program # returning error are more obvious. print("# linting") cmd(container, "rpmlint", rpmlint_args, "%s/%s" % (rpm_specs, spec)) for p in PACKAGES: file_ = "%s/%s-%s.el7.%s.rpm" % (rpms, p, rpm_vr, ARCH) cmd(container, "test", "-e", file_) cmd(container, "rpmlint", rpmlint_args, file_) # Success! 
print("# done") def cmd(*args, **kwargs): cmd_real(subprocess.check_call, *args, **kwargs) def cmd_ok(*args, **kwargs): rc = cmd_real(subprocess.call, *args, **kwargs) return (rc == 0) def cmd_out(*args, **kwargs): out = cmd_real(subprocess.check_output, *args, **kwargs) return out.rstrip() # remove trailing newline def cmd_real(runf, *args, **kwargs): # flatten any sub-lists (kludge) args2 = [] for arg in args: if (isinstance(arg, list)): args2 += arg else: args2.append(arg) # print and run print("$", end="") for arg in args2: arg = pipes.quote(arg) print(" " + arg, end="") print() return runf(args2, **kwargs) if (__name__ == "__main__"): main() charliecloud-0.9.10/packaging/fedora/charliecloud.rpmlintrc000066400000000000000000000034011346662313000240170ustar00rootroot00000000000000# This file is used to supress false positive errors and warnings generated by # rpmlint when used with our charliecloud packages. # charliecloud.spec # The RPM build script will generate invalid source URLs for non-release # versions, e.g., '0.9.8~pre+epelpackage.41fe9fd'. addFilter(r'invalid-url') # charliecloud # Charliecloud uses pivot_root(2), not chroot(2), for containerization. The # calls to chroot(2) are part of the pivot_root(2) dance and not relevant to # Charliecloud's security posture. addFilter(r'missing-call-to-chdir-with-chroot') # charliecloud-debuginfo # The only files under /usr/lib are those placed there by rpmbuild. addFilter(r'only-non-binary-in-usr-lib') # charliecloud-test # Charliecloud is a container runtime. These shared objects are never used in # the host environment; rather, they are compiled by the test suite (both # running and examination of which serve as end-user documentation) and injected # into the container (guest) via utility script 'ch-fromhost'. The ldconfig # links are generated inside the container runtime environment. For more # information, see the test file: test/run/ch-fromhost.bats (line 108). addFilter(r'no-ldconfig-symlink') addFilter(r'library-without-ldconfig-postin') addFilter(r'library-without-ldconfig-postun') # The test suite has a few C files, e.g. userns.c, pivot_root.c, # chroot-escape.c, sotest.c, setgroups.c, mknods.c, setuid.c, etc., that # document -- line-by-line in some cases -- various components of the open source # runtime. These C files serve to show end users how containers work; some of # them are used explicitly during test suite runtime. addFilter(r'devel-file-in-non-devel-package') # The symlink to /usr/bin is created and does exist. addFilter(r'dangling-relative-symlink') charliecloud-0.9.10/packaging/fedora/charliecloud.spec000066400000000000000000000060351346662313000227450ustar00rootroot00000000000000# Charliecloud fedora package spec file # # Contributors: # Dave Love @loveshack # Michael Jennings @mej # Jordan Ogas @jogas # Reid Priedhorksy @reidpr # Don't try to compile python files with /usr/bin/python %{?el7:%global __python %__python3} # Fedora does not allow SUSE conditionals, thus we define libexecdir to ensure # consistency. 
%define _libexecdir %{_prefix}/libexec

# Specify python version of a given file
%define versionize_script() (sed -i 's,/env python,/env %1,g' %2)

%{!?build_cflags:%global build_cflags $RPM_OPT_FLAGS}
%{!?build_ldflags:%global build_ldflags %nil}

Name:          charliecloud
Version:       @VERSION@
Release:       @RELEASE@%{?dist}
Summary:       Lightweight user-defined software stacks for high-performance computing
License:       ASL 2.0
URL:           https://hpc.github.io/%{name}/
Source0:       https://github.com/hpc/%{name}/releases/download/v%{version}/%{name}-%{version}.tar.gz
BuildRequires: gcc

%package test
Summary:       Charliecloud examples and test suite
Requires:      %{name}%{?_isa} = %{version}-%{release}
Requires:      bats
Requires:      bash
Requires:      wget
Requires:      /usr/bin/python3

%description
Charliecloud uses Linux user namespaces to run containers with no privileged
operations or daemons and minimal configuration changes on center resources.
This simple approach avoids most security risks while maintaining access to
the performance and functionality already on offer. Container images can be
built using Docker or anything else that can generate a standard Linux
filesystem tree. For more information: https://hpc.github.io/charliecloud/

%description test
Charliecloud test suite and examples. The test suite takes advantage of
container image builders such as Docker, Skopeo, and Buildah.

%prep
%setup -q
%{versionize_script python3 test/make-auto}
%{versionize_script python3 test/make-perms-test}

%build
%make_build CFLAGS="%build_cflags -std=c11 -pthread" LDFLAGS="%build_ldflags"

%install
%make_install PREFIX=%{_prefix}

cat > README.EL7 <<EOF
To enable user namespaces:

  echo user.max_user_namespaces = 32767 >/etc/sysctl.d/51-userns.conf
  sysctl -p /etc/sysctl.d/51-userns.conf

Note for versions below RHEL7.6, you will also need to enable user
namespaces:

  grubby --args=namespace.unpriv_enable=1 --update-kernel=ALL
  sysctl -p /etc/sysctl.d/51-userns.conf
EOF

cat > README.TESTS <

%changelog
* @VERSION@-@RELEASE@
- Add initial Fedora/EPEL package.
charliecloud-0.9.10/packaging/vagrant/000077500000000000000000000000001346662313000176315ustar00rootroot00000000000000charliecloud-0.9.10/packaging/vagrant/Vagrantfile000066400000000000000000000242311346662313000220200ustar00rootroot00000000000000# This Vagrantfile builds a CentOS 7 virtual machine with Charliecloud and
# Docker installed. It is documented in the HTML docs.

# A few design decisions:
#
# 1. We use the CentOS kernel (3.10 + lots of Red Hat patches) because it's
#    easier than installing the upstream kernel and seems to work fine once
#    user namespaces are turned on. Please let us know of any problems.
#
# 2. /tmp is the CentOS default: simply a directory on the root filesystem,
#    but cleaned out on each boot. This is to (a) avoid configuring it to be
#    a tmpfs and (b) save swap.
#
# 3. We don't try any clever sizing of the VM appliance (e.g., number of
#    vCPUs, RAM). End users should adjust these values, but we want to leave
#    reasonable defaults in case they don't, even if they have a smallish
#    host. For example, we don't want to configure the appliance to take
#    advantage of your 96-core Beast Machine and then give it to some poor
#    end user to run that 96-vCPU VM on their 4-core laptop.
#
# 4. No OpenMPI is installed. This saves build time, a little disk space, and
#    the complexity of finding the right version to match the Charliecloud
#    examples, while the cost is low: a few tests are skipped. Users who want
#    to run single-node MPI apps in the VM should launch from within ch-run.
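# For example, a user with a larger host might raise the appliance resources
# before "vagrant up" by editing the provider block below (values
# illustrative only):
#
#   c.vm.provider "virtualbox" do |vb|
#     vb.memory = "8192"
#     vb.cpus = 8
#   end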
Vagrant.require_version ">= 2.1.3" Vagrant.configure("2") do |c| c.vm.box = "centos/7" # https://app.vagrantup.com/centos/boxes/7 c.vm.box_version = "~>1809" # updates: https://blog.centos.org/?s=vagrant c.vm.box_check_update = true # warn if base box out of date c.vm.hostname = "charliebox" c.vagrant.plugins = ['vagrant-disksize', 'vagrant-proxyconf', 'vagrant-reload', 'vagrant-vbguest'] # Note: Vagrant sets up a port mapping from localhost:2222 to charliebox:22 # automatically, so we need no repeat that here. # Set up proxies if appropriate. if ENV["HTTP_PROXY"] or ENV["HTTPS_PROXY"] or ENV["NO_PROXY"] if not (ENV["HTTP_PROXY"] and ENV["HTTPS_PROXY"] and ENV["NO_PROXY"]) abort("missing proxy variable(s): HTTP_PROXY HTTPS_PROXY and/or NO_PROXY") end c.proxy.http = ENV["HTTP_PROXY"] c.proxy.https = ENV["HTTPS_PROXY"] c.proxy.no_proxy = ENV["NO_PROXY"] c.vm.provision "proxy", type:"shell", privileged: true, inline: <<-EOF echo 'Defaults env_keep+="ALL_PROXY all_proxy auto_proxy RSYNC_PROXY"' \ >> /etc/sudoers.d/proxy EOF end # Configure the appliance. c.vm.provider "virtualbox" do |vb| vb.name = "charliebox" vb.gui = false vb.memory = "4096" vb.cpus = 4 c.disksize.size = '96GB' # see also provisioner "disksize" below vb.customize ["modifyvm", :id, "--nictype1", "virtio"] end # Install a decent user environment. c.vm.provision "environment", type: "shell", privileged: true, inline: <<-EOF set -e cd /tmp # Basic stuff from standard repos. yum makecache fast yum-config-manager --setopt=deltarpm=0 --save yum -y upgrade yum -y install emacs \ vim \ wget # Git from IUS. This also activates EPEL. wget https://centos7.iuscommunity.org/ius-release.rpm yum -y install epel-release rpm --install ius-release.rpm yum -y install git2u # Utilities to make the shell scripts faster and more usable. yum -y install pigz pv # Add /usr/local/{bin,sbin} to $PATH. echo 'export PATH=/usr/local/sbin:/usr/local/bin:$PATH' > /etc/profile.d/path.sh EOF # Expand the root filesystem to use the full resized disk. This is needed so # the full-scope tests can finish. Notes: # # 1. This is specific to the provisioning scheme selected by the base box. # See issue #285. # # 2. We install parted from Fedora 23 because the version of parted in # CentOS 7 (3.1-29) won't resize mounted partitions. Fedora 23's parted # is apparently the newest whose dependencies CentOS 7 still meets. # # 3. ---pretend-input-tty is an undocumented option (note third hyphen) to # convince parted to accept "yes" to the warning even without a TTY. # See: https://unix.stackexchange.com/a/365657 # c.vm.provision "disksize", type: "shell", privileged: true, inline: <<-EOF set -e cd /tmp yum -y install e2fsprogs wget -nv https://archives.fedoraproject.org/pub/archive/fedora/linux/releases/23/Everything/x86_64/os/Packages/p/parted-3.2-11.fc23.x86_64.rpm rpm --upgrade parted-*.x86_64.rpm parted /dev/sda ---pretend-input-tty resizepart 1 yes 100% parted /dev/sda p resize2fs /dev/sda1 df -h EOF # Configure namespaces. This needs a reboot for the kernel command line # update to take effect. # # Note: This could be skipped if we installed an upstream kernel (e.g., via # ElRepo). However, we're not aware of any advantages vs. the CentOS kernel # for this use case. c.vm.provision "namespaces", type: "shell", privileged: true, inline: <<-EOF set -e echo 'user.max_user_namespaces = 32767' > /etc/sysctl.d/51-userns.conf EOF c.vm.provision :reload # Install Docker. # # vagrant-proxyconf for Docker doesn't seem to work, so do it manually. 
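  # (Concretely, the provisioner below drops a systemd override,
  # /etc/systemd/system/docker.service.d/http-proxy.conf, that sets
  # HTTP_PROXY and HTTPS_PROXY for the daemon, which is Docker's documented
  # proxy mechanism.)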
c.vm.provision "docker", type: "shell", privileged: true, inline: <<-EOF set -e yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo yum -y install docker-ce if [[ $HTTP_PROXY ]]; then echo 'configuring Docker proxy' mkdir -p /etc/systemd/system/docker.service.d cat << EOF2 > /etc/systemd/system/docker.service.d/http-proxy.conf [Service] Environment="HTTP_PROXY=$HTTP_PROXY" Environment="HTTPS_PROXY=$HTTPS_PROXY" EOF2 fi systemctl enable docker systemctl daemon-reload systemctl restart docker systemctl show --property=Environment docker docker run hello-world EOF # Install Charliecloud. c.vm.provision "charliecloud", type: "shell", privileged: false, env: { "CH_VERSION" => ENV["CH_VERSION"] }, inline: <<-EOF set -e cd /usr/local/src sudo chmod 1777 . # CentOS/EPEL/IUI don't have the version of shadow-utils (newuidmap and # newgidmap) we need for runc (used by Buildah), so install from source. wget -nv https://github.com/shadow-maint/shadow/releases/download/4.6/shadow-4.6.tar.xz tar xf shadow-4.6.tar.xz (cd shadow-4.6 && ./configure && sudo make install) # Install Buildah. sudo yum -y install buildah sudo tee /etc/profile.d/charliecloud.sh << 'EOF2' export CH_TEST_TARDIR=/var/tmp/tarballs export CH_TEST_IMGDIR=/var/tmp/images export CH_TEST_PERMDIRS=skip EOF2 git clone --recursive https://github.com/hpc/charliecloud.git cd charliecloud if [[ $CH_VERSION ]]; then git checkout $CH_VERSION fi make examples/syscalls/pivot_root sudo make install PREFIX=/usr/local which ch-run ch-run --version EOF # Twiddle vagrant user so Charliecloud tests will pass (add to a second # group, and permit sudo to UID 0 and GID != 0). c.vm.provision "vagrant-user", type: "shell", privileged: true, inline: <<-EOF set -e usermod -aG users vagrant echo '%vagrant ALL=(ALL:ALL) NOPASSWD: ALL' > /etc/sudoers.d/vagrant # Configure subuids and subgids for runc. sudo usermod --add-subuids 10000-65536 vagrant sudo usermod --add-subgids 10000-65536 vagrant EOF # Remove unneeded packages. c.vm.provision "cleanup", type: "shell", privileged: true, inline: <<-EOF set -e package-cleanup -y --oldkernels --count=1 EOF # Prepare for .ova export. In this case, end users need not know or care # that the VM was set up using Vagrant. c.vm.provision "ova", type: "shell", run: "never", privileged: true, inline: <<-EOF set -e # Create a user "charlie" for the end user (see documentation). # # Strictly speaking, this is not necessary, as they could just use the # existing "vagrant" user. However, I eventually concluded that I'd prefer # to (1) keep the "charlie" user that we've been promoting for some time, # and (2) leave the door open for other VM build schemes in the future. adduser --password='*' --groups users,vagrant charlie # chown(2) /usr/local/src/charliecloud to charlie so end user can update, # rebuild, etc. chown -R charlie:charlie /usr/local/src/charliecloud # Configure subuids and subgids for runc. sudo usermod --add-subuids 10000-65536 charlie sudo usermod --add-subgids 10000-65536 charlie # Automatically log in "charlie" on the console, so they have a way to get # in if SSH isn't working. cd /etc/systemd/system/getty.target.wants rm -f getty@tty1.service cp /lib/systemd/system/getty@.service getty@tty1.service sed -ri 's|^ExecStart=.*$|ExecStart=-/sbin/agetty --autologin charlie --noclear %I|' getty@tty1.service # Configure SSH to allow password logins. 
We would prefer to keep the # Vagrant default of SSH keys only, but I can't figure out how to get the # key into the VM in a way that's easy for end users. sed -ri 's/^PasswordAuthentication no$/PasswordAuthentication yes/' /etc/ssh/sshd_config systemctl restart sshd # Fix /etc/shadow permissions. Not clear where they were broken, but # passwd(1) below fails without this. sudo restorecon -v /etc/shadow # Lock out password login for root and vagrant, because the default # password is well-known and we now allow password login. passwd -l root passwd -l vagrant EOF # Test Charliecloud (optional). # # Note: This will grow the image quite a bit. Don't run it before taking the # snapshot to be distributed to end users. c.vm.provision "test", type: "shell", run: "never", privileged: false, env: { "CH_TEST_SCOPE" => "full" }, inline: <<-EOF set -e if ( id -u charlie 2>/dev/null ); then # issue #309 user=charlie else user=vagrant fi echo "testing as: $user" sudo -iu $user -- sh -c "\ cd /usr/local/libexec/charliecloud/test \ && CH_TEST_SCOPE=$CH_TEST_SCOPE make test" EOF end # vi: set ft=ruby charliecloud-0.9.10/test/000077500000000000000000000000001346662313000152225ustar00rootroot00000000000000charliecloud-0.9.10/test/Build.buildah000077500000000000000000000075641346662313000176320ustar00rootroot00000000000000#!/bin/bash # ch-test-scope: standard # Build an Alpine Linux image using Buildah and umoci. This file may be # removed if/when we can support Buildah instead of Docker for the full test # suite (#184). # # See Build.skopeo-umoci for some caveats also relevant here. # # There are three basic approaches we considered: # # 1. "buildah export". The main problem here is that the subcommand does not # exist, though it used to [1,2]. A secondary issue is that it required # starting up a container [3], which we don't want. # # 2. "podman export". Podman (formerly kpod) is a container runtime [4]; it # can create a container and then export its filesystem. The addition of # the export command is why "buildah export" was removed [2]. We haven't # looked into this in detail. It appears to be Buildah's recommended # approach but would add a dependency. # # 3. Export to OCI image with "buildah push", flatten with umoci, and then # tar up the resulting rootfs. The advantage here is that it uses only # documented commands; the downside is that it introduces a redundant # unpack/repack of tarballs # # 4. Build with "buildah bud --squash", export to OCI image with "buildah # push", and manually copy out the tarball of the single layer. The main # advantage is that we can do it with Buildah only; the disadvantages are # (1) parsing JSON in shell is a kludge and (2) it only works with # single-layer images. The latter is easy to get with --squash, but if # you have a multi-layer image, that will require some fooling around # that I'm pretty sure is possible but haven't figured out yet. # # For now, this script uses approach 4. # # [1]: https://github.com/containers/buildah/pull/170 # [2]: https://github.com/containers/buildah/pull/245 # [3]: https://github.com/containers/buildah/issues/1118 # [4]: https://www.projectatomic.io/blog/2018/02/reintroduction-podman set -e srcdir=$1 tarball=${2}.tar.gz workdir=$3 tag=alpine39 cd "$srcdir" if ( ! command -v buildah >/dev/null 2>&1 ); then echo 'buildah not found' 1>&2 exit 65 fi if ( ! command -v runc >/dev/null 2>&1 ); then echo 'runc not found' 1>&2 exit 65 fi # Basic registries.conf file; needed for Buildah to use public Docker Hub. 
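# (We pass it to Buildah explicitly via --registries-conf below rather than
# relying on /etc/containers/registries.conf, so the build does not depend
# on host configuration.)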
registries_conf=$(cat <<'EOF' [registries.search] registries = ['docker.io'] EOF ) # Build image in Buildah local storage. (Note: This hangs after "Storing # signatures" if $PWD is not $srcdir.) export BUILDAH_ISOLATION=rootless export STORAGE_DRIVER=vfs buildah --root "${workdir}/storage" --runroot "${workdir}/runroot" \ --registries-conf <(echo "$registries_conf") \ build-using-dockerfile \ --build-arg HTTP_PROXY="$HTTP_PROXY" \ --build-arg HTTPS_PROXY="$HTTPS_PROXY" \ --build-arg NO_PROXY="$NO_PROXY" \ --build-arg http_proxy="$http_proxy" \ --build-arg https_proxy="$https_proxy" \ --build-arg no_proxy="$no_proxy" \ --squash --layers=true -t $tag -f ./Dockerfile.${tag} . cd "$workdir" # Export an OCI image directory. buildah --root ./storage --runroot ./runroot push $tag "oci:./oci" # Extract the tarball containing the single-layer image from the OCI directory. manifest=$(sed -E 's|^.+"application/vnd.oci.image.manifest.v1\+json","digest":"sha256:([0-9a-f]+)".+$|\1|' oci/index.json) echo "manifest: ${manifest}" layer_ct=$(grep -Eo 'application/vnd.oci.image.layer.v1.tar' \ "oci/blobs/sha256/${manifest}" | wc -l) if [[ $layer_ct -ne 1 ]]; then echo "one layer required; found $layer_ct" 1>&2 exit 1 fi layer=$(sed -E 's|^.+"application/vnd.oci.image.layer.v1.tar","digest":"sha256:([0-9a-f]+)".+$|\1|' "oci/blobs/sha256/${manifest}") echo "layer: ${layer}" # Move the layer tarball to the output (not copy, because the OCI image will # be deleted when we're done, so OK to break it). mv "oci/blobs/sha256/${layer}" "$tarball" charliecloud-0.9.10/test/Build.centos7xz000077500000000000000000000021001346662313000201430ustar00rootroot00000000000000#!/bin/bash # Download an xz-compressed CentOS 7 tarball. These are the base images for # the official CentOS Docker images. # # https://github.com/CentOS/sig-cloud-instance-images # # This GitHub repository is arranged with CentOS version and architecture in # different branches. However, the matrix is not complete: by branch, you can # download any architecture at the latest version, or a specific version of # x86_64, but not a specific version of aarch64. Therefore, we download by # commit hash. # # To check what version is in a tarball (on any architecture): # # $ tar xf centos-7-${arch}-docker.tar.xz --to-stdout ./etc/centos-release # # ch-test-scope: standard set -ex srcdir=$1 tarball=${2}.tar.xz workdir=$3 # 7.6.1810 arch=$(uname -m) case $arch in aarch64) commit=ccc35e0 ;; x86_64) commit=9a389e1 ;; *) echo 'unsupported architecture' 1>&2 exit 1 esac url="https://github.com/CentOS/sig-cloud-instance-images/blob/${commit}/docker/centos-7-${arch}-docker.tar.xz?raw=true" wget -nv -O "$tarball" "$url" charliecloud-0.9.10/test/Build.ch-build2dir000077500000000000000000000006441346662313000204620ustar00rootroot00000000000000#!/bin/bash # ch-test-scope: standard # Generate image directory using ch-build2dir and stage it for testing. set -e srcdir=$1 tarball_gz=${2}.tar.gz workdir=$3 tag=build2dir if ( ! command -v docker &> /dev/null); then echo 'docker not found' 1>&2 exit 65 fi cd "$srcdir" ch-build2dir -t $tag --file=Dockerfile.alpine39 . "$workdir" cd "$workdir" tar czf ${tag}.tar.gz $tag mv ${tag}.tar.gz "$tarball_gz" charliecloud-0.9.10/test/Build.ch-pull2dir000077500000000000000000000006251346662313000203360ustar00rootroot00000000000000#!/bin/bash # ch-test-scope: standard # Generate image directory using ch-pull2dir and stage it for testing. set -e srcdir=$1 tarball_gz=${2}.tar.gz workdir=$3 imgtag=alpine:3.9 tag=ch-pull2dir if ( ! 
command -v docker &> /dev/null); then echo 'docker not found' 1>&2 exit 65 fi cd "$workdir" ch-pull2dir "$imgtag" . mv $imgtag $tag tar czf ${tag}.tar.gz $tag mv ${tag}.tar.gz "$tarball_gz" charliecloud-0.9.10/test/Build.ch-pull2tar000077500000000000000000000005671346662313000203530ustar00rootroot00000000000000#!/bin/bash # ch-test-scope: standard # Generate a flattened image tarball using ch-pull2tar and stage it for # testing. set -e srcdir=$1 tarball_gz=${2}.tar.gz workdir=$3 imgtag=alpine:3.9 tag=ch-pull2tar if ( ! command -v docker &> /dev/null); then echo 'docker not found' 1>&2 exit 65 fi cd "$workdir" ch-pull2tar $imgtag . mv ${imgtag}.tar.gz "$tarball_gz" charliecloud-0.9.10/test/Build.missing000077500000000000000000000001411346662313000176530ustar00rootroot00000000000000#!/bin/bash # ch-test-scope: quick # This image's prerequisites can never be satisfied. exit 65 charliecloud-0.9.10/test/Build.skopeo-umoci000077500000000000000000000020521346662313000206170ustar00rootroot00000000000000#!/bin/bash # Build an Alpine Linux image using skopeo and umoci. This is a precursor to # proper support within the tools (issue #325). # # Note that this approach is a little contrived, in that we unpack the image # with umoci, tar it up, and then later in the test suite unpack it again # redundantly with ch-tar2dir. Many real workflows would just use the # umoci-unpacked image. (umoci does not support producing a tarball directly.) # # Warning: This image currently tests the case of a tarball with one top-level # directory and no hidden files. If you remove it, make sure regressions on # issue #332 are still tested. # # ch-test-scope: standard set -ex srcdir=$1 tarball_gz=${2}.tar.gz workdir=$3 cd "$workdir" if ( ! command -v skopeo >/dev/null 2>&1 ); then echo 'skopeo not found' 1>&2 exit 65 fi if ( ! command -v umoci >/dev/null 2>&1 ); then echo 'umoci not found' 1>&2 exit 65 fi skopeo copy docker://alpine:3.9 oci:./oci:alpine umoci unpack --rootless --image ./oci:alpine ./img ( cd img && tar czf "$tarball_gz" -- rootfs ) charliecloud-0.9.10/test/Docker_Pull.alpine39_dp000066400000000000000000000000401346662313000214500ustar00rootroot00000000000000alpine:3.9 ch-test-scope: quick charliecloud-0.9.10/test/Dockerfile.alpine39000066400000000000000000000002131346662313000206330ustar00rootroot00000000000000# ch-test-scope: quick FROM alpine:3.9 RUN apk add --no-cache bc # Base image has no default command; we need one to build. CMD ["true"] charliecloud-0.9.10/test/Dockerfile.alpineedge000066400000000000000000000002131346662313000213040ustar00rootroot00000000000000# ch-test-scope: full FROM alpine:edge RUN apk add --no-cache bc # Base image has no default command; we need one to build. CMD ["true"] charliecloud-0.9.10/test/Dockerfile.centos6000066400000000000000000000002241346662313000205720ustar00rootroot00000000000000# ch-test-scope: full # ch-test-arch-exclude: aarch64 # No ARM images provided for CentOS 6 FROM centos:6 RUN yum -y install bc RUN yum clean all charliecloud-0.9.10/test/Dockerfile.centos7000066400000000000000000000006021346662313000205730ustar00rootroot00000000000000# ch-test-scope: standard FROM centos:7 # This image has two purposes: (1) demonstrate we can build a CentOS 7 image # and (2) provide a build environment for Charliecloud RPMs. 
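# (Note: epel-release goes in its own step so EPEL's repository metadata is
# in place before the package list below is installed; some of those
# packages, e.g. bats, come from EPEL rather than the base repositories.)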
RUN yum -y install epel-release RUN yum -y install \ bats \ gcc \ make \ python36 \ rpm-build \ rpmlint \ wget RUN yum clean all charliecloud-0.9.10/test/Dockerfile.debian9000066400000000000000000000002471346662313000205310ustar00rootroot00000000000000# ch-test-scope: standard FROM debian:stretch ENV chse_dockerfile foo ENV DEBIAN_FRONTEND noninteractive RUN apt-get update \ && apt-get install -y apt-utils charliecloud-0.9.10/test/Dockerfile.mpich000066400000000000000000000033651346662313000203200ustar00rootroot00000000000000# ch-test-scope: full FROM debian9 # The MPICH example has a smaller scope than the OpenMPI example. We want to # provide an MPICH build that works on a single node and (via ch-fromhost # trickery) on Cray Aries systems. That's it for now. # We build MPICH rather than install the Debian package because something # about Debian's build prevents applications from linking against # libmpi.so.12, which is the library we need to replace later. I have not # figured out what. While we're at it, tweak some other options as well to # produce a bare bones build. RUN apt-get install -y --no-install-suggests \ autoconf \ g++ \ gcc \ git \ libgfortran3 \ libpmi2-0-dev \ make \ wget # We currently need our own patched patchelf; see issue #256. RUN git clone https://github.com/hpc/patchelf.git RUN cd patchelf \ && git checkout shrink-soname \ && ./bootstrap.sh \ && ./configure --prefix=/usr/local \ && make install RUN rm -Rf patchelf ENV MPI_VERSION 3.2.1 ENV MPI_URL http://www.mpich.org/static/downloads/${MPI_VERSION} RUN wget -nv ${MPI_URL}/mpich-${MPI_VERSION}.tar.gz RUN tar xf mpich-${MPI_VERSION}.tar.gz RUN apt-get install -y --no-install-suggests file RUN cd mpich-${MPI_VERSION} \ && CFLAGS=-O3 \ CXXFLAGS=-O3 \ ./configure --prefix=/usr/local \ --disable-cxx \ --disable-fortran \ --disable-threads \ --disable-rpath \ --disable-static \ --disable-wrapper-rpath \ --without-ibverbs \ --without-libfabric \ --without-slurm \ && make -j$(getconf _NPROCESSORS_ONLN) install RUN rm -Rf mpich-${MPI_VERSION}* RUN ldconfig charliecloud-0.9.10/test/Dockerfile.nvidia000066400000000000000000000034101346662313000204630ustar00rootroot00000000000000# ch-test-scope: full # ch-test-arch-exclude: aarch64 # only x86-64 and ppc64le supported by nVidia # This Dockerfile demonstrates a multi-stage build. With a single-stage build # that brings along the nVidia build environment, the resulting unpacked image # is 2.9 GiB; with the multi-stage build, it's 146 MiB. # # See: https://docs.docker.com/develop/develop-images/multistage-build ## Stage 1: Install the nVidia build environment and build a sample app. FROM ubuntu:16.04 # OS packages needed ENV DEBIAN_FRONTEND noninteractive RUN apt-get update RUN apt-get install -y \ gnupg-curl \ wget # Install CUDA from nVidia. # See: https://developer.nvidia.com/cuda-downloads?target_os=Linux&target_arch=x86_64&target_distro=Ubuntu&target_version=1704&target_type=debnetwork WORKDIR /usr/local/src RUN wget -nv http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1704/x86_64/cuda-repo-ubuntu1704_9.1.85-1_amd64.deb RUN dpkg --install cuda-repo-ubuntu1704_9.1.85-1_amd64.deb RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1704/x86_64/7fa2af80.pub RUN apt-get update RUN apt-get install -y cuda-toolkit-9-1 # Build the sample app we'll use to test. WORKDIR /usr/local/cuda-9.1/samples/0_Simple/matrixMulCUBLAS RUN make ## Stage 2: Copy the built sample app into a clean Ubuntu image. 
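# (Each FROM starts a new build stage; "COPY --from=0" below refers back to
# stage 0, i.e. the first FROM above, by index. Only files explicitly copied
# across survive into the final image, which is how the CUDA build
# environment gets left behind.)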
FROM ubuntu:16.04 COPY --from=0 /usr/local/cuda-9.1/samples/0_Simple/matrixMulCUBLAS / # This is the one nVidia shared library that the sample app needs. We could be # smarter about finding this path. However, one thing to avoid is copying in # all of /usr/local/cuda-9.1/targets/x86_64-linux/lib, because that directory # is 1.3GiB. COPY --from=0 /usr/local/cuda-9.1/targets/x86_64-linux/lib/libcublas.so.9.1.181 /usr/local/lib RUN ldconfig charliecloud-0.9.10/test/Dockerfile.openmpi000066400000000000000000000123231346662313000206630ustar00rootroot00000000000000# ch-test-scope: full FROM debian9 # A key goal of this Dockerfile is to demonstrate best practices for building # OpenMPI for use inside a container. # # This OpenMPI aspires to work close to optimally on clusters with any of the # following interconnects: # # - Ethernet (TCP/IP) # - InfiniBand (IB) # - Omni-Path (OPA) # - RDMA over Converged Ethernet (RoCE) interconnects # # with no environment variables, command line arguments, or additional # configuration files. Thus, we try to implement decisions at build time. # # This is a work in progress, and we're very interested in feedback. # # OpenMPI has numerous ways to communicate messages [1]. The ones relevant to # this build and the interconnects they support are: # # Module Eth IB OPA RoCE note decision # ------------ ---- ---- ---- ---- ---- -------- # # ob1 : tcp Y* X X X a include # ob1 : openib N Y Y Y b,c exclude # cm : psm2 N N Y* N include # : ucx Y? Y* N Y? b,d include # # Y : supported # Y*: best choice for that interconnect # X : supported but sub-optimal # # a : No RDMA, so performance will suffer. # b : Uses libibverbs. # c : Will be removed in OpenMPI 4. # d : Uses Mellanox libraries if available in preference to libibverbs. # # You can check what's available with: # # $ ch-run /var/tmp/openmpi -- ompi_info | egrep '(btl|mtl|pml)' # # The other build decisions are: # # 1. PMI/PMIx: Include these so that we can use srun or any other PMI[x] # provider, with no matching OpenMPI needed on the host. # # 2. --disable-pty-support to avoid "pipe function call failed when # setting up I/O forwarding subsystem". # # 3. --enable-mca-no-build=plm-slurm to support launching processes using # the host's srun (i.e., the container OpenMPI needs to talk to the host # Slurm's PMI) but prevent OpenMPI from invoking srun itself from within # the container, where srun is not installed (the error messages from # this are inscrutable). # # [1]: https://github.com/open-mpi/ompi/blob/master/README # OS packages needed to build this stuff. RUN apt-get install -y --no-install-suggests \ autoconf \ file \ flex \ g++ \ gcc \ gfortran \ git \ hwloc-nox \ less \ libdb5.3-dev \ libhwloc-dev \ libnl-3-200 \ libnl-route-3-200 \ libnl-route-3-dev \ libnuma1 \ libpmi2-0-dev \ make \ wget \ udev WORKDIR /usr/local/src # Use the Buster versions of libpsm2 (not present in Stretch) and libibverbs # (too old in Stretch). Download manually because I'm too lazy to set up # package pinning. # # Note that libpsm2 is x86-64 only: # https://packages.debian.org/buster/libpsm2-2 # https://lists.debian.org/debian-hpc/2017/12/msg00015.html ENV DEB_URL http://snapshot.debian.org/archive/debian/20181126T030749Z/pool/main ENV PSM2_VERSION 11.2.68-3 RUN [ "$(dpkg --print-architecture)" = "amd64" ] \ || wget -nv ${DEB_URL}/libp/libpsm2/libpsm2-2_${PSM2_VERSION}_amd64.deb \ ${DEB_URL}/libp/libpsm2/libpsm2-dev_${PSM2_VERSION}_amd64.deb # As of 5/2/2019, this is not the newest libibverbs. 
However, it is the # newest that doesn't crash on our test systems. ENV IBVERBS_VERSION 20.0-1 RUN for i in ibacm \ ibverbs-providers \ ibverbs-utils \ libibumad-dev \ libibumad3 \ libibverbs-dev \ libibverbs1 \ librdmacm-dev \ librdmacm1 \ rdma-core \ rdmacm-utils ; \ do \ wget -nv ${DEB_URL}/r/rdma-core/${i}_${IBVERBS_VERSION}_$(dpkg --print-architecture).deb ; \ done # UCX. There is stuff to build Debian packages, but it seems not too polished. ENV UCX_VERSION 1.3.1 RUN git clone --branch v${UCX_VERSION} --depth 1 \ https://github.com/openucx/ucx.git RUN cd ucx \ && ./autogen.sh \ && ./contrib/configure-release --prefix=/usr/local \ && make -j$(getconf _NPROCESSORS_ONLN) install # Install the .debs we collected. RUN dpkg --install *.deb # OpenMPI. # # Patch OpenMPI to disable UCX plugin on systems with Intel or Cray HSNs. UCX # has inferior performance to PSM2/uGNI but higher priority. ENV MPI_URL https://www.open-mpi.org/software/ompi/v3.1/downloads ENV MPI_VERSION 3.1.4 RUN wget -nv ${MPI_URL}/openmpi-${MPI_VERSION}.tar.gz RUN tar xf openmpi-${MPI_VERSION}.tar.gz COPY dont-init-ucx-on-intel-cray.patch ./openmpi-${MPI_VERSION} RUN cd openmpi-${MPI_VERSION} && git apply dont-init-ucx-on-intel-cray.patch RUN cd openmpi-${MPI_VERSION} \ && CFLAGS=-O3 \ CXXFLAGS=-O3 \ ./configure --prefix=/usr/local \ --sysconfdir=/mnt/0 \ --with-slurm \ --with-pmi \ --with-pmix \ --with-ucx \ --disable-pty-support \ --enable-mca-no-build=btl-openib,plm-slurm \ && make -j$(getconf _NPROCESSORS_ONLN) install RUN ldconfig RUN rm -Rf openmpi-${MPI_VERSION}* # OpenMPI expects this program to exist, even if it's not used. Default is # "ssh : rsh", but that's not installed. RUN echo 'plm_rsh_agent = false' >> /mnt/0/openmpi-mca-params.conf charliecloud-0.9.10/test/Dockerfile.python3000066400000000000000000000000531346662313000206150ustar00rootroot00000000000000# ch-test-scope: full FROM python:3-alpine charliecloud-0.9.10/test/Dockerfile.ubuntu1604000066400000000000000000000002031346662313000210450ustar00rootroot00000000000000# ch-test-scope: full FROM ubuntu:16.04 RUN apt-get update \ && apt-get install -y bc \ && rm -rf /var/lib/apt/lists/* charliecloud-0.9.10/test/Makefile000066400000000000000000000053711346662313000166700ustar00rootroot00000000000000export LC_ALL := C images_ch := chtest/Build \ $(sort $(wildcard ./Build.*)) \ $(sort $(wildcard ./Dockerfile.*)) \ $(sort $(wildcard ./Docker_Pull.*)) images_eg := $(sort $(wildcard ../examples/*/*/Build)) \ $(sort $(wildcard ../examples/*/*/Build.*)) \ $(sort $(wildcard ../examples/*/*/Dockerfile)) \ $(sort $(wildcard ../examples/*/*/Dockerfile.*)) \ $(sort $(wildcard ../examples/*/*/Docker_Pull)) \ $(sort $(wildcard ../examples/*/*/Docker_Pull.*)) images := $(images_ch) $(images_eg) # Favor embedded Bats, if installed, over system Bats. export PATH := $(CURDIR)/bats/bin:$(PATH) # Used by "make all" at top level to build these files for "make install". .PHONY: all all: build_auto.bats run_auto.bats \ sotest/bin/sotest sotest/lib/libsotest.so.1.0 .PHONY: test test: test-build test-run ifneq ($(CH_TEST_SCOPE),quick) test: test-test endif .PHONY: test-build test-build: build_auto.bats bats build.bats build_auto.bats build_post.bats # Note: This will not find ch-run correctly if $CWD is not the test # directory, which I believe is assumed elsewhere in the test suite as well. 
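# For example, assuming the test environment variables described in the
# documentation are already set and the images have been built, one might
# run only this phase with:
#
#   $ make test-run CH_TEST_SCOPE=quick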
.PHONY: test-run test-run: run_auto.bats bats run_first.bats run_auto.bats run/*.bats set -e; \ if [ "$$CH_TEST_SCOPE" != "quick" ]; then \ for GUEST_USER in $$(id -un) root nobody; do \ for GUEST_GROUP in $$(id -gn) root $$(id -gn nobody); do \ export GUEST_USER; \ export GUEST_GROUP; \ echo testing as: $$GUEST_USER $$GUEST_GROUP; \ bats run/ch-run_uidgid.bats; \ done; \ done; fi # FIXME: This could be sped up by skipping bats if the image is out of scope. .PHONY: test-test test-test: $(images_eg) set -e; \ for image in $(images_eg); do \ export CH_TEST_TAG=$$(./make-auto tag $$image); \ bats $$(dirname $$image)/test.bats; \ done .PHONY: clean clean: rm -f *_auto.bats rm -f sotest/sotest sotest/libsotest.so* rm -f sotest/bin/sotest sotest/lib/libsotest.so* .PHONY: where-bats where-bats: which bats bats --version build_auto.bats: $(images) ./make-auto build $^ > $@ run_auto.bats: $(images) ./make-auto run $^ > $@ sotest/bin/sotest: sotest/sotest cp -a $^ $@ sotest/lib/libsotest.so.1.0: sotest/libsotest.so.1.0 cp -a $^ $@ # We hardcode gcc here because some other compilers (hello, Intel) link the # resulting binaries with extra shared libraries that are then not copied into # the container. (Issue #227.) sotest/sotest: sotest/sotest.c sotest/libsotest.so.1.0 gcc -o $@ -L./sotest -lsotest $^ sotest/libsotest.so.1.0: sotest/libsotest.c gcc -o $@ -shared -fPIC -Wl,-soname,libsotest.so.1 -lc $^ ln -f -s libsotest.so.1.0 sotest/libsotest.so ln -f -s libsotest.so.1.0 sotest/libsotest.so.1 charliecloud-0.9.10/test/README.md000066400000000000000000000002151346662313000164770ustar00rootroot00000000000000Charliecloud comes with a fairly comprehensive Bats test suite. For testing instructions visit: https://hpc.github.io/charliecloud/test.html charliecloud-0.9.10/test/bats/000077500000000000000000000000001346662313000161535ustar00rootroot00000000000000charliecloud-0.9.10/test/bin000077700000000000000000000000001346662313000166162../binustar00rootroot00000000000000charliecloud-0.9.10/test/build.bats000066400000000000000000000103411346662313000171730ustar00rootroot00000000000000load common @test 'create tarball directory if needed' { scope quick mkdir -p "$ch_tardir" } @test 'documentations build' { scope standard command -v sphinx-build > /dev/null 2>&1 || skip 'Sphinx is not installed' test -d ../doc-src || skip 'documentation source code absent' cd ../doc-src && make -j "$(getconf _NPROCESSORS_ONLN)" } @test 'version number seems sane' { echo "version: ${ch_version}" [[ $(echo "$ch_version" | wc -l) -eq 1 ]] # one line [[ $ch_version =~ ^0\.[0-9]+\.[0-9]+ ]] # starts with a number triplet # matches VERSION.full if available if [[ -e $ch_bin/../VERSION.full ]]; then diff -u <(echo "$ch_version") "${ch_bin}/../VERSION.full" fi } @test 'executables seem sane' { scope quick # Assume that everything in $ch_bin is ours if it starts with "ch-" and # either (1) is executable or (2) ends in ".c". Demand satisfaction from # each. The latter is to catch cases when we haven't compiled everything; # if we have, the test makes duplicate demands, but that's low cost. while IFS= read -r -d '' i; do i=${i%.c} echo echo "$i" # --version run "$i" --version echo "$output" [[ $status -eq 0 ]] diff -u <(echo "${output}") <(echo "$ch_version") # --help: returns 0, says "Usage:" somewhere. run "$i" --help echo "$output" [[ $status -eq 0 ]] [[ $output =~ Usage: ]] # not setuid or setgid ls -l "$i" [[ ! -u $i ]] [[ ! 
-g $i ]] done < <( find "$ch_bin" -name 'ch-*' -a \( -executable -o -name '*.c' \) \ -print0 ) } @test 'lint shell scripts' { scope standard ( command -v shellcheck >/dev/null 2>&1 ) || skip "no shellcheck found" # user executables for i in "$ch_bin"/ch-*; do echo "shellcheck: ${i}" [[ ! $(file "$i") = *'shell script'* ]] && continue shellcheck -e SC1090,SC2002,SC2154 "$i" done # libraries for user executables for i in "$ch_libexec"/*.sh; do echo "shellcheck: ${i}" shellcheck -s sh -e SC1090,SC2002 "$i" done # BATS scripts # # The sed horror encapsulated here is because BATS requires that the curly # open brace after @test be on the same line, while ShellCheck requires # that it not be (otherwise parse error). Thus, line numbers are wrong. while IFS= read -r -d '' i; do echo "shellcheck: ${i}" sed -r $'s/(@test .+) \{/\\1\\\n{/g' "$i" \ | shellcheck -s bash -e SC1090,SC2002,SC2154,SC2164 - done < <( find . ../examples -name bats -prune -o -name '*.bats' -print0 ) # libraries for BATS scripts shellcheck -s bash -e SC2002,SC2034 ./common.bash # misc shell scripts if [[ -e ../packaging ]]; then misc=". ../examples ../packaging" else misc=". ../examples" fi shellcheck -e SC2002,SC2034 chtest/Build # shellcheck disable=SC2086 while IFS= read -r -d '' i; do echo "shellcheck: ${i}" shellcheck -e SC2002 "$i" done < <( find $misc -name bats -prune -o -name '*.sh' -print0 ) } @test 'proxy variables' { scope quick # Proxy variables are a mess on UNIX. There are a lot them, and different # programs use them inconsistently. This test is based on the assumption # that if one of the proxy variables are set, then they all should be, in # order to prepare for diverse internet access at build time. # # Coordinate this test with bin/ch-build. # # Note: ALL_PROXY and all_proxy aren't currently included, because they # cause image builds to fail until Docker 1.13 # (https://github.com/docker/docker/pull/27412). v=' no_proxy http_proxy https_proxy' v+=$(echo "$v" | tr '[:lower:]' '[:upper:]') empty_ct=0 for i in $v; do if [[ -n ${!i} ]]; then echo "${i} is non-empty" for j in $v; do echo " $j=${!j}" if [[ -z ${!j} ]]; then (( ++empty_ct )) fi done break fi done [[ $empty_ct -eq 0 ]] } @test 'sotest executable works' { scope quick export LD_LIBRARY_PATH=./sotest ldd sotest/sotest sotest/sotest } charliecloud-0.9.10/test/build_post.bats000066400000000000000000000003631346662313000202430ustar00rootroot00000000000000load common @test 'nothing unexpected in tarball directory' { scope quick run find "$ch_tardir" -mindepth 1 \ -not \( -name '*.tar.gz' -o -name '*.tar.xz' -o -name '*.pq_missing' \) echo "$output" [[ $output = '' ]] } charliecloud-0.9.10/test/chtest/000077500000000000000000000000001346662313000165145ustar00rootroot00000000000000charliecloud-0.9.10/test/chtest/Build000077500000000000000000000106561346662313000175110ustar00rootroot00000000000000#!/bin/bash # Build an Alpine Linux image roughly following the chroot(2) instructions: # https://wiki.alpinelinux.org/wiki/Installing_Alpine_Linux_in_a_chroot # # We deliberately do not sudo. It's a little rough around the edges, because # apk expects root, but it better follows the principle of least privilege. We # could tidy by using the fakeroot utility, but AFAICT that's not particularly # common and we'd prefer not to introduce another dependency. For example, # it's a standard tool on Debian but only in EPEL for CentOS. # # Most of the tests in scope quick use this image, but to save time, build the # image in standard scope and re-use for quick. 
# # ch-test-scope: standard set -ex srcdir=$1 tarball_uncompressed=${2}.tar tarball=${tarball_uncompressed}.gz workdir=$3 arch=$(uname -m) mirror=http://dl-cdn.alpinelinux.org/alpine/v3.9 # Dynamically select apk-tools-static version. We would prefer to hard-code a # version (and upgrade on our schedule), but we can't because Alpine does not # keep old package versions. If we try, the build breaks every few months (for # example, see issue #242). apk_tools=$( wget -qO - "${mirror}/main/${arch}" \ | grep -F apk-tools-static \ | sed -E 's/^.*(apk-tools-static-[0-9.r-]+\.apk).*$/\1/') img=${workdir}/img cd "$workdir" # "apk add" wants to install a bunch of files root:root. Thus, if we don't map # ourselves to root:root, we get thousands of errors about "Failed to set # ownership". # # For most Build scripts, we'd simply error out with missing prerequisites, # but this is a core image that much of the test suite depends on. ch_run="ch-run -u0 -g0 -w --no-home ${img}" ## Bootstrap base Alpine Linux. # Download statically linked apk. wget "${mirror}/main/${arch}/${apk_tools}" # Bootstrap directories. mkdir img mkdir img/{dev,etc,proc,sys,tmp} touch img/etc/{group,hosts,passwd,resolv.conf} # Bootstrap static apk. (cd img && tar xf "../${apk_tools}") mkdir img/etc/apk echo ${mirror}/main > img/etc/apk/repositories # Install the base system and a dynamically linked apk. # # This will give a few errors about chown failures. However, the install does # seem to work, so we ignore the failed exit code. $ch_run -- /sbin/apk.static \ --allow-untrusted --initdb --update-cache \ add alpine-base apk-tools \ || true # Now that we've bootstrapped, we don't need apk.static any more. It wasn't # installed using apk, so it's not in the database and can just be rm'ed. rm img/sbin/apk.static.* # Install packages we need for our tests. $ch_run -- /sbin/apk add gcc make musl-dev python3 || true # Validate the install. $ch_run -- /sbin/apk audit --system $ch_run -- /sbin/apk stats # Fix permissions. # # Note that this removes setuid/setgid bits from a few files (and # directories). There is not a race condition, i.e., a window where setuid # executables could become the invoking users, which would be a security hole, # because the setuid/setgid binaries are not group- or world-readable until # after this chmod. chmod -R u+rw,ug-s img ## Install our test stuff. # Sentinel file for --no-home --bind test echo "tmpfs and host home are not overmounted" \ > img/home/overmount-me # We want ch-ssh touch img/usr/bin/ch-ssh # Test programs. cp -r "$srcdir" img/test $ch_run --cd /test -- sh -c 'make clean && make' # Fixtures for /dev cleaning. touch img/dev/deleteme mkdir -p img/mnt/dev touch img/mnt/dev/dontdeleteme # Fixture to make sure we raise hidden files in non-tarbombs. touch img/.hiddenfile1 img/..hiddenfile2 img/...hiddenfile3 ## Tar it up. # Using pigz saves about 8 seconds. Normally we wouldn't care about that, but # this script is part of the quick scope, which we'd like developers to use # frequently, so every second matters. if ( command -v pigz >/dev/null 2>&1 ); then gzip_cmd=pigz else gzip_cmd=gzip fi # Charliecloud supports images both with a single top level directory and # without (tarbomb). The Docker images in the test suite are all tarbombs # (because that's what "docker export" gives us), so use a containing # directory for this one. tar cf "$tarball_uncompressed" -- img # Add in the /dev fixtures in a couple more places. 
Note that this will cause # the tarball to no longer have a single root directory, but they'll be # removed during unpacking, restoring that condition. ( cd img && tar rf "$tarball_uncompressed" ./dev/deleteme dev/deleteme ) # Finalize the tarball. $gzip_cmd -f "$tarball_uncompressed" [[ -f $tarball ]] charliecloud-0.9.10/test/chtest/Makefile000066400000000000000000000002331346662313000201520ustar00rootroot00000000000000BINS := chroot-escape mknods setgroups setuid ALL := $(BINS) CFLAGS := -std=c11 -Wall -Werror .PHONY: all all: $(ALL) .PHONY: clean clean: rm -f $(ALL) charliecloud-0.9.10/test/chtest/bind_priv.py000077500000000000000000000022461346662313000210510ustar00rootroot00000000000000#!/usr/bin/env python3 # This script tries to bind to a privileged port on each of the IP addresses # specified on the command line. import errno import socket import sys PORT = 7 # echo results = dict() try: for ip in sys.argv[1:]: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.bind((ip, PORT)) except OSError as x: if (x.errno in (errno.EACCES, errno.EADDRNOTAVAIL)): results[ip] = x.errno else: raise else: results[ip] = 0 except Exception as x: print('ERROR\texception: %s' % x) rc = 1 else: if (len(results) < 1): print('ERROR\tnothing to test', end='') rc = 1 elif (len(set(results.values())) != 1): print('ERROR\tmixed results: ', end='') rc = 1 else: result = next(iter(results.values())) if (result != 0): print('SAFE\t%d (%s) ' % (result, errno.errorcode[result]), end='') rc = 0 else: print('RISK\tsuccessful bind ', end='') rc = 1 explanation = ' '.join('%s=%d' % (ip, e) for (ip, e) in sorted(results.items())) print(explanation) sys.exit(rc) charliecloud-0.9.10/test/chtest/chroot-escape.c000066400000000000000000000042131346662313000214140ustar00rootroot00000000000000/* This program tries to escape a chroot using well-established methods, which are not an exploit but rather take advantage of chroot(2)'s well-defined behavior. We use device and inode numbers to test whether the root directory is the same before and after the escape. References: https://filippo.io/escaping-a-chroot-jail-slash-1/ http://www.bpfh.net/simes/computing/chroot-break.html */ #define _DEFAULT_SOURCE #include <errno.h> #include <fcntl.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/stat.h> #include <sys/types.h> #include <unistd.h> void fatal(char * msg) { printf("ERROR\t%s: %s\n", msg, strerror(errno)); exit(EXIT_FAILURE); } int main() { struct stat before, after; int fd; int status = EXIT_FAILURE; char tmpdir_template[] = "/tmp/chtest.tmp.chroot.XXXXXX"; char * tmpdir_name; if (stat("/", &before)) fatal("stat before"); tmpdir_name = mkdtemp(tmpdir_template); if (tmpdir_name == NULL) fatal("mkdtemp"); if ((fd = open(".", O_RDONLY)) < 0) fatal("open"); if (chroot(tmpdir_name)) { if (errno == EPERM) { printf("SAFE\tchroot(2) failed with EPERM\n"); status = EXIT_SUCCESS; } else { fatal("chroot"); } } else { if (fchdir(fd)) fatal("fchdir"); if (close(fd)) fatal("close"); for (int i = 0; i < 1024; i++) if (chdir("..")) fatal("chdir"); /* If we got this far, we should be able to call chroot(2), so failure is an error. */ if (chroot(".")) fatal("chroot"); /* If root directory is the same before and after the attempted escape, then the escape failed, and we should be happy. 
*/ if (stat("/", &after)) fatal("stat after"); if (before.st_dev == after.st_dev && before.st_ino == after.st_ino) { printf("SAFE\t"); status = EXIT_SUCCESS; } else { printf("RISK\t"); status = EXIT_FAILURE; } printf("dev/inode before %lu/%lu, after %lu/%lu\n", before.st_dev, before.st_ino, after.st_dev, after.st_ino); } if (rmdir(tmpdir_name)) fatal("rmdir"); return status; } charliecloud-0.9.10/test/chtest/dev_proc_sys.py000077500000000000000000000022541346662313000215730ustar00rootroot00000000000000#!/usr/bin/env python3 import os.path import sys # File in /sys seem to vary between Linux systems. Thus, try a few candidates # and use the first one that exists. What we want is any file under /sys with # permissions root:root -rw------- that's in a directory readable and # executable by unprivileged users, so we know we're testing permissions on # the file rather than any of its containing directories. This may help: # # $ find /sys -type f -a -perm 600 -ls # sys_file = None for f in ("/sys/devices/cpu/rdpmc", "/sys/kernel/mm/page_idle/bitmap", "/sys/module/nf_conntrack_ipv4/parameters/hashsize", "/sys/kernel/slab/request_sock_TCP/red_zone"): if (os.path.exists(f)): sys_file = f break if (sys_file is None): print("ERROR\tno test candidates in /sys exist") sys.exit(1) problem_ct = 0 for f in ("/dev/mem", "/proc/kcore", sys_file): try: open(f, "rb").read(1) print("RISK\t%s: read allowed" % f) problem_ct += 1 except PermissionError: print("SAFE\t%s: read not allowed" % f) except OSError as x: print("ERROR\t%s: exception: %s" % (f, x)) problem_ct += 1 sys.exit(problem_ct != 0) charliecloud-0.9.10/test/chtest/fs_perms.py000077500000000000000000000067301346662313000207150ustar00rootroot00000000000000#!/usr/bin/env python3 # This script walks the directories specified in sys.argv[1:] prepared by # make-perms-test.sh and attempts to read, write, and traverse (cd) each of # the entries within. It compares the result to the expectation encoded in the # filename. # # A summary line is printed on stdout. Running chatter describing each # evaluation is printed on stderr. # # Note: This works more or less the same as an older version embodied by # `examples/sandbox.py --filesystem` but is implemented in pure Python without # shell commands. Thus, the whole script must be run as root if you want to # see what root can do. import os.path import random import re import sys EXPECTED_RE = re.compile(r'~(...)$') class Makes_No_Sense(TypeError): pass VERBOSE = False def main(): if (sys.argv[1] == '--verbose'): global VERBOSE VERBOSE = True sys.argv.pop(1) d = sys.argv[1] mismatch_ct = 0 test_ct = 0 for path in sorted(os.listdir(d)): test_ct += 1 mismatch_ct += not test('%s/%s' % (d, path)) if (test_ct <= 0 or test_ct % 2887 != 0): error("unexpected number of tests: %d" % test_ct) if (mismatch_ct == 0): print('SAFE\t', end='') else: print('RISK\t', end='') print('%d mismatches in %d tests' % (mismatch_ct, test_ct)) sys.exit(mismatch_ct != 0) # Table of test function name fragments. 
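# Keys are (isdir, isfile, islink) tuples as computed in test() below;
# values are the one-character type code used in reports plus the suffix of
# the corresponding try_*_* handler functions. (True, True, *) combinations
# are absent because a path cannot be both a directory and a regular file.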
testvec = { (False, False, False): ('X', 'bad'), (False, False, True ): ('l', 'broken_symlink'), (False, True, False): ('f', 'file'), (False, True, True ): ('f', 'file'), (True, False, False): ('d', 'dir'), (True, False, True ): ('d', 'dir') } def error(msg): print('ERROR\t%s' % msg) sys.exit(1) def expected(path): m = EXPECTED_RE.search(path) if (m is None): return '*' else: return m.group(1) def test(path): filetype = (os.path.isdir(path), os.path.isfile(path), os.path.islink(path)) report = '%s %-24s ' % (testvec[filetype][0], path) expect = expected(path) result = '' for op in 'r', 'w', 't': # read, write, traverse f = globals()['try_%s_%s' % (op, testvec[filetype][1])] try: f(path) except (PermissionError, Makes_No_Sense): result += '-' except Exception as x: error('exception on %s: %s' % (path, x)) else: result += op report += result if (expect != '*' and result != expect): print('%s mismatch' % report) return False else: if (VERBOSE): print('%s ok' % report) return True def try_r_bad(path): error('bad file type: %s' % path) try_t_bad = try_r_bad try_w_bad = try_r_bad def try_r_broken_symlink(path): raise Makes_No_Sense() try_t_broken_symlink = try_r_broken_symlink try_w_broken_symlink = try_r_broken_symlink def try_r_dir(path): os.listdir(path) def try_t_dir(path): try_r_file(path + '/file') def try_w_dir(path): fpath = '%s/a%d' % (path, random.getrandbits(64)) try_w_file(fpath) os.unlink(fpath) def try_r_file(path): with open(path, 'rb', buffering=0) as fp: fp.read(1) def try_t_file(path): raise Makes_No_Sense() def try_w_file(path): # The file should exist, but this will create it if it doesn't. We don't # check for that error condition because we *only* want to touch the OS for # open(2) and write(2). with open(path, 'wb', buffering=0) as fp: fp.write(b'written by fs_test.py\n') if (__name__ == '__main__'): main() charliecloud-0.9.10/test/chtest/mknods.c000066400000000000000000000061261346662313000201600ustar00rootroot00000000000000/* Try to make some device files, and print a message to stdout describing what happened. See: https://www.kernel.org/doc/Documentation/devices.txt */ #define _GNU_SOURCE #include <errno.h> #include <fcntl.h> #include <stdbool.h> #include <stdio.h> #include <sys/stat.h> #include <sys/sysmacros.h> #include <sys/types.h> #include <unistd.h> const unsigned char_devs[] = { 1, 3, /* /dev/null -- most innocuous */ 1, 1, /* /dev/mem -- most juicy */ 0 }; int main(int argc, char ** argv) { dev_t dev; char * dir; int i, j; unsigned maj, min; bool open_ok; char * path; for (i = 1; i < argc; i++) { dir = argv[i]; for (j = 0; char_devs[j] != 0; j += 2) { maj = char_devs[j]; min = char_devs[j + 1]; if (0 > asprintf(&path, "%s/c%d.%d", dir, maj, min)) { printf("ERROR\tasprintf() failed with errno=%d\n", errno); return 1; } fprintf(stderr, "trying to mknod %s: ", path); dev = makedev(maj, min); if (mknod(path, S_IFCHR | 0500, dev)) { // Could not create device; make sure it's an error we expected. switch (errno) { case EACCES: case EINVAL: // e.g. /sys/firmware/efi/efivars case ENOENT: // e.g. /proc case ENOTDIR: // for bind-mounted files e.g. /etc/passwd case EPERM: case EROFS: fprintf(stderr, "failed as expected with errno=%d\n", errno); break; default: fprintf(stderr, "failed with unexpected errno\n"); printf("ERROR\tmknod(2) failed on %s with errno=%d\n", path, errno); return 1; } } else { // Device created; safe if we can't open it (see issue #381). 
fprintf(stderr, "succeeded\n"); fprintf(stderr, "trying to open %s: ", path); if (open(path, O_RDONLY) != -1) { fprintf(stderr, "succeeded\n"); open_ok = true; } else { open_ok = false; switch (errno) { case EACCES: fprintf(stderr, "failed as expected with errno=%d\n", errno); break; default: fprintf(stderr, "failed with unexpected errno\n"); printf("ERROR\topen(2) failed on %s with errno=%d\n", path, errno); return 1; } } // Remove the device, whether or not we were able to open it. if (unlink(path)) { printf("ERROR\tunlink(2) failed on %s with errno=%d", path, errno); return 1; } if (open_ok) { printf("RISK\tmknod(2), open(2) succeeded on %s (now removed)\n", path); return 1; } } } printf("SAFE\t%d devices in %d dirs failed\n", (i - 1) * (j / 2), i - 1); return 0; } charliecloud-0.9.10/test/chtest/printns000077500000000000000000000011171346662313000201370ustar00rootroot00000000000000#!/usr/bin/env python3 # Print out my namespace IDs, to stdout or (if specified) the path in $2. # Then, if $1 is specified, wait that number of seconds before exiting. import glob import os import socket import sys import time if (len(sys.argv) > 1): pause = float(sys.argv[1]) else: pause = 0 if (len(sys.argv) > 2): out = open(sys.argv[2], "wt") else: out = sys.stdout hostname = socket.gethostname() for ns in glob.glob("/proc/self/ns/*"): stat = os.stat(ns) print("%s:%s:%d" % (ns, hostname, stat.st_ino), file=out, flush=True) if (pause): time.sleep(pause) charliecloud-0.9.10/test/chtest/setgroups.c000066400000000000000000000016461346662313000207200ustar00rootroot00000000000000/* Try to drop the last supplemental group, and print a message to stdout describing what happened. */ #define _DEFAULT_SOURCE #include <errno.h> #include <grp.h> #include <stdio.h> #include <sys/types.h> #include <unistd.h> #define NGROUPS_MAX 128 int main() { int group_ct; gid_t groups[NGROUPS_MAX]; group_ct = getgroups(NGROUPS_MAX, groups); if (group_ct == -1) { printf("ERROR\tgetgroups(2) failed with errno=%d\n", errno); return 1; } fprintf(stderr, "found %d groups; trying to drop last group %d\n", group_ct, groups[group_ct - 1]); if (setgroups(group_ct - 1, groups)) { if (errno == EPERM) { printf("SAFE\tsetgroups(2) failed with EPERM\n"); return 0; } else { printf("ERROR\tsetgroups(2) failed with errno=%d\n", errno); return 1; } } else { printf("RISK\tsetgroups(2) succeeded\n"); return 1; } } charliecloud-0.9.10/test/chtest/setuid.c000066400000000000000000000015441346662313000201610ustar00rootroot00000000000000/* Try to change effective UID. */ #define _GNU_SOURCE #include <errno.h> #include <stdio.h> #include <sys/types.h> #include <unistd.h> #define NOBODY 65534 #define NOBODY2 65533 int main(int argc, char ** argv) { // target UID is nobody, unless we're already nobody uid_t start = geteuid(); uid_t target = start != NOBODY ? NOBODY : NOBODY2; int result; fprintf(stderr, "current EUID=%u, attempting EUID=%u\n", start, target); result = seteuid(target); // setuid(2) fails with EINVAL in user namespaces and EPERM if not root. if (result == 0) { printf("RISK\tsetuid(2) succeeded for EUID=%u\n", target); return 1; } else if (errno == EINVAL) { printf("SAFE\tsetuid(2) failed as expected with EINVAL\n"); return 0; } printf("ERROR\tsetuid(2) failed unexpectedly with errno=%d\n", errno); return 1; } charliecloud-0.9.10/test/chtest/signal_out.py000077500000000000000000000020611346662313000212340ustar00rootroot00000000000000#!/usr/bin/env python3 # Send a signal to a process outside the container. # # This is a little tricky. We want a process that: # # 1. is certain to exist, to avoid false negatives # 2. 
we shouldn't be able to signal (specifically, we can't create a process # to serve as the target) # 3. is outside the container # 4. won't crash the host too badly if killed by the signal # # We want a signal that: # # 5. will be harmless if received # 6. is not blocked # # Accordingly, this test sends SIGCONT to the youngest getty process. The # thinking is that the virtual terminals are unlikely to be in use, so losing # one will be straightforward to clean up. import os import signal import subprocess import sys try: pdata = subprocess.check_output(["pgrep", "-nl", "getty"]) except subprocess.CalledProcessError: print("ERROR\tpgrep failed") sys.exit(1) pid = int(pdata.split()[0]) try: os.kill(pid, signal.SIGCONT) except PermissionError as x: print("SAFE\tfailed as expected: %s" % x) sys.exit(0) print("RISK\tsucceeded") sys.exit(1) charliecloud-0.9.10/test/common.bash000066400000000000000000000202031346662313000173460ustar00rootroot00000000000000arch_exclude () { if [[ $1 = $(uname -m) ]]; then skip 'unsupported architecture' fi } crayify_mpi_or_skip () { if [[ $ch_cray ]]; then # shellcheck disable=SC2086 $ch_mpirun_node ch-fromhost --cray-mpi "$1" else skip 'host is not a Cray' fi } docker_tag_p () { printf 'image tag %s ... ' "$1" hash_=$(sudo docker images -q "$1" | sort -u) if [[ $hash_ ]]; then echo "$hash_" return 0 else echo 'not found' return 1 fi } docker_ok () { docker_tag_p "$1" docker_tag_p "${1}:latest" docker_tag_p "${1}:$(ch-run --version |& tr '~+' '--')" } env_require () { if [[ -z ${!1} ]]; then printf '$%s is empty or not set\n\n' "$1" >&2 exit 1 fi } image_ok () { ls -ld "$1" "${1}/WEIRD_AL_YANKOVIC" || true test -d "$1" ls -ld "$1" || true byte_ct=$(du -s -B1 "$1" | cut -f1) echo "$byte_ct" [[ $byte_ct -ge 3145728 ]] # image is at least 3MiB } multiprocess_ok () { [[ $ch_multiprocess ]] || skip 'no multiprocess launch tool found' # If the MPI in the container is MPICH, we only try host launch on Crays. # For the other settings (workstation, other Linux clusters), it may or # may not work; we simply haven't tried. [[ $ch_mpi = mpich && -z $ch_cray ]] \ && skip 'MPICH untested' # Exit function successfully. true } need_docker () { # Skip test if $CH_TEST_SKIP_DOCKER is true. If argument provided, use # that tag as missing prerequisite sentinel file. pq=${ch_tardir}/${1}.pq_missing if [[ $pq ]]; then rm -f "$pq" fi if [[ $CH_TEST_SKIP_DOCKER ]]; then if [[ $pq ]]; then touch "$pq" fi skip 'Docker not found or user-skipped' fi } prerequisites_ok () { if [[ -f $CH_TEST_TARDIR/${1}.pq_missing ]]; then skip 'build prerequisites not met' fi } scope () { case $1 in # $1 is the test's scope quick) ;; # always run quick-scope tests standard) if [[ $CH_TEST_SCOPE = quick ]]; then skip "${1} scope" fi ;; full) if [[ $CH_TEST_SCOPE = quick || $CH_TEST_SCOPE = standard ]]; then skip "${1} scope" fi ;; skip) skip "developer-skipped; see comments and/or issues" ;; *) exit 1 esac } tarball_ok () { ls -ld "$1" || true test -f "$1" test -s "$1" } unpack_img_all_nodes () { if [[ $1 ]]; then $ch_mpirun_node ch-tar2dir "${ch_tardir}/${ch_tag}.tar.gz" "$ch_imgdir" else skip 'not needed' fi } # Predictable sorting and collation export LC_ALL=C # Do we have what we need? env_require CH_TEST_TARDIR env_require CH_TEST_IMGDIR env_require CH_TEST_PERMDIRS if ( bash -c 'set -e; [[ 1 = 0 ]]; exit 0' ); then # Bash bug: [[ ... 
]] expression doesn't exit with set -e # https://github.com/sstephenson/bats/issues/49 printf 'Need at least Bash 4.1 for these tests.\n\n' >&2 exit 1 fi # Set path to the right Charliecloud. This uses a symlink in this directory # called "bin" which points to the corresponding bin directory, either simply # up and over (source code) or set during "make install". # # Note that sudo resets $PATH, so if you want to run any Charliecloud stuff # under sudo, you must use an absolute path. ch_bin="$(cd "$(dirname "${BASH_SOURCE[0]}")/bin" && pwd)" ch_bin="$(readlink -f "${ch_bin}")" export PATH=$ch_bin:$PATH # shellcheck disable=SC2034 ch_runfile=$(command -v ch-run) # shellcheck disable=SC2034 ch_libexec=$(ch-build --libexec-path) if [[ ! -x ${ch_bin}/ch-run ]]; then printf 'Must build with "make" before running tests.\n\n' >&2 exit 1 fi # Charliecloud version. ch_version=$(ch-run --version 2>&1) # shellcheck disable=SC2034 ch_version_docker=$(echo "$ch_version" | tr '~+' '--') # Separate directories for tarballs and images. # # Canonicalize both so they have consistent paths and we can reliably use them # in tests (see issue #143). We use readlink(1) rather than realpath(2), # despite the admonition in the man page, because it's more portable [1]. # # [1]: https://unix.stackexchange.com/a/136527 ch_imgdir=$(readlink -ef "$CH_TEST_IMGDIR") ch_tardir=$(readlink -ef "$CH_TEST_TARDIR") if ( mount | grep -Fq "$ch_imgdir" ); then printf 'Something is mounted at or under %s.\n\n' "$ch_imgdir" >&2 exit 1 fi # Image information. ch_tag=${CH_TEST_TAG:-NO_TAG_SET} # set by Makefile; many tests don't need it ch_img=${ch_imgdir}/${ch_tag} ch_tar=${ch_tardir}/${ch_tag}.tar.gz ch_ttar=${ch_tardir}/chtest.tar.gz ch_timg=${ch_imgdir}/chtest # User-private temporary directory in case multiple users are running the # tests simultaneously. btnew=$BATS_TMPDIR/bats.tmp.$USER mkdir -p "$btnew" chmod 700 "$btnew" export BATS_TMPDIR=$btnew [[ $(stat -c %a "$BATS_TMPDIR") = '700' ]] # MPICH requires different handling from OpenMPI. Set a variable to enable # some kludges. if [[ $ch_tag = *'-mpich' ]]; then ch_mpi=mpich # First kludge. MPICH's internal launcher is called "Hydra". If Hydra sees # Slurm environment variables, it tries to launch even local ranks with # "srun". This of course fails within the container. You can't turn it off # by building with --without-slurm like OpenMPI, so we fall back to this # environment variable at run time. export HYDRA_LAUNCHER=fork else ch_mpi=openmpi fi # Crays are special. if [[ -f /etc/opt/cray/release/cle-release ]]; then ch_cray=yes else ch_cray= fi # Slurm stuff. if [[ $SLURM_JOB_ID ]]; then # $SLURM_NTASKS isn't always set, nor is $SLURM_CPUS_ON_NODE despite the # documentation. 
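# ($SLURM_JOB_CPUS_PER_NODE looks like e.g. "36(x4)" for a 4-node
# allocation with 36 CPUs per node, or just "36" for a single node, so
# cutting at the first "(" yields the per-node CPU count. This assumes a
# homogeneous allocation.)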
if [[ -z $SLURM_CPUS_ON_NODE ]]; then SLURM_CPUS_ON_NODE=$(echo "$SLURM_JOB_CPUS_PER_NODE" | cut -d'(' -f1) fi ch_nodes=$SLURM_JOB_NUM_NODES ch_cores_node=$SLURM_CPUS_ON_NODE else ch_nodes=1 ch_cores_node=$(getconf _NPROCESSORS_ONLN) fi ch_cores_total=$((ch_nodes * ch_cores_node)) if [[ $ch_mpi = mpich ]]; then ch_mpirun_np="-np ${ch_cores_node}" else ch_mpirun_np='--use-hwthread-cpus' fi ch_unslurm= if [[ $SLURM_JOB_ID ]]; then ch_multinode=yes # can run on multiple nodes ch_multiprocess=yes # can run multiple processes ch_mpirun_node='srun --ntasks-per-node 1' # one process/node ch_mpirun_core='srun --cpus-per-task 1' # one process/core ch_mpirun_2='srun -n2' # two processes on diff nodes ch_mpirun_2_1node='srun -N1 -n2' # two processes on one node # OpenMPI 3.1 pukes when guest-launched and Slurm environment variables # are present. Work around this by fooling OpenMPI into believing it's not # in a Slurm allocation. if [[ $ch_mpi = openmpi ]]; then ch_unslurm='--unset-env=SLURM*' fi else ch_multinode= if ( command -v mpirun >/dev/null 2>&1 ); then ch_multiprocess=yes ch_mpirun_node='mpirun --map-by ppr:1:node' ch_mpirun_core="mpirun ${ch_mpirun_np}" ch_mpirun_2='mpirun -np 2' ch_mpirun_2_1node='mpirun -np 2' else ch_multiprocess= ch_mpirun_node='' ch_mpirun_core=false ch_mpirun_2=false ch_mpirun_2_1node=false fi fi # If the variable CH_TEST_SKIP_DOCKER is true, we skip all the tests that # depend on Docker. It's true if user-set or command "docker" is not in $PATH. if ( ! command -v docker >/dev/null 2>&1 ); then CH_TEST_SKIP_DOCKER=yes fi # Validate CH_TEST_SCOPE and set if empty. if [[ -z $CH_TEST_SCOPE ]]; then CH_TEST_SCOPE=standard elif [[ $CH_TEST_SCOPE != quick \ && $CH_TEST_SCOPE != standard \ && $CH_TEST_SCOPE != full ]]; then # shellcheck disable=SC2016 printf '$CH_TEST_SCOPE value "%s" is invalid\n\n' "$CH_TEST_SCOPE" >&2 exit 1 fi # Do we have and want sudo? if [[ -z $CH_TEST_DONT_SUDO ]] \ && ( command -v sudo >/dev/null 2>&1 && sudo -v >/dev/null 2>&1 ); then # This isn't super reliable; it returns true if we have *any* sudo # privileges, not specifically to run the commands we want to run. # shellcheck disable=SC2034 ch_have_sudo=yes fi charliecloud-0.9.10/test/docker-clean.sh000077500000000000000000000020751346662313000201140ustar00rootroot00000000000000#!/bin/bash # FIXME: Give up after a certain number of iterations. set -e # Remove all containers. while true; do cmd='sudo docker ps -aq' cs_ct=$($cmd | wc -l) echo "found $cs_ct containers" [[ 0 -eq $cs_ct ]] && break # shellcheck disable=SC2046 sudo docker rm $($cmd) done # Untag all images. This fails with: # # Error response from daemon: invalid reference format # # sometimes. I don't know why. if [[ $1 != --all ]]; then while true; do cmd='sudo docker images --filter dangling=false --format {{.Repository}}:{{.Tag}}' tag_ct=$($cmd | wc -l) echo "found $tag_ct tagged images" [[ 0 -eq $tag_ct ]] && break # shellcheck disable=SC2046 sudo docker rmi -f --no-prune $($cmd) done fi # If --all specified, remove all images. 
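# (Usage, inferred from the logic above and below: run with no arguments to
# remove containers and untag images only, or with --all to delete the
# images themselves as well.)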
if [[ $1 = --all ]]; then while true; do cmd='sudo docker images -q' img_ct=$($cmd | wc -l) echo "found $img_ct images" [[ 0 -eq $img_ct ]] && break # shellcheck disable=SC2046 sudo docker rmi -f $($cmd) done fi charliecloud-0.9.10/test/dont-init-ucx-on-intel-cray.patch000066400000000000000000000014471346662313000234320ustar00rootroot00000000000000diff --git a/ompi/mca/pml/ucx/pml_ucx_component.c b/ompi/mca/pml/ucx/pml_ucx_component.c index ff0040f18c..e8cf903860 100644 --- a/ompi/mca/pml/ucx/pml_ucx_component.c +++ b/ompi/mca/pml/ucx/pml_ucx_component.c @@ -14,6 +14,9 @@ #include +#ifdef HAVE_UNISTD_H +#include +#endif static int mca_pml_ucx_component_register(void); static int mca_pml_ucx_component_open(void); @@ -131,6 +134,11 @@ mca_pml_ucx_component_init(int* priority, bool enable_progress_threads, { int ret; + if ((0 == access("/sys/class/ugni/", F_OK) || (0 == access("/sys/class/hfi1/", F_OK)))){ + PML_UCX_VERBOSE(1, "Cray or Intel HSN detected, removing UCX from consideration"); + return NULL; + } + if ( (ret = mca_pml_ucx_init()) != 0) { return NULL; } charliecloud-0.9.10/test/make-auto000077500000000000000000000067771346662313000170540ustar00rootroot00000000000000#!/usr/bin/env python from __future__ import print_function import os import os.path import re import sys def tag_from_path(path): (dirname, basename) = os.path.split(path) parent_dir = os.path.basename(dirname) (basicname, extension) = os.path.splitext(basename) extension = extension[1:] # remove leading dot, if any assert (basicname in ("Build", "Dockerfile", "Docker_Pull")) if (parent_dir in ("", ".", "test")): assert (extension != "") return extension else: if (extension == ""): return parent_dir else: return (parent_dir + "-" + extension) mode = sys.argv[1] if (mode == 'tag'): print(tag_from_path(sys.argv[2])) sys.exit(0) print("""\ # Do not edit this file; it's autogenerated. load common """) for path in sys.argv[2:]: (dirname, basename) = os.path.split(path) if (dirname == ""): dirname = "." tag = tag_from_path(path) # Interpret test hints. with open(path) as fp: text = fp.read() # ch-test-scope m = re.search(r"ch-test-scope: (skip|quick|standard|full)", text) if (m is None): print("%s: no valid scope specified" % path, file=sys.stderr) sys.exit(1) scope = m.group(1) # ch-test-arch-exclude arch_exclude = "\n".join("arch_exclude %s" % i for i in re.findall(r"ch-test-arch-exclude: (\w+)", text)) # Build a tarball: different test for each type. 
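# The three types, distinguished by basename: "Build" files are arbitrary
# executables that produce a tarball themselves; "Dockerfile" files are fed
# to ch-build and then flattened with ch-docker2tar; "Docker_Pull" files
# name an image address on their first line, which is pulled and flattened
# the same way.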
if (mode == "build"): if ("Build" in basename): template = """\ @test 'custom build %(tag)s' { scope %(scope)s %(arch_exclude)s tarball=$ch_tardir/%(tag)s pq=$ch_tardir/%(tag)s.pq_missing workdir=$ch_tardir/%(tag)s.tmp rm -f "$pq" mkdir "$workdir" cd "%(dirname)s" run ./%(basename)s "$PWD" "$tarball" "$workdir" echo "$output" rm -Rf "$workdir" if [[ $status -eq 65 ]]; then touch "$pq" skip 'prerequisites not met' fi [[ $status -eq 0 ]] }""" elif ("Dockerfile" in basename or "Docker_Pull" in basename): if ("Dockerfile" in basename): template = """\ @test 'ch-build %(tag)s' { scope %(scope)s %(arch_exclude)s need_docker %(tag)s ch-build -t %(tag)s --file="%(path)s" "%(dirname)s" sudo docker tag %(tag)s "%(tag)s:$ch_version_docker" docker_ok %(tag)s }""" else: assert ("Docker_Pull" in basename) with open(path) as fp: addr = fp.readline().rstrip() template = """\ @test 'docker pull %(tag)s' { scope %(scope)s %(arch_exclude)s need_docker %(tag)s sudo docker pull %(addr)s sudo docker tag %(addr)s %(tag)s sudo docker tag %(tag)s "%(tag)s:$ch_version_docker" docker_ok %(tag)s }""" template += """ @test 'ch-docker2tar %(tag)s' { scope %(scope)s %(arch_exclude)s need_docker tarball="$ch_tardir/%(tag)s.tar.gz" ch-docker2tar %(tag)s "$ch_tardir" tar -tf "$tarball" | grep -E '^environment$' tarball_ok "$tarball" }""" else: assert False, "unknown build type" print("\n" + template % locals()) # Unpack tarball and run: same for all types. if (mode == "run"): print() print("""\ @test 'ch-tar2dir %(tag)s' { scope %(scope)s %(arch_exclude)s prerequisites_ok %(tag)s ch-tar2dir "$ch_tardir/%(tag)s" "$ch_imgdir" } @test 'ch-run %(tag)s /bin/true' { scope %(scope)s %(arch_exclude)s prerequisites_ok %(tag)s img="$ch_imgdir/%(tag)s" ch-run "$img" /bin/true }""" % locals()) charliecloud-0.9.10/test/make-perms-test000077500000000000000000000144271346662313000201760ustar00rootroot00000000000000#!/usr/bin/env python # This script sets up a test directory for testing filesystem permissions # enforcement in UDSS such as virtual machines and containers. It must be run # as root. For example: # # $ sudo ./make-perms-test /data $USER nobody # $ ./fs_perms.py /data/perms_test/pass 2>&1 | egrep -v 'ok$' # d /data/perms_test/pass/ld.out-a~--- --- rwt mismatch # d /data/perms_test/pass/ld.out-r~--- --- rwt mismatch # f /data/perms_test/pass/lf.out-a~--- --- rw- mismatch # f /data/perms_test/pass/lf.out-r~--- --- rw- mismatch # RISK 4 mismatches in 1 directories # # In this case, there will be four mismatches because the symlinks are # expected to be invalid after the pass directory is attached to the UDSS. # # Roughly 3,000 permission settings are evaluated in order to check files and # directories against user, primary group, and supplemental group access. # # For files, we test read and write. For directories, read, write, and # traverse. Files are not tested for execute because it's a more complicated # test (new process needed) and if readable, someone could simply make their # own executable copy. # # Compatibility: As of February 2016, this needs to be compatible with Python # 2.6 because that's the highest version that comes with RHEL 6. We're also # aiming to be source-compatible with Python 3.4+, but that's untested. 
# # Help: http://python-future.org/compatible_idioms.html from __future__ import division, print_function, unicode_literals import grp import os import os.path import pwd import sys if (len(sys.argv) != 4): print('usage error (PEBKAC)', file=sys.stderr) sys.exit(1) FILE_PERMS = set([0, 2, 4, 6]) DIR_PERMS = set([0, 1, 2, 3, 4, 5, 6, 7]) ALL_PERMS = FILE_PERMS | DIR_PERMS FILE_CONTENT = 'gary' * 19 + '\n' testdir = os.path.abspath(sys.argv[1] + '/perms_test') my_user = sys.argv[2] yr_user = sys.argv[3] me = pwd.getpwnam(my_user) you = pwd.getpwnam(yr_user) my_uid = me.pw_uid my_gid = me.pw_gid my_group = grp.getgrgid(my_gid).gr_name yr_uid = you.pw_uid yr_gid = you.pw_gid yr_group = grp.getgrgid(yr_gid).gr_name # find an arbitrary supplemental group for my_user my_group2 = None my_gid2 = None for g in grp.getgrall(): if (my_user in g.gr_mem and g.gr_name != my_group): my_group2 = g.gr_name my_gid2 = g.gr_gid break if (my_group2 is None): print("couldn't find supplementary group for %s" % my_user, file=sys.stderr) sys.exit(1) if (my_gid == yr_gid or my_gid == my_gid2): print('%s and %s share a group' % (my_user, yr_user), file=sys.stderr) sys.exit(1) print('''\ test directory: %(testdir)s me: %(my_user)s %(my_uid)d you: %(yr_user)s %(yr_uid)d my primary group: %(my_group)s %(my_gid)d my supp. group: %(my_group2)s %(my_gid2)d your primary group: %(yr_group)s %(yr_gid)d ''' % locals()) def set_perms(name, uid, gid, mode): os.chown(name, uid, gid) os.chmod(name, mode) def symlink(src, link_name): if (not os.path.exists(src)): print('link target does not exist: %s' % src) sys.exit(1) os.symlink(src, link_name) class Test(object): def __init__(self, uid, gid, up, gp, op, name=None): self.uid = uid self.group = grp.getgrgid(gid).gr_name self.gid = gid self.user = pwd.getpwuid(uid).pw_name self.up = up self.gp = gp self.op = op self.name_override = name self.mode = up << 6 | gp << 3 | op # Which permission bits govern? if (self.uid == my_uid): self.p = self.up elif (self.gid in (my_gid, my_gid2)): self.p = self.gp else: self.p = self.op @property def name(self): if (self.name_override is not None): return self.name_override else: return ('%s.%s-%s.%03o~%s' % (self.type_, self.user, self.group, self.mode, self.expect)) @property def valid(self): return (all(x in self.valid_perms for x in (self.up, self.gp, self.op))) def write(self): if (not self.valid): return 0 self.write_real() set_perms(self.name, self.uid, self.gid, self.mode) return 1 class Test_Directory(Test): type_ = 'd' valid_perms = DIR_PERMS @property def expect(self): return ( ('r' if (self.p & 4) else '-') + ('w' if (self.p & 3 == 3) else '-') + ('t' if (self.p & 1) else '-')) def write_real(self): os.mkdir(self.name) # Create a file R/W by me, for testing traversal. 
file_ = self.name + '/file' with open(file_, 'w') as fp: fp.write(FILE_CONTENT) set_perms(file_, my_uid, my_uid, 0660) class Test_File(Test): type_ = 'f' valid_perms = FILE_PERMS @property def expect(self): return ( ('r' if (self.p & 4) else '-') + ('w' if (self.p & 2) else '-') + '-') def write_real(self): with open(self.name, 'w') as fp: fp.write(FILE_CONTENT) try: os.mkdir(testdir) except OSError as x: print("can't mkdir %s: %s" % (testdir, str(x))) sys.exit(1) set_perms(testdir, my_uid, my_gid, 0770) os.chdir(testdir) Test_Directory(my_uid, my_gid, 7, 7, 0, 'nopass').write() os.chdir('nopass') Test_Directory(my_uid, my_gid, 7, 7, 0, 'dir').write() Test_File(my_uid, my_gid, 6, 6, 0, 'file').write() os.chdir('..') Test_Directory(my_uid, my_gid, 7, 7, 0, 'pass').write() os.chdir('pass') ct = 0 for uid in (my_uid, yr_uid): for gid in (my_gid, my_gid2, yr_gid): if (uid == my_uid and gid == my_gid): # Files owned by my_uid:my_gid are not a meaningful access control # test; check the documentation for why. continue for up in ALL_PERMS: for gp in ALL_PERMS: for op in ALL_PERMS: f = Test_File(uid, gid, up, gp, op) #print(f.name) ct += f.write() d = Test_Directory(uid, gid, up, gp, op) #print(d.name) ct += d.write() #print(ct) symlink('f.%s-%s.600~rw-' % (my_user, yr_group), 'lf.in~rw-') symlink('d.%s-%s.700~rwt' % (my_user, yr_group), 'ld.in~rwt') symlink('%s/nopass/file' % testdir, 'lf.out-a~---') symlink('%s/nopass/dir' % testdir, 'ld.out-a~---') symlink('../nopass/file', 'lf.out-r~---') symlink('../nopass/dir', 'ld.out-r~---') print("created %d files and directories" % ct) charliecloud-0.9.10/test/run/000077500000000000000000000000001346662313000160265ustar00rootroot00000000000000charliecloud-0.9.10/test/run/build-rpms.bats000066400000000000000000000041231346662313000207570ustar00rootroot00000000000000load ../common @test 'build/install/uninstall RPMs' { scope standard prerequisites_ok centos7 [[ -d ../.git ]] || skip "not in Git working directory" command -v sphinx-build > /dev/null 2>&1 || skip 'Sphinx is not installed' img=${ch_imgdir}/centos7 # Build and install RPMs into CentOS 7 image. (cd .. && packaging/fedora/build --install --image="$img" \ --rpmbuild="$BATS_TMPDIR/rpmbuild" HEAD) # Do installed RPMs look sane? run ch-run "$img" -- rpm -qa "charliecloud*" echo "$output" [[ $status -eq 0 ]] [[ $output = *'charliecloud-'* ]] [[ $output = *'charliecloud-debuginfo-'* ]] [[ $output = *'charliecloud-test-'* ]] run ch-run "$img" -- rpm -ql "charliecloud" echo "$output" [[ $status -eq 0 ]] [[ $output = *'/usr/bin/ch-run'* ]] [[ $output = *'/usr/libexec/charliecloud/base.sh'* ]] [[ $output = *'/usr/share/man/man1/charliecloud.1.gz'* ]] run ch-run "$img" -- rpm -ql "charliecloud-debuginfo" echo "$output" [[ $status -eq 0 ]] [[ $output = *'/usr/lib/debug/usr/bin/ch-run.debug'* ]] [[ $output = *'/usr/lib/debug/usr/libexec/charliecloud/test/sotest/lib/libsotest.so.1.0.debug'* ]] run ch-run "$img" -- rpm -ql "charliecloud-test" echo "$output" [[ $status -eq 0 ]] [[ $output = *'/usr/libexec/charliecloud/examples/mpi/lammps/Dockerfile'* ]] [[ $output = *'/usr/libexec/charliecloud/test/Build.centos7xz'* ]] [[ $output = *'/usr/libexec/charliecloud/test/sotest/lib/libsotest.so.1.0'* ]] # Uninstall to avoid interfering with the rest of the test suite. 
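    # (All three packages go in a single "rpm --erase" transaction,
    # presumably because charliecloud-test and charliecloud-debuginfo
    # depend on the base charliecloud package; erasing the base package
    # by itself first would likely fail rpm's dependency check.)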
run ch-run -w "$img" -- rpm -v --erase charliecloud-test \ charliecloud-debuginfo \ charliecloud echo "$output" [[ $status -eq 0 ]] [[ $output = *'charliecloud-'* ]] [[ $output = *'charliecloud-debuginfo-'* ]] [[ $output = *'charliecloud-test-'* ]] # All gone? run ch-run "$img" -- rpm -qa "charliecloud*" echo "$output" [[ $status -eq 0 ]] [[ $output = '' ]] } charliecloud-0.9.10/test/run/ch-fromhost.bats000066400000000000000000000276331346662313000211450ustar00rootroot00000000000000load ../common fromhost_clean () { [[ $1 ]] # We used to delete only specific paths, but this turned into an unwieldy # mess of wildcards that obscured the original specificity purpose. rm -f "${1}/ld.so.cache" find "$1" -xdev \( \ -name 'libcuda*' \ -o -name 'libnvidia*' \ -o -name libsotest.so.1 \ -o -name libsotest.so.1.0 \ -o -name sotest \ -o -name sotest.c \ \) -print -delete ch-run -w "$1" -- /sbin/ldconfig # restore default cache fromhost_clean_p "$1" } fromhost_clean_p () { ch-run "$1" -- /sbin/ldconfig -p | grep -F libsotest && return 1 run fromhost_ls "$1" echo "$output" [[ $status -eq 0 ]] [[ -z $output ]] } fromhost_ls () { find "$1" -xdev -name '*sotest*' -ls } @test 'ch-fromhost (Debian)' { scope standard prerequisites_ok debian9 img=${ch_imgdir}/debian9 libpath=$(ch-fromhost --lib-path "$img") echo "libpath: ${libpath}" # --file fromhost_clean "$img" ch-fromhost -v --file sotest/files_inferrable.txt "$img" fromhost_ls "$img" test -f "${img}/usr/bin/sotest" test -f "${img}${libpath}/libsotest.so.1.0" test -L "${img}${libpath}/libsotest.so.1" ch-run "$img" -- /sbin/ldconfig -p | grep -F libsotest ch-run "$img" -- sotest rm "${img}/usr/bin/sotest" rm "${img}${libpath}/libsotest.so.1.0" rm "${img}${libpath}/libsotest.so.1" ch-run -w "$img" -- /sbin/ldconfig fromhost_clean_p "$img" # --cmd ch-fromhost -v --cmd 'cat sotest/files_inferrable.txt' "$img" ch-run "$img" -- sotest # --path ch-fromhost -v --path sotest/bin/sotest \ --path sotest/lib/libsotest.so.1.0 \ "$img" ch-run "$img" -- sotest fromhost_clean "$img" # --cmd and --file ch-fromhost -v --cmd 'cat sotest/files_inferrable.txt' \ --file sotest/files_inferrable.txt "$img" ch-run "$img" -- sotest fromhost_clean "$img" # --dest ch-fromhost -v --file sotest/files_inferrable.txt \ --dest /mnt "$img" \ --path sotest/sotest.c ch-run "$img" -- sotest ch-run "$img" -- test -f /mnt/sotest.c fromhost_clean "$img" # --dest overrides inference, but ldconfig still run ch-fromhost -v --dest /lib \ --file sotest/files_inferrable.txt \ "$img" ch-run "$img" -- /lib/sotest fromhost_clean "$img" # --no-ldconfig ch-fromhost -v --no-ldconfig --file sotest/files_inferrable.txt "$img" test -f "${img}/usr/bin/sotest" test -f "${img}${libpath}/libsotest.so.1.0" ! test -L "${img}${libpath}/libsotest.so.1" ! 
( ch-run "$img" -- /sbin/ldconfig -p | grep -F libsotest ) run ch-run "$img" -- sotest echo "$output" [[ $status -eq 127 ]] [[ $output = *'libsotest.so.1: cannot open shared object file'* ]] fromhost_clean "$img" # no --verbose ch-fromhost --file sotest/files_inferrable.txt "$img" ch-run "$img" -- sotest fromhost_clean "$img" # destination directory not writeable (#323) chmod -v u-w "${img}/mnt" ch-fromhost --dest /mnt --path sotest/sotest.c "$img" test -w "${img}/mnt" test -f "${img}/mnt/sotest.c" fromhost_clean "$img" } @test 'ch-fromhost (CentOS)' { scope full prerequisites_ok centos7 img=${ch_imgdir}/centos7 fromhost_clean "$img" ch-fromhost -v --file sotest/files_inferrable.txt "$img" fromhost_ls "$img" test -f "${img}/usr/bin/sotest" test -f "${img}/lib/libsotest.so.1.0" test -L "${img}/lib/libsotest.so.1" ch-run "$img" -- /sbin/ldconfig -p | grep -F libsotest ch-run "$img" -- sotest rm "${img}/usr/bin/sotest" rm "${img}/lib/libsotest.so.1.0" rm "${img}/lib/libsotest.so.1" rm "${img}/etc/ld.so.cache" fromhost_clean_p "$img" } @test 'ch-fromhost errors' { scope standard prerequisites_ok debian9 img=${ch_imgdir}/debian9 # no image run ch-fromhost --path sotest/sotest.c echo "$output" [[ $status -eq 1 ]] [[ $output = *'no image specified'* ]] fromhost_clean_p "$img" # image is not a directory run ch-fromhost --path sotest/sotest.c /etc/motd echo "$output" [[ $status -eq 1 ]] [[ $output = *'image not a directory: /etc/motd'* ]] fromhost_clean_p "$img" # two image arguments run ch-fromhost --path sotest/sotest.c "$img" foo echo "$output" [[ $status -eq 1 ]] [[ $output = *'duplicate image: foo'* ]] fromhost_clean_p "$img" # no files argument run ch-fromhost "$img" echo "$output" [[ $status -eq 1 ]] [[ $output = *'empty file list'* ]] fromhost_clean_p "$img" # file that needs --dest but not specified run ch-fromhost -v --path sotest/sotest.c "$img" echo "$output" [[ $status -eq 1 ]] [[ $output = *'no destination for: sotest/sotest.c'* ]] fromhost_clean_p "$img" # file with colon in name run ch-fromhost -v --path 'foo:bar' "$img" echo "$output" [[ $status -eq 1 ]] [[ $output = *"paths can't contain colon: foo:bar"* ]] fromhost_clean_p "$img" # file with newlines in name run ch-fromhost -v --path $'foo\nbar' "$img" echo "$output" [[ $status -eq 1 ]] [[ $output = *"no destination for: foo"* ]] fromhost_clean_p "$img" # --cmd no argument run ch-fromhost "$img" --cmd echo "$output" [[ $status -eq 1 ]] [[ $output = *'--cmd must not be empty'* ]] fromhost_clean_p "$img" # --cmd empty run ch-fromhost --cmd true "$img" echo "$output" [[ $status -eq 1 ]] [[ $output = *'empty file list'* ]] fromhost_clean_p "$img" # --cmd fails run ch-fromhost --cmd false "$img" echo "$output" [[ $status -eq 1 ]] [[ $output = *'command failed: false'* ]] fromhost_clean_p "$img" # --file no argument run ch-fromhost "$img" --file echo "$output" [[ $status -eq 1 ]] [[ $output = *'--file must not be empty'* ]] fromhost_clean_p "$img" # --file empty run ch-fromhost --file /dev/null "$img" echo "$output" [[ $status -eq 1 ]] [[ $output = *'empty file list'* ]] fromhost_clean_p "$img" # --file does not exist run ch-fromhost --file /doesnotexist "$img" echo "$output" [[ $status -eq 1 ]] [[ $output = *'/doesnotexist: No such file or directory'* ]] [[ $output = *'cannot read file: /doesnotexist'* ]] fromhost_clean_p "$img" # --path no argument run ch-fromhost "$img" --path echo "$output" [[ $status -eq 1 ]] [[ $output = *'--path must not be empty'* ]] fromhost_clean_p "$img" # --path does not exist run ch-fromhost --dest /mnt 
--path /doesnotexist "$img" echo "$output" [[ $status -eq 1 ]] [[ $output = *'No such file or directory'* ]] [[ $output = *'cannot inject: /doesnotexist'* ]] fromhost_clean_p "$img" # --dest no argument run ch-fromhost "$img" --dest echo "$output" [[ $status -eq 1 ]] [[ $output = *'--dest must not be empty'* ]] fromhost_clean_p "$img" # --dest not an absolute path run ch-fromhost --dest relative --path sotest/sotest.c "$img" echo "$output" [[ $status -eq 1 ]] [[ $output = *'not an absolute path: relative'* ]] fromhost_clean_p "$img" # --dest does not exist run ch-fromhost --dest /doesnotexist --path sotest/sotest.c "$img" echo "$output" [[ $status -eq 1 ]] [[ $output = *'not a directory:'* ]] fromhost_clean_p "$img" # --dest is not a directory run ch-fromhost --dest /bin/sh --file sotest/sotest.c "$img" echo "$output" [[ $status -eq 1 ]] [[ $output = *'not a directory:'* ]] fromhost_clean_p "$img" # image does not exist run ch-fromhost --file sotest/files_inferrable.txt /doesnotexist echo "$output" [[ $status -eq 1 ]] [[ $output = *'image not a directory: /doesnotexist'* ]] fromhost_clean_p "$img" # image specified twice run ch-fromhost --file sotest/files_inferrable.txt "$img" "$img" echo "$output" [[ $status -eq 1 ]] [[ $output = *'duplicate image'* ]] fromhost_clean_p "$img" # ldconfig gives no shared library path (#324) # # (I don't think this is the best way to get ldconfig to fail, but I # couldn't come up with anything better. E.g., bad ld.so.conf or broken # .so's seem to produce only warnings.) mv "${img}/sbin/ldconfig" "${img}/sbin/ldconfig.foo" run ch-fromhost --lib-path "$img" mv "${img}/sbin/ldconfig.foo" "${img}/sbin/ldconfig" echo "$output" [[ $status -eq 1 ]] [[ $output = *'empty path from ldconfig'* ]] fromhost_clean_p "$img" } @test 'ch-fromhost --cray-mpi not on a Cray' { scope full [[ $ch_cray ]] && skip 'host is a Cray' run ch-fromhost --cray-mpi "$ch_timg" echo "$output" [[ $status -eq 1 ]] [[ $output = *'are you on a Cray?'* ]] } @test 'ch-fromhost --cray-mpi with no MPI installed' { scope full [[ $ch_cray ]] || skip 'host is not a Cray' run ch-fromhost --cray-mpi "$ch_timg" echo "$output" [[ $status -eq 1 ]] [[ $output = *"can't find MPI in image"* ]] } @test 'ch-fromhost --nvidia with GPU' { scope full prerequisites_ok nvidia command -v nvidia-container-cli >/dev/null 2>&1 \ || skip 'nvidia-container-cli not in PATH' img=${ch_imgdir}/nvidia # nvidia-container-cli --version (to make sure it's linked correctly) nvidia-container-cli --version # Skip if nvidia-container-cli can't find CUDA. 
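    # (Idiom used throughout these tests: bats' "run" never fails the
    # test by itself; it stores the exit code in $status and combined
    # stdout/stderr in $output so we can branch afterward, e.g.:
    #
    #   run nvidia-container-cli list --binaries --libraries
    #   [[ $status -eq 0 ]] || skip "nvidia-container-cli can't find CUDA"
    # )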
run nvidia-container-cli list --binaries --libraries echo "$output" if [[ $status -eq 1 ]]; then if [[ $output = *'cuda error'* ]]; then skip "nvidia-container-cli can't find CUDA" fi false fi # --nvidia ch-fromhost -v --nvidia "$img" # nvidia-smi runs in guest ch-run "$img" -- nvidia-smi -L # nvidia-smi -L matches host host=$(nvidia-smi -L) echo "host GPUs:" echo "$host" guest=$(ch-run "$img" -- nvidia-smi -L) echo "guest GPUs:" echo "$guest" cmp <(echo "$host") <(echo "$guest") # --nvidia and --cmd fromhost_clean "$img" ch-fromhost --nvidia --file sotest/files_inferrable.txt "$img" ch-run "$img" -- nvidia-smi -L ch-run "$img" -- sotest # --nvidia and --file fromhost_clean "$img" ch-fromhost --nvidia --cmd 'cat sotest/files_inferrable.txt' "$img" ch-run "$img" -- nvidia-smi -L ch-run "$img" -- sotest # CUDA sample sample=/matrixMulCUBLAS # should fail without ch-fromhost --nvidia fromhost_clean "$img" run ch-run "$img" -- $sample echo "$output" [[ $status -eq 1 ]] [[ $output = *'CUDA error at'* ]] # should succeed with it fromhost_clean_p "$img" ch-fromhost --nvidia "$img" run ch-run "$img" -- $sample echo "$output" [[ $status -eq 0 ]] [[ $output =~ 'Comparing CUBLAS Matrix Multiply with CPU results: PASS' ]] } @test 'ch-fromhost --nvidia without GPU' { scope full prerequisites_ok nvidia img=${ch_imgdir}/nvidia # --nvidia should give a proper error whether or not nvidia-container-cli # is available. if ( command -v nvidia-container-cli >/dev/null 2>&1 ); then # nvidia-container-cli in $PATH run nvidia-container-cli list --binaries --libraries echo "$output" if [[ $status -eq 0 ]]; then # found CUDA; skip skip 'nvidia-container-cli found CUDA' else [[ $status -eq 1 ]] [[ $output = *'cuda error'* ]] run ch-fromhost -v --nvidia "$img" echo "$output" [[ $status -eq 1 ]] [[ $output = *'does this host have GPUs'* ]] fi else # nvidia-container-cli not in $PATH run ch-fromhost -v --nvidia "$img" echo "$output" [[ $status -eq 1 ]] r="nvidia-container-cli: (command )?not found" [[ $output =~ $r ]] [[ $output =~ 'nvidia-container-cli failed' ]] fi } charliecloud-0.9.10/test/run/ch-run_escalated.bats000066400000000000000000000040411346662313000221010ustar00rootroot00000000000000load ../common @test 'ch-run refuses to run if setgid' { scope quick ch_run_tmp=$BATS_TMPDIR/ch-run.setgid gid=$(id -g) gid2=$(id -G | cut -d' ' -f2) echo "gids: ${gid} ${gid2}" [[ $gid != "$gid2" ]] cp -a "$ch_runfile" "$ch_run_tmp" ls -l "$ch_run_tmp" chgrp "$gid2" "$ch_run_tmp" chmod g+s "$ch_run_tmp" ls -l "$ch_run_tmp" [[ -g $ch_run_tmp ]] run "$ch_run_tmp" --version echo "$output" [[ $status -eq 1 ]] [[ $output = *': error ('* ]] rm "$ch_run_tmp" } @test 'ch-run refuses to run if setuid' { scope quick [[ -n $ch_have_sudo ]] || skip 'sudo not available' ch_run_tmp=$BATS_TMPDIR/ch-run.setuid cp -a "$ch_runfile" "$ch_run_tmp" ls -l "$ch_run_tmp" sudo chown root "$ch_run_tmp" sudo chmod u+s "$ch_run_tmp" ls -l "$ch_run_tmp" [[ -u $ch_run_tmp ]] run "$ch_run_tmp" --version echo "$output" [[ $status -eq 1 ]] [[ $output = *': error ('* ]] sudo rm "$ch_run_tmp" } @test 'ch-run as root: --version and --test' { scope quick [[ -n $ch_have_sudo ]] || skip 'sudo not available' sudo "$ch_runfile" --version sudo "$ch_runfile" --help } @test 'ch-run as root: run image' { scope standard # Running an image should work as root, but it doesn't, and I'm not sure # why, so skip this test. 
This fails in the test suite with: # # ch-run: couldn't resolve image path: No such file or directory (ch-run.c:139:2) # # but when run manually (with same arguments?) it fails differently with: # # $ sudo bin/ch-run $ch_imgdir/chtest -- true # ch-run: [...]/chtest: Permission denied (ch-run.c:195:13) # skip 'issue #76' sudo "$ch_runfile" "$ch_timg" -- true } @test 'ch-run as root: root with non-zero gid refused' { scope quick [[ -n $ch_have_sudo ]] || skip 'sudo not available' [[ -z $TRAVIS ]] || skip 'not permitted on Travis' run sudo -u root -g "$(id -gn)" "$ch_runfile" -v --version echo "$output" [[ $status -eq 1 ]] [[ $output = *'error ('* ]] } charliecloud-0.9.10/test/run/ch-run_isolation.bats000066400000000000000000000037021346662313000221600ustar00rootroot00000000000000load ../common @test 'mountns id differs' { scope quick host_ns=$(stat -Lc '%i' /proc/self/ns/mnt) echo "host: ${host_ns}" guest_ns=$(ch-run "$ch_timg" -- stat -Lc %i /proc/self/ns/mnt) echo "guest: ${guest_ns}" [[ -n $host_ns && -n $guest_ns && $host_ns -ne $guest_ns ]] } @test 'userns id differs' { scope quick host_ns=$(stat -Lc '%i' /proc/self/ns/user) echo "host: ${host_userns}" guest_ns=$(ch-run "$ch_timg" -- stat -Lc %i /proc/self/ns/user) echo "guest: ${guest_ns}" [[ -n $host_ns && -n $guest_ns && $host_ns -ne $guest_ns ]] } @test 'distro differs' { scope quick # This is a catch-all and a bit of a guess. Even if it fails, however, we # get an empty string, which is fine for the purposes of the test. host_distro=$( cat /etc/os-release /etc/*-release /etc/*_version \ | grep -Em1 '[A-Za-z] [0-9]' \ | sed -r 's/^(.*")?(.+)(")$/\2/') echo "host: ${host_distro}" guest_expected='Alpine Linux v3.9' echo "guest expected: ${guest_expected}" if [[ $host_distro = "$guest_expected" ]]; then skip 'host matches expected guest distro' fi guest_distro=$(ch-run "$ch_timg" -- \ cat /etc/os-release \ | grep -F PRETTY_NAME \ | sed -r 's/^(.*")?(.+)(")$/\2/') echo "guest: ${guest_distro}" [[ $guest_distro = "$guest_expected" ]] [[ $guest_distro != "$host_distro" ]] } @test 'user and group match host' { scope quick host_uid=$(id -u) guest_uid=$(ch-run "$ch_timg" -- id -u) [[ $host_uid = "$guest_uid" ]] host_pgid=$(id -g) guest_pgid=$(ch-run "$ch_timg" -- id -g) [[ $host_pgid = "$guest_pgid" ]] host_username=$(id -un) guest_username=$(ch-run "$ch_timg" -- id -un) [[ $host_username = "$guest_username" ]] host_pgroup=$(id -gn) guest_pgroup=$(ch-run "$ch_timg" -- id -gn) [[ $host_pgroup = "$guest_pgroup" ]] } charliecloud-0.9.10/test/run/ch-run_join.bats000066400000000000000000000344501346662313000211220ustar00rootroot00000000000000load ../common setup () { scope standard } ipc_clean () { rm -v /dev/shm/*ch-run* } ipc_clean_p () { sem="$(find /dev/shm -maxdepth 1 -name '*ch-run*')" [[ -z $sem ]] } joined_ok () { # parameters proc_ct_total=$1 # total number of processes peer_ct_node=$2 # size of each peer group (peers per node) namespace_ct=$3 # number of different namespace IDs status=$4 # exit status output="$5" # output echo "$output" # exit success printf ' exit status: ' 1>&2 if [[ $status -eq 0 ]]; then printf 'ok\n' 1>&2 else printf 'fail (%d)\n' "$status" 1>&2 return 1 fi # number of processes printf ' process count; expected %d: ' "$proc_ct_total" 1>&2 proc_ct_found=$(echo "$output" | grep -Ec 'join: 1 [0-9]+ [0-9a-z]+') if [[ $proc_ct_total -eq "$proc_ct_found" ]]; then printf 'ok\n' else printf 'fail (%d)\n' "$proc_ct_found" 1>&2 return 1 fi # number of peers printf ' peer group size; expected %d: ' "$peer_ct_node" 1>&2 
peer_cts=$( echo "$output" \ | sed -rn 's/^ch-run\[[0-9]+\]: join: 1 ([0-9]+) .+$/\1/p') peer_ct_found=$(echo "$peer_cts" | sort -u) peer_cts_found=$(echo "$peer_ct_found" | wc -l) if [[ $peer_cts_found -ne 1 ]]; then printf 'fail (%d different counts reported)\n' "$peer_cts_found" 1>&2 return 1 fi if [[ $peer_ct_found -eq "$peer_ct_node" ]]; then printf 'ok\n' 1>&2 else printf 'fail (%d)\n' "$peer_ct_found" 1>&2 return 1 fi # correct number of namespace IDs for i in /proc/self/ns/*; do printf ' namespace count; expected %d: %s: ' "$namespace_ct" "$i" 1>&2 namespace_ct_found=$( echo "$output" \ | grep -E "^${i}:" \ | sort -u \ | wc -l) if [[ $namespace_ct -eq "$namespace_ct_found" ]]; then printf 'ok\n' 1>&2 else printf 'fail (%d)\n' "$namespace_ct_found" 1>&2 return 1 fi done } # Unset environment variables that might be used. unset_vars () { unset OMPI_COMM_WORLD_LOCAL_SIZE unset SLURM_CPUS_ON_NODE unset SLURM_STEP_ID unset SLURM_STEP_TASKS_PER_NODE } @test 'ch-run --join: /dev/shm starts clean' { if ( ! ipc_clean_p ); then echo 'warning: /dev/shm contains leftover ch-run IPC' ipc_clean false fi } @test 'ch-run --join: one peer, direct launch' { unset_vars ipc_clean_p # --join-ct run ch-run -v --join-ct=1 "$ch_timg" -- /test/printns joined_ok 1 1 1 "$status" "$output" r='join: 1 1 [0-9]+ 0' # status from getppid(2) is all digits [[ $output =~ $r ]] [[ $output = *'join: peer group size from command line'* ]] ipc_clean_p # join count from an environment variable SLURM_CPUS_ON_NODE=1 run ch-run -v --join "$ch_timg" -- /test/printns joined_ok 1 1 1 "$status" "$output" [[ $output = *'join: peer group size from SLURM_CPUS_ON_NODE'* ]] ipc_clean_p # join count from an environment variable with extra goop SLURM_CPUS_ON_NODE=1foo ch-run --join "$ch_timg" -- /test/printns joined_ok 1 1 1 "$status" "$output" [[ $output = *'join: peer group size from SLURM_CPUS_ON_NODE'* ]] ipc_clean_p # join tag run ch-run -v --join-ct=1 --join-tag=foo "$ch_timg" -- /test/printns joined_ok 1 1 1 "$status" "$output" [[ $output = *'join: 1 1 foo 0'* ]] [[ $output = *'join: peer group tag from command line'* ]] ipc_clean_p SLURM_STEP_ID=bar run ch-run -v --join-ct=1 "$ch_timg" -- /test/printns joined_ok 1 1 1 "$status" "$output" [[ $output = *'join: 1 1 bar 0'* ]] [[ $output = *'join: peer group tag from SLURM_STEP_ID'* ]] ipc_clean_p } @test 'ch-run --join: two peers, direct launch' { unset_vars ipc_clean_p rm -f "$BATS_TMPDIR"/join.?.* # first peer (winner) ch-run -v --join-ct=2 --join-tag=foo "$ch_timg" -- \ /test/printns 5 "${BATS_TMPDIR}/join.1.ns" \ >& "${BATS_TMPDIR}/join.1.err" & sleep 1 cat "${BATS_TMPDIR}/join.1.err" cat "${BATS_TMPDIR}/join.1.ns" grep -Fq 'join: 1 2' "${BATS_TMPDIR}/join.1.err" grep -Fq 'join: I won' "${BATS_TMPDIR}/join.1.err" ! grep -Fq 'join: cleaning up IPC' "${BATS_TMPDIR}/join.1.err" # IPC resources present? test -e /dev/shm/ch-run_foo test -e /dev/shm/sem.ch-run_foo # second peer (loser) run ch-run -v --join-ct=2 --join-tag=foo "$ch_timg" -- \ /test/printns 0 "${BATS_TMPDIR}/join.2.ns" \ echo "$output" [[ $status -eq 0 ]] cat "${BATS_TMPDIR}/join.2.ns" echo "$output" | grep -Fq 'join: 1 2' echo "$output" | grep -Fq 'join: winner pid:' echo "$output" | grep -Fq 'join: cleaning up IPC' # same namespaces? 
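    # (Each line of the *.ns files is assumed to look like
    # "/proc/self/ns/mnt: <namespace id>". If the join worked, grepping
    # any one namespace's entries across both peers' files yields exactly
    # one distinct value, hence the "uniq | wc -l" equals 1 check below.)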
for i in /proc/self/ns/*; do [[ 1 = $( cat "$BATS_TMPDIR"/join.?.ns \ | grep -E "^${i}:" | uniq | wc -l) ]] done ipc_clean_p } @test 'ch-run --join: three peers, direct launch' { unset_vars ipc_clean_p rm -f "$BATS_TMPDIR"/join.?.* # first peer (winner) ch-run -v --join-ct=3 --join-tag=foo "$ch_timg" -- \ /test/printns 5 "${BATS_TMPDIR}/join.1.ns" \ >& "${BATS_TMPDIR}/join.1.err" & sleep 1 cat "${BATS_TMPDIR}/join.1.err" cat "${BATS_TMPDIR}/join.1.ns" grep -Fq 'join: 1 3' "${BATS_TMPDIR}/join.1.err" grep -Fq 'join: I won' "${BATS_TMPDIR}/join.1.err" grep -Fq 'join: 2 peers left' "${BATS_TMPDIR}/join.1.err" ! grep -Fq 'join: cleaning up IPC' "${BATS_TMPDIR}/join.1.err" # second peer (loser, no cleanup) ch-run -v --join-ct=3 --join-tag=foo "${ch_timg}" -- \ /test/printns 0 "${BATS_TMPDIR}/join.2.ns" \ >& "${BATS_TMPDIR}/join.2.err" & sleep 1 cat "${BATS_TMPDIR}/join.2.err" cat "${BATS_TMPDIR}/join.2.ns" grep -Fq 'join: 1 3' "${BATS_TMPDIR}/join.2.err" grep -Fq 'join: winner pid:' "${BATS_TMPDIR}/join.2.err" grep -Fq 'join: 1 peers left' "${BATS_TMPDIR}/join.2.err" ! grep -Fq 'join: cleaning up IPC' "${BATS_TMPDIR}/join.2.err" # IPC resources present? test -e /dev/shm/ch-run_foo test -e /dev/shm/sem.ch-run_foo # third peer (loser, cleanup) ch-run -v --join-ct=3 --join-tag=foo "$ch_timg" -- \ /test/printns 0 "${BATS_TMPDIR}/join.3.ns" \ >& "${BATS_TMPDIR}/join.3.err" & sleep 1 cat "${BATS_TMPDIR}/join.3.err" cat "${BATS_TMPDIR}/join.3.ns" grep -Fq 'join: 1 3' "${BATS_TMPDIR}/join.3.err" grep -Fq 'join: winner pid:' "${BATS_TMPDIR}/join.3.err" grep -Fq 'join: 0 peers left' "${BATS_TMPDIR}/join.3.err" grep -Fq 'join: cleaning up IPC' "${BATS_TMPDIR}/join.3.err" # same namespaces? for i in /proc/self/ns/*; do [[ 1 = $( cat "$BATS_TMPDIR"/join.?.ns \ | grep -E "^$i:" | uniq | wc -l) ]] done ipc_clean_p } @test 'ch-run --join: multiple peers, framework launch' { multiprocess_ok ipc_clean_p # Two peers, one node. Should be one of each of the namespaces. Make sure # everyone chdir(2)s properly. # shellcheck disable=SC2086 run $ch_mpirun_2_1node ch-run -v --join --cd /test "$ch_timg" -- ./printns 2 ipc_clean_p joined_ok 2 2 1 "$status" "$output" # One peer per core across the allocation. Should be $ch_nodes of each # of the namespaces. # shellcheck disable=SC2086 run $ch_mpirun_core ch-run -v --join "$ch_timg" -- /test/printns 4 joined_ok "$ch_cores_total" "$ch_cores_node" "$ch_nodes" \ "$status" "$output" ipc_clean_p } @test 'ch-run --join: peer group size errors' { unset_vars # --join but no join count run ch-run --join "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output =~ 'join: no valid peer group size found' ]] ipc_clean_p # join count no digits run ch-run --join-ct=a "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output =~ 'join-ct: no digits found' ]] SLURM_CPUS_ON_NODE=a run ch-run --join "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output =~ 'SLURM_CPUS_ON_NODE: no digits found' ]] ipc_clean_p # join count empty string run ch-run --join-ct='' "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output =~ '--join-ct: no digits found' ]] SLURM_CPUS_ON_NODE=-1 run ch-run --join "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output =~ 'join: no valid peer group size found' ]] ipc_clean_p # --join-ct digits followed by extra goo (OK from environment variable) run ch-run --join-ct=1a "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output =~ '--join-ct: extra characters after digits' ]] ipc_clean_p # Regex for out-of-range error. 
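    # (ch-run parses these counts with C integer conversions, so values
    # beyond INT_MAX/LONG_MAX or below INT_MIN/LONG_MIN should be
    # rejected with a message along the lines of
    # "--join-ct: 2147483648 out of range", matched loosely below.)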
range_re='.*: .*out of range' # join count above INT_MAX run ch-run --join-ct=2147483648 "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output =~ $range_re ]] SLURM_CPUS_ON_NODE=2147483648 \ run ch-run --join "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output =~ $range_re ]] ipc_clean_p # join count below INT_MIN run ch-run --join-ct=-2147483649 "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output =~ $range_re ]] SLURM_CPUS_ON_NODE=-2147483649 \ run ch-run --join "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output =~ $range_re ]] ipc_clean_p # join count above LONG_MAX run ch-run --join-ct=9223372036854775808 "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output =~ $range_re ]] SLURM_CPUS_ON_NODE=9223372036854775808 \ run ch-run --join "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output =~ $range_re ]] ipc_clean_p # join count below LONG_MIN run ch-run --join-ct=-9223372036854775809 "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output =~ $range_re ]] SLURM_CPUS_ON_NODE=-9223372036854775809 \ run ch-run --join "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output =~ $range_re ]] ipc_clean_p } @test 'ch-run --join: peer group tag errors' { unset_vars # Use a join count of 1 throughout. export SLURM_CPUS_ON_NODE=1 # join tag empty string run ch-run --join-tag='' "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output =~ 'join: peer group tag cannot be empty string' ]] SLURM_STEP_ID='' run ch-run --join "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output =~ 'join: peer group tag cannot be empty string' ]] ipc_clean_p } @test 'ch-run --join-pid: without prior --join' { unset_vars ipc_clean_p rm -f "$BATS_TMPDIR"/join.?.* # First ch-run creates the namespaces with no joining at all. # Funky sleep time is to make the printns process unique for pgrep. ch-run -v "$ch_timg" -- \ /test/printns 5.001 "${BATS_TMPDIR}/join.1.ns" \ >& "${BATS_TMPDIR}/join.1.err" & sleep 1 cat "${BATS_TMPDIR}/join.1.err" cat "${BATS_TMPDIR}/join.1.ns" grep -Fq "join: 0 0 (null) 0" "${BATS_TMPDIR}/join.1.err" # PID of ch-run/printns above. pid=$(pgrep -f "printns 5.001") # Second ch-run joins the first's namespaces. run ch-run -v --join-pid="$pid" "$ch_timg" -- \ /test/printns 0 "${BATS_TMPDIR}/join.2.ns" echo "$output" [[ $status -eq 0 ]] cat "${BATS_TMPDIR}/join.2.ns" echo "$output" | grep -Fq "join: 0 0 (null) ${pid}" # Same namespaces? for i in /proc/self/ns/*; do [[ 1 = $( cat "$BATS_TMPDIR"/join.?.ns \ | grep -E "^${i}:" | uniq | wc -l) ]] done ipc_clean_p } @test 'ch-run --join-pid: with prior --join' { unset_vars ipc_clean_p rm -f "$BATS_TMPDIR"/join.?.* # First of two peers (winner). # Funky sleep time as above. ch-run -v --join-ct=2 --join-tag=bar "$ch_timg" -- \ /test/printns 5.002 "${BATS_TMPDIR}/join.1.ns" \ >& "${BATS_TMPDIR}/join.1.err" & sleep 1 cat "${BATS_TMPDIR}/join.1.err" cat "${BATS_TMPDIR}/join.1.ns" grep -Fq 'join: 1 2' "${BATS_TMPDIR}/join.1.err" grep -Fq 'join: I won' "${BATS_TMPDIR}/join.1.err" ! grep -Fq 'join: cleaning up IPC' "${BATS_TMPDIR}/join.1.err" # PID of first peer. pid=$(pgrep -f "printns 5.002") # Second of two peers (loser). 
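    # (As with the winner above, the one-off fractional sleep 5.003
    # makes this background printns uniquely matchable with "pgrep -f"
    # should its PID be needed.)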
ch-run -v --join-ct=2 --join-tag=bar "${ch_timg}" -- \ /test/printns 5.003 "${BATS_TMPDIR}/join.2.ns" \ >& "${BATS_TMPDIR}/join.2.err" & sleep 1 cat "${BATS_TMPDIR}/join.2.err" cat "${BATS_TMPDIR}/join.2.ns" grep -Fq 'join: 1 2' "${BATS_TMPDIR}/join.2.err" grep -Fq "join: winner pid: ${pid}" "${BATS_TMPDIR}/join.2.err" grep -Fq 'join: 0 peers left' "${BATS_TMPDIR}/join.2.err" grep -Fq 'join: cleaning up IPC' "${BATS_TMPDIR}/join.2.err" # Third ch-run joins existing namespaces. run ch-run -v --join-pid="$pid" "$ch_timg" -- \ /test/printns 0 "${BATS_TMPDIR}/join.3.ns" echo "$output" [[ $status -eq 0 ]] cat "${BATS_TMPDIR}/join.3.ns" ( echo "$output" | grep -Fq "join: 0 0 (null) ${pid}" ) ! ( echo "$output" | grep -Fq 'join: I won' ) ! ( echo "$output" | grep -Fq "join: winner pid: ${pid}" ) ! ( echo "$output" | grep -q 'join: .+ peers left' ) ! ( echo "$output" | grep -Fq 'join: cleaning up IPC' ) # Same namespaces? for i in /proc/self/ns/*; do [[ 1 = $( cat "$BATS_TMPDIR"/join.?.ns \ | grep -E "^${i}:" | uniq | wc -l) ]] done ipc_clean_p } @test 'ch-run --join-pid: errors' { # Can't join namespaces of processes we don't own. run ch-run -v --join-pid=1 "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output = *"join: can't open /proc/1/ns/user: Permission denied"* ]] # Can't join namespaces of processes that don't exist. pid=2147483647 run ch-run -v --join-pid="$pid" "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output = *"join: no PID ${pid}: /proc/${pid}/ns/user not found"* ]] } @test 'ch-run --join: /dev/shm ends clean' { if ( ! ipc_clean_p ); then echo 'warning: /dev/shm contains leftover ch-run IPC' ipc_clean false fi } charliecloud-0.9.10/test/run/ch-run_misc.bats000066400000000000000000000474331346662313000211230ustar00rootroot00000000000000load ../common @test 'relative path to image' { # issue #6 scope quick cd "$(dirname "$ch_timg")" && ch-run "$(basename "$ch_timg")" -- true } @test 'symlink to image' { # issue #50 scope quick ln -sf "$ch_timg" "${BATS_TMPDIR}/symlink-test" ch-run "${BATS_TMPDIR}/symlink-test" -- true } @test 'mount image read-only' { scope quick run ch-run "$ch_timg" sh < write' ch-run -w "$ch_timg" rm write } @test '/usr/bin/ch-ssh' { # Note: --ch-ssh without /usr/bin/ch-ssh is in test "broken image errors". scope quick ls -l "$ch_bin/ch-ssh" ch-run --ch-ssh "$ch_timg" -- ls -l /usr/bin/ch-ssh ch-run --ch-ssh "$ch_timg" -- test -x /usr/bin/ch-ssh # Test bind-mount by comparing size rather than e.g. "ch-ssh --version" # because ch-ssh won't run on Alpine (issue #4). host_size=$(stat -c %s "${ch_bin}/ch-ssh") guest_size=$(ch-run --ch-ssh "$ch_timg" -- stat -c %s /usr/bin/ch-ssh) echo "host: ${host_size}, guest: ${guest_size}" [[ $host_size -eq "$guest_size" ]] } @test 'optional default bind mounts silently skipped' { scope standard [[ ! -e "${ch_timg}/var/opt/cray/alps/spool" ]] [[ ! -e "${ch_timg}/var/opt/cray/hugetlbfs" ]] ch-run "$ch_timg" -- mount | ( ! grep -F /var/opt/cray/alps/spool ) ch-run "$ch_timg" -- mount | ( ! 
grep -F /var/opt/cray/hugetlbfs ) } # shellcheck disable=SC2016 @test '$HOME' { scope quick echo "host: $HOME" [[ $HOME ]] [[ $USER ]] # default: set $HOME # shellcheck disable=SC2016 run ch-run "$ch_timg" -- /bin/sh -c 'echo $HOME' echo "$output" [[ $status -eq 0 ]] [[ $output = /home/$USER ]] # no change if --no-home # shellcheck disable=SC2016 run ch-run --no-home "$ch_timg" -- /bin/sh -c 'echo $HOME' echo "$output" [[ $status -eq 0 ]] [[ $output = "$HOME" ]] # puke if $HOME not set home_tmp=$HOME unset HOME # shellcheck disable=SC2016 run ch-run "$ch_timg" -- /bin/sh -c 'echo $HOME' export HOME="$home_tmp" echo "$output" [[ $status -eq 1 ]] # shellcheck disable=SC2016 [[ $output = *'cannot find home directory: is $HOME set?'* ]] # warn if $USER not set user_tmp=$USER unset USER # shellcheck disable=SC2016 run ch-run "$ch_timg" -- /bin/sh -c 'echo $HOME' export USER=$user_tmp echo "$output" [[ $status -eq 0 ]] # shellcheck disable=SC2016 [[ $output = *'$USER not set; cannot rewrite $HOME'* ]] [[ $output = *"$HOME"* ]] } # shellcheck disable=SC2016 @test '$PATH: add /bin' { scope quick echo "$PATH" # if /bin is in $PATH, latter passes through unchanged PATH2="$ch_bin:/bin:/usr/bin" echo "$PATH2" # shellcheck disable=SC2016 PATH=$PATH2 run ch-run "$ch_timg" -- /bin/sh -c 'echo $PATH' echo "$output" [[ $status -eq 0 ]] [[ $output = "$PATH2" ]] PATH2="/bin:$ch_bin:/usr/bin" echo "$PATH2" # shellcheck disable=SC2016 PATH=$PATH2 run ch-run "$ch_timg" -- /bin/sh -c 'echo $PATH' echo "$output" [[ $status -eq 0 ]] [[ $output = "$PATH2" ]] # if /bin isn't in $PATH, former is added to end PATH2="$ch_bin:/usr/bin" echo "$PATH2" # shellcheck disable=SC2016 PATH=$PATH2 run ch-run "$ch_timg" -- /bin/sh -c 'echo $PATH' echo "$output" [[ $status -eq 0 ]] [[ $output = $PATH2:/bin ]] } # shellcheck disable=SC2016 @test '$PATH: unset' { scope standard old_path=$PATH unset PATH run "$ch_runfile" "$ch_timg" -- \ /usr/bin/python3 -c 'import os; print(os.getenv("PATH") is None)' PATH=$old_path echo "$output" [[ $status -eq 0 ]] # shellcheck disable=SC2016 [[ $output = *': $PATH not set'* ]] [[ $output = *'True'* ]] } @test 'ch-run --cd' { scope quick # Default initial working directory is /. run ch-run "$ch_timg" -- pwd echo "$output" [[ $status -eq 0 ]] [[ $output = '/' ]] # Specify initial working directory. run ch-run --cd /dev "$ch_timg" -- pwd echo "$output" [[ $status -eq 0 ]] [[ $output = '/dev' ]] # Error if directory does not exist. 
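    # (Expected failure mode, assuming /goops is absent from the image:
    #
    #   $ ch-run --cd /goops "$ch_timg" -- true
    #   ch-run: can't cd to /goops: No such file or directory
    # )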
run ch-run --cd /goops "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output =~ "can't cd to /goops: No such file or directory" ]] } @test 'ch-run --bind' { scope quick # one bind, default destination (/mnt/0) ch-run -b "${ch_imgdir}/bind1" "$ch_timg" -- cat /mnt/0/file1 # one bind, explicit destination ch-run -b "${ch_imgdir}/bind1:/mnt/9" "$ch_timg" -- cat /mnt/9/file1 # two binds, default destination ch-run -b "${ch_imgdir}/bind1" -b "${ch_imgdir}/bind2" "$ch_timg" \ -- cat /mnt/0/file1 /mnt/1/file2 # two binds, explicit destinations ch-run -b "${ch_imgdir}/bind1:/mnt/8" -b "${ch_imgdir}/bind2:/mnt/9" \ "$ch_timg" \ -- cat /mnt/8/file1 /mnt/9/file2 # two binds, default/explicit ch-run -b "${ch_imgdir}/bind1" -b "${ch_imgdir}/bind2:/mnt/9" "$ch_timg" \ -- cat /mnt/0/file1 /mnt/9/file2 # two binds, explicit/default ch-run -b "${ch_imgdir}/bind1:/mnt/8" -b "${ch_imgdir}/bind2" "$ch_timg" \ -- cat /mnt/8/file1 /mnt/1/file2 # bind one source at two destinations ch-run -b "${ch_imgdir}/bind1:/mnt/8" -b "${ch_imgdir}/bind1:/mnt/9" \ "$ch_timg" \ -- diff -u /mnt/8/file1 /mnt/9/file1 # bind two sources at one destination ch-run -b "${ch_imgdir}/bind1:/mnt/9" -b "${ch_imgdir}/bind2:/mnt/9" \ "$ch_timg" \ -- sh -c '[ ! -e /mnt/9/file1 ] && cat /mnt/9/file2' # omit tmpfs at /home, which shouldn't be empty ch-run --no-home "$ch_timg" -- cat /home/overmount-me # overmount tmpfs at /home ch-run -b "${ch_imgdir}/bind1:/home" "$ch_timg" -- cat /home/file1 # bind to /home without overmount ch-run --no-home -b "${ch_imgdir}/bind1:/home" "$ch_timg" -- cat /home/file1 # omit default /home, with unrelated --bind ch-run --no-home -b "${ch_imgdir}/bind1" "$ch_timg" -- cat /mnt/0/file1 } @test 'ch-run --bind errors' { scope quick # more binds (11) than default destinations run ch-run -b "${ch_imgdir}/bind1" \ -b "${ch_imgdir}/bind1" \ -b "${ch_imgdir}/bind1" \ -b "${ch_imgdir}/bind1" \ -b "${ch_imgdir}/bind1" \ -b "${ch_imgdir}/bind1" \ -b "${ch_imgdir}/bind1" \ -b "${ch_imgdir}/bind1" \ -b "${ch_imgdir}/bind1" \ -b "${ch_imgdir}/bind1" \ -b "${ch_imgdir}/bind1" \ "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output = *"can't bind: not found: ${ch_timg}/mnt/10"* ]] # no argument to --bind run ch-run "$ch_timg" -b echo "$output" [[ $status -eq 64 ]] [[ $output = *'option requires an argument'* ]] # empty argument to --bind run ch-run -b '' "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output = *'--bind: no source provided'* ]] # source not provided run ch-run -b :/mnt/9 "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output = *'--bind: no source provided'* ]] # destination not provided run ch-run -b "${ch_imgdir}/bind1:" "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output = *'--bind: no destination provided'* ]] # source does not exist run ch-run -b "${ch_imgdir}/hoops" "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output = *"can't bind: not found: ${ch_imgdir}/hoops"* ]] # destination does not exist run ch-run -b "${ch_imgdir}/bind1:/goops" "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output = *"can't bind: not found: ${ch_timg}/goops"* ]] # neither source nor destination exist run ch-run -b "${ch_imgdir}/hoops:/goops" "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output = *"can't bind: not found: ${ch_imgdir}/hoops"* ]] # correct bind followed by source does not exist run ch-run -b "${ch_imgdir}/bind1" -b "${ch_imgdir}/hoops" "$ch_timg" -- \ true echo "$output" [[ $status -eq 1 ]] [[ $output = *"can't bind: not 
found: ${ch_imgdir}/hoops"* ]] # correct bind followed by destination does not exist run ch-run -b "${ch_imgdir}/bind1" -b "${ch_imgdir}/bind2:/goops" \ "$ch_timg" -- true echo "$output" [[ $status -eq 1 ]] [[ $output = *"can't bind: not found: ${ch_timg}/goops"* ]] } @test 'ch-run --set-env' { scope standard # Quirk that is probably too obscure to put in the documentation: The # string containing only two straight quotes does not round-trip through # "printenv" or "env", though it does round-trip through Bash "set": # # $ export foo="''" # $ echo [$foo] # [''] # $ set | fgrep foo # foo=''\'''\''' # $ eval $(set | fgrep foo) # $ echo [$foo] # [''] # $ printenv | fgrep foo # foo='' # $ eval $(printenv | fgrep foo) # $ echo $foo # [] # Valid inputs. Use Python to print the results to avoid ambiguity. f_in=${BATS_TMPDIR}/env.txt cat <<'EOF' > "$f_in" chse_a1=bar chse_a2=bar=baz chse_a3=bar baz chse_a4='bar' chse_a5= chse_a6='' chse_a7='''' chse_b1="bar" chse_b2=bar # baz chse_b3=$PATH chse_b4=bar chse_b5= bar chse_c1=foo chse_c1=bar EOF cat "$f_in" output_expected=$(cat <<'EOF' (' chse_b4', 'bar') ('chse_a1', 'bar') ('chse_a2', 'bar=baz') ('chse_a3', 'bar baz') ('chse_a4', 'bar') ('chse_a5', '') ('chse_a6', '') ('chse_a7', "''") ('chse_b1', '"bar"') ('chse_b2', 'bar # baz') ('chse_b3', '$PATH') ('chse_b5', ' bar') ('chse_c1', 'bar') EOF ) run ch-run --set-env="$f_in" "$ch_timg" -- python3 -c 'import os; [print((k,v)) for (k,v) in sorted(os.environ.items()) if "chse_" in k]' echo "$output" [[ $status -eq 0 ]] diff -u <(echo "$output_expected") <(echo "$output") } @test 'ch-run --set-env from Dockerfile' { scope standard prerequisites_ok debian9 img=${ch_imgdir}/debian9 output_expected=$(cat <<'EOF' chse_dockerfile=foo EOF ) run ch-run --set-env="${img}/environment" "$img" -- \ sh -c 'env | grep -E "^chse_"' echo "$output" [[ $status -eq 0 ]] diff -u <(echo "$output_expected") <(echo "$output") } @test 'ch-run --set-env errors' { scope standard f_in=${BATS_TMPDIR}/env.txt # file does not exist run ch-run --set-env=doesnotexist.txt "$ch_timg" -- /bin/true echo "$output" [[ $status -eq 1 ]] [[ $output = *"--set-env: can't open:"* ]] [[ $output = *"No such file or directory"* ]] # Note: I'm not sure how to test an error during reading, i.e., getline(3) # rather than fopen(3). Hence no test for "error reading". 
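    # (One untested possibility: point --set-env at a directory. With
    # glibc, fopen(3) of a directory in read mode typically succeeds and
    # the first getline(3) then fails with EISDIR, e.g.:
    #
    #   run ch-run --set-env="$BATS_TMPDIR" "$ch_timg" -- /bin/true
    #
    # But that behavior is libc-specific, so it's left as a note.)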
# invalid line: missing '=' echo 'FOO bar' > "$f_in" run ch-run --set-env="$f_in" "$ch_timg" -- /bin/true echo "$output" [[ $status -eq 1 ]] [[ $output = *"--set-env: no delimiter: ${f_in}:1"* ]] # invalid line: no name echo '=bar' > "$f_in" run ch-run --set-env="$f_in" "$ch_timg" -- /bin/true echo "$output" [[ $status -eq 1 ]] [[ $output = *"--set-env: empty name: ${f_in}:1"* ]] } @test 'ch-run --unset-env' { scope standard export chue_1=foo export chue_2=bar printf '\n# Nothing\n\n' run ch-run --unset-env=doesnotmatch "$ch_timg" -- env echo "$output" [[ $status -eq 0 ]] ex='^(_|HOME|PATH)=' # variables expected to change diff -u <(env | grep -Ev "$ex") <(echo "$output" | grep -Ev "$ex") printf '\n# Everything\n\n' run ch-run --unset-env='*' "$ch_timg" -- env echo "$output" [[ $status -eq 0 ]] [[ $output = '' ]] printf '\n# Everything, plus shell re-adds\n\n' run ch-run --unset-env='*' "$ch_timg" -- /bin/sh -c env echo "$output" [[ $status -eq 0 ]] diff -u <(printf 'SHLVL=1\nPWD=/\n') <(echo "$output") printf '\n# Without wildcards\n\n' run ch-run --unset-env=chue_1 "$ch_timg" -- env echo "$output" [[ $status -eq 0 ]] diff -u <(printf 'chue_2=bar\n') <(echo "$output" | grep -E '^chue_') printf '\n# With wildcards\n\n' run ch-run --unset-env='chue_*' "$ch_timg" -- env echo "$output" [[ $status -eq 0 ]] [[ $(echo "$output" | grep -E '^chue_') = '' ]] printf '\n# Empty string\n\n' run ch-run --unset-env= "$ch_timg" -- env echo "$output" [[ $status -eq 1 ]] [[ $output = *'--unset-env: GLOB must have non-zero length'* ]] } @test 'ch-run mixed --set-env and --unset-env' { scope standard # Input. export chmix_a1=z export chmix_a2=y export chmix_a3=x f1_in=${BATS_TMPDIR}/env1.txt cat <<'EOF' > "$f1_in" chmix_b1=w chmix_b2=v EOF f2_in=${BATS_TMPDIR}/env2.txt cat <<'EOF' > "$f2_in" chmix_c1=u chmix_c2=t EOF # unset, unset output_expected=$(cat <<'EOF' chmix_a3=x EOF ) run ch-run --unset-env=chmix_a1 --unset-env=chmix_a2 "$ch_timg" -- \ sh -c 'env | grep -E ^chmix_ | sort' echo "$output" [[ $status -eq 0 ]] diff -u <(echo "$output_expected") <(echo "$output") echo '# set, set' output_expected=$(cat <<'EOF' chmix_a1=z chmix_a2=y chmix_a3=x chmix_b1=w chmix_b2=v chmix_c1=u chmix_c2=t EOF ) run ch-run --set-env="$f1_in" --set-env="$f2_in" "$ch_timg" -- \ sh -c 'env | grep -E ^chmix_ | sort' echo "$output" [[ $status -eq 0 ]] diff -u <(echo "$output_expected") <(echo "$output") echo '# unset, set' output_expected=$(cat <<'EOF' chmix_a2=y chmix_a3=x chmix_b1=w chmix_b2=v EOF ) run ch-run --unset-env=chmix_a1 --set-env="$f1_in" "$ch_timg" -- \ sh -c 'env | grep -E ^chmix_ | sort' echo "$output" [[ $status -eq 0 ]] diff -u <(echo "$output_expected") <(echo "$output") echo '# set, unset' output_expected=$(cat <<'EOF' chmix_a1=z chmix_a2=y chmix_a3=x chmix_b1=w EOF ) run ch-run --set-env="$f1_in" --unset-env=chmix_b2 "$ch_timg" -- \ sh -c 'env | grep -E ^chmix_ | sort' echo "$output" [[ $status -eq 0 ]] diff -u <(echo "$output_expected") <(echo "$output") echo '# unset, set, unset' output_expected=$(cat <<'EOF' chmix_a2=y chmix_a3=x chmix_b1=w EOF ) run ch-run --unset-env=chmix_a1 \ --set-env="$f1_in" \ --unset-env=chmix_b2 \ "$ch_timg" -- sh -c 'env | grep -E ^chmix_ | sort' echo "$output" [[ $status -eq 0 ]] diff -u <(echo "$output_expected") <(echo "$output") echo '# set, unset, set' output_expected=$(cat <<'EOF' chmix_a1=z chmix_a2=y chmix_a3=x chmix_b1=w chmix_c1=u chmix_c2=t EOF ) run ch-run --set-env="$f1_in" \ --unset-env=chmix_b2 \ --set-env="$f2_in" \ "$ch_timg" -- sh -c 'env | grep -E ^chmix_ | 
sort' echo "$output" [[ $status -eq 0 ]] diff -u <(echo "$output_expected") <(echo "$output") } @test 'broken image errors' { scope standard img="${BATS_TMPDIR}/broken-image" # Create an image skeleton. dirs=$(echo {dev,proc,sys}) files=$(echo etc/{group,hosts,passwd,resolv.conf}) # shellcheck disable=SC2116 files_optional= # formerly for ch-ssh (#378), but leave infrastructure mkdir -p "$img" for d in $dirs; do mkdir -p "${img}/$d"; done mkdir -p "${img}/etc" "${img}/home" "${img}/usr/bin" "${img}/tmp" for f in $files $files_optional; do touch "${img}/${f}"; done # This should start up the container OK but fail to find the user command. run ch-run "$img" -- true echo "$output" [[ $status -eq 1 ]] [[ $output = *"can't execve(2): true: No such file or directory"* ]] # For each required file, we want a correct error if it's missing. for f in $files; do echo "required: ${f}" rm "${img}/${f}" ls -l "${img}/${f}" || true run ch-run "$img" -- true touch "${img}/${f}" # restore before test fails for idempotency echo "$output" [[ $status -eq 1 ]] r="can't bind: not found: .+/${f}" echo "expected: ${r}" [[ $output =~ $r ]] done # For each optional file, we want no error if it's missing. for f in $files_optional; do echo "optional: ${f}" rm "${img}/${f}" run ch-run "$img" -- true touch "${img}/${f}" # restore before test fails for idempotency echo "$output" [[ $status -eq 1 ]] [[ $output = *"can't execve(2): true: No such file or directory"* ]] done # For all files, we want a correct error if it's not a regular file. for f in $files $files_optional; do echo "not a regular file: ${f}" rm "${img}/${f}" mkdir "${img}/${f}" run ch-run "$img" -- true rmdir "${img}/${f}" # restore before test fails for idempotency touch "${img}/${f}" echo "$output" [[ $status -eq 1 ]] r="can't bind .+ to /.+/${f}: Not a directory" echo "expected: ${r}" [[ $output =~ $r ]] done # For each directory, we want a correct error if it's missing. for d in $dirs tmp; do echo "required: ${d}" rmdir "${img}/${d}" run ch-run "$img" -- true mkdir "${img}/${d}" # restore before test fails for idempotency echo "$output" [[ $status -eq 1 ]] r="can't bind: not found: .+/${d}" echo "expected: ${r}" [[ $output =~ $r ]] done # For each directory, we want a correct error if it's not a directory. for d in $dirs tmp; do echo "not a directory: ${d}" rmdir "${img}/${d}" touch "${img}/${d}" run ch-run "$img" -- true rm "${img}/${d}" # restore before test fails for idempotency mkdir "${img}/${d}" echo "$output" [[ $status -eq 1 ]] r="can't bind .+ to /.+/${d}: Not a directory" echo "expected: ${r}" [[ $output =~ $r ]] done # --private-tmp rmdir "${img}/tmp" run ch-run --private-tmp "$img" -- true mkdir "${img}/tmp" # restore before test fails for idempotency echo "$output" [[ $status -eq 1 ]] r="can't mount tmpfs at /.+/tmp: No such file or directory" echo "expected: ${r}" [[ $output =~ $r ]] # /home without --private-home # FIXME: Not sure how to make the second mount(2) fail. 
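    # (The first mount(2) is the tmpfs at <img>/home, which the rmdir
    # below breaks; the second is presumably the bind of $HOME onto
    # <img>/home/$USER, which is reached only after the tmpfs succeeds.)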
rmdir "${img}/home" run ch-run "$img" -- true mkdir "${img}/home" # restore before test fails for idempotency echo "$output" [[ $status -eq 1 ]] r="can't mount tmpfs at /.+/home: No such file or directory" echo "expected: ${r}" [[ $output =~ $r ]] # --no-home shouldn't care if /home is missing rmdir "${img}/home" run ch-run --no-home "$img" -- true mkdir "${img}/home" # restore before test fails for idempotency echo "$output" [[ $status -eq 1 ]] [[ $output = *"can't execve(2): true: No such file or directory"* ]] # --ch-ssh but no /usr/bin/ch-ssh run ch-run --ch-ssh "$img" -- true echo "$output" [[ $status -eq 1 ]] [[ $output = *"--ch-ssh: /usr/bin/ch-ssh not in image"* ]] # Everything should be restored and back to the original error. run ch-run "$img" -- true echo "$output" [[ $status -eq 1 ]] [[ $output = *"can't execve(2): true: No such file or directory"* ]] # At this point, there should be exactly two each of passwd and group # temporary files. Remove them. [[ $(find /tmp -maxdepth 1 -name 'ch-run_passwd*' | wc -l) -eq 2 ]] [[ $(find /tmp -maxdepth 1 -name 'ch-run_group*' | wc -l) -eq 2 ]] rm -v /tmp/ch-run_{passwd,group}* [[ $(find /tmp -maxdepth 1 -name 'ch-run_passwd*' | wc -l) -eq 0 ]] [[ $(find /tmp -maxdepth 1 -name 'ch-run_group*' | wc -l) -eq 0 ]] } charliecloud-0.9.10/test/run/ch-run_uidgid.bats000066400000000000000000000115641346662313000214310ustar00rootroot00000000000000load ../common setup () { scope standard if [[ -n $GUEST_USER ]]; then # Specific user requested for testing. [[ -n $GUEST_GROUP ]] guest_uid=$(id -u "$GUEST_USER") guest_gid=$(getent group "$GUEST_GROUP" | cut -d: -f3) uid_args="-u ${guest_uid}" gid_args="-g ${guest_gid}" echo "ID args: ${GUEST_USER}/${guest_uid} ${GUEST_GROUP}/${guest_gid}" echo else # No specific user requested. [[ -z $GUEST_GROUP ]] GUEST_USER=$(id -un) guest_uid=$(id -u) [[ $GUEST_USER = "$USER" ]] [[ $guest_uid -ne 0 ]] GUEST_GROUP=$(id -gn) guest_gid=$(id -g) [[ $guest_gid -ne 0 ]] uid_args= gid_args= echo "no ID arguments" echo fi } @test 'user and group as specified' { g=$(ch-run $uid_args $gid_args "$ch_timg" -- id -un) [[ $GUEST_USER = "$g" ]] g=$(ch-run $uid_args $gid_args "$ch_timg" -- id -u) [[ $guest_uid = "$g" ]] g=$(ch-run $uid_args $gid_args "$ch_timg" -- id -gn) [[ $GUEST_GROUP = "$g" ]] g=$(ch-run $uid_args $gid_args "$ch_timg" -- id -g) [[ $guest_gid = "$g" ]] } @test 'chroot escape' { # Try to escape a chroot(2) using the standard approach. ch-run $uid_args $gid_args "$ch_timg" -- /test/chroot-escape } @test '/dev /proc /sys' { # Read some files in /dev, /proc, and /sys that I shouldn't have access to. ch-run $uid_args $gid_args "$ch_timg" -- /test/dev_proc_sys.py } @test 'filesystem permission enforcement' { [[ $CH_TEST_PERMDIRS = skip ]] && skip 'user request' for d in $CH_TEST_PERMDIRS; do d="${d}/perms_test/pass" echo "verifying: ${d}" ch-run --no-home --private-tmp \ $uid_args $gid_args -b "$d" "$ch_timg" -- \ /test/fs_perms.py /mnt/0 done } @test 'mknod(2)' { # Make some device files. If this works, we might be able to later read or # write them to do things we shouldn't. Try on all mount points. # shellcheck disable=SC2016 ch-run $uid_args $gid_args "$ch_timg" -- \ sh -c '/test/mknods $(cat /proc/mounts | cut -d" " -f2)' } @test 'privileged IPv4 bind(2)' { # Bind to privileged ports on all host IPv4 addresses. # # Some supported distributions don't have "hostname --all-ip-addresses". # Hence the awk voodoo. 
addrs=$(ip -o addr | awk '/inet / {gsub(/\/.*/, " ",$4); print $4}') # shellcheck disable=SC2086 ch-run $uid_args $gid_args "$ch_timg" -- /test/bind_priv.py $addrs } @test 'remount host root' { # Re-mount the root filesystem. Notes: # # - Because we have /dev from the host, we don't need to create a new # device node. This makes the test simpler. In particular, we can # treat network and local root the same. # # - We leave the filesystem mounted even if successful, again to make # the test simpler. The rest of the tests will ignore it or maybe # over-mount something else. ch-run $uid_args $gid_args "$ch_timg" -- \ sh -c '[ -f /bin/mount -a -x /bin/mount ]' dev=$(findmnt -n -o SOURCE -T /) type=$(findmnt -n -o FSTYPE -T /) opts=$(findmnt -n -o OPTIONS -T /) run ch-run $uid_args $gid_args "$ch_timg" -- \ /bin/mount -n -o "$opts" -t "$type" "$dev" /mnt/0 echo "$output" # return codes from http://man7.org/linux/man-pages/man8/mount.8.html # busybox seems to use the same list case $status in 0) # "success" printf 'RISK\tsuccessful mount\n' return 1 ;; 1) ;& # "incorrect invocation or permissions" (we care which) 111) ;& # undocumented 255) # undocumented if [[ $output = *'ermission denied'* ]]; then printf 'SAFE\tmount exit %d, permission denied\n' "$status" return 0 elif [[ $dev = 'rootfs' && $output =~ 'No such device' ]]; then printf 'SAFE\tmount exit %d, no such device' "$status" return 0 else printf 'RISK\tmount exit %d w/o known explanation\n' "$status" return 1 fi ;; 32) # "mount failed" printf 'SAFE\tmount exited with code 32\n' return 0 ;; esac printf 'ERROR\tunknown exit code: %s\n' "$status" return 1 } @test 'setgroups(2)' { # Can we change our supplemental groups? ch-run $uid_args $gid_args "$ch_timg" -- /test/setgroups } @test 'seteuid(2)' { # Try to seteuid(2) to another UID we shouldn't have access to ch-run $uid_args $gid_args "$ch_timg" -- /test/setuid } @test 'signal process outside container' { # Send a signal to a process we shouldn't be able to signal. [[ $(pgrep -c getty) -eq 0 ]] && skip 'no getty process found' ch-run $uid_args $gid_args "$ch_timg" -- /test/signal_out.py } charliecloud-0.9.10/test/run/ch-tar2dir.bats000066400000000000000000000066351346662313000206520ustar00rootroot00000000000000load ../common @test 'ch-tar2dir: unpack image' { scope standard if ( image_ok "$ch_timg" ); then # image exists, remove so we can test new unpack rm -Rf --one-file-system "$ch_timg" fi ch-tar2dir "$ch_ttar" "$ch_imgdir" # new unpack image_ok "$ch_timg" ch-tar2dir "$ch_ttar" "$ch_imgdir" # overwrite image_ok "$ch_timg" # Did we raise hidden files correctly? [[ -e $ch_timg/.hiddenfile1 ]] [[ -e $ch_timg/..hiddenfile2 ]] [[ -e $ch_timg/...hiddenfile3 ]] } @test 'ch-tar2dir: /dev cleaning' { # issue #157 scope standard # Are all fixtures present in tarball? present=$(tar tf "$ch_ttar" | grep -F deleteme) [[ $(echo "$present" | wc -l) -eq 4 ]] echo "$present" | grep -E '^img/dev/deleteme$' echo "$present" | grep -E '^./dev/deleteme$' echo "$present" | grep -E '^dev/deleteme$' echo "$present" | grep -E '^img/mnt/dev/dontdeleteme$' # Did we remove the right fixtures? 
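    # (Expected outcome: ch-tar2dir empties the image's top-level /dev,
    # whose contents would be masked by the host /dev at run time anyway,
    # while leaving deeper paths that merely contain "dev", such as
    # /mnt/dev, untouched.)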
[[ -e $ch_timg/mnt/dev/dontdeleteme ]] [[ $(ls -Aq "${ch_timg}/dev") -eq 0 ]] ch-run "$ch_timg" -- test -e /mnt/dev/dontdeleteme } @test 'ch-tar2dir: errors' { scope quick # destination doesn't exist run ch-tar2dir "$ch_timg" /doesnotexist echo "$output" [[ $status -eq 1 ]] [[ $output = *"can't unpack: /doesnotexist does not exist"* ]] # destination is not a directory run ch-tar2dir "$ch_timg" /bin/false echo "$output" [[ $status -eq 1 ]] [[ $output = *"can't unpack: /bin/false is not a directory"* ]] # tarball doesn't exist (extension provided) run ch-tar2dir does_not_exist.tar.gz "$ch_imgdir" echo "$output" [[ $status -eq 1 ]] [[ $output = *"can't read: does_not_exist.tar.gz"* ]] ! [[ $output = *"can't read: does_not_exist.tar.gz.tar.gz"* ]] ! [[ $output = *"can't read: does_not_exist.tar.xz"* ]] [[ $output = *"no input found" ]] # tarball doesn't exist (extension inferred, doesn't contain "tar") run ch-tar2dir does_not_exist "$ch_imgdir" echo "$output" [[ $status -eq 1 ]] [[ $output = *"can't read: does_not_exist"* ]] [[ $output = *"can't read: does_not_exist.tar.gz"* ]] [[ $output = *"can't read: does_not_exist.tar.xz"* ]] [[ $output = *"no input found"* ]] # tarball doesn't exist (bad extension containing "tar") run ch-tar2dir does_not_exist.tar.foo "$ch_imgdir" echo "$output" [[ $status -eq 1 ]] [[ $output = *"can't read: does_not_exist.tar.foo"* ]] ! [[ $output = *"can't read: does_not_exist.tar.foo.tar.gz"* ]] ! [[ $output = *"can't read: does_not_exist.tar.foo.tar.xz"* ]] [[ $output = *"no input found"* ]] # tarball exists but isn't readable touch "${BATS_TMPDIR}/unreadable.tar.gz" chmod 000 "${BATS_TMPDIR}/unreadable.tar.gz" run ch-tar2dir "${BATS_TMPDIR}/unreadable.tar.gz" "$ch_imgdir" echo "$output" [[ $status -eq 1 ]] [[ $output = *"can't read: ${BATS_TMPDIR}/unreadable.tar.gz"* ]] [[ $output = *"no input found"* ]] # file exists but has bad extension touch "${BATS_TMPDIR}/foo.bar" run ch-tar2dir "${BATS_TMPDIR}/foo.bar" "$ch_imgdir" echo "$output" [[ $status -eq 1 ]] [[ $output = *"unknown extension: ${BATS_TMPDIR}/foo.bar"* ]] touch "${BATS_TMPDIR}/foo.tar.bar" run ch-tar2dir "${BATS_TMPDIR}/foo.tar.bar" "$ch_imgdir" echo "$output" [[ $status -eq 1 ]] [[ $output = *"unknown extension: ${BATS_TMPDIR}/foo.tar.bar"* ]] } charliecloud-0.9.10/test/run_first.bats000066400000000000000000000030401346662313000201050ustar00rootroot00000000000000load common @test 'prepare images directory' { scope standard shopt -s nullglob # globs that match nothing yield empty string if [[ -e $ch_imgdir ]]; then # Images directory exists. If all it contains is Charliecloud images # or supporting directories, or nothing, then we're ok. Remove any # images (this makes test-build and test-run follow the same path when # run on the same or different machines). Otherwise, error. 
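        # (An "image" is recognized by the WEIRD_AL_YANKOVIC sentinel
        # file that the test suite drops at each image root; the bind1
        # and bind2 fixtures below create the same sentinel so that this
        # loop keeps accepting them on later runs.)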
for i in "$ch_imgdir"/*; do if [[ -d $i && -f $i/WEIRD_AL_YANKOVIC ]]; then echo "found image ${i}; removing" rm -Rf --one-file-system "${i}" else echo "found non-image ${i}; aborting" false fi done fi mkdir -p "$ch_imgdir" mkdir -p "${ch_imgdir}/bind1" touch "${ch_imgdir}/bind1/WEIRD_AL_YANKOVIC" # fool logic above touch "${ch_imgdir}/bind1/file1" mkdir -p "${ch_imgdir}/bind2" touch "${ch_imgdir}/bind2/WEIRD_AL_YANKOVIC" touch "${ch_imgdir}/bind2/file2" } @test 'permissions test directories exist' { scope standard [[ $CH_TEST_PERMDIRS = skip ]] && skip 'user request' for d in $CH_TEST_PERMDIRS; do d=${d}/perms_test echo "$d" test -d "${d}" test -d "${d}/pass" test -f "${d}/pass/file" test -d "${d}/nopass" test -d "${d}/nopass/dir" test -f "${d}/nopass/file" done } @test 'syscalls/pivot_root' { scope quick cd ../examples/syscalls ./pivot_root } charliecloud-0.9.10/test/sotest/000077500000000000000000000000001346662313000165435ustar00rootroot00000000000000charliecloud-0.9.10/test/sotest/bin/000077500000000000000000000000001346662313000173135ustar00rootroot00000000000000charliecloud-0.9.10/test/sotest/bin/GITKEEP000066400000000000000000000000001346662313000203140ustar00rootroot00000000000000charliecloud-0.9.10/test/sotest/files_inferrable.txt000066400000000000000000000000561346662313000226000ustar00rootroot00000000000000sotest/bin/sotest sotest/lib/libsotest.so.1.0 charliecloud-0.9.10/test/sotest/lib/000077500000000000000000000000001346662313000173115ustar00rootroot00000000000000charliecloud-0.9.10/test/sotest/lib/GITKEEP000066400000000000000000000000001346662313000203120ustar00rootroot00000000000000charliecloud-0.9.10/test/sotest/libsotest.c000066400000000000000000000001011346662313000207070ustar00rootroot00000000000000int increment(int a); int increment(int a) { return a + 1; } charliecloud-0.9.10/test/sotest/sotest.c000066400000000000000000000002631346662313000202310ustar00rootroot00000000000000#include #include int increment(int a); int main() { int b = 8675308; printf("libsotest says %d incremented is %d\n", b, increment(b)); exit(0); } charliecloud-0.9.10/test/travis.sh000077500000000000000000000030221346662313000170660ustar00rootroot00000000000000#!/bin/bash # Warning: This script installs software and messes with your "docker" binary. # Don't run it unless you know what you are doing. # We start in the Charliecloud Git working directory. set -e PREFIX=/var/tmp # Remove sbin directories from $PATH (see issue #43). Assume none are first. echo "$PATH" for i in /sbin /usr/sbin /usr/local/sbin; do export PATH=${PATH/:$i/} done echo "$PATH" set -x case $TARBALL in export) (cd doc-src && make) make export mv charliecloud-*.tar.gz "$PREFIX" cd "$PREFIX" tar xf charliecloud-*.tar.gz cd charliecloud-* ;; export-bats) (cd doc-src && make) make export-bats mv charliecloud-*.tar.gz "$PREFIX" cd "$PREFIX" tar xf charliecloud-*.tar.gz cd charliecloud-* ;; archive) # The Travis image already has Bats installed. 
        git archive HEAD --prefix=charliecloud/ -o "$PREFIX/charliecloud.tar"
        cd "$PREFIX"
        tar xf charliecloud.tar
        cd charliecloud
        ;;
esac

make
bin/ch-run --version

if [[ $INSTALL ]]; then
    sudo make install PREFIX="$PREFIX"
    cd "$PREFIX/libexec/charliecloud"
fi

cd test

make where-bats
make test-build

if [[ $SUDO_RM_AFTER_BUILD ]]; then
    sudo rm /etc/sudoers.d/travis
fi
if [[ $SUDO_AVOID_AFTER_BUILD ]]; then
    export CH_TEST_DONT_SUDO=yes
fi
if ( sudo -v ); then
    echo "have sudo"
else
    echo "don't have sudo"
fi
echo "\$CH_TEST_DONT_SUDO=$CH_TEST_DONT_SUDO"

make test-run
make test-test
charliecloud-0.9.10/test/travis.yml000066400000000000000000000070701346662313000172610ustar00rootroot00000000000000dist: xenial
sudo: required
language: c
compiler: gcc

# This defines a "matrix" of jobs. Each combination of environment variables
# defines a different job. They run in parallel, five at a time.
#
# FIXME: Each job starts with a cold Docker cache, which wastes work heating
# it up in parallel. It would be nice if "make test-build" could be done
# serially before splitting into parallel jobs.
#
#   TARBALL=             # build in Git checkout & use embedded Bats
#   TARBALL=archive      # build from "git archive" tarball & use system Bats
#   TARBALL=export       # build from "make export" tarball & use system Bats
#   TARBALL=export-bats  # build from "make export" tarball & use embedded Bats
#   INSTALL=             # run from build directory
#   INSTALL=yes          # make install to /usr/local, run that one
#
# Additional options:
#
#   MINIMAL_DEPS            # test with minimal dependencies (no fancy tools)
#   NO_DOCKER               # remove Docker before testing
#   SUDO_RM_AFTER_BUILD     # remove sudo privileges after build
#   SUDO_AVOID_AFTER_BUILD  # set CH_TEST_DONT_SUDO after build
#
env:
  # Complete matrix of TARBALL and INSTALL.
  - TARBALL=            INSTALL=
  - TARBALL=            INSTALL=yes
  - TARBALL=archive     INSTALL=
  # - TARBALL=archive     INSTALL=yes
  - TARBALL=export      INSTALL=
  - TARBALL=export      INSTALL=yes
  - TARBALL=export-bats INSTALL=
  # - TARBALL=export-bats INSTALL=yes
  # Extra conditions
  - TARBALL= INSTALL= MINIMAL_DEPS=yes
  - TARBALL= INSTALL= NO_DOCKER=yes
  - TARBALL= INSTALL= SUDO_RM_AFTER_BUILD=yes
  - TARBALL= INSTALL= SUDO_AVOID_AFTER_BUILD=yes
  # One full-scope test. This will finish last by a lot.
  # (Disabled because it gives a >10-minute gap in output, so Travis times out.)
  # - TARBALL= INSTALL= CH_TEST_SCOPE=full

addons:
  apt:
    sources:
      - sourceline: 'ppa:projectatomic/ppa'
    packages:
      - buildah
      - cri-o-runc  # runc from Project Atomic PPA, not Ubuntu
      - fakeroot
      - pigz
      - pv
      - python3-pip
      - skopeo

install:
  # We need Python 3 because Sphinx 1.8.0 doesn't work right under Python 2
  # (see issue #241). Travis provides images pre-installed with Python 3, but
  # it's in a virtualenv and unavailable by default under sudo, in package
  # builds, and maybe elsewhere. It's simpler and fast enough to install it
  # with apt-get.
  - if [ -z "$MINIMAL_DEPS" ]; then pip3 --version; sudo pip3 install sphinx sphinx-rtd-theme; fi
  # umoci provides a binary build; no appropriate Ubuntu packages AFAICT.
  - if [ -z "$MINIMAL_DEPS" ]; then wget -nv https://github.com/openSUSE/umoci/releases/download/v0.4.3/umoci.amd64; sudo chmod 755 umoci.amd64; sudo mv umoci.amd64 /usr/local/bin/umoci; umoci --version; fi
  # Tests should still pass with only the basics installed.
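  # (Illustrative only, not executed by CI: which of the optional tools an
  # environment actually provides can be checked locally with something like
  #     for t in buildah runc pv skopeo umoci; do command -v "$t"; done
  # -- this tool list is an assumption based on the packages handled above.)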
- if [ -n "$MINIMAL_DEPS" ]; then sudo apt-get purge buildah cri-o-runc pv skopeo; fi before_script: - getconf _NPROCESSORS_ONLN - free -m - df -h - df -h /var/tmp - export CH_TEST_TARDIR=/var/tmp/tarballs - export CH_TEST_IMGDIR=/var/tmp/images - export CH_TEST_PERMDIRS='/var/tmp /run' - unset JAVA_HOME # otherwise Spark tries to use host's Java - for d in $CH_TEST_PERMDIRS; do sudo test/make-perms-test $d $USER nobody; done - sudo usermod --add-subuids 10000-65536 $USER - sudo usermod --add-subgids 10000-65536 $USER - if [ -n "$NO_DOCKER" ]; then sudo rm $(command -v docker); fi script: - test/travis.sh after_script: - free -m - df -h charliecloud-0.9.10/test/unused/000077500000000000000000000000001346662313000165255ustar00rootroot00000000000000charliecloud-0.9.10/test/unused/echo-euid.c000066400000000000000000000004051346662313000205320ustar00rootroot00000000000000/* This program prints the effective user ID on stdout and exits. It is useful for testing whether the setuid bit was effective. */ #include #include #include int main(void) { printf("%u\n", geteuid()); return 0; } charliecloud-0.9.10/test/unused/su_wrap.py000077500000000000000000000036721346662313000205720ustar00rootroot00000000000000#!/usr/bin/env python3 # This script tries to use su to gain root privileges, assuming that # /etc/shadow has been changed such that no password is required. It uses # pexpect to emulate the terminal that su requires. # # WARNING: This does not work. For example: # # $ whoami ; echo $UID EUID # reidpr # 1001 1001 # $ /bin/su -c whoami # root # $ ./su_wrap.py 2>> /dev/null # SAFE escalation failed: empty password rejected # # That is, manual su can escalate without a password (and doesn't without the # /etc/shadow hack), but when this program tries to do apparently the same # thing, su wants a password. # # I have not been able to track down why this happens. I suspect that PAM has # some extra smarts about TTY that causes it to ask for a password under # pexpect. I'm leaving the code in the repository in case some future person # can figure it out. import sys import pexpect # Invoke su. This will do one of three things: # # 1. Print 'root'; the escalation was successful. # 2. Ask for a password; the escalation was unsuccessful. # 3. Something else; this is an error. # p = pexpect.spawn('/bin/su', ['-c', 'whoami'], timeout=5, encoding='UTF-8', logfile=sys.stderr) i = p.expect_exact(['root', 'Password:']) try: if (i == 0): # printed "root" print('RISK\tescalation successful: no password requested') elif (i == 1): # asked for password p.sendline() # try empty password i = p.expect_exact(['root', 'Authentication failure']) if (i == 0): # printed "root" print('RISK\tescalation successful: empty password accepted') elif (i == 1): # explicit failure print('SAFE\tescalation failed: empty password rejected') else: assert False else: assert False except p.EOF: print('ERROR\tsu exited unexpectedly') except p.TIMEOUT: print('ERROR\ttimed out waiting for su') except AssertionError: print('ERROR\tassertion failed')
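
# Untested sketch for anyone debugging the TTY hypothesis above: pexpect
# always allocates a pseudo-terminal, while subprocess with redirected stdin
# does not, so comparing the two may reveal whether PAM keys on the terminal.
# (This subprocess usage is an assumption, not part of the original test.)
#
#     import subprocess
#     r = subprocess.run(['/bin/su', '-c', 'whoami'],
#                        stdin=subprocess.DEVNULL, capture_output=True)
#     print(r.returncode, r.stdout, r.stderr)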