btest-0.54/0000775002342100234210000000000012523041075013640 5ustar johannajohanna00000000000000btest-0.54/btest-bg-run-helper0000775002342100234210000000071212506370126017357 0ustar johannajohanna00000000000000#! /usr/bin/env bash # # Internal helper for btest-bg-run. cleanup () { if [ ! -e .exitcode ]; then echo 15 >.exitcode kill 0 &>/dev/null if [ ! -z "$pid" ]; then kill -0 $pid &>/dev/null && kill $pid sleep 1 kill -0 $pid &>/dev/null && kill -9 $pid && echo 9 >.exitcode fi fi } trap "cleanup" EXIT eval "$@ &" pid=$! echo $$ >.pid wait $pid echo $? >.exitcode pid="" btest-0.54/btest-bg-run0000775002342100234210000000124712506370126016106 0ustar johannajohanna00000000000000#! /usr/bin/env bash # # Usage: btest-bg-run <tag> <cmdline> # # Creates a new empty working directory <tag> within the current directory # and spawns <cmdline> in there in the background. It also records # a set of meta information that btest-bg-wait will read. if [ "$#" -le 1 ]; then echo "usage: `basename $0` <tag> <cmdline>" exit 1 fi cwd=`pwd` cd `dirname $0` helper=`pwd`/btest-bg-run-helper setsid=`pwd`/btest-setsid cd $cwd bname=`basename $0` dir=$1 shift if [ -e $dir ]; then echo "directory '$dir' already exists" >&2 exit 1; fi echo $dir >>.bgprocs mkdir $dir cd $dir echo $@ >.cmdline $setsid $helper "$@" >.stdout 2>.stderr & sleep 1 btest-0.54/btest.cfg.example0000664002342100234210000000055712506370126017106 0ustar johannajohanna00000000000000 [btest] TestDirs = examples TmpDir = %(testbase)s/.tmp BaselineDir = %(testbase)s/Baseline IgnoreDirs = .svn CVS .tmp IgnoreFiles = *.tmp *.swp #* [environment] CFLAGS=-O3 PATH=%(testbase)s/bin:%(default_path)s [filter-myalternative] cat=%(testbase)s/examples/my-filter [substitution-myalternative] original=filtered [environment-myalternative] MYALT=1 btest-0.54/btest-bg-wait0000775002342100234210000000574612506370126016246 0ustar johannajohanna00000000000000#! /usr/bin/env bash # # Usage: btest-bg-wait [-k] <timeout> # # Waits until all of the background processes spawned by btest-bg-run # have finished, or the given timeout (in seconds) has been exceeded. # # If the timeout triggers, all remaining processes are killed. If -k # is not given, this is considered an error and the script aborts with # error code 1. If -k is given, a timeout is not considered an error. # # Once all processes have finished (or were killed), the script # merges their stdout and stderr. If one of them returned an error, # this script does so as well. if [ "$1" == "-k" ]; then timeout_ok=1 shift else timeout_ok=0 fi if [ $# != 1 ]; then echo "usage: `basename $0` [-k] <timeout>" exit 1 fi timeout=$1 procs=`cat .bgprocs` rm -f .timeout touch .timeout function check_procs { for p in $procs; do if [ ! -e $p/.exitcode ]; then return 1; fi done # All done. return 0; } function kill_procs { for p in $procs; do if [ ! -e $p/.exitcode ]; then kill -1 `cat $p/.pid` 2>/dev/null cat $p/.cmdline >>.timeout if [ "$1" == "timeout" ]; then touch $p/.timeout fi fi done sleep 1 for p in $procs; do if [ !
-e $p/.exitcode ]; then kill -9 `cat $p/.pid` 2>/dev/null sleep 1 fi done } function collect_output { rm -f .stdout .stderr if [ $timeout_ok != 1 -a -s .timeout ]; then echo "The following processes did not terminate:" >>.stderr echo >>.stderr cat .timeout >>.stderr echo >>.stderr echo "-----------" >>.stderr fi for p in $procs; do pid=`cat $p/.pid` cmdline=`cat $p/.cmdline` printf "<<< [%s] %s\\n" "$pid" "$cmdline" >> .stdout cat $p/.stdout >>.stdout echo ">>>" >>.stdout printf "<<< [%s] %s\\n" "$pid" "$cmdline" >> .stderr cat $p/.stderr >>.stderr echo ">>>" >>.stderr done } trap kill_procs EXIT while true; do if check_procs; then # All done. break fi timeout=`expr $timeout - 1` if [ $timeout -le 0 ]; then # Timeout exceeded. kill_procs timeout if [ $timeout_ok == 1 ]; then # Just continue. break; fi # Exit with error. collect_output exit 1 fi sleep 1 done trap - EXIT # All terminated either by themselves, or with a benign timeout. collect_output # See if any returned an error. result=0 for p in $procs; do if [ -e $p/.timeout ]; then # we're here because timeouts are ok, so don't mind the exit code # if we initiated killing the process due to timeout continue fi rc=`cat $p/.exitcode` pid=`cat $p/.pid` cmdline=`cat $p/.cmdline` if [ $rc != 0 ]; then echo ">>> process $pid failed with exitcode $rc: $cmdline" >> .stderr result=1 fi done exit $result btest-0.54/README0000644002342100234210000011356612522743410014533 0ustar johannajohanna00000000000000.. -*- mode: rst-mode -*- .. .. Version number is filled in automatically. .. |version| replace:: 0.54 ============================================ BTest - A Simple Driver for Basic Unit Tests ============================================ .. rst-class:: opening The ``btest`` is a simple framework for writing unit tests. Freely borrowing some ideas from other packages, it's main objective is to provide an easy-to-use, straightforward driver for a suite of shell-based tests. Each test consists of a set of command lines that will be executed, and success is determined based on their exit codes. ``btest`` comes with some additional tools that can be used within such tests to compare output against a previously established baseline. .. contents:: Download ======== You can find the latest BTest release for download at http://www.bro.org/download. BTest's git repository is located at `git://git.bro.org/btest.git `__. You can browse the repository `here `__. This document describes BTest |version|. See the ``CHANGES`` file for version history. Prerequisites ============= BTest has the following prerequisites: - Python version >= 2.6. - Bash (note that on FreeBSD, bash is not installed by default). BTest has the following optional prerequisites to enable additional functionality: - Sphinx. - perf (Linux only). Note that on Debian/Ubuntu, you also need to install the "linux-tools" package. Installation ============ Installation is simple and standard:: tar xzvf btest-*.tar.gz cd btest-* python setup.py install This will install a few scripts: ``btest`` is the main driver program, and there are a number of further helper scripts that we discuss below (including ``btest-diff``, which is a tool for comparing output to a previously established baseline). Writing a Simple Test ===================== In the most simple case, ``btest`` simply executes a set of command lines, each of which must be prefixed with ``@TEST-EXEC:`` :: > cat examples/t1 @TEST-EXEC: echo "Foo" | grep -q Foo @TEST-EXEC: test -d . > btest examples/t1 examples.t1 ... 
ok The test passes as both command lines return success. If one of them didn't, that would be reported:: > cat examples/t2 @TEST-EXEC: echo "Foo" | grep -q Foo @TEST-EXEC: test -d DOESNOTEXIST > btest examples/t2 examples.t2 ... failed Usually you will just run all tests found in a directory:: > btest examples examples.t1 ... ok examples.t2 ... failed 1 test failed Why do we need the ``@TEST-EXEC:`` prefixes? Because the file containing the test can simultaneously act as *its input*. Let's say we want to verify a shell script:: > cat examples/t3.sh # @TEST-EXEC: sh %INPUT ls /etc | grep -q passwd > btest examples/t3.sh examples.t3 ... ok Here, ``btest`` is executing (something similar to) ``sh examples/t3.sh``, and then checks the return value as usual. The example also shows that the ``@TEST-EXEC`` prefix can appear anywhere, in particular inside the comment section of another language. Now, let's say we want to check the output of a program, making sure that it matches what we expect. For that, we first add a command line to the test that produces the output we want to check, and then run ``btest-diff`` to make sure it matches a previously recorded baseline. ``btest-diff`` is itself just a script that returns success if the output is as expected, and failure otherwise. In the following example, we use an awk script as a fancy way to print all file names starting with a dot in the user's home directory. We write that list into a file called ``dots`` and then check whether its content matches what we know from last time:: > cat examples/t4.awk # @TEST-EXEC: ls -a $HOME | awk -f %INPUT >dots # @TEST-EXEC: btest-diff dots /^\.+/ { print $1 } Note that each test gets its own little sandbox directory when run, so by creating a file like ``dots``, you aren't cluttering up anything. The first time we run this test, we need to record a baseline:: > btest -U examples/t4.awk Now, ``btest-diff`` has remembered what the ``dots`` file should look like:: > btest examples/t4.awk examples.t4 ... ok > touch ~/.NEWDOTFILE > btest examples/t4.awk examples.t4 ... failed 1 test failed If we want to see what exactly the unexpected change is that was introduced to ``dots``, there's a *diff* mode for that:: > btest -d examples/t4.awk examples.t4 ... failed % 'btest-diff dots' failed unexpectedly (exit code 1) % cat .diag == File =============================== [... current dots file ...] == Diff =============================== --- /Users/robin/work/binpacpp/btest/Baseline/examples.t4/dots 2010-10-28 20:11:11.000000000 -0700 +++ dots 2010-10-28 20:12:30.000000000 -0700 @@ -4,6 +4,7 @@ .CFUserTextEncoding .DS_Store .MacOSX +.NEWDOTFILE .Rhistory .Trash .Xauthority ======================================= % cat .stderr [... if any of the commands had printed something to stderr, that would follow here ...] Once we delete the new file, we are fine again:: > rm ~/.NEWDOTFILE > btest -d examples/t4.awk examples.t4 ... ok That's already the main functionality that the ``btest`` package provides. In the following, we describe a number of further options extending/modifying this basic approach. Reference ========= Command Line Usage ------------------ ``btest`` must be started with a list of tests and/or directories given on the command line. In the latter case, the default is to recursively scan the directories and assume all files found to be tests to perform. It is however possible to exclude specific files and directories by specifying a suitable `configuration file`_. 
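For instance, to run everything under a test directory with four parallel jobs and diagnostics for any failures, one might invoke (the directory name here is purely illustrative)::

   btest -j 4 -d testing/tests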
``btest`` returns exit code 0 if all tests have successfully passed, and 1 otherwise. ``btest`` accepts the following options: -a ALTERNATIVE, --alternative=ALTERNATIVE Activates an alternative_ configuration defined in the configuration file. This option can be given multiple times to run tests with several alternatives. If ``ALTERNATIVE`` is ``-`` that refers to running with the standard setup, which can be used to run tests both with and without alternatives by giving both. -b, --brief Does not output *anything* for tests which pass. If all tests pass, there will not be any output at all except final summary information. -c CONFIG, --config=CONFIG Specifies an alternative `configuration file`_ to use. If not specified, the default is to use a file called ``btest.cfg`` if found in the current directory. That default can be overridden with ``BTEST_CFG`` environment variable. -d, --diagnostics Reports diagnostics for all failed tests. The diagnostics include the command line that failed, its output to standard error, and potential additional information recorded by the command line for diagnostic purposes (see `@TEST-EXEC`_ below). In the case of ``btest-diff``, the latter is the ``diff`` between baseline and actual output. -D, --diagnostics-all Reports diagnostics for all tests, including those which pass. -f DIAGFILE, --file-diagnostics=DIAGFILE Writes diagnostics for all failed tests into the given file. If the file already exists, it will be overwritten. -g GROUPS, --groups=GROUPS Runs only tests assigned to the given test groups, see `@TEST-GROUP`_. Multiple groups can be given as a comma-separated list. Specifying ``-`` as a group name selects all tests that do not belong to any group. -j THREADS, --jobs=THREADS Runs up to the given number of tests in parallel. If no number is given, BTest substitutes the number of available CPU cores as reported by the OS. By default, BTest assumes that all tests can be executed concurrently without further constraints. One can however ensure serialization of subsets by assigning them to the same serialization set, see `@TEST-SERIALIZE`_. -q, --quiet Suppress information output other than about failed tests. If all tests pass, there will not be any output at all. -r, --rerun Runs only tests that failed last time. After each execution (except when updating baselines), BTest generates a state file that records the tests that have failed. Using this option on the next run then reads that file back in and limits execution to those tests found in there. -t, --tmp-keep Does not delete any temporary files created for running the tests (including their outputs). By default, the temporary files for a test will be located in ``.tmp//``, where ```` is the relative path of the test file with all slashes replaced with dots and the file extension removed (e.g., the files for ``example/t3.sh`` will be in ``.tmp/example.t3``). -T, --update-times Record new `timing`_ baselines for the current host for tests that have `@TEST-MEASURE-TIME`_. Tests are run as normal except that the timing measurements are recorded as the new baseline instead of being compared to a previous baseline. -U, --update-baseline Records a new baseline for all ``btest-diff`` commands found in any of the specified tests. To do this, all tests are run as normal except that when ``btest-diff`` is executed, it does not compute a diff but instead considers the given file to be authoritative and records it as the version to compare with in future runs. 
-u, --update-interactive Each time a ``btest-diff`` command fails in any tests that are run, btest will stop and ask whether or not the user wants to record a new baseline. -v, --verbose Shows all test command lines as they are executed. -w, --wait Interactively waits for ```` after showing diagnostics for a test. -x FILE, --xml=FILE Records test results in JUnit XML format to the given file. If the file exists already, it is overwritten. .. _configuration file: Configuration ------------- Specifics of ``btest``'s execution can be tuned with a configuration file, which by default is ``btest.cfg`` if that's found in the current directory. It can alternatively be specified with the ``--config`` command line option, or a ``BTEST_CFG`` environment variable. The configuration file is "INI-style", and an example comes with the distribution, see ``btest.cfg.example``. A configuration file has one main section, ``btest``, that defines most options; as well as an optional section for defining `environment variables`_ and further optional sections for defining alternatives_. Note that all paths specified in the configuration file are relative to ``btest``'s *base directory*. The base directory is either the one where the configuration file is located if such is given/found, or the current working directory if not. When setting values for configuration options, the absolute path to the base directory is available by using the macro ``%(testbase)s`` (the weird syntax is due to Python's ``ConfigParser`` module). Furthermore, all values can use standard "backtick-syntax" to include the output of external commands (e.g., xyz=`\echo test\`). Note that the backtick expansion is performed after any ``%(..)`` have already been replaced (including within the backticks). .. _options: Options ~~~~~~~ The following options can be set in the ``btest`` section of the configuration file: ``TestDirs`` A space-separated list of directories to search for tests. If defined, one doesn't need to specify any tests on the command line. ``TmpDir`` A directory where to create temporary files when running tests. By default, this is set to ``%(testbase)s/.tmp``. ``BaselineDir`` A directory where to store the baseline files for ``btest-diff``. By default, this is set to ``%(testbase)s/Baseline``. ``IgnoreDirs`` A space-separated list of relative directory names to ignore when scanning test directories recursively. Default is empty. An alternative way to ignore a directory is placing a file ``.btest-ignore`` in it. ``IgnoreFiles`` A space-separated list of filename globs matching files to ignore when scanning given test directories recursively. Default is empty. An alternative way to ignore a file is by placing ``@TEST-IGNORE`` in it. ``StateFile`` The name of the state file to record the names of failing tests. Default is ``.btest.failed.dat``. ``Initializer`` An executable that will be executed before each test. It runs in the same directory as the test itself will and receives the name of the test as its parameter. The return value indicates whether the test should continue; if false, the test will be considered failed. By default, there's no initializer set. ``Finalizer`` An executable that will be executed each time any test has successfully run. It runs in the same directory as the test itself and receives the name of the test as its parameter. The return value indicates whether the test should indeed be considered successful. By default, there's no finalizer set. 
``PartFinalizer`` An executable that will be executed each time a test *part* has successfully run. This operates similarly to ``Finalizer`` except that it runs after each test part rather than only at completion of the full test. See `parts`_ for more about test parts. ``CommandPrefix`` Changes the naming of all ``btest`` commands by replacing the ``@TEST-`` prefix with a custom string. For example, with ``CommandPrefix=$TEST-``, the ``@TEST-EXEC`` command becomes ``$TEST-EXEC``. ``TimingBaselineDir`` A directory where to store the host-specific `timing`_ baseline files. By default, this is set to ``%(testbase)s/Baseline/_Timing``. ``TimingDeltaPerc`` A percent value defining the `timing`_ deviation that's tolerated for a test before it's considered failed. Default is 1.0%. ``PerfPath`` Specifies a path to the ``perf`` tool, which is used on Linux to measure the execution times of tests. By default, BTest searches for ``perf`` in ``PATH``. .. _environment variables: Environment Variables ~~~~~~~~~~~~~~~~~~~~~ A special section ``environment`` defines environment variables that will be propagated to all tests:: [environment] CFLAGS=-O3 PATH=%(testbase)s/bin:%(default_path)s Note how ``PATH`` can be adjusted to include local scripts: the example above prefixes it with a local ``bin/`` directory inside the base directory, using the predefined ``default_path`` macro to refer to the ``PATH`` as it is set by default. Furthermore, by setting ``PATH`` to include the ``btest`` distribution directory, one could skip the installation of the ``btest`` package. .. _alternative: Alternatives ~~~~~~~~~~~~ BTest can run a set of tests with different settings than it would normally use by specifying an *alternative* configuration. Currently, three things can be adjusted: - Further environment variables can be set that will then be available to all the commands that a test executes. - *Filters* can modify an input file before a test uses it. - *Substitutions* can modify command lines executed as part of a test. We discuss the three separately in the following. All of them are defined by adding sections ``[<type>-<name>]`` where ``<type>`` corresponds to the type of adjustment being made and ``<name>`` is the name of the alternative. Once at least one section is defined for a name, that alternative can be enabled by BTest's ``--alternative`` flag. Environment Variables ^^^^^^^^^^^^^^^^^^^^^ An alternative can add further environment variables by defining an ``[environment-<name>]`` section:: [environment-myalternative] CFLAGS=-O3 Running ``btest`` with ``--alternative=myalternative`` will now make the ``CFLAGS`` environment variable available to all commands executed. .. _filters: Filters ^^^^^^^ Filters are a transparent way to adapt the input to a specific test command before it is executed. A filter is defined by adding a section ``[filter-<name>]`` to the configuration file. This section must have exactly one entry, and the name of that entry is interpreted as the name of a command whose input is to be filtered. The value of that entry is the name of a filter script that will be run with two arguments representing input and output files, respectively.
Example:: [filter-myalternative] cat=%(testbase)s/bin/filter-cat Once the filter is activated by running ``btest`` with ``--alternative=myalternative``, every time a ``@TEST-EXEC: cat %INPUT`` is found, ``btest`` will first execute (something similar to) ``%(testbase)s/bin/filter-cat %INPUT out.tmp``, and then subsequently ``cat out.tmp`` (i.e., the original command but with the filtered output). In the simplest case, the filter could be a no-op in the form ``cp $1 $2``. .. note:: There are a few limitations to the filter concept currently: * Filters are *always* fed with ``%INPUT`` as their first argument. We should add a way to filter other files as well. * Filtered commands are only recognized if they are directly starting the command line. For example, ``@TEST-EXEC: ls | cat >outout`` would not trigger the example filter above. * Filters are only executed for ``@TEST-EXEC``, not for ``@TEST-EXEC-FAIL``. .. _substitution: Substitutions ^^^^^^^^^^^^^ Substitutions are similar to filters, yet they do not adapt the input but the command line being executed. A substitution is defined by adding a section ``[substitution-]`` to the configuration file. For each entry in this section, the entry's name specifies the command that is to be replaced with something else given as its value. Example:: [substitution-myalternative] gcc=gcc -O2 Once the substitution is activated by running ``btest`` with ``--alternative=myalternative``, every time a ``@TEST-EXEC`` executes ``gcc``, that is replaced with ``gcc -O2``. The replacement is simple string substitution so it works not only with commands but anything found on the command line; it however only replaces full words, not subparts of words. Writing Tests ------------- ``btest`` scans a test file for lines containing keywords that trigger certain functionality. Currently, the following keywords are supported: .. _@TEST-EXEC: ``@TEST-EXEC: `` Executes the given command line and aborts the test if it returns an error code other than zero. The ```` is passed to the shell and thus can be a pipeline, use redirection, and any environment variables specified in ```` will be expanded, etc. When running a test, the current working directory for all command lines will be set to a temporary sandbox (and will be deleted later). There are two macros that can be used in ````: ``%INPUT`` will be replaced with the full pathname of the file defining the test; and ``%DIR`` will be replaced with the directory where the test file is located. The latter can be used to reference further files also located there. In addition to environment variables defined in the configuration file, there are further ones that are passed into the commands: ``TEST_DIAGNOSTICS`` A file where further diagnostic information can be saved in case a command fails. ``--diagnostics`` will show this file. (This is also where ``btest-diff`` stores its diff.) ``TEST_MODE`` This is normally set to ``TEST``, but will be ``UPDATE`` if ``btest`` is run with ``--update-baseline``, or ``UPDATE_INTERACTIVE`` if run with ``--update-interactive``. ``TEST_BASELINE`` The name of a directory where the command can save permanent information across ``btest`` runs. (This is where ``btest-diff`` stores its baseline in ``UPDATE`` mode.) ``TEST_NAME`` The name of the currently executing test. ``TEST_VERBOSE`` The path of a file where the test can record further information about its execution that will be included with btest's ``--verbose`` output. 
This is for further tracking the execution of commands and should generally generate output that follows a line-based structure. ``TEST_BASE`` The btest base directory, i.e., the directory where ``btest.cfg`` is located. ``TEST_PART`` The test part number (see `parts`_ for more about test parts). .. note:: If a command returns the special exit code 100, the test is considered failed, however subsequent test commands are still run. ``btest-diff`` uses this special exit code to indicate that no baseline has yet been established. If a command returns the special exit code 200, the test is considered failed and all further test executions are aborted. ``@TEST-EXEC-FAIL: `` Like ``@TEST-EXEC``, except that this expects the command to *fail*, i.e., the test is aborted when the return code is zero. ``@TEST-REQUIRES: `` Defines a condition that must be met for the test to be executed. The given command line will be run before any of the actual test commands, and it must return success for the test to continue. If it does not return success, the rest of the test will be skipped but doing so will not be considered a failure of the test. This allows to write conditional tests that may not always make sense to run, depending on whether external constraints are satisfied or not (say, whether a particular library is available). Multiple requirements may be specified and then all must be met for the test to continue. ``@TEST-ALTERNATIVE: `` Runs this test only for the given alternative (see alternative_). If ```` is ``default``, the test executes when BTest runs with no alternative given (which however is the default anyway). ``@TEST-NOT-ALTERNATIVE: `` Ignores this test for the given alternative (see alternative_). If ```` is ``default``, the test is ignored if BTest runs with no alternative given. ``@TEST-COPY-FILE: `` Copy the given file into the test's directory before the test is run. If ```` is a relative path, it's interpreted relative to the BTest's base directory. Environment variables in ```` will be replaced if enclosed in ``${..}``. This command can be given multiple times. ``@TEST-START-NEXT`` This is a short-cut for defining multiple test inputs in the same file, all executing with the same command lines. When ``@TEST-START-NEXT`` is encountered, the test file is initially considered to end at that point, and all ``@TEST-EXEC-*`` are run with an ``%INPUT`` truncated accordingly. Afterwards, a *new* ``%INPUT`` is created with everything *following* the ``@TEST-START-NEXT`` marker, and the *same* commands are run again (further ``@TEST-EXEC-*`` will be ignored). The effect is that a single file can actually define two tests, and the ``btest`` output will enumerate them:: > cat examples/t5.sh # @TEST-EXEC: cat %INPUT | wc -c >output # @TEST-EXEC: btest-diff output This is the first test input in this file. # @TEST-START-NEXT ... and the second. > ./btest -D examples/t5.sh examples.t5 ... ok % cat .diag == File =============================== 119 [...] examples.t5-2 ... ok % cat .diag == File =============================== 22 [...] Multiple ``@TEST-START-NEXT`` can be used to create more than two tests per file. ``@TEST-START-FILE `` This is used to include an additional input file for a test right inside the test file. All lines following the keyword will be written into the given file (and removed from the test's `%INPUT`) until a terminating ``@TEST-END-FILE`` is found. 
Example:: > cat examples/t6.sh # @TEST-EXEC: awk -f %INPUT output # @TEST-EXEC: btest-diff output { lines += 1; } END { print lines; } @TEST-START-FILE foo.dat 1 2 3 @TEST-END-FILE > btest -D examples/t6.sh examples.t6 ... ok % cat .diag == File =============================== 3 Multiple such files can be defined within a single test. Note that this is only one way to use further input files. Another is to store a file in the same directory as the test itself, making sure it's ignored via ``IgnoreFiles``, and then refer to it via ``%DIR/``. ``@TEST-IGNORE`` This is used to indicate that this file should be skipped (i.e., no test commands in this file will be executed). An alternative way to ignore files is by using the ``IgnoreFiles`` option in the btest configuration file. .. _@TEST-GROUP: ``@TEST-GROUP: `` Assigns the test to a group of name ````. By using option ``-g`` one can limit execution to all tests that belong to a given group (or a set of groups). .. _@TEST-SERIALIZE: ``@TEST-SERIALIZE: `` When using option ``-j`` to parallelize execution, all tests that specify the same serialization set are guaranteed to run sequentially. ```` is an arbitrary user-chosen string. ``@TEST-KNOWN-FAILURE`` Marks a test as known to currently fail. This only changes BTest's output, which upon failure will indicate that that is expected; it won't change the test's processing otherwise. The keyword doesn't take any arguments but one could add a descriptive text, as in :: .. @TEST-KNOWN-FAILURE: We know this fails because .... .. _@TEST-MEASURE-TIME: ``@TEST-MEASURE-TIME`` Measures execution time for this test and compares it to a previously established `timing`_ baseline. If it deviates significantly, the test will be considered failed. .. _parts: Splitting Tests into Parts ========================== One can split a single test across multiple files by adding a numerical ``#`` postfix to their names, where each ```` represents a separate part of the test. ``btest`` will combine all of a test's parts in numerical order and execute them subsequently within the same sandbox. Example:: > cat examples/t7.sh#1 # @TEST-EXEC: echo Part 1 - %INPUT >>output > cat examples/t7.sh#2 # @TEST-EXEC: echo Part 2 - %INPUT >>output > cat examples/t7.sh#3 # @TEST-EXEC: btest-diff output > btest -D examples/t7.sh examples.t7 ... ok % cat .diag == File =============================== Part 1 - /Users/robin/bro/docs/aux/btest/.tmp/examples.t7/t7.sh#1 Part 2 - /Users/robin/bro/docs/aux/btest/.tmp/examples.t7/t7.sh#2 Note how ``output`` contains the output of both ``t7.sh#1`` and ``t7.sh#2``, however in each case ``%INPUT`` refers to the corresponding part. For the first part of a test, one can also omit the ``#1`` postfix in the filename. Canonifying Diffs ================= ``btest-diff`` has the capability to filter its input through an additional script before it compares the current version with the baseline. This can be useful if certain elements in an output are *expected* to change (e.g., timestamps). The filter can then remove/replace these with something consistent. To enable such canonification, set the environment variable ``TEST_DIFF_CANONIFIER`` to a script reading the original version from stdin and writing the canonified version to stdout. Note that both baseline and current output are passed through the filter before their differences are computed. 
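For illustration, a canonifier can be as simple as a small shell filter; the following is only a sketch (the script name and the exact timestamp pattern are assumptions, not part of the distribution)::

   #! /usr/bin/env bash
   #
   # Hypothetical canonifier: reads a test's output on stdin and writes the
   # canonified version to stdout, replacing ISO-style timestamps with a
   # fixed placeholder so they never show up in the diff.
   sed 's/[0-9]\{4\}-[0-9]\{2\}-[0-9]\{2\} [0-9:]\{8\}/XXXX-XX-XX XX:XX:XX/g'

Pointing ``TEST_DIFF_CANONIFIER`` at such a script (e.g., in the ``[environment]`` section of ``btest.cfg``) then makes ``btest-diff`` compare the filtered versions of both the baseline and the current output.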
Running Processes in the Background =================================== Sometimes processes need to be spawned in the background for a test, in particular if multiple processes need to cooperate in some fashion. ``btest`` comes with two helper scripts to make life easier in such a situation: ``btest-bg-run `` This is a script that runs ```` in the background, i.e., it's like using ``cmdline &`` in a shell script. Test execution continues immediately with the next command. Note that the spawned command is *not* run in the current directory, but instead in a newly created sub-directory called ````. This allows spawning multiple instances of the same process without needing to worry about conflicting outputs. If you want to access a command's output later, like with ``btest-diff``, use ``/foo.log`` to access it. ``btest-bg-wait [-k] `` This script waits for all processes previously spawned via ``btest-bg-run`` to finish. If any of them exits with a non-zero return code, ``btest-bg-wait`` does so as well, indicating a failed test. ```` is mandatory and gives the maximum number of seconds to wait for any of the processes to terminate. If any process hasn't done so when the timeout expires, it will be killed and the test is considered to be failed as long as ``-k`` is not given. If ``-k`` is given, pending processes are still killed but the test continues normally, i.e., non-termination is not considered a failure in this case. This script also collects the processes' stdout and stderr outputs for diagnostics output. .. _timing: Timing Execution ================ ``btest`` can time execution of tests and report significant deviations from past runs. As execution time is inherently system-specific it keeps separate per-host timing baselines for that. Furthermore, as time measurements tend to make sense only for individual, usually longer running tests, they are activated on per test basis by adding a `@TEST-MEASURE-TIME`_ directive. The test will then execute as usual yet also record the duration for which it executes. After the timing baselines are created (with the ``--update-times`` option), further runs on the same host will compare their times against that baseline and declare a test failed if it deviates by more than, by default, 1%. (To tune the behaviour, look at the ``Timing*`` `options`_.) If a test requests measurement but BTest can't find a timing baseline or the necessary tools to perform timing measurements, then it will ignore the request. As timing for a test can deviate quite a bit even on the same host, BTest does not actually measure *time* but the number of CPU instructions that a test executes, which tends to be more stable. That however requires the right tools to be in place. On Linux, BTest leverages `perf `_. By default, BTest will search for ``perf`` in the ``PATH``; you can specify a different path to the binary by setting ``PerfPath`` in ``btest.cfg``. Integration with Sphinx ======================= ``btest`` comes with an extension module for the documentation framework `Sphinx `_. The extension module provides two new directives called ``btest`` and ``btest-include``. The ``btest`` directive allows writing a test directly inside a Sphinx document, and then the output from the test's command is included in the generated documentation. The ``btest-include`` directive allows for literal text from another file to be included in the generated documentation. 
The tests from both directives can also be run externally and will catch if any changes to the included content occur. The following walks through setting this up. Configuration ------------- First, you need to tell Sphinx a base directory for the ``btest`` configuration as well as a directory in there where to store tests it extracts from the Sphinx documentation. Typically, you'd just create a new subdirectory ``tests`` in the Sphinx project for the ``btest`` setup and then store the tests in there in, e.g., ``doc/``:: cd mkdir tests mkdir tests/doc Then add the following to your Sphinx ``conf.py``:: extensions += ["btest-sphinx"] btest_base="tests" # Relative to Sphinx-root. btest_tests="doc" # Relative to btest_base. Next, create a ``btest.cfg`` in ``tests/`` as usual and add ``doc/`` to the ``TestDirs`` option. Also, add a finalizer to ``btest.cfg``:: [btest] ... PartFinalizer=btest-diff-rst Including a Test into a Sphinx Document --------------------------------------- The ``btest`` extension provides a new directive to include a test inside a Sphinx document:: .. btest:: Here, ```` is a custom name for the test; it will be stored in ``btest_tests`` under that name (with a file extension of ``.btest``). ```` is just a standard test as you would normally put into one of the ``TestDirs``. Example:: .. btest:: just-a-test @TEST-EXEC: expr 2 + 2 When you now run Sphinx, it will (1) store the test content into ``tests/doc/just-a-test.btest`` (assuming the above path layout), and (2) execute the test by running ``btest`` on it. You can then run ``btest`` manually in ``tests/`` as well and it will execute the test just as it would in a standard setup. If a test fails when Sphinx runs it, there will be a corresponding error and include the diagnostic output into the document. By default, nothing else will be included into the generated documentation, i.e., the above test will just turn into an empty text block. However, ``btest`` comes with a set of scripts that you can use to specify content to be included. As a simple example, ``btest-rst-cmd `` will execute a command and (if it succeeds) include both the command line and the standard output into the documentation. Example:: .. btest:: another-test @TEST-EXEC: btest-rst-cmd echo Hello, world! When running Sphinx, this will render as: .. code:: # echo Hello, world! Hello, world! The same ```` can be used multiple times, in which case each entry will become one part of a joint test. ``btest`` will execute all parts subsequently within a single sandbox, and earlier results will thus be available to later parts. When running ``btest`` manually in ``tests/``, the ``PartFinalizer`` we added to ``btest.cfg`` (see above) compares the generated reST code with a previously established baseline, just like ``btest-diff`` does with files. To establish the initial baseline, run ``btest -u``, like you would with ``btest-diff``. Scripts ------- The following Sphinx support scripts come with ``btest``: ``btest-rst-cmd [options] `` By default, this executes ```` and includes both the command line itself and its standard output into the generated documentation (but only if the command line succeeds). See above for an example. This script provides the following options: -c ALTERNATIVE_CMDLINE Show ``ALTERNATIVE_CMDLINE`` in the generated documentation instead of the one actually executed. (It still runs the ```` given outside the option.) -d Do not actually execute ````; just format it for the generated documentation and include no further output. 
-f FILTER_CMD Pipe the command line's output through ``FILTER_CMD`` before including. If ``-r`` is given, it filters the file's content instead of stdout. -o Do not include the executed command into the generated documentation, just its output. -r FILE Insert ``FILE`` into output instead of stdout. The ``FILE`` must be created by a previous ``@TEST-EXEC`` or ``@TEST-COPY-FILE``. -n N Include only ``N`` lines of output, adding a ``[...]`` marker if there's more. ``btest-rst-include [options] <file>`` Includes ``<file>`` inside a code block. The ``<file>`` must be created by a previous ``@TEST-EXEC`` or ``@TEST-COPY-FILE``. This script provides the following options: -n N Include only ``N`` lines of output, adding a ``[...]`` marker if there's more. ``btest-rst-pipe <cmdline>`` Executes ``<cmdline>``, includes its standard output inside a code block (but only if the command line succeeds). Note that this script does not include the command line itself into the code block, just the output. .. note:: All these scripts can be run directly from the command line to show the reST code they generate. .. note:: ``btest-rst-cmd`` can do everything the other scripts provide if you give it the right options. In fact, the other scripts are provided just for convenience and leverage ``btest-rst-cmd`` internally. Including Literal Text ---------------------- The ``btest`` Sphinx extension module also provides a directive ``btest-include`` that functions like ``literalinclude`` (including all its options) but also creates a test checking the included content for changes. As one further extension, the directive expands environment variables of the form ``${var}`` in its argument. Example:: .. btest-include:: ${var}/path/to/file When you now run Sphinx, it will automatically generate a test file in the directory specified by the ``btest_tests`` variable in the Sphinx ``conf.py`` configuration file. In this example, the filename would be ``include-path_to_file.btest`` (it automatically adds a prefix of "include-" and a file extension of ".btest"). When you run the tests externally, the tests generated by the ``btest-include`` directive will check if any of the included content has changed (you'll first need to run ``btest -u`` to establish the initial baseline). License ======= btest is open-source under a BSD license. btest-0.54/MANIFEST.in0000664002342100234210000000031412506370126015377 0ustar johannajohanna00000000000000include CHANGES include COPYING include MANIFEST include MANIFEST.in include Makefile include README include VERSION include btest.cfg.example include setup.py graft Baseline graft examples graft testing btest-0.54/btest-ask-update0000775002342100234210000000170212506370126016746 0ustar johannajohanna00000000000000#! /usr/bin/env bash # # Helper script that asks whether the user wants to update a baseline. # # Return code: # # 0: Yes, update and continue. # 1: No, don't update but continue. # 200: No, don't update and abort. while true; do echo " failed" >>/dev/tty echo ">> Type 'c' to continue, 'd' to see diagnostics, 'u' to update baseline, and 'a' to abort." >/dev/tty read -s -n 1 key </dev/tty case "$key" in [uU]) echo ">> Updating baseline ..." >/dev/tty; exit 0;; [cC]) echo ">> Continuing ..." >/dev/tty; exit 1;; [aA]) echo ">> Aborting ..." >/dev/tty; exit 200;; [dD]) if [ "$TEST_DIAGNOSTICS" != "" -a "$TEST_DIAGNOSTICS" != "/dev/stdout" ]; then less -S $TEST_DIAGNOSTICS </dev/tty >/dev/tty else echo "Do not have diagnostics." >/dev/tty fi ;; *) echo ">> Answer not recognized, try again ..."
>/dev/tty ;; esac done btest-0.54/btest0000755002342100234210000016267512522743407014735 0ustar johannajohanna00000000000000#! /usr/bin/env python # # Main test driver. import os import os.path import sys import shutil import fnmatch import optparse import re import tempfile import subprocess import copy import glob import fnmatch import ConfigParser import time import multiprocessing import multiprocessing.managers import multiprocessing.sharedctypes import xml.dom.minidom import socket import resource import struct import uuid import tempfile from datetime import datetime VERSION = "0.54" # Automatically filled in. Name ="btest" Config = None try: ConfigDefault = os.environ["BTEST_CFG"] except KeyError: ConfigDefault = "btest.cfg" def output(msg, nl=True, file=None): if not file: file = sys.stderr if nl: print >>file, msg else: print >>file, msg, def warning(msg): print >>sys.stderr, "warning:", msg def error(msg): print >>sys.stderr, msg sys.exit(1) def mkdir(dir): if not os.path.exists(dir): try: os.makedirs(dir) except OSError, e: error("cannot create directory %s: %s" % (dir, e)) else: if not os.path.isdir(dir): error("path %s exists but is not a directory" % dir) def which(cmd): # Adapted from http://stackoverflow.com/a/377028 def is_exe(fpath): return os.path.isfile(fpath) and os.access(fpath, os.X_OK) (fpath, fname) = os.path.split(cmd) if fpath: if is_exe(cmd): return cmd else: for path in os.environ["PATH"].split(os.pathsep): path = path.strip('"') exe_file = os.path.join(path, cmd) if is_exe(exe_file): return exe_file return None def platform(): return os.uname()[0] def getOption(key, default): try: return Config.get("btest", key) except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): return default reBackticks = re.compile(r"`(([^`]|\`)*)`") def readStateFile(): try: # Read state file. tests = [] for line in open(StateFile): line = line.strip() if not line or line.startswith("#"): continue tests += [line] tests = findTests(tests, output_handler) except IOError: return (False, []) return (True, tests) # We monkey-patch the OptionParser to expand backticks. def cpExpandBackticks(self, section, option, rawval, vars): def _exec(m): cmd = m.group(1) if not cmd: return "" try: return subprocess.Popen(cmd.split(), stdout=subprocess.PIPE).communicate()[0].strip() except OSError, e: error("cannot execute '%s': %s" % (cmd, e)) value = cpOriginalInterpolate(self, section, option, rawval, vars) value = reBackticks.sub(_exec, value) return value # We monkey-patch the OptionParser to provide an alternative method that does not include # defaults in returns section items. def cpItemsNoDefaults(self, section): try: items = self._sections[section].items() except KeyError: raise ConfigParser.NoSectionError(section) d = self._defaults.copy() result = {} for (key, value) in items: result[key] = cpExpandBackticks(self, section, key, value, d) return result.items() # Replace environment variables in string. def replaceEnvs(s): def replace_with_env(m): try: return os.environ[m.group(1)] except KeyError: return "" return RE_ENV.sub(replace_with_env, s) # Execute one of test's command line *cmdline*. *measure_time* indicates if # timing measurement is desired. *kw_args* are further keyword arguments # interpreted the same way as with subprocess.check_call(). 
# Returns a 3-tuple (success, rc, time) where the former two likewise # have the same meaning as with runSubprocess(), and 'time' is an integer # value corresponding to the commands execution time measured in some # appropiate integer measure. If 'time' is negative, that's an indicator # that time measurement wasn't possible and the value is to be ignored. def runTestCommandLine(cmdline, measure_time, **kwargs): if measure_time and Timer: return Timer.timeSubprocess(cmdline, **kwargs) else: (success, rc) = runSubprocess(cmdline, **kwargs) return (success, rc, -1) # Runs a subprocess. Takes same arguments as subprocess.check_call() # and returns a 2-tuple (success, rc) where *success* is a boolean # indicating if the command executed, and *rc* is its exit code if it did. def runSubprocess(*args, **kwargs): def child(q): try: subprocess.check_call(*args, **kwargs) success = True rc = 0 except subprocess.CalledProcessError, e: success = False rc = e.returncode except KeyboardInterrupt: success = False rc = 0 q.put([success, rc]) try: q = multiprocessing.Queue() p = multiprocessing.Process(target=child, args=(q,)) p.start() result = q.get() p.join() except KeyboardInterrupt: # Bailout here directly as otherwise we'll a bunch of errors # from all the childs. os._exit(1) return result cpOriginalInterpolate = ConfigParser.ConfigParser._interpolate ConfigParser.ConfigParser._interpolate = cpExpandBackticks ConfigParser.ConfigParser.itemsNoDefaults = cpItemsNoDefaults # Description of an alternative configuration. class Alternative: def __init__(self, name): self.name = name self.filters = {} self.substitutions = {} self.envs = {} # Main class distributing the work across threads. class TestManager(multiprocessing.managers.SyncManager): def __init__(self, *args, **kwargs): super(TestManager, self).__init__(*args, **kwargs) def run(self, tests, output_handler): self.start() self._output_handler = output_handler self._lock = self.RLock() self._succeeded = multiprocessing.sharedctypes.RawValue('i', 0) self._failed = multiprocessing.sharedctypes.RawValue('i', 0) self._skipped = multiprocessing.sharedctypes.RawValue('i', 0) self._tests = self.list(tests) self._failed_tests = self.list([]) self._num_tests = len(self._tests) self._timing = self.loadTiming() num_threads = Options.threads if num_threads: threads = [] for i in range(num_threads): t = multiprocessing.Process(name="#%d" % (i+1), target=lambda : self.threadRun(i)) t.start() threads += [t] for t in threads: t.join() else: # No threads, just run all directly. self.threadRun(0) # Record failed tests if not updating. if Options.mode != "UPDATE" and Options.mode != "UPDATE_INTERACTIVE": try: state = open(StateFile, "w") except IOError, e: error("cannot open state file %s" % StateFile) for t in self._failed_tests: print >>state, t state.close() return (self._succeeded.value, self._failed.value, self._skipped.value) def percentage(self): if not self._num_tests: return 0 count = self._succeeded.value + self._failed.value + self._skipped.value return 100.0 * count / self._num_tests def threadRun(self, thread_num): all_tests = [] while True: tests = self.nextTests(thread_num) if tests == None: # No more work for us. return all_tests += tests for t in tests: try: t.run(self) self.testReplayOutput(t) except KeyboardInterrupt: if Options.threads: # Caught by parent thread. 
return else: # Rethrow raise if Options.update_times: self.saveTiming(all_tests) def nextTests(self, thread_num): with self._lock: for i in range(len(self._tests)): t = self._tests[i] if not t: continue if Options.threads and t.serialize: if hash(t.serialize) % Options.threads != thread_num: # Not ours. continue # We'll execute it, delete from queue. del self._tests[i] if Options.alternatives: tests = [] for alternative in Options.alternatives: if alternative in t.ignore_alternatives: continue if t.include_alternatives and not alternative in t.include_alternatives: continue alternative_test = copy.deepcopy(t) if alternative == "-": alternative = "" alternative_test.setAlternative(alternative) tests += [alternative_test] else: if t.include_alternatives and not "default" in t.include_alternatives: tests = [] elif "default" in t.ignore_alternatives: tests = [] else: tests = [t] return tests # No more tests for us. return None def lock(self): return self._lock def testStart(self, test): with self._lock: self._output_handler.testStart(test) def testCommand(self, test, cmdline): with self._lock: self._output_handler.testCommand(test, cmdline) def testSucceeded(self, test): msg = "ok" if test.known_failure: msg += " (but expected to fail)" msg += test.timePostfix() with self._lock: self._output_handler.testSucceeded(test, msg) self._succeeded.value += 1 def testFailed(self, test): msg = "failed" if test.known_failure: msg += " (expected)" msg += test.timePostfix() with self._lock: self._output_handler.testFailed(test, msg) self._failed.value += 1 if not test.known_failure: self._failed_tests += [test.name] def testSkipped(self, test): msg = "not available, skipped" with self._lock: self._output_handler.testSkipped(test, msg) self._skipped.value += 1 def testReplayOutput(self, test): with self._lock: self._output_handler.replayOutput(test) def testTimingBaseline(self, test): return self._timing.get(test.name, -1) # Returns the name of the file to store the timing baseline in for this host. def timingPath(self): id = uuid.uuid3(uuid.NAMESPACE_DNS, str(uuid.getnode())) return os.path.abspath(os.path.join(BaselineTimingDir, id.hex)) # Loads baseline timing information for this host if available. Returns # empty directory if not. def loadTiming(self): timing = {} with self._lock: path = self.timingPath() if not os.path.exists(path): return {} for line in open(path): (k, v) = line.split() timing[k] = float(v) return timing # Updates the timing baseline for the given tests on this host. def saveTiming(self, tests): with self._lock: changed = False timing = self.loadTiming() for t in tests: if t and t.measure_time and t.utime >= 0: changed = True timing[t.name] = t.utime if not changed: return path = self.timingPath() (dir, base) = os.path.split(path) mkdir(dir) out = open(path, "w") for (k, v) in timing.items(): print >>out, "%s %u" % (k, v) out.close() # One @TEST-{EXEC,REQUIRES} command line. class CmdLine: def __init__(self, cmdline, expect_success, part, file): self.cmdline = cmdline self.expect_success = expect_success self.part = part self.file = file # One test. 
class Test(object): def __init__(self, file, output_handler): self.dir = os.path.abspath(os.path.dirname(file)) self.name = None self.basename = None self.part = -1 self.number = 1 self.serialize = [] self.groups = set() self.cmdlines = [] self.tmpdir = None self.diag = None self.verbose = None self.baseline = None self.alternative = None self.ignore_alternatives = [] self.include_alternatives = [] self.files = [] self.requires = [] self.copy_files = [] self.output_handler = output_handler self.start = None self.contents = [] self.cloned = False self.known_failure = False self.measure_time = False self.utime = -1 self.utime_base = -1 self.utime_perc = 0.0 self.utime_exceeded = False def displayName(self): name = self.name if self.alternative: name = "%s [%s]" % (name, self.alternative) return name def setAlternative(self, alternative): self.alternative = alternative # Parse the test's content. def parse(self, content, file): cmds = {} for line in content: m = RE_IGNORE.search(line) if m: # Ignore this file. return False for (tag, regexp, multiple, optional, group1, group2) in Commands: m = regexp.search(line) if m: value = None if group1 >= 0: value = m.group(group1) if group2 >= 0: value = (value, m.group(group2)) if not multiple: if tag in cmds: error("%s: %s defined multiple times." % (file, tag)) cmds[tag] = value else: try: cmds[tag] += [value] except KeyError: cmds[tag] = [value] # Make sure all non-optional commands are there. for (tag, regexp, multiple, optional, group1, group2) in Commands: if not optional and not tag in cmds: error("%s: mandatory %s command not found." % (file, tag)) basename = file part = 1 m = RE_PART.match(file) if m: basename = m.group(1) part = int(m.group(2)) name = os.path.relpath(basename, TestBase) (name, ext) = os.path.splitext(name) name = name.replace("/", ".") while name.startswith("."): name = name[1:] self.name = name self.part = part self.basename = name self.contents += [(file, content)] for (cmd, success) in cmds["exec"]: cmdline = CmdLine(cmd.strip(), success != "-FAIL", part, file) self.cmdlines += [cmdline] if PartFinalizer != "": finalizer = CmdLine("%s %s" % (PartFinalizer, self.name), True, part, "") self.cmdlines += [finalizer] if "serialize" in cmds: self.serialize = cmds["serialize"] if "group" in cmds: self.groups |= set(cmd.strip() for cmd in cmds["group"]) if "requires" in cmds: for cmd in cmds["requires"]: cmdline = CmdLine(cmd.strip(), True, part, file) self.requires += [cmdline] if "copy-file" in cmds: self.copy_files += [cmd.strip() for cmd in cmds["copy-file"]] if "alternative" in cmds: self.include_alternatives = [cmd.strip() for cmd in cmds["alternative"]] if "not-alternative" in cmds: self.ignore_alternatives = [cmd.strip() for cmd in cmds["not-alternative"]] if "known-failure" in cmds: self.known_failure = True if "measure-time" in cmds: self.measure_time = True return True # Copies all control information over to a new Test but replacing the test's # content with a new one. 
def clone(self, content): clone = Test("", self.output_handler) clone.number = self.number + 1 clone.basename = self.basename clone.name = "%s-%d" % (self.basename, clone.number) clone.serialize = clone.serialize clone.groups = self.groups clone.cmdlines = self.cmdlines clone.known_failure = self.known_failure clone.measure_time = self.measure_time assert(len(self.contents) == 1) clone.contents = [(self.contents[0][0], content)] self.cloned = True return clone def mergePart(self, part): if self.cloned or part.cloned: error("cannot use @TEST-START-NEXT with tests split across parts (%s)" % self.basename) self.serialize += part.serialize self.groups |= part.groups self.cmdlines += part.cmdlines self.ignore_alternatives += part.ignore_alternatives self.include_alternatives += part.include_alternatives self.files += part.files self.requires += part.requires self.copy_files += part.copy_files self.contents += part.contents self.known_failure |= part.known_failure self.measure_time |= part.measure_time def run(self, mgr): self.start = time.time() self.mgr = mgr mgr.testStart(self) self.tmpdir = os.path.abspath(os.path.join(TmpDir, self.name)) self.diag = os.path.join(self.tmpdir, ".diag") self.verbose = os.path.join(self.tmpdir, ".verbose") self.baseline = os.path.abspath(os.path.join(BaselineDir, self.name)) self.diagmsgs = [] self.utime = -1 self.utime_base = self.mgr.testTimingBaseline(self) self.utime_perc = 0.0; self.utime_exceeded = False self.rmTmp() mkdir(self.baseline) mkdir(self.tmpdir) for (fname, lines) in self.files: fname = os.path.join(self.tmpdir, fname) subdir = os.path.dirname(fname) if subdir != "": mkdir(subdir) try: ffile = open(fname, "w") except IOError, e: error("cannot write test's additional file '%s'" % fname) for line in lines: print >>ffile, line, ffile.close() for file in self.copy_files: src = replaceEnvs(file) try: shutil.copy2(src, self.tmpdir) except IOError, e: error("cannot copy %s: %s" % (src, e)) for (file, content) in self.contents: localfile = os.path.join(self.tmpdir, os.path.basename(file)) out = open(localfile, "w") for line in content: print >>out, line, out.close() self.log = open(os.path.join(self.tmpdir, ".log"), "w") self.stdout = open(os.path.join(self.tmpdir, ".stdout"), "w") self.stderr = open(os.path.join(self.tmpdir, ".stderr"), "w") for cmd in self.requires: (success, rc) = self.execute(cmd, apply_alternative=self.alternative) if not success: self.mgr.testSkipped(self) self.finish() return failures = 0 cmds = [] if Initializer != "": initializer = CmdLine("%s %s" % (Initializer, self.name), True, 1, "") cmds += [initializer] cmds += self.cmdlines if Finalizer != "": finalizer = CmdLine("%s %s" % (Finalizer, self.name), True, 1, "") cmds += [finalizer] skip_part = -1 for cmd in cmds: if skip_part >= 0 and skip_part == cmd.part: continue (success, rc) = self.execute(cmd, apply_alternative=self.alternative) if not success: failures += 1 if Options.sphinx: # We still execute the remaining commands and # raise a failure for each one that fails. self.mgr.testFailed(self) skip_part = cmd.part continue if failures == 1: self.mgr.testFailed(self) if rc == 200: # Abort all tests. sys.exit(1) if rc != 100: break self.utime_perc = 0.0 self.utime_exceeded = False if failures == 0: # If we don't have a timing baseline, we silently ignore that so that # on systems that can't measure execution time, the test will just pass. 
if self.utime_base >= 0 and self.utime >= 0: delta = getOption("TimingDeltaPerc", "1.0") self.utime_perc = (100.0 * (self.utime - self.utime_base) / self.utime_base) self.utime_exceeded = (abs(self.utime_perc) > float(delta)) if self.utime_exceeded and not Options.update_times: self.diagmsgs += ["'%s' exceeded permitted execution time deviation%s" % (self.name, self.timePostfix())] self.mgr.testFailed(self) else: self.mgr.testSucceeded(self) if not Options.tmps: self.rmTmp() self.finish() def finish(self): try: # Try removing the baseline directory. If it works, it's empty, i.e., no baseline was created. os.rmdir(self.baseline) except OSError, e: pass self.log.close() self.stdout.close() self.stderr.close() def execute(self, cmd, apply_alternative=None): filter_cmd = None addl_envs = {} cmdline = cmd.cmdline # Apply alternative if requested. if apply_alternative: alt = Alternatives[apply_alternative] try: (path, executable) = os.path.split(cmdline.split()[0]) filter_cmd = alt.filters[executable] except LookupError: pass for (key, val) in alt.substitutions.items(): cmdline = re.sub("\\b" + re.escape(key) + "\\b", val, cmdline) addl_envs = alt.envs localfile = os.path.join(self.tmpdir, os.path.basename(cmd.file)) if filter_cmd and cmd.expect_success: # Do not apply filter if we expect failure. # This is not quite correct as it does not necessarily need to be # the %INPUT file which we are filtering ... filtered = os.path.join(self.tmpdir, "filtered-%s" % os.path.basename(localfile)) filter = CmdLine("%s %s %s" % (filter_cmd, localfile, filtered), True, 1, "") (success, rc) = self.execute(filter, apply_alternative=None) if not success: return (False, rc) mv = CmdLine("mv %s %s" % (filtered, localfile), True, 1, "") (success, rc) = self.execute(mv, apply_alternative=None) if not success: return (False, rc) self.mgr.testCommand(self, cmd) # Replace special names. if localfile: cmdline = RE_INPUT.sub(localfile, cmdline) cmdline = RE_DIR.sub(self.dir, cmdline) print >>self.log, cmdline, "(expect %s)" % (("failure", "success")[cmd.expect_success]) env = self.prepareEnv(cmd, addl_envs) measure_time = self.measure_time and (Options.update_times or self.utime_base >= 0) (success, rc, utime) = runTestCommandLine(cmdline, measure_time, cwd=self.tmpdir, shell=True, env=env, stderr=self.stderr, stdout=self.stdout) if utime > 0: self.utime += utime if success: if cmd.expect_success: return (True, rc) self.diagmsgs += ["'%s' succeeded unexpectedly (exit code 0)" % cmdline] return (False, 0) else: if not cmd.expect_success: return (True, rc) self.diagmsgs += ["'%s' failed unexpectedly (exit code %s)" % (cmdline, rc)] return (False, rc) def rmTmp(self): try: if os.path.isfile(self.tmpdir): os.remove(self.tmpdir) if os.path.isdir(self.tmpdir): subprocess.call("rm -rf %s 2>/dev/null" % self.tmpdir, shell=True) except OSError, e: error("cannot remove tmp directory %s: %s" % (self.tmpdir, e)) # Prepares the environment for the child processes. def prepareEnv(self, cmd, addl = {}): env = copy.deepcopy(os.environ) # Make sure these don't propagate from parent processes. for i in ["TESTBASE", "DEFAULT_PATH"]: try: del env[i] except KeyError: pass env["TEST_BASELINE"] = self.baseline env["TEST_DIAGNOSTICS"] = self.diag env["TEST_MODE"] = Options.mode.upper() env["TEST_NAME"] = self.name env["TEST_VERBOSE"] = self.verbose env["TEST_PART"] = str(cmd.part) env["TEST_BASE"] = TestBase for (key, val) in addl.items(): env[key.upper()] = val return env def addFiles(self, files): # files is a list of tuple (fname, lines). 
self.files = files # If timing information is requested and available returns a # string that summarizes the time spent for the test. # Otherwise, returns an empty string. def timePostfix(self): if self.utime_base >= 0 and self.utime >= 0: return " (%+.1f%%)" % self.utime_perc else: return "" ### Output handlers. class OutputHandler: def __init__(self, options): """Base class for reporting progress and results to user. We derive several classes from this one, with the one being used depending on which output the users wants. A handler's method are called from test TestMgr and may be called interleaved from different tests. However, the TestMgr locks before each call so that it's guaranteed that two calls don't run concurrently. options: An optparser with the global options. """ self._buffered_output = {} self._options = options def options(self): """Returns the current optparser instance.""" return self._options def threadPrefix(self): """In threaded mode, returns a string with the thread's name in a form suitable to prefix output with. In non-threaded mode, returns the empty string.""" if self.options().threads: return "[%s]" % multiprocessing.current_process().name else: return "" def _output(self, msg, nl=True, file=None): if not file: file = sys.stderr if nl: print >>file, msg else: if msg: print >>file, msg, def output(self, test, msg, nl=True, file=None): """Output one line of output to user. In non-threaded mode, this will be printed out directly to stderr. In threaded-mode, this will be buffered until the test has finished; then all output is printed as a block. This should only be called from other members of this class, or derived classes, not from tests. """ if not self.options().threads: self._output(msg, nl, file) return else: try: self._buffered_output[test.name] += [(msg, nl, file)] except KeyError: self._buffered_output[test.name] = [(msg, nl, file)] def replayOutput(self, test): """Prints out all output buffered in threaded mode by output().""" if not test.name in self._buffered_output: return for (msg, nl, file) in self._buffered_output[test.name]: self._output(msg, nl, file) self._buffered_output[test.name] = [] # Methods to override. def testStart(self, test): """Called just before a test begins.""" pass def testCommand(self, test, cmdline): """Called just before a command line is exected for a trace.""" pass def testSucceeded(self, test, msg): """Called when a test was successful.""" pass def testFailed(self, test, msg): """Called when a test failed.""" pass def testSkipped(self, test, msg): """Called when a test is skipped because its dependencies aren't met.""" pass def finished(self): """Called when all tests have been executed.""" pass class Forwarder(OutputHandler): """ Forwards output to several other handlers. options: An optparser with the global options. handlers: List of output handlers to forward to. 
""" def __init__(self, options, handlers): OutputHandler.__init__(self, options) self._handlers = handlers def testStart(self, test): """Called just before a test begins.""" for h in self._handlers: h.testStart(test) def testCommand(self, test, cmdline): """Called just before a command line is exected for a trace.""" for h in self._handlers: h.testCommand(test, cmdline) def testSucceeded(self, test, msg): """Called when a test was successful.""" for h in self._handlers: h.testSucceeded(test, msg) def testFailed(self, test, msg): """Called when a test failed.""" for h in self._handlers: h.testFailed(test, msg) def testSkipped(self, test, msg): for h in self._handlers: h.testSkipped(test, msg) def replayOutput(self, test): for h in self._handlers: h.replayOutput(test) def finished(self): for h in self._handlers: h.finished() class Standard(OutputHandler): def testStart(self, test): self.output(test, self.threadPrefix(), nl=False) self.output(test, "%s ..." % test.displayName(), nl=False) def testCommand(self, test, cmdline): pass def testSucceeded(self, test, msg): self.output(test, msg) def testFailed(self, test, msg): self.output(test, msg) def testSkipped(self, test, msg): self.output(test, msg) class Console(OutputHandler): """Output handler that writes compact progress report to the console.""" Green = "\033[32m" Red = "\033[31m" Yellow = "\033[33m" Gray = "\033[37m" Normal = "\033[0m" def __init__(self, options): OutputHandler.__init__(self, options) self.sticky = False def testStart(self, test): self.consoleOutput(test, "", False) def testCommand(self, test, cmdline): pass def testSucceeded(self, test, msg): if test.known_failure: msg = Console.Yellow + msg + Console.Normal else: msg = Console.Green + msg + Console.Normal self.consoleOutput(test, msg, False) def testFailed(self, test, msg): if test.known_failure: msg = Console.Yellow + msg + Console.Normal else: msg = Console.Red + msg + Console.Normal self.consoleOutput(test, msg, True) def testSkipped(self, test, msg): msg = Console.Gray + msg + Console.Normal self.consoleOutput(test, msg, False) def finished(self): sys.stdout.write(chr(27) + '[2K') # sys.stdout.write("\r[100%] ") sys.stdout.write("\r") sys.stdout.flush() def consoleOutput(self, test, addl, sticky): line = "[%3d%%] %s ..." % (test.mgr.percentage(), test.displayName()) if addl: line += " " + addl sys.stdout.write(chr(27) + '[2K') sys.stdout.write("\r%s" % line.strip()) if sticky: sys.stdout.write("\n") sys.stdout.flush() class Brief(OutputHandler): """Output handler for producing the brief output format.""" def testStart(self, test): pass def testCommand(self, test, cmdline): pass def testSucceeded(self, test, msg): pass def testFailed(self, test, msg): self.output(test, self.threadPrefix(), nl=False) self.output(test, "%s ... %s" % (test.displayName(), msg)) def testSkipped(self, test, msg): pass class Verbose(OutputHandler): """Output handler for producing the verbose output format.""" def testStart(self, test): self.output(test, self.threadPrefix(), nl=False) self.output(test, "%s ..." % test.displayName()) def testCommand(self, test, cmdline): part = "" if cmdline.part > 1: part = " [part #%d]" % cmdline.part self.output(test, self.threadPrefix(), nl=False) self.output(test, " > %s%s" % (cmdline.cmdline, part)) def testSucceeded(self, test, msg): self.output(test, self.threadPrefix(), nl=False) self.showTestVerbose(test) self.output(test, "... 
%s %s" % (test.displayName(), msg)) def testFailed(self, test, msg): self.output(test, self.threadPrefix(), nl=False) self.showTestVerbose(test) self.output(test, "... %s %s" % (test.displayName(), msg)) def testSkipped(self, test, msg): self.output(test, self.threadPrefix(), nl=False) self.showTestVerbose(test) self.output(test, "... %s %s" % (test.displayName(), msg)) def showTestVerbose(self, test): if not os.path.exists(test.verbose): return for line in open(test.verbose): self.output(test, " > [test-verbose] %s" % line.strip()) class Diag(OutputHandler): def __init__(self, options, all=False, file=None): """Output handler for producing the diagnostic output format. options: An optparser with the global options. all: Print diagnostics also for succeeding tests. file: Output into given file rather than console. """ OutputHandler.__init__(self, options) self._all = all self._file = file def showDiag(self, test): """Generates diagnostics for a test.""" for line in test.diagmsgs: self.output(test, " % " + line, True, self._file) for f in (test.diag, os.path.join(test.tmpdir, ".stderr")): if not f: continue if os.path.isfile(f): self.output(test, " % cat " + os.path.basename(f), True, self._file) for line in open(f): self.output(test, " " + line.strip(), True, self._file) self.output(test, "", True, self._file) if self.options().wait and not self._file: self.output(test, " ...") try: sys.stdin.readline() except KeyboardInterrupt: sys.exit(1) def testCommand(self, test, cmdline): pass def testSucceeded(self, test, msg): if self._all: if self._file: self.output(test, "%s ... %s" % (test.displayName(), msg), True, self._file) self.showDiag(test) def testFailed(self, test, msg): if self._file: self.output(test, "%s ... %s" % (test.displayName(), msg), True, self._file) if not test.known_failure: self.showDiag(test) def testSkipped(self, test, msg): if self._file: self.output(test, "%s ... %s" % (test.displayName(), msg), True, self._file) class SphinxOutput(OutputHandler): def __init__(self, options, all=False, file=None): """Output handler for producing output when running from Sphinx. The main point here is that we save all diagnostic output to $BTEST_RST_OUTPUT. options: An optparser with the global options. """ OutputHandler.__init__(self, options) self._output = None try: self._rst_output = os.environ["BTEST_RST_OUTPUT"] except KeyError: print >>sys.stderr, "warning: environment variable BTEST_RST_OUTPUT not set, will not produce output" self._rst_output = None def testStart(self, test): self._output = None def testCommand(self, test, cmdline): if not self._rst_output: return self._output = "%s#%s" % (self._rst_output, cmdline.part) self._part = cmdline.part def testSucceeded(self, test, msg): pass def testFailed(self, test, msg): if not self._output: return out = open(self._output, "a") print >>out, "" print >>out, ".. code-block:: none " print >>out, "" print >>out, " ERROR executing test '%s' (part %s)" % (test.displayName(), self._part) print >>out, "" for line in test.diagmsgs: print >>out, " % " + line test.diagmsgs = [] for f in (test.diag, os.path.join(test.tmpdir, ".stderr")): if not f: continue if os.path.isfile(f): print >>out, " % cat " + os.path.basename(f) for line in open(f): print >>out, " ", line.strip() print >>out, "" def testSkipped(self, test, msg): pass class XMLReport(OutputHandler): def __init__(self, options, file=None): """Output handler for producing an XML report of test results. options: An optparser with the global options. 
file: Output into given file """ OutputHandler.__init__(self, options) self._file = file self._doc = xml.dom.minidom.Document() self._testsuite = self._doc.createElement("testsuite") self._doc.appendChild(self._testsuite) self._testsuite.setAttribute("timestamp", datetime.now().isoformat()) self._testsuite.setAttribute("hostname", socket.gethostname()) self._start = time.time() self._num_tests = 0 self._num_failures = 0 def testStart(self, test): self._num_tests += 1; def testCommand(self, test, cmdline): pass def makeTestCaseElement(self, test): parts = test.displayName().split('.') if len(parts) > 1: classname = ".".join(parts[:-1]) name = parts[-1] else: classname = parts[0] name = parts[0] e = self._doc.createElement("testcase") e.setAttribute("classname", classname) e.setAttribute("name", name) dur = time.time() - test.start e.setAttribute("time", str(dur)) self._testsuite.appendChild(e) return e def getContext(self, test, context_file): context = "" for line in test.diagmsgs: context += " % " + line + "\n" for f in (test.diag, os.path.join(test.tmpdir, context_file)): if not f: continue if os.path.isfile(f): context += " % cat " + os.path.basename(f) + "\n" for line in open(f): context += " " + line.strip() + "\n" return context def testSucceeded(self, test, msg): self.makeTestCaseElement(test) def testFailed(self, test, msg): self._num_failures += 1; test_case = self.makeTestCaseElement(test) e = self._doc.createElement("failure") e.setAttribute("type", "fail") text_node = self._doc.createTextNode(self.getContext(test, ".stderr")) e.appendChild(text_node) test_case.appendChild(e) def testSkipped(self, test, msg): test_case = self.makeTestCaseElement(test) e = self._doc.createElement("skipped") e.setAttribute("type", "skip") text_node = self._doc.createTextNode(self.getContext(test, ".stderr")) e.appendChild(text_node) test_case.appendChild(e) def finished(self): self._testsuite.setAttribute("time", str(time.time() - self._start)) self._testsuite.setAttribute("tests", str(self._num_tests)) self._testsuite.setAttribute("failures", str(self._num_failures)) self._testsuite.setAttribute("errors", str(0)) print >>self._file, self._doc.toprettyxml(indent=" ") ### Timing measurements. # Base class for all timers. class TimerBase: # Returns true if time measurement are supported by this class on the # current platform. Must be overidden by derived classes. def available(self): raise NotImplementedError("Timer.available not implemented") # Runs a subprocess and measures its execution time. Arguments are as with # runSubprocess. Return value is the same with runTestCommandLine(). This # method must only be called if available() returns True. Must be overidden # by derived classes. def timeSubprocess(self, *args, **kwargs): raise NotImplementedError("Timer.timeSubprocess not implemented") # Linux version of time measurements. Uses "perf". class LinuxTimer(TimerBase): def __init__(self): self.perf = getOption("PerfPath", which("perf")) def available(self): if not platform() == "Linux": return False if not self.perf or not os.path.exists(self.perf): return False # Make sure it works. 
(success, rc) = runSubprocess("%s stat -o /dev/null true 2>/dev/null" % self.perf, shell=True) return success and rc == 0 def timeSubprocess(self, *args, **kwargs): assert self.perf cargs = args ckwargs = kwargs targs = [self.perf, "stat", "-o", ".timing", "-x", " ", "-e", "instructions", "sh", "-c"] targs += [" ".join(cargs)] cargs = [targs] del ckwargs["shell"] (success, rc) = runSubprocess(*cargs, **ckwargs) utime = -1 try: cwd = kwargs["cwd"] if "cwd" in kwargs else "." for line in open(os.path.join(cwd, ".timing")): if "instructions" in line and not "not supported" in line: try: m = line.split() utime = int(m[0]) except ValueError: pass except IOError: pass return (success, rc, utime) # Walk the given directory and return all test files. def findTests(paths, output_handler): tests = [] ignore_files = getOption("IgnoreFiles", "").split() ignore_dirs = getOption("IgnoreDirs", "").split() for path in paths: ignores = [os.path.join(path, dir) for dir in ignore_dirs] m = RE_PART.match(path) if m: error("Do not specify files with part numbers directly, use the base test name (%s)" % path) if os.path.isfile(path): tests += readTestFile(path, output_handler) # See if there are more parts. for part in glob.glob("%s#*" % path): tests += readTestFile(part, output_handler) elif os.path.isdir(path): for (dirpath, dirnames, filenames) in os.walk(path): ign = os.path.join(dirpath, ".btest-ignore") if os.path.isfile(os.path.join(ign)): del dirnames[0:len(dirnames)] continue for file in filenames: for gl in ignore_files: if fnmatch.fnmatch(file, gl): break else: tests += readTestFile(os.path.join(dirpath, file), output_handler) # Don't recurse into these. for (dir, path) in [(dir, os.path.join(dirpath, dir)) for dir in dirnames]: for skip in ignores: if path == skip: dirnames.remove(dir) else: # See if we have test(s) named like this in our configured set. found = False for t in Config.configured_tests: if t and path == t.name: tests += [t] found = True if not found: # See if there are parts. for part in glob.glob("%s#*" % path): tests += readTestFile(part, output_handler) found = True if not found: error("cannot read %s" % path) return tests # Merge parts belonging to the same test into one. def mergeTestParts(tests): def key(t): if t: return (t.basename, t.number, t.part) else: return t out = {} for t in sorted(tests, key=key): if not t: continue try: other = out[t.name] assert t.part != other.part out[t.name].mergePart(t) except KeyError: out[t.name] = t return sorted([t for t in out.values()], key=key) # Read the given test file and instantiate one or more tests from it. 
def readTestFile(filename, output_handler): def newTest(content, previous): if not previous: t = Test(filename, output_handler) if t.parse(content, filename): return t else: return None else: return previous.clone(content) if os.path.basename(filename) == ".btest-ignore": return [] try: input = open(filename) except IOError, e: error("cannot read test file: %s" % e) tests = [] files = [] content = [] previous = None file = (None, []) state = "test" for line in input: if state == "test": m = RE_START_FILE.search(line) if m: state = "file" file = (m.group(1), []) continue m = RE_END_FILE.search(line) if m: error("%s: unexpected %sEND-FILE" % (filename, CommandPrefix)) m = RE_START_NEXT_TEST.search(line) if not m: content += [line] continue t = newTest(content, previous) if not t: return [] tests += [t] previous = t content = [] elif state == "file": m = RE_END_FILE.search(line) if m: state = "test" files += [file] file = (None, []) continue file = (file[0], file[1] + [line]) else: error("internal: unknown state %s" % state) if state == "file": files += [file] input.close() tests += [newTest(content, previous)] for t in tests: if t: t.addFiles(files) return tests def jOption(default): def func(option, opt_str, value, parser): if parser.rargs and not parser.rargs[0].startswith('-'): try: val = int(parser.rargs[0]) parser.rargs.pop(0) except ValueError: val = default else: val = default setattr(parser.values, option.dest, val) return func ### Main optparser = optparse.OptionParser(usage="%prog [options] ", version=VERSION) optparser.add_option("-U", "--update-baseline", action="store_const", dest="mode", const="UPDATE", help="create a new baseline from the tests' output") optparser.add_option("-u", "--update-interactive", action="store_const", dest="mode", const="UPDATE_INTERACTIVE", help="interactively asks whether to update baseline for a failed test") optparser.add_option("-d", "--diagnostics", action="store_true", dest="diag", default=False, help="show diagnostic output for failed tests") optparser.add_option("-D", "--diagnostics-all", action="store_true", dest="diagall", default=False, help="show diagnostic output for ALL tests") optparser.add_option("-f", "--file-diagnostics", action="store", type="string", dest="diagfile", default="", help="write diagnostic output for failed tests into file; if file exists, it is overwritten") optparser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="show commands as they are executed") optparser.add_option("-w", "--wait", action="store_true", dest="wait", default=False, help="wait for after each failed (with -d) or all (with -D) tests") optparser.add_option("-b", "--brief", action="store_true", dest="brief", default=False, help="outputs only failed tests") optparser.add_option("-c", "--config", action="store", type="string", dest="config", default=ConfigDefault, help="configuration file") optparser.add_option("-t", "--tmp-keep", action="store_true", dest="tmps", default=False, help="do not delete tmp files created for running tests") optparser.add_option("-j", "--jobs", action="callback", callback=jOption(multiprocessing.cpu_count()), dest="threads", default=0, help="number of threads to run tests in simultaneously; 0 disables threading") optparser.add_option("-g", "--groups", action="store", type="string", dest="groups", default="", help="execute only tests of given comma-separated list of groups") optparser.add_option("-r", "--rerun", action="store_true", dest="rerun", default=False, help="execute commands for 
tests that failed last time") optparser.add_option("-q", "--quiet", action="store_true", dest="quiet", default=False, help="suppress information output other than about failed tests") optparser.add_option("-x", "--xml", action="store", type="string", dest="xmlfile", default="", help="write a report of test results in JUnit XML format to file; if file exists, it is overwritten") optparser.add_option("-a", "--alternative", action="store", type="string", dest="alternatives", default=None, help="activate given alternative") optparser.add_option("-S", "--sphinx", action="store_true", dest="sphinx", default=False, help="indicates that we're running from inside Sphinx; for internal purposes") optparser.add_option("-T", "--update-times", action="store_true", dest="update_times", default=False, help="create a new timing baseline for tests being measured") optparser.set_defaults(mode="TEST") (Options, args) = optparser.parse_args() if not os.path.exists(Options.config): error("configuration file '%s' not found" % Options.config) (basedir, fname) = os.path.split(Options.config) if basedir: os.chdir(basedir) TestBase = os.getcwd() defaults = os.environ defaults["testbase"] = TestBase defaults["default_path"] = os.environ["PATH"] Config = ConfigParser.ConfigParser(defaults) Config.read(fname) if Options.sphinx: Options.quiet = True if Options.quiet: Options.brief = True # Determine output handlers to use. output_handlers = [] if Options.verbose: output_handlers += [Verbose(Options, )] elif Options.brief: output_handlers += [Brief(Options, )] else: if sys.stdout.isatty(): output_handlers += [Console(Options, )] else: output_handlers += [Standard(Options, )] if Options.diagall: output_handlers += [Diag(Options, True, None)] elif Options.diag: output_handlers += [Diag(Options, False, None)] if Options.diagfile: try: diagfile = open(Options.diagfile, "w", 1) output_handlers += [Diag(Options, Options.diagall, diagfile)] except IOError, e: print >>sys.stderr, "cannot open %s: %s" (Options.diagfile, e) if Options.sphinx: output_handlers += [SphinxOutput(Options)] if Options.xmlfile: try: xmlfile = open(Options.xmlfile, "w", 1) output_handlers += [XMLReport(Options, xmlfile)] except IOError, e: print >>sys.stderr, "cannot open %s: %s" (Options.xmlfile, e) output_handler = Forwarder(Options, output_handlers) # Determine Timer to use. Timer = None if platform() == "Linux": t = LinuxTimer() if t.available(): Timer = t if Options.update_times and not Timer: warning("unable to create timing baseline because timer is not available") # Evaluate other command line options. 
if Config.has_section("environment"): for (name, value) in Config.items("environment"): os.environ[name.upper()] = value Alternatives = {} if Options.alternatives: Options.alternatives = [alt.strip() for alt in Options.alternatives.split(",")] for tag in Options.alternatives: if tag == "-": continue a = Alternative(tag) try: for (name, value) in Config.itemsNoDefaults("filter-%s" % tag): if not name.startswith("__"): a.filters[name] = value except ConfigParser.NoSectionError: pass try: for (name, value) in Config.itemsNoDefaults("substitution-%s" % tag): if not name.startswith("__"): a.substitutions[name] = value except ConfigParser.NoSectionError: pass try: for (name, value) in Config.itemsNoDefaults("environment-%s" % tag): if not name.startswith("__"): a.envs[name] = value except ConfigParser.NoSectionError: pass Alternatives[tag] = a CommandPrefix = getOption("CommandPrefix", "@TEST-") RE_INPUT = re.compile("%INPUT") RE_DIR = re.compile("%DIR") RE_ENV = re.compile("\$\{(\w+)\}") RE_PART = re.compile("^(.*)#([0-9]+)$") RE_IGNORE = re.compile(CommandPrefix + "IGNORE") RE_START_NEXT_TEST = re.compile(CommandPrefix + "START-NEXT") RE_START_FILE = re.compile(CommandPrefix + "START-FILE +([^\n ]*)") RE_END_FILE = re.compile(CommandPrefix + "END-FILE") # Commands as tuple (tag, regexp, more-than-one-is-ok, optional, group-main, group-add) RE_EXEC = ("exec", re.compile(CommandPrefix + "EXEC(-FAIL)?: *(.*)"), True, False, 2, 1) RE_REQUIRES = ("requires", re.compile(CommandPrefix + "REQUIRES: *(.*)"), True, True, 1, -1) RE_GROUP = ("group", re.compile(CommandPrefix + "GROUP: *(.*)"), True, True, 1, -1) RE_SERIALIZE = ("serialize", re.compile(CommandPrefix + "SERIALIZE: *(.*)"), False, True, 1, -1) RE_INCLUDE_ALTERNATIVE = ("alternative", re.compile(CommandPrefix + "ALTERNATIVE: *(.*)"), True, True, 1, -1) RE_IGNORE_ALTERNATIVE = ("not-alternative", re.compile(CommandPrefix + "NOT-ALTERNATIVE: *(.*)"), True, True, 1, -1) RE_COPY_FILE = ("copy-file", re.compile(CommandPrefix + "COPY-FILE: *(.*)"), True, True, 1, -1) RE_KNOWN_FAILURE = ("known-failure", re.compile(CommandPrefix + "KNOWN-FAILURE"), False, True, -1, -1) RE_MEASURE_TIME = ("measure-time", re.compile(CommandPrefix + "MEASURE-TIME"), False, True, -1, -1) Commands = (RE_EXEC, RE_REQUIRES, RE_GROUP, RE_SERIALIZE, RE_INCLUDE_ALTERNATIVE, RE_IGNORE_ALTERNATIVE, RE_COPY_FILE, RE_KNOWN_FAILURE, RE_MEASURE_TIME) StateFile = os.path.abspath(getOption("StateFile", os.path.join(defaults["testbase"], ".btest.failed.dat"))) TmpDir = os.path.abspath(getOption("TmpDir", os.path.join(defaults["testbase"], ".tmp"))) BaselineDir = os.path.abspath(getOption("BaselineDir", os.path.join(defaults["testbase"], "Baseline"))) Initializer = getOption("Initializer", "") BaselineTimingDir = os.path.abspath(getOption("TimingBaselineDir", os.path.join(BaselineDir, "_Timing"))) Finalizer = getOption("Finalizer", "") PartFinalizer = getOption("PartFinalizer", "") Config.configured_tests = [] testdirs = getOption("TestDirs", "").split() if testdirs: Config.configured_tests = findTests(testdirs, output_handler) if args: tests = findTests(args, output_handler) else: if Options.rerun: (success, tests) = readStateFile() if success: if not tests: output("no tests failed last time") sys.exit(0) else: warning("cannot read state file, executing all tests") tests = Config.configured_tests else: tests = Config.configured_tests if Options.groups: Options.groups = set(Options.groups.split(",")) def rightGroup(t): if t.groups & Options.groups: return True if "-" in Options.groups 
and not t.groups: return True return False tests = [t for t in tests if rightGroup(t)] if not tests: output("no tests to execute") sys.exit(0) mkdir(BaselineDir) mkdir(TmpDir) tests = mergeTestParts(tests) try: # Building our own path to avoid "error: AF_UNIX path too long" on # some platforms. See BIT-862. addr = os.path.join("%s", "btest-socket-%d") % (tempfile.gettempdir(), os.getpid()) (succeeded, failed, skipped) = TestManager(address=addr).run(copy.deepcopy(tests), output_handler) total = succeeded + failed + skipped except KeyboardInterrupt: print >>sys.stderr, "Aborted." sys.exit(1) output_handler.finished() if failed > 0: skipped = (", %d skipped" % skipped) if skipped > 0 else "" if not Options.quiet: output("%d of %d test%s failed%s" % (failed, total, "s" if total > 1 else "", skipped)) sys.exit(1) else: if not Options.quiet: output("all %d tests successful" % total) sys.exit(0) btest-0.54/testing/0000775002342100234210000000000012523041075015315 5ustar johannajohanna00000000000000btest-0.54/testing/.gitignore0000664002342100234210000000004012506370126017302 0ustar johannajohanna00000000000000.tmp .btest.failed.dat diag.log btest-0.54/testing/Scripts/0000775002342100234210000000000012523041075016744 5ustar johannajohanna00000000000000btest-0.54/testing/Scripts/diff-remove-abspath0000775002342100234210000000017412506370126022522 0ustar johannajohanna00000000000000#! /usr/bin/env bash # # Replace absolute paths with the basename. sed 's#/\([^/]\{1,\}/\)\{1,\}\([^/]\{1,\}\)#<...>/\2#g' btest-0.54/testing/Scripts/strip-test-base0000775002342100234210000000014212506370126021720 0ustar johannajohanna00000000000000#! /usr/bin/env bash # dir=`dirname $0` testbase=`cd $dir/.. && pwd` sed "s#${testbase}#<...>#g" btest-0.54/testing/Scripts/dummy-script0000664002342100234210000000006012506370126021323 0ustar johannajohanna00000000000000 # A dummy file used with the copy-file script. btest-0.54/testing/Scripts/test-filter0000775002342100234210000000011212506370126021131 0ustar johannajohanna00000000000000# Test filter used by the alternatives-filter test. sed 's/E/*/g' <$1 >$2 btest-0.54/testing/tests/0000775002342100234210000000000012523041075016457 5ustar johannajohanna00000000000000btest-0.54/testing/tests/start-file.test0000664002342100234210000000045612506370126021442 0ustar johannajohanna00000000000000# %TEST-EXEC: btest %INPUT # %TEST-EXEC: btest-diff output # @TEST-EXEC: awk -f %INPUT >../../output # @TEST-EXEC: awk -f %INPUT >../../output { lines += 1; } END { print lines; } @TEST-START-FILE foo.dat 1 2 3 @TEST-END-FILE @TEST-START-FILE bar.dat A B C D @TEST-END-FILE btest-0.54/testing/tests/known-failure.btest0000664002342100234210000000026412506370126022310 0ustar johannajohanna00000000000000# %TEST-EXEC-FAIL: btest %INPUT >output 2>&1 # %TEST-EXEC: btest-diff output @TEST-EXEC: echo Hello, World! @TEST-EXEC: exit 1 @TEST-KNOWN-FAILURE: This test is expected to fail. btest-0.54/testing/tests/parts-error-start-next.test0000664002342100234210000000043712506370126023756 0ustar johannajohanna00000000000000# %TEST-EXEC-FAIL: btest test >output 2>&1 # %TEST-EXEC: btest-diff output # %TEST-START-FILE test @TEST-EXEC: echo "Hello, world!." >>../../output @TEST-START-NEXT # %TEST-END-FILE # %TEST-START-FILE test#2 @TEST-EXEC: echo "Hello, world!. 
Again" >>../../output # %TEST-END-FILE btest-0.54/testing/tests/start-next.test0000664002342100234210000000036512506370126021500 0ustar johannajohanna00000000000000# %TEST-EXEC: btest %INPUT # %TEST-EXEC: btest-diff output @TEST-EXEC: cat %INPUT | wc -c | awk '{print $1}' >>../../output This is the first test input in this file. # @TEST-START-NEXT ... and the second. # @TEST-START-NEXT ... and the third. btest-0.54/testing/tests/xml.test0000664002342100234210000000066212506370126020167 0ustar johannajohanna00000000000000# %TEST-EXEC-FAIL: btest -d -x output.raw.xml t1 t2 t3 # %TEST-EXEC: cat output.raw.xml | sed 's/hostname[^"]*"[^"]*"/XXX/g' | sed 's/time[^"]*"[^"]*"/XXX/g' | sed '/^$/d' | sed "s/> />~/" | tr '~' '\n' | sed 's/^[ ]*//' >output.xml # %TEST-EXEC: btest-diff output.xml %TEST-START-FILE t1 @TEST-EXEC: exit 0 %TEST-END-FILE %TEST-START-FILE t2 @TEST-EXEC: exit 1 %TEST-END-FILE %TEST-START-FILE t3 @TEST-EXEC: exit 0 %TEST-END-FILE btest-0.54/testing/tests/parts-finalizer.test0000664002342100234210000000102312506370126022471 0ustar johannajohanna00000000000000# %TEST-EXEC: btest t/test # %TEST-EXEC: btest-diff output # %TEST-START-FILE btest.cfg [btest] TestDirs = t TmpDir = .tmp BaselineDir = Baseline PartFinalizer = echo Finalizer $TEST_PART >>../../output # %TEST-END-FILE # %TEST-START-FILE t/test @TEST-EXEC: echo "Hello, world!." >>../../output # %TEST-END-FILE # %TEST-START-FILE t/test#2 @TEST-EXEC: echo "Hello, world! Again." >>../../output # %TEST-END-FILE # %TEST-START-FILE t/test#3 @TEST-EXEC: echo "Hello, world! Again. Again." >>../../output # %TEST-END-FILE btest-0.54/testing/tests/copy-file.test0000664002342100234210000000025612506370126021255 0ustar johannajohanna00000000000000# %TEST-EXEC: btest %INPUT @TEST-COPY-FILE: ${ENV2}/../../Scripts/dummy-script @TEST-EXEC: test -e dummy-script @TEST-EXEC: cmp dummy-script %DIR/../../Scripts/dummy-script btest-0.54/testing/tests/parts-error-part.test0000664002342100234210000000025212506370126022606 0ustar johannajohanna00000000000000# %TEST-EXEC-FAIL: btest test#3 >output 2>&1 # %TEST-EXEC: btest-diff output # %TEST-START-FILE test @TEST-EXEC: echo "Hello, world!." >>../../output # %TEST-END-FILE btest-0.54/testing/tests/canonifier.test0000664002342100234210000000057112506370126021503 0ustar johannajohanna00000000000000# %TEST-EXEC: chmod +x test-canonifier # %TEST-EXEC: btest -d %INPUT %TEST-START-FILE Baseline/canonifier/output ABC 123 DEF %TEST-END-FILE %TEST-START-FILE test-canonifier sed 's/[0-9][0-9][0-9]/XXX/g' %TEST-END-FILE @TEST-EXEC: echo ABC 890 DEF >output @TEST-EXEC-FAIL: btest-diff output @TEST-EXEC: TEST_DIFF_CANONIFIER="sh -c ../../test-canonifier" btest-diff output btest-0.54/testing/tests/start-next-naming.test0000664002342100234210000000060512506370126022744 0ustar johannajohanna00000000000000# %TEST-EXEC: btest -d %INPUT # # %TEST-START-FILE Baseline/start-next-naming/output X 1 # %TEST-END-FILE # %TEST-START-FILE Baseline/start-next-naming-2/output X 2 # %TEST-END-FILE # %TEST-START-FILE Baseline/start-next-naming-3/output X 3 # %TEST-END-FILE @TEST-EXEC: cat %INPUT | grep '^X.[0-9]' >output @TEST-EXEC: btest-diff output X 1 # @TEST-START-NEXT X 2 # @TEST-START-NEXT X 3 btest-0.54/testing/tests/tmps.test0000664002342100234210000000016012506370126020343 0ustar johannajohanna00000000000000# %TEST-EXEC: btest -t %INPUT # %TEST-EXEC: test -f .tmp/tmps/output @TEST-EXEC: echo "Hello, World!" 
>output btest-0.54/testing/tests/diag-all.test0000664002342100234210000000021612506370126021034 0ustar johannajohanna00000000000000# %TEST-EXEC: btest -D %INPUT >output 2>&1 # %TEST-EXEC: btest-diff output @TEST-EXEC: echo Hello, World! @TEST-EXEC: echo Stderr output >&2 btest-0.54/testing/tests/sphinx/0000775002342100234210000000000012523041075017770 5ustar johannajohanna00000000000000btest-0.54/testing/tests/sphinx/rst-cmd.sh0000775002342100234210000000070312506370126021703 0ustar johannajohanna00000000000000# %TEST-EXEC: bash %INPUT %TEST-START-FILE file.txt Example file. Line 2 %TEST-END-FILE unset TEST_NAME btest-rst-cmd echo Hello >>output btest-rst-cmd -o echo "Hello 2, no command" >>output btest-rst-cmd -c "Different command" echo "Hello 3, no command" >>output btest-rst-cmd -d echo "Hello 4, no output" >>output btest-rst-cmd -f 'tr e X' echo "Hello 5, filter" >>output btest-rst-cmd -r file.txt echo "Hello 6, file" >>output btest-diff output btest-0.54/testing/tests/sphinx/run-sphinx0000664002342100234210000000036112506370126022031 0ustar johannajohanna00000000000000# %TEST-REQUIRES: which sphinx-build # # %TEST-EXEC: cp -r %DIR/../../../examples/sphinx/* . # %TEST-EXEC: make clean && make # %TEST-EXEC: sed -n '/id=.testing/,/id=.indices/p' _build/html/index.html >output # %TEST-EXEC: btest-diff output btest-0.54/testing/tests/basic-succeed.test0000664002342100234210000000011612506370126022053 0ustar johannajohanna00000000000000# %TEST-EXEC: btest %INPUT @TEST-EXEC: echo Hello, World! @TEST-EXEC: exit 0 btest-0.54/testing/tests/known-failure-succeeds.btest0000664002342100234210000000027512506370126024106 0ustar johannajohanna00000000000000# %TEST-EXEC: btest %INPUT >output 2>&1 # %TEST-EXEC: btest-diff output @TEST-EXEC: echo Hello, World! @TEST-EXEC: exit 0 @TEST-KNOWN-FAILURE: This test is expected to fail, but succeeds. btest-0.54/testing/tests/parts-glob.test0000664002342100234210000000076512506370126021445 0ustar johannajohanna00000000000000# %TEST-EXEC: btest t/test # %TEST-EXEC: btest t.test # %TEST-EXEC: btest-diff output # %TEST-START-FILE btest.cfg [btest] TestDirs = t TmpDir = .tmp BaselineDir = Baseline # %TEST-END-FILE # %TEST-START-FILE t/test @TEST-EXEC: echo "Hello, world!." >>../../output # %TEST-END-FILE # %TEST-START-FILE t/test#2 @TEST-EXEC: echo "Hello, world! Again." >>../../output # %TEST-END-FILE # %TEST-START-FILE t/test#3 @TEST-EXEC: echo "Hello, world! Again. Again." 
>>../../output # %TEST-END-FILE btest-0.54/testing/tests/alternatives-environment.test0000664002342100234210000000032412506370126024425 0ustar johannajohanna00000000000000# %TEST-EXEC: btest %INPUT >>output 2>&1 # %TEST-EXEC: btest -a foo %INPUT >>output 2>&1 # %TEST-EXEC: btest-diff output # %TEST-EXEC: btest-diff child-output @TEST-EXEC: echo "Foo: ${FOO}" >>../../child-output btest-0.54/testing/tests/groups.test0000664002342100234210000000105612506370126020704 0ustar johannajohanna00000000000000# %TEST-EXEC: btest -g G1 t1 t2 t3 t4 >>output 2>&1 # %TEST-EXEC: btest -g G1,G2 t1 t2 t3 t4 >>output 2>&1 # %TEST-EXEC: btest -g - t1 t2 t3 t4 >>output 2>&1 # %TEST-EXEC: btest -g G1,- t1 t2 t3 t4 >>output 2>&1 # %TEST-EXEC: btest t1 t2 t3 t4 >>output 2>&1 # %TEST-EXEC: btest-diff output %TEST-START-FILE t1 @TEST-GROUP: G1 @TEST-EXEC: exit 0 %TEST-END-FILE %TEST-START-FILE t2 @TEST-GROUP: G1 @TEST-EXEC: exit 0 %TEST-END-FILE %TEST-START-FILE t3 @TEST-GROUP: G2 @TEST-EXEC: exit 0 %TEST-END-FILE %TEST-START-FILE t4 @TEST-EXEC: exit 0 %TEST-END-FILE btest-0.54/testing/tests/quiet.test0000664002342100234210000000013712506370126020513 0ustar johannajohanna00000000000000# %TEST-EXEC: btest -q %INPUT >output 2>&1 # %TEST-EXEC: btest-diff output @TEST-EXEC: exit 0 btest-0.54/testing/tests/diag-file.test0000664002342100234210000000025212506370126021203 0ustar johannajohanna00000000000000# %TEST-EXEC-FAIL: btest -f diag %INPUT >output 2>&1 # %TEST-EXEC: btest-diff output # %TEST-EXEC: btest-diff diag @TEST-EXEC: echo Stderr output >&2 @TEST-EXEC: exit 1 btest-0.54/testing/tests/verbose.test0000664002342100234210000000015612506370126021032 0ustar johannajohanna00000000000000# %TEST-EXEC: btest -v %INPUT >output 2>&1 # %TEST-EXEC: btest-diff output @TEST-EXEC: echo "Hello, World!" btest-0.54/testing/tests/parts-skipping.tests0000664002342100234210000000054312506370126022523 0ustar johannajohanna00000000000000# %TEST-EXEC: btest test # %TEST-EXEC: btest-diff output # %TEST-START-FILE test @TEST-EXEC: echo "Hello, world!." >>../../output # %TEST-END-FILE # %TEST-START-FILE test#67 @TEST-EXEC: echo "Hello, world! Again." >>../../output # %TEST-END-FILE # %TEST-START-FILE test#89 @TEST-EXEC: echo "Hello, world! Again. Again." >>../../output # %TEST-END-FILE btest-0.54/testing/tests/requires.test0000664002342100234210000000067712506370126021234 0ustar johannajohanna00000000000000# %TEST-EXEC: btest t1 t2 t3 t4 >output 2>&1 # %TEST-EXEC: btest-diff output %TEST-START-FILE t1 @TEST-REQUIRES: exit 0 @TEST-EXEC: echo Foo1 %TEST-END-FILE %TEST-START-FILE t2 @TEST-REQUIRES: exit 1 @TEST-EXEC: echo Foo2 %TEST-END-FILE %TEST-START-FILE t3 @TEST-REQUIRES: exit 0 @TEST-REQUIRES: exit 1 @TEST-EXEC: echo Foo3 %TEST-END-FILE %TEST-START-FILE t4 @TEST-REQUIRES: exit 0 @TEST-REQUIRES: exit 0 @TEST-EXEC: echo Foo4 %TEST-END-FILE btest-0.54/testing/tests/environment.test0000664002342100234210000000123612506370126021731 0ustar johannajohanna00000000000000# %TEST-EXEC: btest -d %INPUT # %TEST-EXEC: btest -U %INPUT # %TEST-EXEC: btest-diff output @TEST-EXEC: echo ${ENV1} >>../../output @TEST-EXEC: echo ${ENV2} >1 @TEST-EXEC: set >>1 @TEST-EXEC: test "${ENV2}" = `cd ../.. 
&& pwd` && echo "testbase is correct" >>../../output @TEST-EXEC: echo ${ENV3} >>../../output @TEST-EXEC: echo ${TEST_DIAGNOSTICS} | strip-test-base >>../../output @TEST-EXEC: echo ${TEST_MODE} >>../../output @TEST-EXEC: echo ${TEST_BASELINE} | strip-test-base >>../../output @TEST-EXEC: echo ${TEST_NAME} >>../../output @TEST-EXEC: echo ${TEST_VERBOSE} | strip-test-base >>../../output @TEST-EXEC: echo ${TEST_BASE} | strip-test-base >>../../output btest-0.54/testing/tests/exit-codes.test0000664002342100234210000000065212506370126021432 0ustar johannajohanna00000000000000# %TEST-EXEC-FAIL: btest t1 t2 t3 >>out2 2>&1 # %TEST-EXEC: btest-diff out1 # %TEST-EXEC: btest-diff out2 %TEST-START-FILE t1 @TEST-EXEC: echo 1.1 >>../../out1 @TEST-EXEC: exit 100 @TEST-EXEC: echo 1.2 >>../../out1 %TEST-END-FILE %TEST-START-FILE t2 @TEST-EXEC: echo 2.1 >>../../out1 @TEST-EXEC: exit 200 @TEST-EXEC: echo 2.2 >>../../out1 %TEST-END-FILE %TEST-START-FILE t3 @TEST-EXEC: echo 3.1 >>../../out1 %TEST-END-FILE btest-0.54/testing/tests/diag.test0000664002342100234210000000051212506370126020265 0ustar johannajohanna00000000000000# %TEST-EXEC-FAIL: btest -d %INPUT 2>>raw # %TEST-EXEC: mkdir Baseline/diag # %TEST-EXEC: echo Wrong baseline >Baseline/diag/output # %TEST-EXEC-FAIL: btest -d %INPUT 2>>raw # %TEST-EXEC: cat raw | egrep -v '\+\+\+|---' >output # %TEST-EXEC: btest-diff output @TEST-EXEC: echo Hello, World! >output @TEST-EXEC: btest-diff output btest-0.54/testing/tests/finalizer.test0000664002342100234210000000034012506370126021343 0ustar johannajohanna00000000000000# %TEST-EXEC: btest -t %INPUT # %TEST-EXEC: test -f finalized @TEST-EXEC: rm -f ../../finalized %TEST-START-FILE btest.cfg [btest] TmpDir = .tmp BaselineDir = Baseline Finalizer = touch ../../finalized %TEST-END-FILE btest-0.54/testing/tests/ignore.test0000664002342100234210000000127012506370126020646 0ustar johannajohanna00000000000000# %TEST-EXEC: btest -t 2>&1 | sort >output # %TEST-EXEC: btest-diff output @TEST-EXEC: test -f ../../initialized %TEST-START-FILE all/t1 @TEST-EXEC: exit 0 %TEST-END-FILE %TEST-START-FILE all/t2 @TEST-EXEC: exit 0 %TEST-END-FILE %TEST-START-FILE all/sub/t3 @TEST-EXEC: exit 0 %TEST-END-FILE %TEST-START-FILE all/not-this-one/t4 @TEST-EXEC: exit 0 %TEST-END-FILE %TEST-START-FILE all/sub/neither-this-one/t5 @TEST-EXEC: exit 0 %TEST-END-FILE %TEST-START-FILE all/not-this-one.txt @TEST-EXEC: exit 0 %TEST-END-FILE %TEST-START-FILE btest.cfg [btest] TmpDir = .tmp BaselineDir = Baseline TestDirs = all IgnoreDirs = not-this-one sub/neither-this-one IgnoreFiles = *.txt %TEST-END-FILE btest-0.54/testing/tests/alternatives-filter.test0000664002342100234210000000031512506370126023346 0ustar johannajohanna00000000000000# %TEST-EXEC: btest %INPUT >>output 2>&1 # %TEST-EXEC: btest -a foo %INPUT >>output 2>&1 # %TEST-EXEC: btest-diff output # %TEST-EXEC: btest-diff child-output @TEST-EXEC: cat %INPUT >>../../child-output btest-0.54/testing/tests/parts.tests0000664002342100234210000000065512506370126020705 0ustar johannajohanna00000000000000# %TEST-EXEC: btest test # %TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output # %TEST-START-FILE test @TEST-EXEC: echo "Hello, world! (%INPUT)" >>../../output # %TEST-END-FILE # %TEST-START-FILE test#2 @TEST-EXEC: echo "Hello, world! Again. (%INPUT)" >>../../output # %TEST-END-FILE # %TEST-START-FILE test#3 @TEST-EXEC: echo "Hello, world! Again. Again. 
(%INPUT)" >>../../output # %TEST-END-FILE btest-0.54/testing/tests/initializer.test0000664002342100234210000000035212506370126021706 0ustar johannajohanna00000000000000# %TEST-EXEC: btest -t %INPUT # %TEST-EXEC: test -f initialized @TEST-EXEC: test -f ../../initialized %TEST-START-FILE btest.cfg [btest] TmpDir = .tmp BaselineDir = Baseline Initializer = touch ../../initialized %TEST-END-FILE btest-0.54/testing/tests/alternatives-substitution.test0000664002342100234210000000037012506370126024636 0ustar johannajohanna00000000000000# %TEST-EXEC: btest %INPUT >>output 2>&1 # %TEST-EXEC: btest -a foo %INPUT >>output 2>&1 # %TEST-EXEC: btest-diff output # %TEST-EXEC: btest-diff child-output @TEST-EXEC: printf 'World!' >>../../child-output @TEST-EXEC: echo >>../../child-output btest-0.54/testing/tests/basic-fail.test0000664002342100234210000000012312506370126021351 0ustar johannajohanna00000000000000# %TEST-EXEC-FAIL: btest %INPUT @TEST-EXEC: echo Hello, World! @TEST-EXEC: exit 1 btest-0.54/testing/tests/diff.test0000664002342100234210000000031212506370126020267 0ustar johannajohanna00000000000000# %TEST-EXEC-FAIL: btest %INPUT # %TEST-EXEC: btest -U %INPUT # %TEST-EXEC: test -f Baseline/diff/output # %TEST-EXEC: btest %INPUT @TEST-EXEC: echo Hello, World! >output @TEST-EXEC: btest-diff output btest-0.54/testing/tests/rerun.test0000664002342100234210000000043712506370126020522 0ustar johannajohanna00000000000000# %TEST-EXEC-FAIL: btest t1 t2 t3 >>output 2>&1 # %TEST-EXEC-FAIL: btest -r >>output 2>&1 # %TEST-EXEC: btest-diff output %TEST-START-FILE t1 @TEST-EXEC: exit 0 %TEST-END-FILE %TEST-START-FILE t2 @TEST-EXEC: exit 1 %TEST-END-FILE %TEST-START-FILE t3 @TEST-EXEC: exit 0 %TEST-END-FILE btest-0.54/testing/tests/macros.test0000664002342100234210000000010712506370126020645 0ustar johannajohanna00000000000000# %TEST-EXEC: btest -d %INPUT @TEST-EXEC: cmp %DIR/macros.test %INPUT btest-0.54/testing/tests/measure-time.tests0000644002342100234210000000206712522743240022145 0ustar johannajohanna00000000000000# %TEST-REQUIRES: which perf # %TEST-REQUIRES: perf stat -o /dev/null true 2> /dev/null # %TEST-REQUIRES: test "`uname`" == "Linux" # %TEST-REQUIRES: perf stat -x " " -e instructions true 2>&1 | grep -vq "not supported" # %TEST-EXEC: btest -D %INPUT >>output 2>&1 # %TEST-EXEC: echo ----- >>output # %TEST-EXEC: test '!' 
-e Baseline/_Timing # %TEST-EXEC: btest -DT %INPUT >>output 2>&1 # %TEST-EXEC: echo ----- >>output # %TEST-EXEC: test -d Baseline/_Timing # %TEST-EXEC: btest -D %INPUT >>output 2>&1 # %TEST-EXEC: echo ----- >>output # %TEST-EXEC: echo measure-time 42 >`echo Baseline/_Timing/*` # %TEST-EXEC-FAIL: btest -D %INPUT >>output 2>&1 # %TEST-EXEC: echo ----- >>output # %TEST-EXEC: btest -DT %INPUT >>output 2>&1 # %TEST-EXEC: echo ----- >>output # %TEST-EXEC: btest -D %INPUT >>output 2>&1 # %TEST-EXEC: echo ----- >>output # %TEST-EXEC: cat output | sed 's/ ([-+%.0-9]*)/ (+xx.x%)/g' >tmp # %TEST-EXEC: mv tmp output # %TEST-EXEC: btest-diff output @TEST-MEASURE-TIME @TEST-EXEC: awk 'BEGIN { for ( i = 1; i < 100000; i++ ) x += i; print x; }; done' output 2>&1 # %TEST-EXEC: btest-diff output @TEST-EXEC: exit 0 btest-0.54/testing/tests/threads.test0000664002342100234210000000227512506370126021023 0ustar johannajohanna00000000000000# %TEST-EXEC: chmod +x normalize-output # %TEST-EXEC: btest -j 5 t1 t2 t3 t4 t5 2>&1 | ./normalize-output | grep "4.*5" | sed "s/[0-36-9] //" >output.j5 # %TEST-EXEC: btest -j 1 t1 t2 t3 t4 t5 2>&1 | ./normalize-output >output.j1 # %TEST-EXEC: btest -j 0 t1 t2 t3 t4 t5 2>&1 | cat >output.j0 # %TEST-EXEC: btest-diff output.j5 # %TEST-EXEC: btest-diff output.j1 # %TEST-EXEC: btest-diff output.j0 %TEST-START-FILE normalize-output grep '\#' | \ sed 's/.#\([0-9]\). .\([0-9]\).*/test \2 thread \1/g' | \ awk '{t[$4] = t[$4] " " $2} END{ for ( i in t ) print t[i];}' | \ sort %TEST-END-FILE %TEST-START-FILE t1 @TEST-EXEC: echo t1.a >output @TEST-EXEC: sleep 1 @TEST-EXEC: echo t1.b >output %TEST-END-FILE %TEST-START-FILE t2 @TEST-EXEC: echo t2.a >output @TEST-EXEC: sleep 1 @TEST-EXEC: echo t2.b >output %TEST-END-FILE %TEST-START-FILE t3 @TEST-EXEC: echo t3.a >output @TEST-EXEC: sleep 1 @TEST-EXEC: echo t3.b >output %TEST-END-FILE %TEST-START-FILE t4 @TEST-SERIALIZE: Foo @TEST-EXEC: echo t4.a >output @TEST-EXEC: sleep 1 @TEST-EXEC: echo t4.b >output %TEST-END-FILE %TEST-START-FILE t5 @TEST-SERIALIZE: Foo @TEST-EXEC: echo t5.a >output @TEST-EXEC: sleep 1 @TEST-EXEC: echo t5.b >output %TEST-END-FILE btest-0.54/testing/btest.cfg0000664002342100234210000000071012506370126017120 0ustar johannajohanna00000000000000 # Configuration file for running btest's test suite. [btest] TestDirs = tests TmpDir = %(testbase)s/.tmp BaselineDir = %(testbase)s/Baseline IgnoreDirs = .svn CVS .tmp IgnoreFiles = *.tmp *.swp #* CommandPrefix = %TEST- Initializer = test -f btest.cfg || cp ../../btest.tests.cfg btest.cfg; echo >/dev/null [environment] PATH=%(testbase)s/..:%(testbase)s/../sphinx:%(testbase)s/Scripts:%(default_path)s # BTEST_CFG=%(testbase)s/btest.tests.cfg btest-0.54/testing/Baseline/0000775002342100234210000000000012523041075017037 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.groups/0000775002342100234210000000000012523041075021517 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.groups/output0000664002342100234210000000036512506370126023011 0ustar johannajohanna00000000000000t1 ... ok t2 ... ok all 2 tests successful t1 ... ok t2 ... ok t3 ... ok all 3 tests successful t4 ... ok all 1 tests successful t1 ... ok t2 ... ok t4 ... ok all 3 tests successful t1 ... ok t2 ... ok t3 ... ok t4 ... 
ok all 4 tests successful btest-0.54/testing/Baseline/tests.known-failure-succeeds/0000775002342100234210000000000012523041075024555 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.known-failure-succeeds/output0000664002342100234210000000011412506370126026037 0ustar johannajohanna00000000000000known-failure-succeeds ... ok (but expected to fail) all 1 tests successful btest-0.54/testing/Baseline/tests.measure-time/0000775002342100234210000000000012523041075022575 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.measure-time/output0000664002342100234210000000077012506370126024067 0ustar johannajohanna00000000000000measure-time ... ok % cat .stderr all 1 tests successful ----- measure-time ... ok % cat .stderr all 1 tests successful ----- measure-time ... ok (+xx.x%) % cat .stderr all 1 tests successful ----- measure-time ... failed (+xx.x%) % 'measure-time' exceeded permitted execution time deviation (+xx.x%) % cat .stderr 1 of 1 test failed ----- measure-time ... ok (+xx.x%) % cat .stderr all 1 tests successful ----- measure-time ... ok (+xx.x%) % cat .stderr all 1 tests successful ----- btest-0.54/testing/Baseline/tests.rerun/0000775002342100234210000000000012523041075021333 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.rerun/output0000664002342100234210000000012712506370126022621 0ustar johannajohanna00000000000000t1 ... ok t2 ... failed t3 ... ok 1 of 3 tests failed t2 ... failed 1 of 1 test failed btest-0.54/testing/Baseline/tests.sphinx.run-sphinx/0000775002342100234210000000000012523041075023623 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.sphinx.run-sphinx/output0000664002342100234210000000611312506370126025112 0ustar johannajohanna00000000000000

Testing¶

1
2
# echo Hello, world!
Hello, world!
1
2
# echo Hello, world! Again.
Hello, world! Again.
1
2
# echo Hello, world! Again. Again.
Hello, world! Again. Again.
1
2
# echo This will fail soon!
This will fail soon!

This should fail and include the diag output instead:

ERROR executing test 'tests.sphinx.hello-world-fail' (part 2)

% 'echo StDeRr >&2; echo 1 | grep -q 2' failed unexpectedly (exit code 1)
% cat .stderr
 StDeRr

This should succeed:

1
2
# echo This succeeds again!
This succeeds again!

This should fail again and include the diag output instead:

ERROR executing test 'tests.sphinx.hello-world-fail' (part 4)

% 'echo StDeRr >&2; echo 3 | grep -q 4' failed unexpectedly (exit code 1)
% cat .stderr
 StDeRr
 StDeRr
1
2
# echo This succeeds again!
This succeeds again!
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
btest.cfg


[btest]
TestDirs    = tests
TmpDir      = %(testbase)s/.tmp
BaselineDir = %(testbase)s/Baseline
Finalizer   = btest-diff-rst

[environment]
PATH=%(testbase)s/../../:%(testbase)s/../../sphinx:%(default_path)s
btest-0.54/testing/Baseline/tests.ignore/0000775002342100234210000000000012523041075021463 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.ignore/output0000664002342100234210000000010512506370126022745 0ustar johannajohanna00000000000000all 3 tests successful all.sub.t3 ... ok all.t1 ... ok all.t2 ... ok btest-0.54/testing/Baseline/tests.threads/0000775002342100234210000000000012523041075021632 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.threads/output.j00000664002342100234210000000011112506370126023421 0ustar johannajohanna00000000000000t1 ... ok t2 ... ok t3 ... ok t4 ... ok t5 ... ok all 5 tests successful btest-0.54/testing/Baseline/tests.threads/output.j50000664002342100234210000000000512506370126023430 0ustar johannajohanna00000000000000 4 5 btest-0.54/testing/Baseline/tests.threads/output.j10000664002342100234210000000001312506370126023423 0ustar johannajohanna00000000000000 1 2 3 4 5 btest-0.54/testing/Baseline/tests.start-file/0000775002342100234210000000000012523041075022252 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.start-file/output0000664002342100234210000000000412506370126023532 0ustar johannajohanna000000000000003 4 btest-0.54/testing/Baseline/tests.parts-error-start-next/0000775002342100234210000000000012523041075024567 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.parts-error-start-next/output0000664002342100234210000000010112506370126026045 0ustar johannajohanna00000000000000cannot use @TEST-START-NEXT with tests split across parts (test) btest-0.54/testing/Baseline/tests.exit-codes/0000775002342100234210000000000012523041075022244 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.exit-codes/out10000664002342100234210000000001412506370126023055 0ustar johannajohanna000000000000001.1 1.2 2.1 btest-0.54/testing/Baseline/tests.exit-codes/out20000664002342100234210000000003412506370126023060 0ustar johannajohanna00000000000000t1 ... failed t2 ... failed btest-0.54/testing/Baseline/tests.diag-all/0000775002342100234210000000000012523041075021652 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.diag-all/output0000664002342100234210000000011012506370126023130 0ustar johannajohanna00000000000000diag-all ... ok % cat .stderr Stderr output all 1 tests successful btest-0.54/testing/Baseline/tests.known-failure/0000775002342100234210000000000012523041075022761 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.known-failure/output0000664002342100234210000000006712506370126024252 0ustar johannajohanna00000000000000known-failure ... failed (expected) 1 of 1 test failed btest-0.54/testing/Baseline/tests.parts-skipping/0000775002342100234210000000000012523041075023153 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.parts-skipping/output0000664002342100234210000000010012506370126024430 0ustar johannajohanna00000000000000Hello, world!. Hello, world! Again. Hello, world! Again. Again. btest-0.54/testing/Baseline/tests.parts-glob/0000775002342100234210000000000012523041075022252 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.parts-glob/output0000664002342100234210000000020012506370126023530 0ustar johannajohanna00000000000000Hello, world!. Hello, world! Again. Hello, world! Again. Again. Hello, world!. Hello, world! Again. Hello, world! Again. Again. 
btest-0.54/testing/Baseline/tests.diag/0000775002342100234210000000000012523041075021104 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.diag/output0000664002342100234210000000115212506370126022371 0ustar johannajohanna00000000000000diag ... failed % 'btest-diff output' failed unexpectedly (exit code 100) % cat .diag == File =============================== Hello, World! == Error =============================== test-diff: no baseline found. ======================================= % cat .stderr 1 of 1 test failed diag ... failed % 'btest-diff output' failed unexpectedly (exit code 1) % cat .diag == File =============================== Hello, World! == Diff =============================== @@ -1 +1 @@ -Wrong baseline +Hello, World! ======================================= % cat .stderr 1 of 1 test failed btest-0.54/testing/Baseline/tests.start-next/0000775002342100234210000000000012523041075022311 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.start-next/output0000664002342100234210000000001212506370126023570 0ustar johannajohanna00000000000000168 20 19 btest-0.54/testing/Baseline/tests.verbose/0000775002342100234210000000000012523041075021645 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.verbose/output0000664002342100234210000000011312506370126023126 0ustar johannajohanna00000000000000verbose ... > echo "Hello, World!" ... verbose ok all 1 tests successful btest-0.54/testing/Baseline/tests.parts/0000775002342100234210000000000012523041075021331 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.parts/output0000664002342100234210000000044112506370126022616 0ustar johannajohanna00000000000000Hello, world! (/Users/robin/bro/docs/aux/btest/testing/.tmp/tests.parts/.tmp/test/test) Hello, world! Again. (/Users/robin/bro/docs/aux/btest/testing/.tmp/tests.parts/.tmp/test/test#2) Hello, world! Again. Again. (/Users/robin/bro/docs/aux/btest/testing/.tmp/tests.parts/.tmp/test/test#3) btest-0.54/testing/Baseline/tests.xml/0000775002342100234210000000000012523041075021000 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.xml/output.xml0000664002342100234210000000050012506370126023060 0ustar johannajohanna00000000000000 % 'exit 1' failed unexpectedly (exit code 1) % cat .stderr btest-0.54/testing/Baseline/tests.alternatives-filter/0000775002342100234210000000000012523041075024164 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.alternatives-filter/output0000664002342100234210000000015212506370126025450 0ustar johannajohanna00000000000000alternatives-filter ... ok all 1 tests successful alternatives-filter [foo] ... ok all 1 tests successful btest-0.54/testing/Baseline/tests.alternatives-filter/child-output0000664002342100234210000000063212506370126026534 0ustar johannajohanna00000000000000# %TEST-EXEC: btest %INPUT >>output 2>&1 # %TEST-EXEC: btest -a foo %INPUT >>output 2>&1 # %TEST-EXEC: btest-diff output # %TEST-EXEC: btest-diff child-output @TEST-EXEC: cat %INPUT >>../../child-output # %T*ST-*X*C: btest %INPUT >>output 2>&1 # %T*ST-*X*C: btest -a foo %INPUT >>output 2>&1 # %T*ST-*X*C: btest-diff output # %T*ST-*X*C: btest-diff child-output @T*ST-*X*C: cat %INPUT >>../../child-output btest-0.54/testing/Baseline/tests.diag-file/0000775002342100234210000000000012523041075022021 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.diag-file/output0000664002342100234210000000005012506370126023302 0ustar johannajohanna00000000000000diag-file ... 
failed 1 of 1 test failed btest-0.54/testing/Baseline/tests.diag-file/diag0000664002342100234210000000014512506370126022653 0ustar johannajohanna00000000000000diag-file ... failed % 'exit 1' failed unexpectedly (exit code 1) % cat .stderr Stderr output btest-0.54/testing/Baseline/tests.parts-finalizer/0000775002342100234210000000000012523041075023312 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.parts-finalizer/output0000664002342100234210000000017112506370126024577 0ustar johannajohanna00000000000000Hello, world!. Finalizer 1 t.test Hello, world! Again. Finalizer 2 t.test Hello, world! Again. Again. Finalizer 3 t.test btest-0.54/testing/Baseline/tests.requires/0000775002342100234210000000000012523041075022037 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.requires/output0000664002342100234210000000014712506370126023327 0ustar johannajohanna00000000000000t1 ... ok t2 ... not available, skipped t3 ... not available, skipped t4 ... ok all 4 tests successful btest-0.54/testing/Baseline/tests.alternatives-substitution/0000775002342100234210000000000012523041075025453 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.alternatives-substitution/output0000664002342100234210000000016612506370126026744 0ustar johannajohanna00000000000000alternatives-substitution ... ok all 1 tests successful alternatives-substitution [foo] ... ok all 1 tests successful btest-0.54/testing/Baseline/tests.alternatives-substitution/child-output0000664002342100234210000000002512506370126030017 0ustar johannajohanna00000000000000World! Hello, World! btest-0.54/testing/Baseline/tests.environment/0000775002342100234210000000000012523041075022544 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.environment/output0000664002342100234210000000071612506370126024036 0ustar johannajohanna00000000000000Foo testbase is correct 42 <...>/.tmp/tests.environment/.tmp/environment/.diag TEST <...>/.tmp/tests.environment/Baseline/environment environment <...>/.tmp/tests.environment/.tmp/environment/.verbose <...>/.tmp/tests.environment Foo testbase is correct 42 <...>/.tmp/tests.environment/.tmp/environment/.diag UPDATE <...>/.tmp/tests.environment/Baseline/environment environment <...>/.tmp/tests.environment/.tmp/environment/.verbose <...>/.tmp/tests.environment btest-0.54/testing/Baseline/tests.quiet/0000775002342100234210000000000012523041075021327 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.quiet/output0000664002342100234210000000000012506370126022603 0ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.brief/0000775002342100234210000000000012523041075021267 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.brief/output0000664002342100234210000000002712506370126022554 0ustar johannajohanna00000000000000all 1 tests successful btest-0.54/testing/Baseline/tests.alternatives-environment/0000775002342100234210000000000012523041075025243 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.alternatives-environment/output0000664002342100234210000000016412506370126026532 0ustar johannajohanna00000000000000alternatives-environment ... ok all 1 tests successful alternatives-environment [foo] ... 
ok all 1 tests successful btest-0.54/testing/Baseline/tests.alternatives-environment/child-output0000664002342100234210000000001712506370126027610 0ustar johannajohanna00000000000000Foo: Foo: BAR btest-0.54/testing/Baseline/tests.sphinx.rst-cmd/0000775002342100234210000000000012523041075023061 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.sphinx.rst-cmd/output0000664002342100234210000000145312506370126024352 0ustar johannajohanna00000000000000.. rst-class:: btest-cmd .. code-block:: none :linenos: :emphasize-lines: 1,1 # echo Hello Hello .. rst-class:: btest-include .. code-block:: guess :linenos: Hello 2, no command .. rst-class:: btest-cmd .. code-block:: none :linenos: :emphasize-lines: 1,1 # Different command Hello 3, no command .. rst-class:: btest-cmd .. code-block:: none :linenos: :emphasize-lines: 1,1 # echo Hello 4, no output .. rst-class:: btest-cmd .. code-block:: none :linenos: :emphasize-lines: 1,1 # Xcho HXllo 5, filtXr HXllo 5, filtXr .. rst-class:: btest-cmd .. code-block:: none :linenos: :emphasize-lines: 1,1 # echo Hello 6, file Example file. Line 2 btest-0.54/testing/Baseline/tests.parts-error-part/0000775002342100234210000000000012523041075023424 5ustar johannajohanna00000000000000btest-0.54/testing/Baseline/tests.parts-error-part/output0000664002342100234210000000012112506370126024704 0ustar johannajohanna00000000000000Do not specify files with part numbers directly, use the base test name (test#3) btest-0.54/testing/btest.tests.cfg0000664002342100234210000000061012506370126020260 0ustar johannajohanna00000000000000# # Configuration file used by individual tests. # # This is set so that all files will be created inside the current # sandbox. [btest] TmpDir = .tmp BaselineDir = Baseline [environment] PATH=%(default_path)s ENV1=Foo ENV2=%(testbase)s ENV3=`expr 42` [environment-foo] FOO=BAR [filter-foo] cat=%(testbase)s/../../Scripts/test-filter [substitution-foo] printf=printf 'Hello, %s' btest-0.54/testing/Makefile0000664002342100234210000000003512506370126016756 0ustar johannajohanna00000000000000 all: @../btest -f diag.log btest-0.54/sphinx/0000775002342100234210000000000012523041075015151 5ustar johannajohanna00000000000000btest-0.54/sphinx/btest-sphinx.py0000664002342100234210000001712112506370126020160 0ustar johannajohanna00000000000000 import os import os.path import tempfile import subprocess import re from docutils import nodes, statemachine, utils from docutils.parsers.rst import directives, Directive, DirectiveError, Parser from docutils.transforms import TransformError, Transform from sphinx.util.console import bold, purple, darkgreen, red, term_width_line from sphinx.errors import SphinxError from sphinx.directives.code import LiteralInclude Initialized = False App = None Reporter = None BTestBase = None BTestTests = None BTestTmp = None Tests = {} Includes = set() # Maps file name extensiosn to Pygments formatter. 
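# Extensions not listed here fall back to the "none" highlighting language in BTestInclude.run().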
ExtMappings = { "bro": "bro", "rst": "rest", "c": "c", "cc": "cc", "py": "python" } def init(settings, reporter): global Intialized, App, Reporter, BTestBase, BTestTests, BTestTmp Initialized = True Reporter = reporter BTestBase = settings.env.config.btest_base BTestTests = settings.env.config.btest_tests BTestTmp = settings.env.config.btest_tmp if not BTestBase: raise SphinxError("error: btest_base not set in config") if not BTestTests: raise SphinxError("error: btest_tests not set in config") if not os.path.exists(BTestBase): raise SphinxError("error: btest_base directory '%s' does not exists" % BTestBase) joined = os.path.join(BTestBase, BTestTests) if not os.path.exists(joined): raise SphinxError("error: btest_tests directory '%s' does not exists" % joined) if not BTestTmp: BTestTmp = os.path.join(App.outdir, ".tmp/rst_output") BTestTmp = os.path.abspath(BTestTmp) if not os.path.exists(BTestTmp): os.makedirs(BTestTmp) def parsePartial(rawtext, settings): parser = Parser() document = utils.new_document("") document.settings = settings parser.parse(rawtext, document) return document.children class Test(object): def __init__(self): self.has_run = False def run(self): if self.has_run: return App.builder.info("running test %s ..." % darkgreen(self.path)) self.rst_output = os.path.join(BTestTmp, "%s" % self.tag) os.environ["BTEST_RST_OUTPUT"] = self.rst_output self.cleanTmps() try: subprocess.check_call("btest -S %s" % self.path, shell=True) except (OSError, IOError, subprocess.CalledProcessError), e: # Equivalent to Directive.error(); we don't have an # directive object here and can't pass it in because # it doesn't pickle. App.builder.warn(red("BTest error: %s" % e)) def cleanTmps(self): subprocess.call("rm %s#* 2>/dev/null" % self.rst_output, shell=True) class BTestTransform(Transform): default_priority = 800 def apply(self): pending = self.startnode (test, part) = pending.details os.chdir(BTestBase) if not test.tag in BTestTransform._run: test.run() BTestTransform._run.add(test.tag) try: rawtext = open("%s#%d" % (test.rst_output, part)).read() except IOError, e: rawtext = "" settings = self.document.settings content = parsePartial(rawtext, settings) pending.replace_self(content) _run = set() class BTest(Directive): required_arguments = 1 final_argument_whitespace = True has_content = True def error(self, msg): self.state.document.settings.env.note_reread() msg = red(msg) msg = self.state.document.reporter.error(str(msg), line=self.lineno) return [msg] def message(self, msg): Reporter.info(msg) def run(self): if not Initialized: # FIXME: Better way to handle one-time initialization? init(self.state.document.settings, self.state.document.reporter) os.chdir(BTestBase) self.assert_has_content() document = self.state_machine.document tag = self.arguments[0] if not tag in Tests: import sys test = Test() test.tag = tag test.path = os.path.join(BTestTests, tag + ".btest") test.parts = 0 Tests[tag] = test test = Tests[tag] test.parts += 1 part = test.parts # Save the test. 
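        # The first part keeps the plain test path; later parts are written to "<path>#<n>", matching the on-disk naming of test parts.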
if part == 1: file = test.path else: file = test.path + "#%d" % part out = open(file, "w") for line in self.content: print >>out, line out.close() details = (test, part) pending = nodes.pending(BTestTransform, details, rawsource=self.block_text) document.note_pending(pending) return [pending] class BTestInclude(LiteralInclude): def __init__(self, *args, **kwargs): super(BTestInclude, self).__init__(*args, **kwargs) def error(self, msg): self.state.document.settings.env.note_reread() msg = red(msg) msg = self.state.document.reporter.error(str(msg), line=self.lineno) return [msg] def message(self, msg): Reporter.info(msg) def run(self): if not Initialized: # FIXME: Better way to handle one-time initialization? init(self.state.document.settings, self.state.document.reporter) document = self.state.document if not document.settings.file_insertion_enabled: return [document.reporter.warning('File insertion disabled', line=self.lineno)] env = document.settings.env expanded_arg = os.path.expandvars(self.arguments[0]) sphinx_src_relation = os.path.relpath(expanded_arg, env.srcdir) self.arguments[0] = os.path.join(os.sep, sphinx_src_relation) (root, ext) = os.path.splitext(self.arguments[0]) if ext.startswith("."): ext = ext[1:] if ext in ExtMappings: self.options["language"] = ExtMappings[ext] else: # Note that we always need to set a language, otherwise the lineos/emphasis don't seem to work. self.options["language"] = "none" self.options["linenos"] = True self.options["prepend"] = "%s\n" % os.path.basename(self.arguments[0]) self.options["emphasize-lines"] = "1,1" self.options["style"] = "X" retnode = super(BTestInclude, self).run() os.chdir(BTestBase) tag = os.path.normpath(self.arguments[0]) tag = os.path.relpath(tag, BTestBase) tag = re.sub("[^a-zA-Z0-9-]", "_", tag) tag = re.sub("__+", "_", tag) if tag.startswith("_"): tag = tag[1:] test_path = ("include-" + tag + ".btest") if BTestTests: test_path = os.path.join(BTestTests, test_path) test_path = os.path.abspath(test_path) i = 1 (base, ext) = os.path.splitext(test_path) while test_path in Includes: i += 1 test_path = "%s@%d" % (base, i) if ext: test_path += ext Includes.add(test_path) out = open(test_path, "w") print >>out, "# @TEST-EXEC: cat %INPUT >output && btest-diff output\n" for i in retnode: out.write(i.rawsource) out.close() for node in retnode: node["classes"] += ["btest-include"] return retnode directives.register_directive('btest', BTest) directives.register_directive('btest-include', BTestInclude) def setup(app): global App App = app app.add_config_value('btest_base', None, 'env') app.add_config_value('btest_tests', None, 'env') app.add_config_value('btest_tmp', None, 'env') btest-0.54/sphinx/btest-rst-include0000775002342100234210000000061712506370126020456 0ustar johannajohanna00000000000000#! /usr/bin/env bash base=`dirname $0` function usage() { echo "usage: `basename $0` [-n ] " exit 1 } lines="" while getopts "n:" opt; do case $opt in n) lines=$OPTARG;; *) usage;; esac done shift $(($OPTIND - 1)) if [ "$1" = "" ]; then usage; fi if [ "$lines" != "" ]; then lines="-n $lines" fi $base/btest-rst-cmd $lines -o cat $1 btest-0.54/sphinx/btest-diff-rst0000775002342100234210000000061612506370126017742 0ustar johannajohanna00000000000000#! /usr/bin/env bash # # A finalizer to diff the output generated by one of the other btest-rst-* # commands. This does nothing if we're running from inside Sphinx. # btest-rst-cmd generates these files if run from inside btest (but not Sphinx). 
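# BTEST_RST_OUTPUT is set by the btest-sphinx extension; in that case the reST output is collected there and no diff is needed.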
if [ "$BTEST_RST_OUTPUT" != "" ]; then exit 0 fi for output in `find btest-${TEST_NAME}*`; do echo `pwd`/$output btest-diff $output done btest-0.54/sphinx/btest-rst-pipe0000775002342100234210000000022212506370126017760 0ustar johannajohanna00000000000000#! /usr/bin/env bash base=`dirname $0` if [ "$#" = 0 ]; then echo "usage: `basename $0` " exit 1 fi $base/btest-rst-cmd -o $@ btest-0.54/sphinx/btest-rst-cmd0000775002342100234210000000640412506370126017576 0ustar johannajohanna00000000000000#! /usr/bin/env bash # # Executes a command and formats the command and its stdout in reST. # trap "rm -f $tmps; exit" INT TERM EXIT function usage() { echo echo "`basename $0` [options] " echo echo " -d Do not actually execute command; just format the command line." echo " -h Show this help." echo " -r Insert into output, rather than stdout." echo " -o Do not include command into output." echo " -c Show in output instead of the one actually executed." echo " -f Run command on command output (or file) before including." echo " -n Include only n lines of output, adding a [...] marker if there's more." echo exit 1 } function apply_filter() { eval $filter_env | eval $filter_opt } # Strip leading white-space and then indent to 6 space. function indent() { python -c " import sys input = sys.stdin.readlines(); n = 1e10 for i in input: n = min(n, len(i) - len(i.lstrip())) for i in input: print ' ' + i[n:], " } stdout=`mktemp -t $(basename $0).XXX` cmd_out=`mktemp -t $(basename $0).XXX` filter_out=`mktemp -t $(basename $0).XXX` tmps="$tmps $stdout $cmd_out $filter_out" include=$cmd_out show_command=1 cmd_display="" dry=0 lines=0 filter_env=${BTEST_RST_FILTER} while getopts "odhr:f:c:n:" opt; do case $opt in h) usage;; o) show_command=0;; r) include=$OPTARG;; d) dry=1; include="";; c) cmd_display=$OPTARG;; f) filter_opt=$OPTARG;; n) lines=$OPTARG;; *) exit 1;; esac done shift $(($OPTIND - 1)) cmd=$@ test "$cmd_display" == "" && cmd_display=$cmd test "$filter_opt" == "" && filter_opt=cat test "$filter_env" == "" && filter_env=cat test "$cmd" == "" && usage; if [ "$dry" != "1" ]; then if ! eval $cmd >$cmd_out; then exit 1 fi fi # Generate reST output. if [ "$show_command" == "1" ]; then echo ".. rst-class:: btest-cmd" >>$stdout echo >>$stdout echo " .. code-block:: none" >>$stdout echo " :linenos:" >>$stdout echo " :emphasize-lines: 1,1" >>$stdout echo >>$stdout echo " # $cmd_display" | apply_filter >>$stdout; else echo ".. rst-class:: btest-include" >>$stdout echo >>$stdout echo " .. code-block:: guess" >>$stdout echo " :linenos:" >>$stdout echo >>$stdout fi for i in $include; do echo " `basename $i`" >>$filter_out echo "" >>$filter_out cat $i | apply_filter | indent >$filter_out if [ $lines = 0 ]; then cat $filter_out >>$stdout else cat $filter_out | head -n $lines >>$stdout if [ `wc -l <$filter_out` -gt $lines ]; then echo ' [...]' >>$stdout fi fi rm -f $filter_out done echo >>$stdout # Branch depending on where this script was started from. if [ "$BTEST_RST_OUTPUT" != "" ]; then # Running from inside Sphinx, just output to where it tells us. cat $stdout >>"${BTEST_RST_OUTPUT}#${TEST_PART}" elif [ "$TEST_NAME" ]; then # Running from inside BTest, output into file that btest-diff-rst will pickup. cat $stdout >>"btest-${TEST_NAME}#${TEST_PART}" else # Running from command line, just print out. cat $stdout fi btest-0.54/setup.py0000644002342100234210000000130312522743410015346 0ustar johannajohanna00000000000000#! 
/usr/bin/env python from distutils.core import setup, Extension scripts = [ "btest", "btest-ask-update", "btest-bg-run", "btest-bg-run-helper", "btest-bg-wait", "btest-diff", "btest-setsid", "sphinx/btest-diff-rst", "sphinx/btest-rst-cmd", "sphinx/btest-rst-include", "sphinx/btest-rst-pipe", ] py_modules = [ "btest-sphinx" ] setup(name='btest', version="0.54", # Filled in automatically. description='A simple unit testing framework', author='Robin Sommer', author_email='robin@icir.org', url='http://www.icir.org/robin/btest', scripts=scripts, package_dir={"": "sphinx"}, py_modules=py_modules ) btest-0.54/COPYING0000664002342100234210000000345712506370126014707 0ustar johannajohanna00000000000000Copyright (c) 1995-2013, The Regents of the University of California through the Lawrence Berkeley National Laboratory and the International Computer Science Institute. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: (1) Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. (2) Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. (3) Neither the name of the University of California, Lawrence Berkeley National Laboratory, U.S. Dept. of Energy, International Computer Science Institute, nor the names of contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Note that some files in the distribution may carry their own copyright notices. btest-0.54/Baseline/0000775002342100234210000000000012523041075015362 5ustar johannajohanna00000000000000btest-0.54/Baseline/examples.t5-2/0000775002342100234210000000000012523041075017666 5ustar johannajohanna00000000000000btest-0.54/Baseline/examples.t5-2/output0000664002342100234210000000001112506370126021144 0ustar johannajohanna00000000000000 22 btest-0.54/Baseline/examples.t7/0000775002342100234210000000000012523041075017531 5ustar johannajohanna00000000000000btest-0.54/Baseline/examples.t7/output0000664002342100234210000000020412506370126021013 0ustar johannajohanna00000000000000Part 1 - /Users/robin/bro/docs/aux/btest/.tmp/examples.t7/t7.sh#1 Part 2 - /Users/robin/bro/docs/aux/btest/.tmp/examples.t7/t7.sh#2 btest-0.54/Baseline/examples.t4/0000775002342100234210000000000012523041075017526 5ustar johannajohanna00000000000000btest-0.54/Baseline/examples.t4/dots0000664002342100234210000000234412506370126020430 0ustar johannajohanna00000000000000. .. 
.CFUserTextEncoding .DS_Store .Spotlight-V100 .TemporaryItems .Trash .Trashes .Xauthority .Xmodmap .Xmodmap.darwin .Xresources .abook .abookrc .anyconnect .backup .backup-to-icsi .backup-to-icsi.exclude .backup.exclude .bash .bash_history .bash_profile .bashrc .bashrc.local .cups .dropbox .elinks .fontconfig .fseventsd .gem .gnupg .growl-buffy .hostnames .inputrc .ispell_american .jed .jedrc .jedrecent .lesshst .mairix .mairix.history .mairixrc .mutt .muttprintrc .muttrc .offlineimap .offlineimap.py .offlineimap.pyc .offlineimaprc .offlineimaprc.bagend .python .rxvt-unicode-254.59.242.10.in-addr.arpa .rxvt-unicode-bagend.local .rxvt-unicode-wifi189.icsi.berkeley.edu .screen .serverauth.11038 .serverauth.1452 .serverauth.203 .serverauth.206 .serverauth.220 .serverauth.23460 .serverauth.23866 .serverauth.270 .serverauth.305 .serverauth.378 .serverauth.53652 .serverauth.55639 .signature .sleepwatcher .ssh .subversion .unison .unison.home.prf .urlview .viminfo .whitelist .xdvirc .xinitrc.d .xinitrc.darwin.leopard .xinitrc.darwin.tiger .yanag Desktop Documents Downloads Dropbox Library Mail Movies Music Pictures Public Sites bin bro data down etc generic include jed lib man mbox memos scripts share src synchronized tex tmp work www btest-0.54/Baseline/examples.t5/0000775002342100234210000000000012523041075017527 5ustar johannajohanna00000000000000btest-0.54/Baseline/examples.t5/output0000664002342100234210000000001112506370126021005 0ustar johannajohanna00000000000000 119 btest-0.54/Baseline/examples.t6/0000775002342100234210000000000012523041075017530 5ustar johannajohanna00000000000000btest-0.54/Baseline/examples.t6/output0000664002342100234210000000000212506370126021006 0ustar johannajohanna000000000000003 btest-0.54/MANIFEST0000644002342100234210000001021712523041075014770 0ustar johannajohanna00000000000000# file GENERATED by distutils, do NOT edit CHANGES COPYING MANIFEST MANIFEST.in Makefile README VERSION btest btest-ask-update btest-bg-run btest-bg-run-helper btest-bg-wait btest-diff btest-setsid btest.cfg.example setup.py Baseline/examples.t4/dots Baseline/examples.t5/output Baseline/examples.t5-2/output Baseline/examples.t6/output Baseline/examples.t7/output examples/alternative examples/my-filter examples/t1 examples/t2 examples/t3.sh examples/t4.awk examples/t5.sh examples/t6.sh examples/t7 examples/t7.sh#1 examples/t7.sh#2 examples/t7.sh#3 examples/sphinx/.gitignore examples/sphinx/Makefile examples/sphinx/btest.cfg examples/sphinx/conf.py examples/sphinx/index.rst examples/sphinx/Baseline/tests.sphinx.hello-world/btest-tests.sphinx.hello-world#1 examples/sphinx/Baseline/tests.sphinx.hello-world/btest-tests.sphinx.hello-world#2 examples/sphinx/Baseline/tests.sphinx.hello-world/btest-tests.sphinx.hello-world#3 examples/sphinx/tests/sphinx/hello-world.btest examples/sphinx/tests/sphinx/hello-world.btest#2 examples/sphinx/tests/sphinx/hello-world.btest#3 sphinx/btest-diff-rst sphinx/btest-rst-cmd sphinx/btest-rst-include sphinx/btest-rst-pipe sphinx/btest-sphinx.py testing/.gitignore testing/Makefile testing/btest.cfg testing/btest.tests.cfg testing/Baseline/tests.alternatives-environment/child-output testing/Baseline/tests.alternatives-environment/output testing/Baseline/tests.alternatives-filter/child-output testing/Baseline/tests.alternatives-filter/output testing/Baseline/tests.alternatives-substitution/child-output testing/Baseline/tests.alternatives-substitution/output testing/Baseline/tests.brief/output testing/Baseline/tests.diag/output testing/Baseline/tests.diag-all/output 
testing/Baseline/tests.diag-file/diag testing/Baseline/tests.diag-file/output testing/Baseline/tests.environment/output testing/Baseline/tests.exit-codes/out1 testing/Baseline/tests.exit-codes/out2 testing/Baseline/tests.groups/output testing/Baseline/tests.ignore/output testing/Baseline/tests.known-failure/output testing/Baseline/tests.known-failure-succeeds/output testing/Baseline/tests.measure-time/output testing/Baseline/tests.parts/output testing/Baseline/tests.parts-error-part/output testing/Baseline/tests.parts-error-start-next/output testing/Baseline/tests.parts-finalizer/output testing/Baseline/tests.parts-glob/output testing/Baseline/tests.parts-skipping/output testing/Baseline/tests.quiet/output testing/Baseline/tests.requires/output testing/Baseline/tests.rerun/output testing/Baseline/tests.sphinx.rst-cmd/output testing/Baseline/tests.sphinx.run-sphinx/output testing/Baseline/tests.start-file/output testing/Baseline/tests.start-next/output testing/Baseline/tests.threads/output.j0 testing/Baseline/tests.threads/output.j1 testing/Baseline/tests.threads/output.j5 testing/Baseline/tests.verbose/output testing/Baseline/tests.xml/output.xml testing/Scripts/diff-remove-abspath testing/Scripts/dummy-script testing/Scripts/strip-test-base testing/Scripts/test-filter testing/tests/alternatives-environment.test testing/tests/alternatives-filter.test testing/tests/alternatives-substitution.test testing/tests/basic-fail.test testing/tests/basic-succeed.test testing/tests/brief.test testing/tests/canonifier.test testing/tests/copy-file.test testing/tests/diag-all.test testing/tests/diag-file.test testing/tests/diag.test testing/tests/diff.test testing/tests/environment.test testing/tests/exit-codes.test testing/tests/finalizer.test testing/tests/groups.test testing/tests/ignore.test testing/tests/initializer.test testing/tests/known-failure-succeeds.btest testing/tests/known-failure.btest testing/tests/macros.test testing/tests/measure-time.tests testing/tests/parts-error-part.test testing/tests/parts-error-start-next.test testing/tests/parts-finalizer.test testing/tests/parts-glob.test testing/tests/parts-skipping.tests testing/tests/parts.tests testing/tests/quiet.test testing/tests/requires.test testing/tests/rerun.test testing/tests/start-file.test testing/tests/start-next-naming.test testing/tests/start-next.test testing/tests/threads.test testing/tests/tmps.test testing/tests/verbose.test testing/tests/xml.test testing/tests/sphinx/rst-cmd.sh testing/tests/sphinx/run-sphinx btest-0.54/VERSION0000644002342100234210000000000512522743407014710 0ustar johannajohanna000000000000000.54 btest-0.54/btest-diff0000775002342100234210000000456212506370126015627 0ustar johannajohanna00000000000000#! /usr/bin/env bash # # TEST_MODE={TEST|UPDATE|UPDATE_INTERACTIVE} # TEST_BASELINE # TEST_DIAGNOSTICS # TEST_NAME # # TEST_DIFF_CANONIFIER # TEST_DIFF_BRIEF # Maximum number of lines to show from mismatching input file. MAX_LINES=5000 if [ "$TEST_DIAGNOSTICS" = "" ]; then TEST_DIAGNOSTICS=/dev/stdout fi if [ "$#" -lt 1 ]; then echo "btest-diff: wrong number of arguments" >$TEST_DIAGNOSTICS exit 1 fi input=$1 canon=`echo $input | sed 's#/#.#g'` shift if [ ! -f $input ]; then echo "btest-diff: input $input does not exist." >$TEST_DIAGNOSTICS exit 1 fi tmpfiles="" function delete_tmps { rm -f $tmpfiles 2>/dev/null } trap delete_tmps 0 result=2 rm -f $TEST_DIAGNOSTICS 2>/dev/null echo "== File ===============================" >>$TEST_DIAGNOSTICS if [ "$TEST_DIFF_BRIEF" == "" -o ! 
-e $TEST_BASELINE/$canon ]; then if [ `wc -l $input | awk '{print $1}'` -le $MAX_LINES ]; then cat $input >>$TEST_DIAGNOSTICS else head -$MAX_LINES $input >>$TEST_DIAGNOSTICS echo "[... File too long, truncated ...]" >>$TEST_DIAGNOSTICS fi else echo "" >>$TEST_DIAGNOSTICS fi if [ -e $TEST_BASELINE/$canon ]; then if [ "$TEST_DIFF_CANONIFIER" != "" ]; then diff1=/tmp/test-diff.$$.$canon.baseline.tmp diff2=/tmp/test-diff.$$.$canon.tmp tmpfiles="$tmpfiles $diff1 $diff2" eval $TEST_DIFF_CANONIFIER $input <$TEST_BASELINE/$canon >$diff1 eval $TEST_DIFF_CANONIFIER $input <$input >$diff2 else diff1=$TEST_BASELINE/$canon diff2=$input fi echo "== Diff ===============================" >>$TEST_DIAGNOSTICS diff -au $@ $diff1 $diff2 >>$TEST_DIAGNOSTICS result=$? else echo "== Error ===============================" >>$TEST_DIAGNOSTICS echo "test-diff: no baseline found." >>$TEST_DIAGNOSTICS result=100 fi echo "=======================================" >>$TEST_DIAGNOSTICS if [ "$TEST_MODE" == "TEST" ]; then exit $result elif [ "$TEST_MODE" == "UPDATE_INTERACTIVE" ]; then if [ "$result" == 0 ]; then exit 0 fi btest-ask-update rc=$? echo -n "$TEST_NAME ..." >/dev/tty if [ $rc == 0 ]; then cp $input $TEST_BASELINE/$canon exit 0 fi exit $rc elif [ "$TEST_MODE" == "UPDATE" ]; then cp $input $TEST_BASELINE/$canon exit 0 fi echo "test-diff: unknown test mode $TEST_MODE" >$TEST_DIAGNOSTICS exit 1 btest-0.54/CHANGES0000644002342100234210000004311312522743407014642 0ustar johannajohanna00000000000000 0.54 | 2015-03-02 17:22:22 -0800 * Release 0.54. 0.53-6 | 2015-03-02 17:21:26 -0800 * Improve documentation of timing functionality. (Daniel Thayer) * Add a new section to documentation that lists the BTest prerequisites. (Daniel Thayer) * Add warning when btest cannot create timing baseline. (Daniel Thayer) 0.53-3 | 2015-01-22 07:25:01 -0800 * Fix some typos in the README. (Daniel Thayer) 0.53-1 | 2014-11-11 13:21:10 -0800 * In diagnostics, do not show verbose output for tests known to fail. (Robin Sommer) 0.53 | 2014-07-22 17:36:24 -0700 * Release 0.53. 0.52-2 | 2014-07-22 17:36:15 -0700 * Update MANIFEST.in and setup.py to fix packaging. (Jon Siwek) 0.52 | 2014-03-13 14:05:44 -0700 * Release 0.52. 0.51-14 | 2014-03-13 14:05:36 -0700 * Fix a link in the README. (Jon Siwek) 0.51-12 | 2014-02-11 16:12:44 -0800 * Work-around for systems reporting that a socket path is too long. Addresses BIT-862. (Robin Sommer) 0.51-11 | 2014-02-11 15:37:40 -0800 * Fix for Linux systems that have the perf tool but don't support measuring instructions. (Robin Sommer) * No longer tracking tests that are expected to fail in state file. (Robin Sommer) * Refactoring the timing code to no longer execute at all when not needed.(Robin Sommer) 0.51-7 | 2014-02-06 21:06:40 -0800 * Fix for platforms that don't support timing measurements yet. (Robin Sommer) 0.51-6 | 2014-02-06 18:19:08 -0800 * Adding a timing mode that records test execution times per host. This is for catching regressions (or improvements :) that lets execution times divert significantly. Linux only for now. See the README for more information. (Robin Sommer) * Adding color to test status when writing to console. (Robin Sommer) * A bit of refactoring to define the status messages ("ok", "failed") only at a single location. Also added a note when a test declared as expecting failure in fact succeeds. (Robin Sommer) 0.51-2 | 2013-11-17 20:21:08 -0800 * New keyword ``TEST-KNOWN-FAILURE`` to mark tests that are currently known to fail. 
(Robin Sommer) 0.51-1 | 2013-11-11 13:36:36 -0800 * Fixing bug with tests potentially being ignored when using alternatives. (Robin Sommer) 0.51 | 2013-10-07 17:29:50 -0700 * Updating copyright notice. (Robin Sommer) 0.5-1 | 2013-10-07 17:26:30 -0700 * Polishing how included commands and files are shown. (Robin Sommer) - Enabling CSS styling to command lines and shown file names via the new "btest-include" and "btest-cmd" classes. - Fix to enable showing line numbers in btest-sphinx generated output. - Fix to enable Pygments coloring in output. 0.5 | 2013-09-20 14:48:01 -0700 * Fix the btest-rst-pipe script. (Daniel Thayer) * A set of of documentation fixes, clarifications, and extensions. (Daniel Thayer) * A set of changes to Sphinx commands and directives. (Robin Sommer) btest-rst-*: - Always show line numbers. - Highlight the command executed. - rst-cmd-include gets an option -n to include only upto i lines. - rst-cmd-include prefixes output with "" to show what we're including. btest-include: - Set Pygments language automatically if we show a file with an extension we know (in particular ".bro"). - Prefix output with "" to show what we're including. 0.4-63 | 2013-08-28 21:10:39 -0700 * btest-sphinx now provides a new directive btest-include. This works like literalinclude (with all its options) but it also saves a version of the included text as a test to detect changes. (Robin Sommer) 0.4-60 | 2013-08-28 18:54:51 -0700 * Fix typos and reST formatting in README (Daniel Thayer) * Fix a couple of error messages. (Daniel Thayer) * Fixed a reference to a non-existent variable which was causing the "-w" option to have no effect. (Daniel Thayer) * Test portability fix. (Robin Sommer) 0.4-55 | 2013-08-22 16:09:21 -0700 * New "Sphinx-mode" for BTest, activated with -S. This allows to capture a test's diagnostic output when running from inside Sphinx; the output will now be inserted into the generated document. (Robin Sommer) * Adding an option -n to btest-rst-cmd that truncates output longer than N lines. (Robin Sommer) * Adding a PartFinalizer that runs a commmand at the completion of each test part. (Robin Sommer) 0.4-51 | 2013-08-22 10:36:34 -0700 * Improve cleanup of processes that don't terminate with btest-bg-wait. (Jon Siwek) 0.4-49 | 2013-08-13 18:43:03 -0700 * Fixing test portability problems. (Daniel Thayer) * Adding TEST_BASE environment variable. The existing TESTBASE isn't always behaving as expected and wasn't documented to begin with. (Robin Sommer) 0.4-43 | 2013-08-12 16:04:53 -0700 * Bugfix for ignored tests. (Robin Sommer) 0.4-42 | 2013-07-31 20:46:30 -0700 * Adding support for "parts": One can split a single test across multiple files by adding a numerical ``#`` postfix to their names, where each ```` represents a separate part of the test. ``btests`` will combine all of a test's parts in numerical order and execute them subsequently within the same sandbox. Example in the README. (Robin Sommer) * When running a command, TEST_PART contains the current part number. (Robin Sommer) * Extending Sphinx support. (Robin Sommer) * Adding tests for Sphinx functionality. * Support for parts in Sphinx directives. If multiple btest directives reference the same test name, each will turn into a part of a single test. * Internal change restructuring the btest Sphinx directive. We now process it in two passes: one to save the test at parse time, and one later to execute once everything has been parsed. * Adding Sphinx sandbox for testing. 
* Fix for tests returning no output to render at all. (Robin Sommer) 0.4-28 | 2013-07-17 21:56:18 -0700 * btest-diff now passes the name of the file under consideration on to canonifiers. (Robin Sommer) 0.4-27 | 2013-07-14 21:19:59 -0700 * When searching for tests, BTest now ignores a directories if it finds a file ".btest-ignore" in there. (Robin Sommer) 0.4-26 | 2013-07-08 20:46:22 -0700 * Fixing bug with @TEST-START-NEXT naming. (Robin Sommer) 0.4-25 | 2013-07-08 13:25:50 -0700 * A test-suite for btest. Using, of course, btest. "make test" will test most of btest's features. The main missing piece is testing the Sphinx support, we will add that next. (Robin Sommer) * When creating directories, we know also create intermediaries. That in particular means that "@TEST-START-FILE a/b/c" now creates a directory "a/b" automatically and puts the file in there. (Robin Sommer) * IgnoreDirs now also works for sub directories. (Robin Sommer) * Documentation updates. (Robin Sommer) * Adding "Initializer" option, which runs a command before each test. (Robin Sommer) * Adding "CommandPrefix" option that changes the naming of all btest commands by replacing the "@TEST-" prefix with a custom string. (Robin Sommer) * Default configuration file can be overriden via BTEST_CFG environment variable. (Robin Sommer) * s/bro-ids.org/bro.org/g (Robin Sommer) * Bugfix for -j without number. (Robin Sommer) * New @TEST-ALTERNATIVE that activates tests only for the given alternative. Renamed @TEST-NO-ALTERNATIVE to @TEST-NOT-ALTERNATIVE, and allowing "default" for both @TEST-ALTERNATIVE and @TEST-NOTALTERNATIVE to specify the case that BTest runs without any alternative given. (Robin Sommer) * Fix for alternative names containing white spaces. (Robin Sommer) 0.4-14 | 2013-01-23 18:11:22 -0800 * Fixing links in README and removing TODOs. (Robin Sommer) 0.4-13 | 2013-01-23 14:33:23 -0800 * Allowing use of -j without a value. BTest then uses the number of CPU cores as reported by the OS. (Robin Sommer) 0.4-11 | 2013-01-21 17:50:40 -0800 * Adding a new "alternative" concept that combines filters and substitutions, and adds per-alternative environment variables. (Robin Sommer) Instead of defining filters and substitutions separately, one now specifies an alternative configuration to run with "-A " and that then checks for both "[substitutions-]" and "[filter-]" section. In addition, "[environment-]" allows to define alternative-specific environment variables. The old filter/substitutions options -F and -s are gone. The sections for substitutions are renamed to "[substitutions-]" from "[subst-]". 0.4-10 | 2013-01-07 09:45:35 -0800 * btest now sets a new environment variable TEST_VERBOSE, giving the path of a file where a test can record further information about its execution that will be included with btest's ``--verbose`` output. (Robin Sommer) 0.4-9 | 2012-12-20 12:20:44 -0800 * Documentation fixes/clarifications. (Daniel Thayer) * Fix the btest "-c" option, which didn't work when the specified config file was not in the current working directory. (Daniel Thayer) 0.4-6 | 2012-11-08 16:33:51 -0800 * Putting a limit on how many input line btest-diff shows. (Robin Sommer) 0.4-5 | 2012-11-01 16:14:29 -0700 * Making Sphinx module tolerant against docutils version change. (Robin Sommer) 0.4-4 | 2012-09-25 06:24:59 -0700 * Fix a couple of reST formatting problems. (Daniel Thayer) 0.4-2 | 2012-09-24 11:41:06 -0700 * Add option -x to output test results in an XML (JUnit-like) format. 
(Jon Siwek) 0.4 | 2012-06-15 15:15:13 -0700 * Remove code to expand environment variables on command line. (Not needed because the command line is just passed to the shell.) (Daniel Thayer) * Clarify explanation about expansion of environment variables. (Daniel Thayer) * Fix errors in README and btest help output; added documentation for the -q option. (Daniel Thayer) * Fixed a bug in btest where it was looking for "filters-" (instead of "filter-") in the btest config file. (Daniel Thayer) 0.31-45 | 2012-05-24 16:43:14 -0700 * Correct typos in documentation. (Daniel Thayer) * Failed tests are now only recorded into the state file when we're not updating. That allows to run "btest -r" repeatedly while updating baselines in between. (Robin Sommer) * Experimentation Sphinx directive to write a btest with a Sphinx document. See README for more information. * Fixing typos, plus an console output tweak. (Robin Sommer) * Option -q now implies -b as well. (Robin Sommer) 0.31-33 | 2012-05-13 17:08:15 -0700 * New command to copy a file into a test's directory. ``@TEST-COPY-FILE: `` Copy the given file into the test's directory before the test is run. If ```` is a relative path, it's interpreted relative to the BTest's base directory. Environment variables in ```` will be replaced if enclosed in ``${..}``. This command can be given multiple times. (Robin Sommer) * Suppressing error messages when btest-diff can't remove diag file. (Robin Sommer) * Adding option -q/--quiet to suppress informational non-error output. (Robin Sommer) * Option -F also takes a comma-separated list to specify multiple filters , rather than having to give -F multiple times. (Robin Sommer) 0.31-28 | 2012-05-06 21:27:15 -0700 * Separating semantics of groups and thread serialization into separate options. -g still specifices @TEST-GROUPs that are to be executed, but these groups don't any longer control which tests get serialized in a parallel execution. For that, there's a new "@TEST-SERIALIZE: " command that takes a tag and then makes sure all tests with the same tag are run within the same thread. (Robin Sommer) * TEST-GROUPS can now be given multiple times now to assign a test to a set of groups. (Robin Sommer) * Extended -g to accept a comma-separated list of groups names to run more than one test group. (Robin Sommer) * New output handler for console output. This output is now the default when stdout is a terminal. It prints out a compressed output that updates as btest goes through; it also indicates the progress so far. If btest's output is redirected to a non-terminal, is switches back to the old style. (Robin Sommer) * New test command @TEST-NO-FILTER: This allows to ignore a test when running a specific filter. (Robin Sommer) * Changing the way filters are activated. -F now activates only the given filter, but doesn't run the standard tests in addition. But one can now give -F a command-separated list of filters to activate them all, and refer to the standard tests without filter as ``-``. (Robin Sommer) * Fix to allow numbered test to be given individually on the command line. (E.g., integer.geq-3 for a file that contains three tests). (Robin Sommer) 0.31-23 | 2012-04-16 18:10:02 -0700 * A number of smaller fixes for bugs, plus polishing, caused by the recent restructuring. (Robin Sommer) * Removing the error given when using -r with tests on the command line. It's unnessary and confusing compared to when listing tests in btest.cfg. (Robin Sommer) * Adding a new "finalizer" option. 
``Finalizer`` An executable that will be executed each time any test has succesfully run. It runs in the same directory as the test itself and receives the name of the test as its parameter. The return value indicates whether the test should indeed be considered succeeded. By default, there's no finalizer set. (Robin Sommer) * btest is now again overwriting old diag files instead of appending (i.e., back to as it used to be). (Robin Sommer) * Diag output is now line-buffered. (Daniel Thayer) 0.31-13 | 2012-03-13 15:59:51 -0700 * Adding new option -r that reruns all tests that failed last time. btest now always records all failed tests in a file called. (Robin Sommer) * Internal restructuring to factor output out into sublcasses. (Robin Sommer) * Adding parallel test execution to btest. (Robin Sommer) - A new option "-j " allows to run up to tests in parallel. - A new @TEST-GROUP directive allows to group tests that can't be parallelized. All tests of the same group will be executed sequentially. - A new option "-g " allows to run only tests of a certain group, or with "-g -" all tests that don't have a group. 0.31-2 | 2012-01-25 16:58:29 -0800 * Don't add btest's path to PATH anymore. (Jon Siwek) 0.31 | 2011-11-29 12:11:49 -0600 * Submodule README conformity changes. (Jon Siwek) 0.3 | 2011-10-25 19:58:26 -0700 * More graceful error handling at startup if btest.cfg not found. (Robin Sommer) * Python 2.4 compat changes. (Jon Siwek) * When in brief mode, btest-diff now shows full output if we don't have a baseline yet. (Robin Sommer) * Adding executable permission back to script. (Robin Sommer) * Cleaning up distribution. (Robin Sommer) 0.22-28 | 2011-09-15 15:18:11 -0700 * New environment variable TEST_DIFF_BRIEF. If set btest-diff no longer includes a mismatching file's full content it the diagnostic output. This can be useful if the file being compared is very large. (Robin Sommer) 0.22-27 | 2011-08-12 22:56:12 -0700 * Fix btest-bg-wait's kill trap and -k option. (Jon Siwek) 0.22-18 | 2011-07-23 11:54:07 -0700 * A new option -u for interactively updating baselines. * Teach btest's TEST-START-FILE to make subdirectories (Jon Siwek) * Output polishing. (Robin Sommer) * Have distutils install 'btest-setsid' script. (Jon Siwek) * A portable setsid. (Robin Sommer) * Fixes for background execution of processes. * Fixing exit codes. (Robin Sommer) 0.22-6 | 2011-07-19 17:38:03 -0700 * Teach btest's TEST-START-FILE to make subdirectories (Jon Siwek) 0.22-5 | 2011-05-02 08:41:34 -0700 * A number of bug fixes, and output polishing. (Robin Sommer) * More robust background execution by btest-bg-*. (Robin Sommer) 0.22-4 | 2011-03-29 21:38:13 -0700 * A test command can now signal to btest that even if it fails subsequent test commands should still run by returning exit code 100. btest-diff uses this to continue in the case that no baseline has yet been established. * New test option @TEST-REQUIRES for running a test conditionally. See the README for more information. 0.22-2 | 2011-03-03 21:44:18 -0800 * Two new helper scripts for spawning processes in the background. See README for more information. * btest-diff can now deal with files specificied with paths. 0.22 | 2011-02-08 14:06:13 -0800 * BTest is now hosted along with the other Bro repositories on git.bro-ids.org. 0.21 | 2011-01-09 21:29:18 -0800 * In btest.cfg, option values can now include commands to execute in backticks. Example: [environment] CC=clang -emit-llvm -g `hilti-config --cflags` * Limiting substitutions to replacing whole words. 
* Adding "substitutions". Substitutions are similar to filters, yet they do not adapt the input but the command line being exectued. See README for more information. * Instead of giving a test's file name on the command line, one can now also use its "dotted" name as it's printed out when btest is running (e.g., "foo.bar"). That allows for easier copy/paste. * Starting CHANGES. btest-0.54/examples/0000775002342100234210000000000012523041075015456 5ustar johannajohanna00000000000000btest-0.54/examples/t10000664002342100234210000000007312506370126015730 0ustar johannajohanna00000000000000@TEST-EXEC: echo "Foo" | grep -q Foo @TEST-EXEC: test -d . btest-0.54/examples/t7.sh#30000664002342100234210000000004012506370126016467 0ustar johannajohanna00000000000000# @TEST-EXEC: btest-diff output btest-0.54/examples/t70000664002342100234210000000013412506370126015734 0ustar johannajohanna00000000000000@TEST-EXEC: echo "Foo" | grep -q Foo @TEST-EXEC: awk 'BEGIN{for(i=0; i < 50000000; i++){}}' btest-0.54/examples/t7.sh#20000664002342100234210000000005412506370126016473 0ustar johannajohanna00000000000000# @TEST-EXEC: echo Part 2 - %INPUT >>output btest-0.54/examples/t7.sh#10000664002342100234210000000005412506370126016472 0ustar johannajohanna00000000000000# @TEST-EXEC: echo Part 1 - %INPUT >>output btest-0.54/examples/t20000664002342100234210000000010612506370126015726 0ustar johannajohanna00000000000000@TEST-EXEC: echo "Foo" | grep -q Foo @TEST-EXEC: test -d DOESNOTEXIST btest-0.54/examples/sphinx/0000775002342100234210000000000012523041075016767 5ustar johannajohanna00000000000000btest-0.54/examples/sphinx/.gitignore0000664002342100234210000000002512506370126020757 0ustar johannajohanna00000000000000_* .btest.failed.dat btest-0.54/examples/sphinx/tests/0000775002342100234210000000000012523041075020131 5ustar johannajohanna00000000000000btest-0.54/examples/sphinx/tests/sphinx/0000775002342100234210000000000012523041075021442 5ustar johannajohanna00000000000000btest-0.54/examples/sphinx/tests/sphinx/hello-world.btest0000664002342100234210000000005712506370126024742 0ustar johannajohanna00000000000000@TEST-EXEC: btest-rst-cmd echo "Hello, world!" btest-0.54/examples/sphinx/tests/sphinx/hello-world.btest#30000664002342100234210000000007512506370126025070 0ustar johannajohanna00000000000000@TEST-EXEC: btest-rst-cmd echo "Hello, world! Again. Again." btest-0.54/examples/sphinx/tests/sphinx/hello-world.btest#20000664002342100234210000000006612506370126025067 0ustar johannajohanna00000000000000@TEST-EXEC: btest-rst-cmd echo "Hello, world! Again." btest-0.54/examples/sphinx/btest.cfg0000664002342100234210000000032112506370126020570 0ustar johannajohanna00000000000000 [btest] TestDirs = tests TmpDir = %(testbase)s/.tmp BaselineDir = %(testbase)s/Baseline Finalizer = btest-diff-rst [environment] PATH=%(testbase)s/../../:%(testbase)s/../../sphinx:%(default_path)s btest-0.54/examples/sphinx/index.rst0000664002342100234210000000244512506370126020640 0ustar johannajohanna00000000000000.. BTest-Sphinx Demo documentation master file, created by sphinx-quickstart on Wed May 8 15:22:37 2013. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. Welcome to BTest-Sphinx Demo's documentation! ============================================= Contents: .. toctree:: :maxdepth: 2 Testing ======= .. btest:: hello-world @TEST-EXEC: btest-rst-cmd echo "Hello, world!" .. btest:: hello-world @TEST-EXEC: btest-rst-cmd echo "Hello, world! Again." .. 
btest:: hello-world @TEST-EXEC: btest-rst-cmd echo "Hello, world! Again. Again." .. btest:: hello-world-fail @TEST-EXEC: btest-rst-cmd echo "This will fail soon!" This should fail and include the diag output instead: .. btest:: hello-world-fail @TEST-EXEC: echo StDeRr >&2; echo 1 | grep -q 2 This should succeed: .. btest:: hello-world-fail @TEST-EXEC: btest-rst-cmd echo "This succeeds again!" This should fail again and include the diag output instead: .. btest:: hello-world-fail @TEST-EXEC: echo StDeRr >&2; echo 3 | grep -q 4 .. btest:: hello-world-fail @TEST-EXEC: btest-rst-cmd echo "This succeeds again!" .. btest-include:: btest.cfg Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` btest-0.54/examples/sphinx/Baseline/0000775002342100234210000000000012523041075020511 5ustar johannajohanna00000000000000btest-0.54/examples/sphinx/Baseline/tests.sphinx.hello-world/0000775002342100234210000000000012523041075025412 5ustar johannajohanna00000000000000btest-0.54/examples/sphinx/Baseline/tests.sphinx.hello-world/btest-tests.sphinx.hello-world#10000664002342100234210000000007612506370126033507 0ustar johannajohanna00000000000000.. code-block:: none # echo Hello, world! Hello, world! btest-0.54/examples/sphinx/Baseline/tests.sphinx.hello-world/btest-tests.sphinx.hello-world#20000664002342100234210000000011412506370126033501 0ustar johannajohanna00000000000000.. code-block:: none # echo Hello, world! Again. Hello, world! Again. btest-0.54/examples/sphinx/Baseline/tests.sphinx.hello-world/btest-tests.sphinx.hello-world#30000664002342100234210000000013212506370126033502 0ustar johannajohanna00000000000000.. code-block:: none # echo Hello, world! Again. Again. Hello, world! Again. Again. btest-0.54/examples/sphinx/conf.py0000664002342100234210000001753212506370126020301 0ustar johannajohanna00000000000000# -*- coding: utf-8 -*- # # BTest-Sphinx Demo documentation build configuration file, created by # sphinx-quickstart on Wed May 8 15:22:37 2013. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os sys.path.append("../../sphinx") sys.path.append("../../../sphinx") # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [] ### BTest extensions += ["btest-sphinx"] btest_base = "." # Relative to Sphinx-root. btest_tests = "tests/sphinx" # Relative to btest_base. ### # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. 
project = u'BTest-Sphinx Demo' copyright = u'2013, Foo Bar' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.0' # The full version, including alpha/beta/rc tags. release = '1.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. 
#html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'BTest-SphinxDemodoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'BTest-SphinxDemo.tex', u'BTest-Sphinx Demo Documentation', u'Foo Bar', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'btest-sphinxdemo', u'BTest-Sphinx Demo Documentation', [u'Foo Bar'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'BTest-SphinxDemo', u'BTest-Sphinx Demo Documentation', u'Foo Bar', 'BTest-SphinxDemo', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' btest-0.54/examples/sphinx/Makefile0000664002342100234210000001276412506370126020444 0ustar johannajohanna00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext

all: html

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  text       to make text files"
	@echo "  man        to make manual pages"
	@echo "  texinfo    to make Texinfo files"
	@echo "  info       to make Texinfo files and run them through makeinfo"
	@echo "  gettext    to make PO message catalogs"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"

clean:
	-rm -rf $(BUILDDIR)/* .tmp

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/BTest-SphinxDemo.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/BTest-SphinxDemo.qhc"

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/BTest-SphinxDemo"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/BTest-SphinxDemo"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."
btest-0.54/examples/alternative0000664002342100234210000000017412506370126017724 0ustar johannajohanna00000000000000# @TEST-EXEC: cat %INPUT >output
# @TEST-EXEC: cat output | grep original
# @TEST-EXEC: set | grep MY >envs

original input
btest-0.54/examples/t4.awk0000664002342100234210000000013512506370126016513 0ustar johannajohanna00000000000000# @TEST-EXEC: ls -a ~ | awk -f %INPUT >dots
# @TEST-EXEC: btest-diff dots
/\.*/ { print $1 }
btest-0.54/examples/t3.sh0000664002342100234210000000006212506370126016341 0ustar johannajohanna00000000000000# @TEST-EXEC: sh %INPUT

ls /etc | grep -q passwd
btest-0.54/examples/t5.sh0000664002342100234210000000023712506370126016347 0ustar johannajohanna00000000000000# @TEST-EXEC: cat %INPUT | wc -c >output
# @TEST-EXEC: btest-diff output

This is the first test input in this file.

# @TEST-START-NEXT

... and the second.
btest-0.54/examples/my-filter0000775002342100234210000000007112506370126017315 0ustar johannajohanna00000000000000# @TEST-IGNORE
cat $1 | sed 's/original/filtered/g' >$2
btest-0.54/examples/t6.sh0000664002342100234210000000024612506370126016350 0ustar johannajohanna00000000000000# @TEST-EXEC: awk -f %INPUT <foo.dat >output
# @TEST-EXEC: btest-diff output

{ lines += 1; }
END   { print lines; }

@TEST-START-FILE foo.dat
1
2
3
@TEST-END-FILE
btest-0.54/PKG-INFO0000664002342100234210000000035612523041075014741 0ustar johannajohanna00000000000000Metadata-Version: 1.0
Name: btest
Version: 0.54
Summary: A simple unit testing framework
Home-page: http://www.icir.org/robin/btest
Author: Robin Sommer
Author-email: robin@icir.org
License: UNKNOWN
Description: UNKNOWN
Platform: UNKNOWN
btest-0.54/Makefile0000664002342100234210000000023212506370126015300 0ustar johannajohanna00000000000000
all:

.PHONY: dist

dist:
	rm -rf build/*.tar.gz
	python setup.py sdist -d build
	@printf "Package: "; echo build/*.tar.gz

test:
	@(cd testing && make)
btest-0.54/btest-setsid0000775002342100234210000000024712506370126016206 0ustar johannajohanna00000000000000#! /usr/bin/env python

import os
import sys

# Start a new session so the command gets its own process group; ignore
# failures (e.g., when we already are a session leader).
try:
    os.setsid()
except:
    pass

prog = sys.argv[1]

try:
    args = sys.argv[1:]
except:
    args = []

# Replace this process with the given command.
os.execvp(prog, args)