pytest-xdist-1.22.1/.github/PULL_REQUEST_TEMPLATE.md

Thanks for submitting a PR, your contribution is really appreciated!

Here's a quick checklist that should be present in PRs:

- [ ] Make sure to include reasonable tests for your change if necessary
- [ ] We use [towncrier](https://pypi.python.org/pypi/towncrier) for changelog management, so please add a *news* file into the `changelog` folder following these guidelines:

  * Name it `$issue_id.$type`, for example `588.bugfix`;
  * If you don't have an issue_id, change it to the PR id after creating the PR;
  * Ensure the type is one of `removal`, `feature`, `bugfix`, `vendor`, `doc` or `trivial`;
  * Make sure to use full sentences with correct case and punctuation, for example:

    ```
    Fix issue with non-ascii contents in doctest text files.
    ```
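*Editor's note (illustrative, not part of the template): for a PR fixing a hypothetical issue 588, the news fragment would be a one-sentence file at `changelog/588.bugfix`, e.g. containing `Fix crash when rsyncing non-ascii paths.`. towncrier later assembles these fragments into the changelog using `changelog/_template.rst` shown below; `towncrier --draft` can be used to preview the rendered entry.*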
pytest-xdist-1.22.1/changelog/_template.rst

{% for section in sections %}
{% set underline = "-" %}
{% if section %}
{{section}}
{{ underline * section|length }}{% set underline = "~" %}
{% endif %}

{% if sections[section] %}
{% for category, val in definitions.items() if category in sections[section] %}

{{ definitions[category]['name'] }}
{{ underline * definitions[category]['name']|length }}

{% if definitions[category]['showcontent'] %}
{% for text, values in sections[section][category]|dictsort(by='value') %}
- {{ text }}{% if category != 'vendor' %} (`{{ values[0] }} `_){% endif %}
{% endfor %}
{% else %}
- {{ sections[section][category]['']|sort|join(', ') }}
{% endif %}

{% if sections[section][category]|length == 0 %}
No significant changes.
{% else %}
{% endif %}
{% endfor %}
{% else %}
No significant changes.
{% endif %}
{% endfor %}

pytest-xdist-1.22.1/example/loadscope/epsilon/__init__.py

def epsilon1(arg1, arg2=1000):
    """Do epsilon1

    Usage:

    >>> epsilon1(10, 20)
    40
    >>> epsilon1(30)
    1040
    """
    return arg1 + arg2 + 10


def epsilon2(arg1, arg2=1000):
    """Do epsilon2

    Usage:

    >>> epsilon2(10, 20)
    -20
    >>> epsilon2(30)
    -980
    """
    return arg1 - arg2 - 10


def epsilon3(arg1, arg2=1000):
    """Do epsilon3

    Usage:

    >>> epsilon3(10, 20)
    200
    >>> epsilon3(30)
    30000
    """
    return arg1 * arg2

pytest-xdist-1.22.1/example/loadscope/test/test_alpha.py

from time import sleep


def test_alpha0():
    sleep(5)
    assert True


def test_alpha1():
    sleep(5)
    assert True


def test_alpha2():
    sleep(5)
    assert True


def test_alpha3():
    sleep(5)
    assert True


def test_alpha4():
    sleep(5)
    assert True


def test_alpha5():
    sleep(5)
    assert True


def test_alpha6():
    sleep(5)
    assert True


def test_alpha7():
    sleep(5)
    assert True


def test_alpha8():
    sleep(5)
    assert True


def test_alpha9():
    sleep(5)
    assert True

pytest-xdist-1.22.1/example/loadscope/test/test_beta.py

from time import sleep


def test_beta0():
    sleep(5)
    assert True


def test_beta1():
    sleep(5)
    assert True


def test_beta2():
    sleep(5)
    assert True


def test_beta3():
    sleep(5)
    assert True


def test_beta4():
    sleep(5)
    assert True


def test_beta5():
    sleep(5)
    assert True


def test_beta6():
    sleep(5)
    assert True


def test_beta7():
    sleep(5)
    assert True


def test_beta8():
    sleep(5)
    assert True


def test_beta9():
    sleep(5)
    assert True

pytest-xdist-1.22.1/example/loadscope/test/test_delta.py

from time import sleep
from unittest import TestCase


class Delta1(TestCase):
    def test_delta0(self):
        sleep(5)
        assert True

    def test_delta1(self):
        sleep(5)
        assert True

    def test_delta2(self):
        sleep(5)
        assert True

    def test_delta3(self):
        sleep(5)
        assert True

    def test_delta4(self):
        sleep(5)
        assert True

    def test_delta5(self):
        sleep(5)
        assert True

    def test_delta6(self):
        sleep(5)
        assert True

    def test_delta7(self):
        sleep(5)
        assert True

    def test_delta8(self):
        sleep(5)
        assert True

    def test_delta9(self):
        sleep(5)
        assert True


class Delta2(TestCase):
    def test_delta0(self):
        sleep(5)
        assert True

    def test_delta1(self):
        sleep(5)
        assert True

    def test_delta2(self):
        sleep(5)
        assert True

    def test_delta3(self):
        sleep(5)
        assert True

    def test_delta4(self):
        sleep(5)
        assert True

    def test_delta5(self):
        sleep(5)
        assert True

    def test_delta6(self):
        sleep(5)
        assert True

    def test_delta7(self):
        sleep(5)
        assert True

    def test_delta8(self):
        sleep(5)
        assert True

    def test_delta9(self):
        sleep(5)
        assert True
pytest-xdist-1.22.1/example/loadscope/test/test_gamma.py

from time import sleep


def test_gamma0():
    sleep(5)
    assert True


def test_gamma1():
    sleep(5)
    assert True


def test_gamma2():
    sleep(5)
    assert True


def test_gamma3():
    sleep(5)
    assert True


def test_gamma4():
    sleep(5)
    assert True


def test_gamma5():
    sleep(5)
    assert True


def test_gamma6():
    sleep(5)
    assert True


def test_gamma7():
    sleep(5)
    assert True


def test_gamma8():
    sleep(5)
    assert True


def test_gamma9():
    sleep(5)
    assert True

pytest-xdist-1.22.1/example/loadscope/requirements.txt

ipdb
pytest
../../

pytest-xdist-1.22.1/example/loadscope/tox.ini

[tox]
envlist = test
setupdir = {toxinidir}/../../

[testenv:test]
basepython = python3
passenv = http_proxy https_proxy
deps = -rrequirements.txt
changedir = {envtmpdir}
commands =
    py.test -s -v \
        --doctest-modules \
        --junitxml=tests.xml \
        --dist=loadscope \
        --tx=8*popen \
        {toxinidir}/test \
        {toxinidir}/epsilon
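*Editor's note (not part of the archive): the `tox.ini` above is what makes this directory a runnable demonstration. Every example test sleeps five seconds, and `--dist=loadscope` with `--tx=8*popen` keeps all tests from one module (or one `TestCase` class such as `Delta1`/`Delta2`) on the same worker. The modules and classes therefore run concurrently across the eight workers while tests within a scope stay serialized, which is exactly the behaviour the loadscope distribution mode is meant to show; `--doctest-modules` additionally pulls in the `epsilon` doctests as their own scope.*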
pytest-xdist-1.22.1/example/boxed.txt

.. note::
    Since 1.19.0, the actual implementation of the ``--boxed`` option has
    been moved to a separate plugin, `pytest-forked
    <https://github.com/pytest-dev/pytest-forked>`_, which can be installed
    independently. The ``--boxed`` command-line option remains for backward
    compatibility reasons.

If your testing involves C or C++ libraries you might have to deal
with crashing processes. The xdist plugin provides the ``--boxed`` option
to run each test in a controlled subprocess.  Here is a basic example::

    # content of test_module.py

    import pytest
    import os
    import time

    # run test function 50 times with different argument
    @pytest.mark.parametrize("arg", range(50))
    def test_func(arg):
        time.sleep(0.05)  # each test takes a while
        if arg % 19 == 0:
            os.kill(os.getpid(), 15)

If you run this with::

    $ py.test -n1
    =========================== test session starts ============================
    platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev8
    plugins: xdist, bugzilla, cache, oejskit, cli, pep8, cov
    collecting ... collected 50 items

    test_module.py f..................f..................f...........

    ================================= FAILURES =================================
    _______________________________ test_func[0] _______________________________

    /home/hpk/tmp/doc-exec-420/test_module.py:6: running the test CRASHED with signal 15

    ______________________________ test_func[19] _______________________________

    /home/hpk/tmp/doc-exec-420/test_module.py:6: running the test CRASHED with signal 15

    ______________________________ test_func[38] _______________________________

    /home/hpk/tmp/doc-exec-420/test_module.py:6: running the test CRASHED with signal 15

    =================== 3 failed, 47 passed in 3.41 seconds ====================

You'll see that a couple of tests are reported as crashing, indicated
by lower-case ``f`` and the respective failure summary.  You can also use
the xdist-provided parallelization feature to speed up your testing::

    $ py.test -n3
    =========================== test session starts ============================
    platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev8
    plugins: xdist, bugzilla, cache, oejskit, cli, pep8, cov
    gw0 I / gw1 I / gw2 I
    gw0 [50] / gw1 [50] / gw2 [50]

    scheduling tests via LoadScheduling
    ..f...............f..................f............
    ================================= FAILURES =================================
    _______________________________ test_func[0] _______________________________
    [gw0] linux2 -- Python 2.7.3 /home/hpk/venv/1/bin/python

    /home/hpk/tmp/doc-exec-420/test_module.py:6: running the test CRASHED with signal 15

    ______________________________ test_func[19] _______________________________
    [gw2] linux2 -- Python 2.7.3 /home/hpk/venv/1/bin/python

    /home/hpk/tmp/doc-exec-420/test_module.py:6: running the test CRASHED with signal 15

    ______________________________ test_func[38] _______________________________
    [gw2] linux2 -- Python 2.7.3 /home/hpk/venv/1/bin/python

    /home/hpk/tmp/doc-exec-420/test_module.py:6: running the test CRASHED with signal 15

    =================== 3 failed, 47 passed in 2.03 seconds ====================
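.. note::
    (Editorial sketch, assuming the separately installable plugin mentioned
    in the note above.)  With ``pytest-forked`` installed, the same boxing
    behaviour is available directly through that plugin's own flag::

        $ pip install pytest-forked
        $ py.test --forked

    ``--boxed`` continues to work as the backward-compatible spelling
    provided by xdist.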
pytest-xdist-1.22.1/testing/acceptance_test.py

import re

import py
import pytest


class TestDistribution:
    def test_n1_pass(self, testdir):
        p1 = testdir.makepyfile("""
            def test_ok():
                pass
        """)
        result = testdir.runpytest(p1, "-n1")
        assert result.ret == 0
        result.stdout.fnmatch_lines([
            "*1 passed*",
        ])

    def test_n1_fail(self, testdir):
        p1 = testdir.makepyfile("""
            def test_fail():
                assert 0
        """)
        result = testdir.runpytest(p1, "-n1")
        assert result.ret == 1
        result.stdout.fnmatch_lines([
            "*1 failed*",
        ])

    def test_n1_import_error(self, testdir):
        p1 = testdir.makepyfile("""
            import __import_of_missing_module
            def test_import():
                pass
        """)
        result = testdir.runpytest(p1, "-n1")
        assert result.ret == 1
        result.stdout.fnmatch_lines([
            "E *Error: No module named *__import_of_missing_module*",
        ])

    def test_n2_import_error(self, testdir):
        """Check that we don't report the same import error multiple times
        in distributed mode."""
        p1 = testdir.makepyfile("""
            import __import_of_missing_module
            def test_import():
                pass
        """)
        result1 = testdir.runpytest(p1, "-n2")
        result2 = testdir.runpytest(p1, "-n1")
        assert len(result1.stdout.lines) == len(result2.stdout.lines)

    def test_n1_skip(self, testdir):
        p1 = testdir.makepyfile("""
            def test_skip():
                import py
                py.test.skip("myreason")
        """)
        result = testdir.runpytest(p1, "-n1")
        assert result.ret == 0
        result.stdout.fnmatch_lines([
            "*1 skipped*",
        ])

    def test_manytests_to_one_import_error(self, testdir):
        p1 = testdir.makepyfile("""
            import __import_of_missing_module
            def test_import():
                pass
        """)
        result = testdir.runpytest(p1, '--tx=popen', '--tx=popen')
        assert result.ret in (1, 2)
        result.stdout.fnmatch_lines([
            "E *Error: No module named *__import_of_missing_module*",
        ])

    def test_manytests_to_one_popen(self, testdir):
        p1 = testdir.makepyfile(
            """
                import py
                def test_fail0():
                    assert 0
                def test_fail1():
                    raise ValueError()
                def test_ok():
                    pass
                def test_skip():
                    py.test.skip("hello")
            """,
        )
        result = testdir.runpytest(p1, "-v", '-d', '--tx=popen', '--tx=popen')
        result.stdout.fnmatch_lines([
            "*1*Python*",
            "*2 failed, 1 passed, 1 skipped*",
        ])
        assert result.ret == 1

    def test_n1_fail_minus_x(self, testdir):
        p1 = testdir.makepyfile("""
            def test_fail1():
                assert 0
            def test_fail2():
                assert 0
        """)
        result = testdir.runpytest(p1, "-x", "-v", "-n1")
        assert result.ret == 2
        result.stdout.fnmatch_lines([
            "*Interrupted: stopping*1*",
            "*1 failed*",
        ])

    def test_basetemp_in_subprocesses(self, testdir):
        p1 = testdir.makepyfile("""
            def test_send(tmpdir):
                import py
                assert tmpdir.relto(py.path.local(%r)), tmpdir
        """ % str(testdir.tmpdir))
        result = testdir.runpytest_subprocess(p1, "-n1")
        assert result.ret == 0
        result.stdout.fnmatch_lines([
            "*1 passed*",
        ])

    def test_dist_ini_specified(self, testdir):
        p1 = testdir.makepyfile(
            """
                import py
                def test_fail0():
                    assert 0
                def test_fail1():
                    raise ValueError()
                def test_ok():
                    pass
                def test_skip():
                    py.test.skip("hello")
            """,
        )
        testdir.makeini("""
            [pytest]
            addopts = --tx=3*popen
        """)
        result = testdir.runpytest(p1, '-d', "-v")
        result.stdout.fnmatch_lines([
            "*2*Python*",
            "*2 failed, 1 passed, 1 skipped*",
        ])
        assert result.ret == 1

    @py.test.mark.xfail("sys.platform.startswith('java')", run=False)
    def test_dist_tests_with_crash(self, testdir):
        if not hasattr(py.std.os, 'kill'):
            py.test.skip("no os.kill")
        p1 = testdir.makepyfile("""
            import py
            def test_fail0():
                assert 0
            def test_fail1():
                raise ValueError()
            def test_ok():
                pass
            def test_skip():
                py.test.skip("hello")
            def test_crash():
                import time
                import os
                time.sleep(0.5)
                os.kill(os.getpid(), 15)
        """)
        result = testdir.runpytest(p1, "-v", '-d', '-n1')
        result.stdout.fnmatch_lines([
            "*Python*",
            "*PASS**test_ok*",
            "*node*down*",
            "*3 failed, 1 passed, 1 skipped*"
        ])
        assert result.ret == 1

    def test_distribution_rsyncdirs_example(self, testdir):
        source = testdir.mkdir("source")
        dest = testdir.mkdir("dest")
        subdir = source.mkdir("example_pkg")
        subdir.ensure("__init__.py")
        p = subdir.join("test_one.py")
        p.write("def test_5():\n  assert not __file__.startswith(%r)" % str(p))
        result = testdir.runpytest(
            "-v", "-d",
            "--rsyncdir=%(subdir)s" % locals(),
            "--tx=popen//chdir=%(dest)s" % locals(), p)
        assert result.ret == 0
        result.stdout.fnmatch_lines([
            "*0* *cwd*",
            # "RSyncStart: [G1]",
            # "RSyncFinished: [G1]",
            "*1 passed*"
        ])
        assert dest.join(subdir.basename).check(dir=1)

    def test_backward_compatibility_worker_terminology(self, testdir):
        """Ensure that we still support "config.slaveinput" for backward
        compatibility (#234).

        Keep in mind that removing this compatibility will break a ton of
        plugins and user code.
        """
        testdir.makepyfile("""
            def test(pytestconfig):
                assert hasattr(pytestconfig, 'slaveinput')
                assert hasattr(pytestconfig, 'workerinput')
        """)
        result = testdir.runpytest("-n1")
        result.stdout.fnmatch_lines("*1 passed*")
        assert result.ret == 0
    def test_data_exchange(self, testdir):
        testdir.makeconftest("""
            # This hook only called on master.
            def pytest_configure_node(node):
                node.workerinput['a'] = 42
                node.workerinput['b'] = 7

            def pytest_configure(config):
                # this attribute is only set on workers
                if hasattr(config, 'workerinput'):
                    a = config.workerinput['a']
                    b = config.workerinput['b']
                    r = a + b
                    config.workeroutput['r'] = r

            # This hook only called on master.
            def pytest_testnodedown(node, error):
                node.config.calc_result = node.workeroutput['r']

            def pytest_terminal_summary(terminalreporter):
                if not hasattr(terminalreporter.config, 'workerinput'):
                    calc_result = terminalreporter.config.calc_result
                    terminalreporter._tw.sep(
                        '-', 'calculated result is %s' % calc_result)
        """)
        p1 = testdir.makepyfile("def test_func(): pass")
        result = testdir.runpytest("-v", p1, '-d', '--tx=popen')
        result.stdout.fnmatch_lines(
            ["*0*Python*", "*calculated result is 49*", "*1 passed*"])
        assert result.ret == 0
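    # ------------------------------------------------------------------
    # Editor's sketch (illustrative, not part of the original suite):
    # the test above exercises xdist's master/worker data exchange.  In
    # user code the same channel looks roughly like this; the key name
    # "offset" is hypothetical, only workerinput/workeroutput are real:
    #
    #   def pytest_configure_node(node):        # runs on the master
    #       node.workerinput['offset'] = 10     # send data to a worker
    #
    #   def pytest_testnodedown(node, error):   # runs on the master
    #       print(node.workeroutput['offset'])  # read data sent back
    #
    # On the worker side, plugins see the same dicts as
    # config.workerinput (read) and config.workeroutput (write).
    # ------------------------------------------------------------------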
    def test_keyboardinterrupt_hooks_issue79(self, testdir):
        testdir.makepyfile(__init__="", test_one="""
            def test_hello():
                raise KeyboardInterrupt()
        """)
        testdir.makeconftest("""
            def pytest_sessionfinish(session):
                # on the worker
                if hasattr(session.config, 'workeroutput'):
                    session.config.workeroutput['s2'] = 42
            # on the master
            def pytest_testnodedown(node, error):
                assert node.workeroutput['s2'] == 42
                print ("s2call-finished")
        """)
        args = ["-n1", "--debug"]
        result = testdir.runpytest_subprocess(*args)
        s = result.stdout.str()
        assert result.ret == 2
        assert 's2call' in s
        assert "Interrupted" in s

    def test_keyboard_interrupt_dist(self, testdir):
        # xxx could be refined to check for return code
        testdir.makepyfile("""
            def test_sleep():
                import time
                time.sleep(10)
        """)
        child = testdir.spawn_pytest("-n1 -v")
        child.expect(".*test_sleep.*")
        child.kill(2)  # keyboard interrupt
        child.expect(".*KeyboardInterrupt.*")
        # child.expect(".*seconds.*")
        child.close()
        # assert ret == 2

    def test_dist_with_collectonly(self, testdir):
        p1 = testdir.makepyfile("""
            def test_ok():
                pass
        """)
        result = testdir.runpytest(p1, "-n1", "--collect-only")
        assert result.ret == 0
        result.stdout.fnmatch_lines([
            "*collected 1 item*",
        ])


class TestDistEach:
    def test_simple(self, testdir):
        testdir.makepyfile("""
            def test_hello():
                pass
        """)
        result = testdir.runpytest_subprocess(
            "--debug", "--dist=each", "--tx=2*popen")
        assert not result.ret
        result.stdout.fnmatch_lines(["*2 pass*"])

    @py.test.mark.xfail(
        run=False,
        reason="other python versions might not have py.test installed")
    def test_simple_diffoutput(self, testdir):
        interpreters = []
        for name in ("python2.5", "python2.6"):
            interp = py.path.local.sysfind(name)
            if interp is None:
                py.test.skip("%s not found" % name)
            interpreters.append(interp)

        testdir.makepyfile(__init__="", test_one="""
            import sys
            def test_hello():
                print("%s...%s" % sys.version_info[:2])
                assert 0
        """)
        args = ["--dist=each", "-v"]
        args += ["--tx", "popen//python=%s" % interpreters[0]]
        args += ["--tx", "popen//python=%s" % interpreters[1]]
        result = testdir.runpytest(*args)
        s = result.stdout.str()
        assert "2...5" in s
        assert "2...6" in s


class TestTerminalReporting:
    def test_pass_skip_fail(self, testdir):
        testdir.makepyfile("""
            import py
            def test_ok():
                pass
            def test_skip():
                py.test.skip("xx")
            def test_func():
                assert 0
        """)
        result = testdir.runpytest("-n1", "-v")
        result.stdout.fnmatch_lines_random([
            "*PASS*test_pass_skip_fail.py*test_ok*",
            "*SKIP*test_pass_skip_fail.py*test_skip*",
            "*FAIL*test_pass_skip_fail.py*test_func*",
        ])
        result.stdout.fnmatch_lines([
            "*def test_func():",
            ">       assert 0",
            "E       assert 0",
        ])

    def test_fail_platinfo(self, testdir):
        testdir.makepyfile("""
            def test_func():
                assert 0
        """)
        result = testdir.runpytest("-n1", "-v")
        result.stdout.fnmatch_lines([
            "*FAIL*test_fail_platinfo.py*test_func*",
            "*0*Python*",
            "*def test_func():",
            ">       assert 0",
            "E       assert 0",
        ])

    @pytest.mark.parametrize('n', ['-n0', '-n1'])
    @pytest.mark.parametrize('warn_type', ['pytest', 'builtin'])
    def test_logwarning(self, testdir, n, warn_type):
        from pkg_resources import parse_version
        if parse_version(pytest.__version__) < parse_version('3.1'):
            pytest.skip('pytest warnings requires >= 3.1')
        if warn_type == 'builtin':
            warn_code = """warnings.warn(UserWarning('this is a warning'))"""
        elif warn_type == 'pytest':
            warn_code = """request.config.warn('', 'this is a warning',
                           fslocation=py.path.local())"""
        else:
            assert False
        testdir.makepyfile("""
            import warnings, py
            def test_func(request):
                {warn_code}
        """.format(warn_code=warn_code))
        result = testdir.runpytest(n)
        result.stdout.fnmatch_lines([
            "*this is a warning*",
            "*1 passed, 1 warnings*",
        ])

    def test_logfinish_hook(self, testdir):
        """Ensure the pytest_runtest_logfinish hook is being properly handled"""
        from _pytest import hookspec
        if not hasattr(hookspec, 'pytest_runtest_logfinish'):
            pytest.skip('test requires pytest_runtest_logfinish hook in pytest (3.4+)')

        testdir.makeconftest("""
            def pytest_runtest_logfinish():
                print('pytest_runtest_logfinish hook called')
        """)
        testdir.makepyfile("""
            def test_func():
                pass
        """)
        result = testdir.runpytest("-n1", "-s")
        result.stdout.fnmatch_lines([
            "*pytest_runtest_logfinish hook called*",
        ])
""" testdir.makepyfile(test_foo=""" import pytest @pytest.mark.parametrize('i', range(3)) def test_ok(i): pass """) testdir.makeconftest(""" def pytest_sessionfinish(session): collected = getattr(session, 'testscollected', None) with open('testscollected', 'w') as f: f.write('collected = %s' % collected) """) result = testdir.inline_run("-n1") result.assertoutcome(passed=3) collected_file = testdir.tmpdir.join('testscollected') assert collected_file.isfile() assert collected_file.read() == 'collected = 3' def test_funcarg_teardown_failure(testdir): p = testdir.makepyfile(""" import pytest @pytest.fixture def myarg(request): def teardown(val): raise ValueError(val) return request.cached_setup(setup=lambda: 42, teardown=teardown, scope="module") def test_hello(myarg): pass """) result = testdir.runpytest_subprocess("--debug", p) # , "-n1") result.stdout.fnmatch_lines([ "*ValueError*42*", "*1 passed*1 error*", ]) assert result.ret @pytest.mark.parametrize('when', ['setup', 'call', 'teardown']) def test_crashing_item(testdir, when): """Ensure crashing item is correctly reported during all testing stages""" code = dict(setup='', call='', teardown='') code[when] = 'py.process.kill(os.getpid())' p = testdir.makepyfile(""" import os import py import pytest @pytest.fixture def fix(): {setup} yield {teardown} def test_crash(fix): {call} pass def test_ok(): pass """.format(**code)) passes = 2 if when == 'teardown' else 1 result = testdir.runpytest("-n2", p) result.stdout.fnmatch_lines([ "*crashed*test_crash*", "*1 failed*%d passed*" % passes, ]) def test_multiple_log_reports(testdir): """ Ensure that pytest-xdist supports plugins that emit multiple logreports (#206). Inspired by pytest-rerunfailures. """ testdir.makeconftest(""" from _pytest.runner import runtestprotocol def pytest_runtest_protocol(item, nextitem): item.ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location) reports = runtestprotocol(item, nextitem=nextitem) for report in reports: item.ihook.pytest_runtest_logreport(report=report) return True """) testdir.makepyfile(""" def test(): pass """) result = testdir.runpytest("-n1") result.stdout.fnmatch_lines([ "*2 passed*", ]) def test_skipping(testdir): p = testdir.makepyfile(""" import pytest def test_crash(): pytest.skip("hello") """) result = testdir.runpytest("-n1", '-rs', p) assert result.ret == 0 result.stdout.fnmatch_lines(["*hello*", "*1 skipped*"]) def test_issue34_pluginloading_in_subprocess(testdir): testdir.tmpdir.join("plugin123.py").write( py.code.Source(""" def pytest_namespace(): return {'sample_variable': 'testing'} """)) testdir.makepyfile(""" import pytest def test_hello(): assert pytest.sample_variable == "testing" """) result = testdir.runpytest_subprocess("-n1", "-p", "plugin123") assert result.ret == 0 result.stdout.fnmatch_lines([ "*1 passed*", ]) def test_fixture_scope_caching_issue503(testdir): p1 = testdir.makepyfile(""" import pytest @pytest.fixture(scope='session') def fix(): assert fix.counter == 0, \ 'session fixture was invoked multiple times' fix.counter += 1 fix.counter = 0 def test_a(fix): pass def test_b(fix): pass """) result = testdir.runpytest(p1, '-v', '-n1') assert result.ret == 0 result.stdout.fnmatch_lines([ "*2 passed*", ]) def test_issue_594_random_parametrize(testdir): """ Make sure that tests that are randomly parametrized display an appropriate error message, instead of silently skipping the entire test run. 
""" p1 = testdir.makepyfile(""" import pytest import random xs = list(range(10)) random.shuffle(xs) @pytest.mark.parametrize('x', xs) def test_foo(x): assert 1 """) result = testdir.runpytest(p1, '-v', '-n4') assert result.ret == 1 result.stdout.fnmatch_lines([ "Different tests were collected between gw* and gw*", ]) def test_tmpdir_disabled(testdir): """Test xdist doesn't break if internal tmpdir plugin is disabled (#22). """ p1 = testdir.makepyfile(""" def test_ok(): pass """) result = testdir.runpytest(p1, "-n1", '-p', 'no:tmpdir') assert result.ret == 0 result.stdout.fnmatch_lines("*1 passed*") @pytest.mark.parametrize('plugin', ['xdist.looponfail', 'xdist.boxed']) def test_sub_plugins_disabled(testdir, plugin): """Test that xdist doesn't break if we disable any of its sub-plugins. (#32) """ p1 = testdir.makepyfile(""" def test_ok(): pass """) result = testdir.runpytest(p1, "-n1", '-p', 'no:%s' % plugin) assert result.ret == 0 result.stdout.fnmatch_lines("*1 passed*") class TestNodeFailure: def test_load_single(self, testdir): f = testdir.makepyfile(""" import os def test_a(): os._exit(1) def test_b(): pass """) res = testdir.runpytest(f, '-n1') res.stdout.fnmatch_lines([ "*Replacing crashed worker*", "*Worker*crashed while running*", "*1 failed*1 passed*", ]) def test_load_multiple(self, testdir): f = testdir.makepyfile(""" import os def test_a(): pass def test_b(): os._exit(1) def test_c(): pass def test_d(): pass """) res = testdir.runpytest(f, '-n2') res.stdout.fnmatch_lines([ "*Replacing crashed worker*", "*Worker*crashed while running*", "*1 failed*3 passed*", ]) def test_each_single(self, testdir): f = testdir.makepyfile(""" import os def test_a(): os._exit(1) def test_b(): pass """) res = testdir.runpytest(f, '--dist=each', '--tx=popen') res.stdout.fnmatch_lines([ "*Replacing crashed worker*", "*Worker*crashed while running*", "*1 failed*1 passed*", ]) @pytest.mark.xfail(reason='#20: xdist race condition on node restart') def test_each_multiple(self, testdir): f = testdir.makepyfile(""" import os def test_a(): os._exit(1) def test_b(): pass """) res = testdir.runpytest(f, '--dist=each', '--tx=2*popen') res.stdout.fnmatch_lines([ "*Replacing crashed worker*", "*Worker*crashed while running*", "*2 failed*2 passed*", ]) def test_max_worker_restart(self, testdir): f = testdir.makepyfile(""" import os def test_a(): pass def test_b(): os._exit(1) def test_c(): os._exit(1) def test_d(): pass """) res = testdir.runpytest(f, '-n4', '--max-worker-restart=1') res.stdout.fnmatch_lines([ "*Replacing crashed worker*", "*Maximum crashed workers reached: 1*", "*Worker*crashed while running*", "*Worker*crashed while running*", "*2 failed*2 passed*", ]) def test_max_worker_restart_die(self, testdir): f = testdir.makepyfile(""" import os os._exit(1) """) res = testdir.runpytest(f, '-n4', '--max-worker-restart=0') res.stdout.fnmatch_lines([ "*Unexpectedly no active workers*", "*INTERNALERROR*" ]) def test_disable_restart(self, testdir): f = testdir.makepyfile(""" import os def test_a(): pass def test_b(): os._exit(1) def test_c(): pass """) res = testdir.runpytest(f, '-n4', '--max-worker-restart=0') res.stdout.fnmatch_lines([ "*Worker restarting disabled*", "*Worker*crashed while running*", "*1 failed*2 passed*", ]) @pytest.mark.parametrize('n', [0, 2]) def test_worker_id_fixture(testdir, n): import glob f = testdir.makepyfile(""" import pytest @pytest.mark.parametrize("run_num", range(2)) def test_worker_id1(worker_id, run_num): with open("worker_id%s.txt" % run_num, "w") as f: f.write(worker_id) 
""") result = testdir.runpytest(f, "-n%d" % n) result.stdout.fnmatch_lines('* 2 passed in *') worker_ids = set() for fname in glob.glob(str(testdir.tmpdir.join("*.txt"))): with open(fname) as f: worker_ids.add(f.read().strip()) if n == 0: assert worker_ids == {'master'} else: assert worker_ids == {'gw0', 'gw1'} @pytest.mark.parametrize('tb', ['auto', 'long', 'short', 'no', 'line', 'native']) def test_error_report_styles(testdir, tb): testdir.makepyfile(""" import pytest def test_error_report_styles(): raise RuntimeError('some failure happened') """) result = testdir.runpytest('-n1', '--tb=%s' % tb) if tb != 'no': result.stdout.fnmatch_lines('*some failure happened*') result.assert_outcomes(failed=1) def test_color_yes_collection_on_non_atty(testdir, request): """skip collect progress report when working on non-terminals. Similar to pytest-dev/pytest#1397 """ tr = request.config.pluginmanager.getplugin("terminalreporter") if not hasattr(tr, 'isatty'): pytest.skip('only valid for newer pytest versions') testdir.makepyfile(""" import pytest @pytest.mark.parametrize('i', range(10)) def test_this(i): assert 1 """) args = ['--color=yes', '-n2'] result = testdir.runpytest(*args) assert 'test session starts' in result.stdout.str() assert '\x1b[1m' in result.stdout.str() assert 'gw0 [10] / gw1 [10]' in result.stdout.str() assert 'gw0 C / gw1 C' not in result.stdout.str() def test_internal_error_with_maxfail(testdir): """ Internal error when using --maxfail option (#62, #65). """ testdir.makepyfile(""" import pytest @pytest.fixture(params=['1', '2']) def crasher(): raise RuntimeError def test_aaa0(crasher): pass def test_aaa1(crasher): pass """) result = testdir.runpytest_subprocess('--maxfail=1', '-n1') result.stdout.fnmatch_lines(['* 1 error in *']) assert 'INTERNALERROR' not in result.stderr.str() class TestLoadScope: def test_by_module(self, testdir): test_file = """ import pytest @pytest.mark.parametrize('i', range(10)) def test(i): pass """ testdir.makepyfile( test_a=test_file, test_b=test_file, ) result = testdir.runpytest('-n2', '--dist=loadscope', '-v') assert get_workers_and_test_count_by_prefix( 'test_a.py::test', result.outlines) in ({'gw0': 10}, {'gw1': 10}) assert get_workers_and_test_count_by_prefix( 'test_b.py::test', result.outlines) in ({'gw0': 10}, {'gw1': 10}) def test_by_class(self, testdir): testdir.makepyfile(test_a=""" import pytest class TestA: @pytest.mark.parametrize('i', range(10)) def test(self, i): pass class TestB: @pytest.mark.parametrize('i', range(10)) def test(self, i): pass """) result = testdir.runpytest('-n2', '--dist=loadscope', '-v') assert get_workers_and_test_count_by_prefix( 'test_a.py::TestA', result.outlines) in ({'gw0': 10}, {'gw1': 10}) assert get_workers_and_test_count_by_prefix( 'test_a.py::TestB', result.outlines) in ({'gw0': 10}, {'gw1': 10}) def test_module_single_start(self, testdir): """Fix test suite never finishing in case all workers start with a single test (#277).""" test_file1 = """ import pytest def test(): pass """ test_file2 = """ import pytest def test_1(): pass def test_2(): pass """ testdir.makepyfile( test_a=test_file1, test_b=test_file1, test_c=test_file2 ) result = testdir.runpytest('-n2', '--dist=loadscope', '-v') a = get_workers_and_test_count_by_prefix('test_a.py::test', result.outlines) b = get_workers_and_test_count_by_prefix('test_b.py::test', result.outlines) c1 = get_workers_and_test_count_by_prefix('test_c.py::test_1', result.outlines) c2 = get_workers_and_test_count_by_prefix('test_c.py::test_2', result.outlines) assert a 
@pytest.mark.parametrize('tb', ['auto', 'long', 'short', 'no', 'line', 'native'])
def test_error_report_styles(testdir, tb):
    testdir.makepyfile("""
        import pytest
        def test_error_report_styles():
            raise RuntimeError('some failure happened')
    """)
    result = testdir.runpytest('-n1', '--tb=%s' % tb)
    if tb != 'no':
        result.stdout.fnmatch_lines('*some failure happened*')
    result.assert_outcomes(failed=1)


def test_color_yes_collection_on_non_atty(testdir, request):
    """skip collect progress report when working on non-terminals.

    Similar to pytest-dev/pytest#1397
    """
    tr = request.config.pluginmanager.getplugin("terminalreporter")
    if not hasattr(tr, 'isatty'):
        pytest.skip('only valid for newer pytest versions')
    testdir.makepyfile("""
        import pytest
        @pytest.mark.parametrize('i', range(10))
        def test_this(i):
            assert 1
    """)
    args = ['--color=yes', '-n2']
    result = testdir.runpytest(*args)
    assert 'test session starts' in result.stdout.str()
    assert '\x1b[1m' in result.stdout.str()
    assert 'gw0 [10] / gw1 [10]' in result.stdout.str()
    assert 'gw0 C / gw1 C' not in result.stdout.str()


def test_internal_error_with_maxfail(testdir):
    """
    Internal error when using --maxfail option (#62, #65).
    """
    testdir.makepyfile("""
        import pytest

        @pytest.fixture(params=['1', '2'])
        def crasher():
            raise RuntimeError

        def test_aaa0(crasher):
            pass
        def test_aaa1(crasher):
            pass
    """)
    result = testdir.runpytest_subprocess('--maxfail=1', '-n1')
    result.stdout.fnmatch_lines(['* 1 error in *'])
    assert 'INTERNALERROR' not in result.stderr.str()


class TestLoadScope:
    def test_by_module(self, testdir):
        test_file = """
            import pytest
            @pytest.mark.parametrize('i', range(10))
            def test(i):
                pass
        """
        testdir.makepyfile(
            test_a=test_file,
            test_b=test_file,
        )
        result = testdir.runpytest('-n2', '--dist=loadscope', '-v')
        assert get_workers_and_test_count_by_prefix(
            'test_a.py::test', result.outlines) in ({'gw0': 10}, {'gw1': 10})
        assert get_workers_and_test_count_by_prefix(
            'test_b.py::test', result.outlines) in ({'gw0': 10}, {'gw1': 10})

    def test_by_class(self, testdir):
        testdir.makepyfile(test_a="""
            import pytest
            class TestA:
                @pytest.mark.parametrize('i', range(10))
                def test(self, i):
                    pass

            class TestB:
                @pytest.mark.parametrize('i', range(10))
                def test(self, i):
                    pass
        """)
        result = testdir.runpytest('-n2', '--dist=loadscope', '-v')
        assert get_workers_and_test_count_by_prefix(
            'test_a.py::TestA', result.outlines) in ({'gw0': 10}, {'gw1': 10})
        assert get_workers_and_test_count_by_prefix(
            'test_a.py::TestB', result.outlines) in ({'gw0': 10}, {'gw1': 10})

    def test_module_single_start(self, testdir):
        """Fix test suite never finishing in case all workers start with a single test (#277)."""
        test_file1 = """
            import pytest
            def test():
                pass
        """
        test_file2 = """
            import pytest
            def test_1():
                pass
            def test_2():
                pass
        """
        testdir.makepyfile(
            test_a=test_file1,
            test_b=test_file1,
            test_c=test_file2
        )
        result = testdir.runpytest('-n2', '--dist=loadscope', '-v')
        a = get_workers_and_test_count_by_prefix('test_a.py::test', result.outlines)
        b = get_workers_and_test_count_by_prefix('test_b.py::test', result.outlines)
        c1 = get_workers_and_test_count_by_prefix('test_c.py::test_1', result.outlines)
        c2 = get_workers_and_test_count_by_prefix('test_c.py::test_2', result.outlines)
        assert a in ({'gw0': 1}, {'gw1': 1})
        assert b in ({'gw0': 1}, {'gw1': 1})
        assert a.items() != b.items()
        assert c1 == c2


class TestFileScope:
    def test_by_module(self, testdir):
        test_file = """
            import pytest
            class TestA:
                @pytest.mark.parametrize('i', range(10))
                def test(self, i):
                    pass

            class TestB:
                @pytest.mark.parametrize('i', range(10))
                def test(self, i):
                    pass
        """
        testdir.makepyfile(
            test_a=test_file,
            test_b=test_file,
        )
        result = testdir.runpytest('-n2', '--dist=loadfile', '-v')
        test_a_workers_and_test_count = get_workers_and_test_count_by_prefix(
            'test_a.py::TestA', result.outlines)
        test_b_workers_and_test_count = get_workers_and_test_count_by_prefix(
            'test_b.py::TestB', result.outlines)
        assert test_a_workers_and_test_count in ({'gw0': 10}, {'gw1': 0}) or \
            test_a_workers_and_test_count in ({'gw0': 0}, {'gw1': 10})
        assert test_b_workers_and_test_count in ({'gw0': 10}, {'gw1': 0}) or \
            test_b_workers_and_test_count in ({'gw0': 0}, {'gw1': 10})

    def test_by_class(self, testdir):
        testdir.makepyfile(test_a="""
            import pytest
            class TestA:
                @pytest.mark.parametrize('i', range(10))
                def test(self, i):
                    pass

            class TestB:
                @pytest.mark.parametrize('i', range(10))
                def test(self, i):
                    pass
        """)
        result = testdir.runpytest('-n2', '--dist=loadfile', '-v')
        test_a_workers_and_test_count = get_workers_and_test_count_by_prefix(
            'test_a.py::TestA', result.outlines)
        test_b_workers_and_test_count = get_workers_and_test_count_by_prefix(
            'test_a.py::TestB', result.outlines)
        assert test_a_workers_and_test_count in ({'gw0': 10}, {'gw1': 0}) or \
            test_a_workers_and_test_count in ({'gw0': 0}, {'gw1': 10})
        assert test_b_workers_and_test_count in ({'gw0': 10}, {'gw1': 0}) or \
            test_b_workers_and_test_count in ({'gw0': 0}, {'gw1': 10})

    def test_module_single_start(self, testdir):
        """Fix test suite never finishing in case all workers start with a single test (#277)."""
        test_file1 = """
            import pytest
            def test():
                pass
        """
        test_file2 = """
            import pytest
            def test_1():
                pass
            def test_2():
                pass
        """
        testdir.makepyfile(
            test_a=test_file1,
            test_b=test_file1,
            test_c=test_file2
        )
        result = testdir.runpytest('-n2', '--dist=loadfile', '-v')
        a = get_workers_and_test_count_by_prefix('test_a.py::test', result.outlines)
        b = get_workers_and_test_count_by_prefix('test_b.py::test', result.outlines)
        c1 = get_workers_and_test_count_by_prefix('test_c.py::test_1', result.outlines)
        c2 = get_workers_and_test_count_by_prefix('test_c.py::test_2', result.outlines)
        assert a in ({'gw0': 1}, {'gw1': 1})
        assert b in ({'gw0': 1}, {'gw1': 1})
        assert a.items() != b.items()
        assert c1 == c2
def parse_tests_and_workers_from_output(lines):
    result = []
    for line in lines:
        # example match: "[gw0] PASSED test_a.py::test[7]"
        m = re.match(r'''
            \[(gw\d)\]  # worker
            \s*
            (?:\[\s*\d+%\])?  # progress indicator (pytest >=3.3)
            \s(.*?)     # status string ("PASSED")
            \s(.*::.*)  # nodeid
        ''', line.strip(), re.VERBOSE)
        if m:
            worker, status, nodeid = m.groups()
            result.append((worker, status, nodeid))
    return result


def get_workers_and_test_count_by_prefix(prefix, lines, expected_status='PASSED'):
    result = {}
    for worker, status, nodeid in parse_tests_and_workers_from_output(lines):
        if expected_status == status and nodeid.startswith(prefix):
            result[worker] = result.get(worker, 0) + 1
    return result

pytest-xdist-1.22.1/testing/conftest.py

import py
import pytest
import execnet


@pytest.fixture(scope="session", autouse=True)
def _ensure_imports():
    # we import some modules because pytest-2.8's testdir fixture
    # will unload all modules after each test and this causes
    # (unknown) problems with execnet.Group()
    execnet.Group
    execnet.makegateway


pytest_plugins = "pytester"
# rsyncdirs = ['.', '../xdist', py.path.local(execnet.__file__).dirpath()]


@pytest.fixture(autouse=True)
def _divert_atexit(request, monkeypatch):
    import atexit
    finalizers = []

    def finish():
        while finalizers:
            finalizers.pop()()

    monkeypatch.setattr(atexit, "register", finalizers.append)
    request.addfinalizer(finish)


def pytest_addoption(parser):
    parser.addoption('--gx', action="append", dest="gspecs",
                     help="add a global test environment, XSpec-syntax. ")


@pytest.fixture
def specssh(request):
    return getspecssh(request.config)


@pytest.fixture
def testdir(testdir):
    # pytest before 2.8 did not have a runpytest_subprocess
    if not hasattr(testdir, "runpytest_subprocess"):
        testdir.runpytest_subprocess = testdir.runpytest
    return testdir


# configuration information for tests
def getgspecs(config):
    return [execnet.XSpec(spec) for spec in config.getvalueorskip("gspecs")]


def getspecssh(config):
    xspecs = getgspecs(config)
    for spec in xspecs:
        if spec.ssh:
            if not py.path.local.sysfind("ssh"):
                py.test.skip("command not found: ssh")
            return str(spec)
    py.test.skip("need '--gx ssh=...'")


def getsocketspec(config):
    xspecs = getgspecs(config)
    for spec in xspecs:
        if spec.socket:
            return spec
    py.test.skip("need '--gx socket=...'")

pytest-xdist-1.22.1/testing/test_dsession.py

from xdist.dsession import DSession
from xdist.report import report_collection_diff
from xdist.scheduler import (
    EachScheduling,
    LoadScheduling,
)

import py
import pytest
import execnet

XSpec = execnet.XSpec


def run(item, node, excinfo=None):
    runner = item.config.pluginmanager.getplugin("runner")
    rep = runner.ItemTestReport(item=item, excinfo=excinfo, when="call")
    rep.node = node
    return rep


class MockGateway:
    _count = 0

    def __init__(self):
        self.id = str(self._count)
        self._count += 1


class MockNode:
    def __init__(self):
        self.sent = []
        self.gateway = MockGateway()
        self._shutdown = False

    def send_runtest_some(self, indices):
        self.sent.extend(indices)

    def send_runtest_all(self):
        self.sent.append("ALL")

    def shutdown(self):
        self._shutdown = True

    @property
    def shutting_down(self):
        return self._shutdown


def dumpqueue(queue):
    while queue.qsize():
        print(queue.get())


class TestEachScheduling:
    def test_schedule_load_simple(self, testdir):
        node1 = MockNode()
        node2 = MockNode()
        config = testdir.parseconfig("--tx=2*popen")
        sched = EachScheduling(config)
        sched.add_node(node1)
        sched.add_node(node2)
        collection = ["a.py::test_1", ]
        assert not sched.collection_is_completed
        sched.add_node_collection(node1, collection)
        assert not sched.collection_is_completed
        sched.add_node_collection(node2, collection)
        assert sched.collection_is_completed
        assert sched.node2collection[node1] == collection
        assert sched.node2collection[node2] == collection
        sched.schedule()
        assert sched.tests_finished
        assert node1.sent == ['ALL']
        assert node2.sent == ['ALL']
        sched.mark_test_complete(node1, 0)
        assert sched.tests_finished
        sched.mark_test_complete(node2, 0)
        assert sched.tests_finished

    def test_schedule_remove_node(self, testdir):
        node1 = MockNode()
        config = testdir.parseconfig("--tx=popen")
        sched = EachScheduling(config)
        sched.add_node(node1)
        collection = ["a.py::test_1", ]
        assert not sched.collection_is_completed
        sched.add_node_collection(node1, collection)
        assert sched.collection_is_completed
        assert sched.node2collection[node1] == collection
        sched.schedule()
        assert sched.tests_finished
        crashitem = sched.remove_node(node1)
        assert crashitem
        assert sched.tests_finished
        assert not sched.nodes
class TestLoadScheduling:
    def test_schedule_load_simple(self, testdir):
        config = testdir.parseconfig("--tx=2*popen")
        sched = LoadScheduling(config)
        sched.add_node(MockNode())
        sched.add_node(MockNode())
        node1, node2 = sched.nodes
        collection = ["a.py::test_1", "a.py::test_2"]
        assert not sched.collection_is_completed
        sched.add_node_collection(node1, collection)
        assert not sched.collection_is_completed
        sched.add_node_collection(node2, collection)
        assert sched.collection_is_completed
        assert sched.node2collection[node1] == collection
        assert sched.node2collection[node2] == collection
        sched.schedule()
        assert not sched.pending
        assert sched.tests_finished
        assert len(node1.sent) == 1
        assert len(node2.sent) == 1
        assert node1.sent == [0]
        assert node2.sent == [1]
        sched.mark_test_complete(node1, node1.sent[0])
        assert sched.tests_finished

    def test_schedule_batch_size(self, testdir):
        config = testdir.parseconfig("--tx=2*popen")
        sched = LoadScheduling(config)
        sched.add_node(MockNode())
        sched.add_node(MockNode())
        node1, node2 = sched.nodes
        col = ["xyz"] * 6
        sched.add_node_collection(node1, col)
        sched.add_node_collection(node2, col)
        sched.schedule()
        # assert not sched.tests_finished
        sent1 = node1.sent
        sent2 = node2.sent
        assert sent1 == [0, 2]
        assert sent2 == [1, 3]
        assert sched.pending == [4, 5]
        assert sched.node2pending[node1] == sent1
        assert sched.node2pending[node2] == sent2
        assert len(sched.pending) == 2
        sched.mark_test_complete(node1, 0)
        assert node1.sent == [0, 2, 4]
        assert sched.pending == [5]
        assert node2.sent == [1, 3]
        sched.mark_test_complete(node1, 2)
        assert node1.sent == [0, 2, 4, 5]
        assert not sched.pending
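    # ------------------------------------------------------------------
    # Editor's note (sketch of the behaviour the two tests above pin
    # down, not part of the original suite): LoadScheduling initially
    # sends each node two tests, then hands out one further test per
    # completion, which is why with 6 tests and 2 nodes the first round
    # is [0, 2] / [1, 3] with [4, 5] left pending.  The initial round
    # amounts to roughly:
    #
    #   for i, node in enumerate(nodes):
    #       node.send_runtest_some([i, i + len(nodes)])
    #
    # (illustrative only; the real implementation lives in
    # xdist/scheduler/load.py and also handles fewer tests than slots).
    # ------------------------------------------------------------------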
    def test_schedule_fewer_tests_than_nodes(self, testdir):
        config = testdir.parseconfig("--tx=2*popen")
        sched = LoadScheduling(config)
        sched.add_node(MockNode())
        sched.add_node(MockNode())
        sched.add_node(MockNode())
        node1, node2, node3 = sched.nodes
        col = ["xyz"] * 2
        sched.add_node_collection(node1, col)
        sched.add_node_collection(node2, col)
        sched.schedule()
        # assert not sched.tests_finished
        sent1 = node1.sent
        sent2 = node2.sent
        sent3 = node3.sent
        assert sent1 == [0]
        assert sent2 == [1]
        assert sent3 == []
        assert not sched.pending

    def test_schedule_fewer_than_two_tests_per_node(self, testdir):
        config = testdir.parseconfig("--tx=2*popen")
        sched = LoadScheduling(config)
        sched.add_node(MockNode())
        sched.add_node(MockNode())
        sched.add_node(MockNode())
        node1, node2, node3 = sched.nodes
        col = ["xyz"] * 5
        sched.add_node_collection(node1, col)
        sched.add_node_collection(node2, col)
        sched.schedule()
        # assert not sched.tests_finished
        sent1 = node1.sent
        sent2 = node2.sent
        sent3 = node3.sent
        assert sent1 == [0, 3]
        assert sent2 == [1, 4]
        assert sent3 == [2]
        assert not sched.pending

    def test_add_remove_node(self, testdir):
        node = MockNode()
        config = testdir.parseconfig("--tx=popen")
        sched = LoadScheduling(config)
        sched.add_node(node)
        collection = ["test_file.py::test_func"]
        sched.add_node_collection(node, collection)
        assert sched.collection_is_completed
        sched.schedule()
        assert not sched.pending
        crashitem = sched.remove_node(node)
        assert crashitem == collection[0]

    def test_different_tests_collected(self, testdir):
        """
        Test that LoadScheduling is reporting collection errors when
        different test ids are collected by workers.
        """
        class CollectHook(object):
            """
            Dummy hook that stores collection reports.
            """
            def __init__(self):
                self.reports = []

            def pytest_collectreport(self, report):
                self.reports.append(report)

        collect_hook = CollectHook()
        config = testdir.parseconfig("--tx=2*popen")
        config.pluginmanager.register(collect_hook, "collect_hook")
        node1 = MockNode()
        node2 = MockNode()
        sched = LoadScheduling(config)
        sched.add_node(node1)
        sched.add_node(node2)
        sched.add_node_collection(node1, ["a.py::test_1"])
        sched.add_node_collection(node2, ["a.py::test_2"])
        sched.schedule()
        assert len(collect_hook.reports) == 1
        rep = collect_hook.reports[0]
        assert 'Different tests were collected between' in rep.longrepr


class TestDistReporter:
    @py.test.mark.xfail
    def test_rsync_printing(self, testdir, linecomp):
        config = testdir.parseconfig()
        from _pytest.pytest_terminal import TerminalReporter
        rep = TerminalReporter(config, file=linecomp.stringio)
        config.pluginmanager.register(rep, "terminalreporter")
        dsession = DSession(config)

        class gw1:
            id = "X1"
            spec = execnet.XSpec("popen")

        class gw2:
            id = "X2"
            spec = execnet.XSpec("popen")

        # class rinfo:
        #     version_info = (2, 5, 1, 'final', 0)
        #     executable = "hello"
        #     platform = "xyz"
        #     cwd = "qwe"

        # dsession.pytest_xdist_newgateway(gw1, rinfo)
        # linecomp.assert_contains_lines([
        #     "*X1*popen*xyz*2.5*"
        # ])
        dsession.pytest_xdist_rsyncstart(source="hello", gateways=[gw1, gw2])
        linecomp.assert_contains_lines(["[X1,X2] rsyncing: hello", ])


def test_report_collection_diff_equal():
    """Test reporting of equal collections."""
    from_collection = to_collection = ['aaa', 'bbb', 'ccc']
    assert report_collection_diff(from_collection, to_collection, 1, 2) is None
def test_report_collection_diff_different():
    """Test reporting of different collections."""
    from_collection = ['aaa', 'bbb', 'ccc', 'YYY']
    to_collection = ['aZa', 'bbb', 'XXX', 'ccc']
    error_message = (
        'Different tests were collected between 1 and 2. The difference is:\n'
        '--- 1\n'
        '\n'
        '+++ 2\n'
        '\n'
        '@@ -1,4 +1,4 @@\n'
        '\n'
        '-aaa\n'
        '+aZa\n'
        ' bbb\n'
        '+XXX\n'
        ' ccc\n'
        '-YYY')

    msg = report_collection_diff(from_collection, to_collection, '1', '2')
    assert msg == error_message


@pytest.mark.xfail(reason="duplicate test ids not supported yet")
def test_pytest_issue419(testdir):
    testdir.makepyfile("""
        import pytest

        @pytest.mark.parametrize('birth_year', [1988, 1988, ])
        def test_2011_table(birth_year):
            pass
    """)
    reprec = testdir.inline_run("-n1")
    reprec.assertoutcome(passed=2)
    assert 0

pytest-xdist-1.22.1/testing/test_looponfail.py

import py

from xdist.looponfail import RemoteControl
from xdist.looponfail import StatRecorder


class TestStatRecorder:
    def test_filechange(self, tmpdir):
        tmp = tmpdir
        hello = tmp.ensure("hello.py")
        sd = StatRecorder([tmp])
        changed = sd.check()
        assert not changed

        hello.write("world")
        changed = sd.check()
        assert changed

        (hello + "c").write("hello")
        changed = sd.check()
        assert not changed

        p = tmp.ensure("new.py")
        changed = sd.check()
        assert changed

        p.remove()
        changed = sd.check()
        assert changed

        tmp.join("a", "b", "c.py").ensure()
        changed = sd.check()
        assert changed

        tmp.join("a", "c.txt").ensure()
        changed = sd.check()
        assert changed
        changed = sd.check()
        assert not changed

        tmp.join("a").remove()
        changed = sd.check()
        assert changed

    def test_dirchange(self, tmpdir):
        tmp = tmpdir
        tmp.ensure("dir", "hello.py")
        sd = StatRecorder([tmp])
        assert not sd.fil(tmp.join("dir"))

    def test_filechange_deletion_race(self, tmpdir, monkeypatch):
        tmp = tmpdir
        sd = StatRecorder([tmp])
        changed = sd.check()
        assert not changed

        p = tmp.ensure("new.py")
        changed = sd.check()
        assert changed

        p.remove()
        # make check()'s visit() call return our just removed
        # path as if we were in a race condition
        monkeypatch.setattr(tmp, 'visit', lambda *args: [p])

        changed = sd.check()
        assert changed

    def test_pycremoval(self, tmpdir):
        tmp = tmpdir
        hello = tmp.ensure("hello.py")
        sd = StatRecorder([tmp])
        changed = sd.check()
        assert not changed

        pycfile = hello + "c"
        pycfile.ensure()
        hello.write("world")
        changed = sd.check()
        assert changed
        assert not pycfile.check()

    def test_waitonchange(self, tmpdir, monkeypatch):
        tmp = tmpdir
        sd = StatRecorder([tmp])

        ret_values = [True, False]
        monkeypatch.setattr(StatRecorder, 'check',
                            lambda self: ret_values.pop())
        sd.waitonchange(checkinterval=0.2)
        assert not ret_values
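# ----------------------------------------------------------------------
# Editor's sketch (illustrative, not part of the original suite): the
# StatRecorder API exercised above is polling-based -- it records the
# stat() state of a set of root directories, then answers whether
# anything changed since the last call:
#
#   sd = StatRecorder([py.path.local("src")])   # hypothetical root
#   if sd.check():                              # True if files changed
#       rerun_failing_tests()                   # hypothetical callback
#   sd.waitonchange(checkinterval=0.5)          # block until a change
# ----------------------------------------------------------------------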
class TestRemoteControl:
    def test_nofailures(self, testdir):
        item = testdir.getitem("def test_func(): pass\n")
        control = RemoteControl(item.config)
        control.setup()
        topdir, failures = control.runsession()[:2]
        assert not failures

    def test_failures_somewhere(self, testdir):
        item = testdir.getitem("def test_func():\n assert 0\n")
        control = RemoteControl(item.config)
        control.setup()
        failures = control.runsession()
        assert failures
        control.setup()
        item.fspath.write("def test_func():\n assert 1\n")
        removepyc(item.fspath)
        topdir, failures = control.runsession()[:2]
        assert not failures

    def test_failure_change(self, testdir):
        modcol = testdir.getitem("""
            def test_func():
                assert 0
        """)
        control = RemoteControl(modcol.config)
        control.loop_once()
        assert control.failures
        modcol.fspath.write(py.code.Source("""
            def test_func():
                assert 1
            def test_new():
                assert 0
        """))
        removepyc(modcol.fspath)
        control.loop_once()
        assert not control.failures
        control.loop_once()
        assert control.failures
        assert str(control.failures).find("test_new") != -1

    def test_failure_subdir_no_init(self, testdir):
        modcol = testdir.getitem("""
            def test_func():
                assert 0
        """)
        parent = modcol.fspath.dirpath().dirpath()
        parent.chdir()
        modcol.config.args = [py.path.local(x).relto(parent)
                              for x in modcol.config.args]
        control = RemoteControl(modcol.config)
        control.loop_once()
        assert control.failures
        control.loop_once()
        assert control.failures
class TestLooponFailing:
    def test_looponfail_from_fail_to_ok(self, testdir):
        modcol = testdir.getmodulecol("""
            def test_one():
                x = 0
                assert x == 1
            def test_two():
                assert 1
        """)
        remotecontrol = RemoteControl(modcol.config)
        remotecontrol.loop_once()
        assert len(remotecontrol.failures) == 1

        modcol.fspath.write(py.code.Source("""
            def test_one():
                assert 1
            def test_two():
                assert 1
        """))
        removepyc(modcol.fspath)
        remotecontrol.loop_once()
        assert not remotecontrol.failures

    def test_looponfail_from_one_to_two_tests(self, testdir):
        modcol = testdir.getmodulecol("""
            def test_one():
                assert 0
        """)
        remotecontrol = RemoteControl(modcol.config)
        remotecontrol.loop_once()
        assert len(remotecontrol.failures) == 1
        assert 'test_one' in remotecontrol.failures[0]

        modcol.fspath.write(py.code.Source("""
            def test_one():
                assert 1  # passes now
            def test_two():
                assert 0  # new and fails
        """))
        removepyc(modcol.fspath)
        remotecontrol.loop_once()
        assert len(remotecontrol.failures) == 0
        remotecontrol.loop_once()
        assert len(remotecontrol.failures) == 1
        assert 'test_one' not in remotecontrol.failures[0]
        assert 'test_two' in remotecontrol.failures[0]

    @py.test.mark.xfail(py.test.__version__ >= "3.1",
                        reason="broken by pytest 3.1+")
    def test_looponfail_removed_test(self, testdir):
        modcol = testdir.getmodulecol("""
            def test_one():
                assert 0
            def test_two():
                assert 0
        """)
        remotecontrol = RemoteControl(modcol.config)
        remotecontrol.loop_once()
        assert len(remotecontrol.failures) == 2

        modcol.fspath.write(py.code.Source("""
            def test_xxx():  # renamed test
                assert 0
            def test_two():
                assert 1  # pass now
        """))
        removepyc(modcol.fspath)
        remotecontrol.loop_once()
        assert len(remotecontrol.failures) == 0

        remotecontrol.loop_once()
        assert len(remotecontrol.failures) == 1

    def test_looponfail_multiple_errors(self, testdir, monkeypatch):
        modcol = testdir.getmodulecol("""
            def test_one():
                assert 0
        """)
        remotecontrol = RemoteControl(modcol.config)
        orig_runsession = remotecontrol.runsession

        def runsession_dups():
            # twisted.trial test cases may report multiple errors.
            failures, reports, collection_failed = orig_runsession()
            print(failures)
            return failures * 2, reports, collection_failed

        monkeypatch.setattr(remotecontrol, 'runsession', runsession_dups)
        remotecontrol.loop_once()
        assert len(remotecontrol.failures) == 1


class TestFunctional:
    def test_fail_to_ok(self, testdir):
        p = testdir.makepyfile("""
            def test_one():
                x = 0
                assert x == 1
        """)
        # p = testdir.mkdir("sub").join(p1.basename)
        # p1.move(p)
        child = testdir.spawn_pytest("-f %s --traceconfig" % p)
        child.expect("def test_one")
        child.expect("x == 1")
        child.expect("1 failed")
        child.expect("### LOOPONFAILING ####")
        child.expect("waiting for changes")
        p.write(py.code.Source("""
            def test_one():
                x = 1
                assert x == 1
        """))
        child.expect(".*1 passed.*")
        child.kill(15)

    def test_xfail_passes(self, testdir):
        p = testdir.makepyfile("""
            import py
            @py.test.mark.xfail
            def test_one():
                pass
        """)
        child = testdir.spawn_pytest("-f %s" % p)
        child.expect("1 xpass")
        # child.expect("### LOOPONFAILING ####")
        child.expect("waiting for changes")
        child.kill(15)


def removepyc(path):
    # XXX damn those pyc files
    pyc = path + "c"
    if pyc.check():
        pyc.remove()
    c = path.dirpath("__pycache__")
    if c.check():
        c.remove()

pytest-xdist-1.22.1/testing/test_newhooks.py

import pytest


class TestHooks:
    @pytest.fixture(autouse=True)
    def create_test_file(self, testdir):
        testdir.makepyfile("""
            import os
            def test_a(): pass
            def test_b(): pass
            def test_c(): pass
        """)

    def test_runtest_logreport(self, testdir):
        """Test that log reports from pytest_runtest_logreport when running
        with xdist contain "node", "nodeid" and "worker_id" attributes. (#8)
        """
        testdir.makeconftest("""
            def pytest_runtest_logreport(report):
                if hasattr(report, 'node'):
                    if report.when == "call":
                        workerid = report.node.workerinput['workerid']
                        if workerid != report.worker_id:
                            print("HOOK: Worker id mismatch: %s %s"
                                  % (workerid, report.worker_id))
                        else:
                            print("HOOK: %s %s"
                                  % (report.nodeid, report.worker_id))
        """)
        res = testdir.runpytest('-n1', '-s')
        res.stdout.fnmatch_lines([
            '*HOOK: test_runtest_logreport.py::test_a gw0*',
            '*HOOK: test_runtest_logreport.py::test_b gw0*',
            '*HOOK: test_runtest_logreport.py::test_c gw0*',
            '*3 passed*',
        ])
""" testdir.makeconftest(""" def pytest_xdist_node_collection_finished(node, ids): workerid = node.workerinput['workerid'] stripped_ids = [x.split('::')[1] for x in ids] print("HOOK: %s %s" % (workerid, ', '.join(stripped_ids))) """) res = testdir.runpytest('-n2', '-s') res.stdout.fnmatch_lines_random([ '*HOOK: gw0 test_a, test_b, test_c', '*HOOK: gw1 test_a, test_b, test_c', ]) res.stdout.fnmatch_lines([ '*3 passed*', ]) pytest-xdist-1.22.1/testing/test_plugin.py0000644000372000037200000001015613242644556021520 0ustar travistravis00000000000000import py import execnet from xdist.workermanage import NodeManager def test_dist_incompatibility_messages(testdir): result = testdir.runpytest("--pdb", "--looponfail") assert result.ret != 0 result = testdir.runpytest("--pdb", "-n", "3") assert result.ret != 0 assert "incompatible" in result.stderr.str() result = testdir.runpytest("--pdb", "-d", "--tx", "popen") assert result.ret != 0 assert "incompatible" in result.stderr.str() def test_dist_options(testdir): from xdist.plugin import pytest_cmdline_main as check_options config = testdir.parseconfigure("-n 2") check_options(config) assert config.option.dist == "load" assert config.option.tx == ['popen'] * 2 config = testdir.parseconfigure("--numprocesses", "2") check_options(config) assert config.option.dist == "load" assert config.option.tx == ['popen'] * 2 config = testdir.parseconfigure("-d") check_options(config) assert config.option.dist == "load" def test_auto_detect_cpus(testdir, monkeypatch): import os if hasattr(os, 'cpu_count'): monkeypatch.setattr(os, 'cpu_count', lambda: 99) else: import multiprocessing monkeypatch.setattr(multiprocessing, 'cpu_count', lambda: 99) config = testdir.parseconfigure("-n2") assert config.getoption('numprocesses') == 2 config = testdir.parseconfigure("-nauto") assert config.getoption('numprocesses') == 99 def test_boxed_with_collect_only(testdir): from xdist.plugin import pytest_cmdline_main as check_options config = testdir.parseconfigure("-n1", "--boxed") check_options(config) assert config.option.forked config = testdir.parseconfigure("-n1", "--collect-only") check_options(config) assert not config.option.forked config = testdir.parseconfigure("-n1", "--boxed", "--collect-only") check_options(config) assert config.option.forked def test_dsession_with_collect_only(testdir): from xdist.plugin import pytest_cmdline_main as check_options from xdist.plugin import pytest_configure as configure config = testdir.parseconfigure("-n1") check_options(config) configure(config) assert config.pluginmanager.hasplugin("dsession") config = testdir.parseconfigure("-n1", "--collect-only") check_options(config) configure(config) assert not config.pluginmanager.hasplugin("dsession") class TestDistOptions: def test_getxspecs(self, testdir): config = testdir.parseconfigure("--tx=popen", "--tx", "ssh=xyz") nodemanager = NodeManager(config) xspecs = nodemanager._getxspecs() assert len(xspecs) == 2 print(xspecs) assert xspecs[0].popen assert xspecs[1].ssh == "xyz" def test_xspecs_multiplied(self, testdir): config = testdir.parseconfigure("--tx=3*popen", ) xspecs = NodeManager(config)._getxspecs() assert len(xspecs) == 3 assert xspecs[1].popen def test_getrsyncdirs(self, testdir): config = testdir.parseconfigure('--rsyncdir=' + str(testdir.tmpdir)) nm = NodeManager(config, specs=[execnet.XSpec("popen")]) assert not nm._getrsyncdirs() nm = NodeManager(config, specs=[execnet.XSpec("popen//chdir=qwe")]) assert nm.roots assert testdir.tmpdir in nm.roots def test_getrsyncignore(self, 
    def test_getrsyncignore(self, testdir):
        config = testdir.parseconfigure('--rsyncignore=fo*')
        nm = NodeManager(config, specs=[execnet.XSpec("popen//chdir=qwe")])
        assert 'fo*' in nm.rsyncoptions['ignores']

    def test_getrsyncdirs_with_conftest(self, testdir):
        p = py.path.local()
        for bn in 'x y z'.split():
            p.mkdir(bn)
        testdir.makeini("""
            [pytest]
            rsyncdirs= x
        """)
        config = testdir.parseconfigure(
            testdir.tmpdir, '--rsyncdir=y', '--rsyncdir=z')
        nm = NodeManager(config, specs=[execnet.XSpec("popen//chdir=xyz")])
        roots = nm._getrsyncdirs()
        # assert len(roots) == 3 + 1  # pylib
        assert py.path.local('y') in roots
        assert py.path.local('z') in roots
        assert testdir.tmpdir.join('x') in roots

pytest-xdist-1.22.1/testing/test_remote.py

import py
import pytest
from xdist.workermanage import WorkerController, unserialize_report
from xdist.remote import serialize_report
import execnet
import marshal

queue = py.builtin._tryimport("queue", "Queue")

WAIT_TIMEOUT = 10.0


def check_marshallable(d):
    try:
        marshal.dumps(d)
    except ValueError:
        py.std.pprint.pprint(d)
        raise ValueError("not marshallable")


class EventCall:
    def __init__(self, eventcall):
        self.name, self.kwargs = eventcall

    def __str__(self):
        # reconstructed repr; the extraction had stripped the angle-bracket
        # literal from this format string
        return "<EventCall %s(**%s)>" % (self.name, self.kwargs)


class WorkerSetup:
    use_callback = False

    def __init__(self, request, testdir):
        self.request = request
        self.testdir = testdir
        self.events = queue.Queue()

    def setup(self, ):
        self.testdir.chdir()
        # import os ; os.environ['EXECNET_DEBUG'] = "2"
        self.gateway = execnet.makegateway()
        self.config = config = self.testdir.parseconfigure()
        putevent = self.use_callback and self.events.put or None

        class DummyMananger:
            specs = [0, 1]

        self.slp = WorkerController(DummyMananger, self.gateway, config,
                                    putevent)
        self.request.addfinalizer(self.slp.ensure_teardown)
        self.slp.setup()

    def popevent(self, name=None):
        while 1:
            if self.use_callback:
                data = self.events.get(timeout=WAIT_TIMEOUT)
            else:
                data = self.slp.channel.receive(timeout=WAIT_TIMEOUT)
            ev = EventCall(data)
            if name is None or ev.name == name:
                return ev
            print("skipping %s" % (ev, ))

    def sendcommand(self, name, **kwargs):
        self.slp.sendcommand(name, **kwargs)


@pytest.fixture
def worker(request, testdir):
    return WorkerSetup(request, testdir)


@pytest.mark.xfail(reason='#59')
def test_remoteinitconfig(testdir):
    from xdist.remote import remote_initconfig
    config1 = testdir.parseconfig()
    config2 = remote_initconfig(config1.option.__dict__, config1.args)
    assert config2.option.__dict__ == config1.option.__dict__
    assert config2.pluginmanager.getplugin("terminal") in (-1, None)


class TestReportSerialization:
    def test_xdist_longrepr_to_str_issue_241(self, testdir):
        testdir.makepyfile("""
            import os
            def test_a(): assert False
            def test_b(): pass
        """)
        testdir.makeconftest("""
            def pytest_runtest_logreport(report):
                print(report.longrepr)
        """)
        res = testdir.runpytest('-n1', '-s')
        res.stdout.fnmatch_lines([
            '*1 failed, 1 passed *'
        ])

    def test_xdist_report_longrepr_reprcrash_130(self, testdir):
        reprec = testdir.inline_runsource("""
            import py
            def test_fail():
                assert False, 'Expected Message'
        """)
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 3
        rep = reports[1]
        added_section = ('Failure Metadata', str("metadata metadata"), "*")
        rep.longrepr.sections.append(added_section)
        d = serialize_report(rep)
        check_marshallable(d)
        a = unserialize_report("testreport", d)
        # Check assembled == rep
        assert a.__dict__.keys() == rep.__dict__.keys()
getattr(a, key) == getattr(rep, key) assert rep.longrepr.reprcrash.lineno == a.longrepr.reprcrash.lineno assert rep.longrepr.reprcrash.message == a.longrepr.reprcrash.message assert rep.longrepr.reprcrash.path == a.longrepr.reprcrash.path assert rep.longrepr.reprtraceback.entrysep \ == a.longrepr.reprtraceback.entrysep assert rep.longrepr.reprtraceback.extraline \ == a.longrepr.reprtraceback.extraline assert rep.longrepr.reprtraceback.style \ == a.longrepr.reprtraceback.style assert rep.longrepr.sections == a.longrepr.sections # Missing section attribute PR171 assert added_section in a.longrepr.sections def test_reprentries_serialization_170(self, testdir): from _pytest._code.code import ReprEntry reprec = testdir.inline_runsource(""" def test_repr_entry(): x = 0 assert x """, '--showlocals') reports = reprec.getreports("pytest_runtest_logreport") assert len(reports) == 3 rep = reports[1] d = serialize_report(rep) a = unserialize_report("testreport", d) rep_entries = rep.longrepr.reprtraceback.reprentries a_entries = a.longrepr.reprtraceback.reprentries for i in range(len(a_entries)): assert isinstance(rep_entries[i], ReprEntry) assert rep_entries[i].lines == a_entries[i].lines assert rep_entries[i].localssep == a_entries[i].localssep assert rep_entries[i].reprfileloc.lineno == a_entries[i].reprfileloc.lineno assert rep_entries[i].reprfileloc.message == a_entries[i].reprfileloc.message assert rep_entries[i].reprfileloc.path == a_entries[i].reprfileloc.path assert rep_entries[i].reprfuncargs.args == a_entries[i].reprfuncargs.args assert rep_entries[i].reprlocals.lines == a_entries[i].reprlocals.lines assert rep_entries[i].style == a_entries[i].style def test_reprentries_serialization_196(self, testdir): from _pytest._code.code import ReprEntryNative reprec = testdir.inline_runsource(""" def test_repr_entry_native(): x = 0 assert x """, '--tb=native') reports = reprec.getreports("pytest_runtest_logreport") assert len(reports) == 3 rep = reports[1] d = serialize_report(rep) a = unserialize_report("testreport", d) rep_entries = rep.longrepr.reprtraceback.reprentries a_entries = a.longrepr.reprtraceback.reprentries for i in range(len(a_entries)): assert isinstance(rep_entries[i], ReprEntryNative) assert rep_entries[i].lines == a_entries[i].lines def test_itemreport_outcomes(self, testdir): reprec = testdir.inline_runsource(""" import py def test_pass(): pass def test_fail(): 0/0 @py.test.mark.skipif("True") def test_skip(): pass def test_skip_imperative(): py.test.skip("hello") @py.test.mark.xfail("True") def test_xfail(): 0/0 def test_xfail_imperative(): py.test.xfail("hello") """) reports = reprec.getreports("pytest_runtest_logreport") assert len(reports) == 17 # with setup/teardown "passed" reports for rep in reports: d = serialize_report(rep) check_marshallable(d) newrep = unserialize_report("testreport", d) assert newrep.passed == rep.passed assert newrep.failed == rep.failed assert newrep.skipped == rep.skipped if newrep.skipped and not hasattr(newrep, "wasxfail"): assert len(newrep.longrepr) == 3 assert newrep.outcome == rep.outcome assert newrep.when == rep.when assert newrep.keywords == rep.keywords if rep.failed: assert newrep.longreprtext == rep.longreprtext def test_collectreport_passed(self, testdir): reprec = testdir.inline_runsource("def test_func(): pass") reports = reprec.getreports("pytest_collectreport") for rep in reports: d = serialize_report(rep) check_marshallable(d) newrep = unserialize_report("collectreport", d) assert newrep.passed == rep.passed assert newrep.failed == 
rep.failed assert newrep.skipped == rep.skipped def test_collectreport_fail(self, testdir): reprec = testdir.inline_runsource("qwe abc") reports = reprec.getreports("pytest_collectreport") assert reports for rep in reports: d = serialize_report(rep) check_marshallable(d) newrep = unserialize_report("collectreport", d) assert newrep.passed == rep.passed assert newrep.failed == rep.failed assert newrep.skipped == rep.skipped if rep.failed: assert newrep.longrepr == str(rep.longrepr) def test_extended_report_deserialization(self, testdir): reprec = testdir.inline_runsource("qwe abc") reports = reprec.getreports("pytest_collectreport") assert reports for rep in reports: rep.extra = True d = serialize_report(rep) check_marshallable(d) newrep = unserialize_report("collectreport", d) assert newrep.extra assert newrep.passed == rep.passed assert newrep.failed == rep.failed assert newrep.skipped == rep.skipped if rep.failed: assert newrep.longrepr == str(rep.longrepr) class TestWorkerInteractor: def test_basic_collect_and_runtests(self, worker): worker.testdir.makepyfile(""" def test_func(): pass """) worker.setup() ev = worker.popevent() assert ev.name == "workerready" ev = worker.popevent() assert ev.name == "collectionstart" assert not ev.kwargs ev = worker.popevent("collectionfinish") assert ev.kwargs['topdir'] == worker.testdir.tmpdir ids = ev.kwargs['ids'] assert len(ids) == 1 worker.sendcommand("runtests", indices=list(range(len(ids)))) worker.sendcommand("shutdown") ev = worker.popevent("logstart") assert ev.kwargs["nodeid"].endswith("test_func") assert len(ev.kwargs["location"]) == 3 ev = worker.popevent("testreport") # setup ev = worker.popevent("testreport") assert ev.name == "testreport" rep = unserialize_report(ev.name, ev.kwargs['data']) assert rep.nodeid.endswith("::test_func") assert rep.passed assert rep.when == "call" ev = worker.popevent("workerfinished") assert 'workeroutput' in ev.kwargs @pytest.mark.skipif(pytest.__version__ >= '3.0', reason='skip at module level illegal in pytest 3.0') def test_remote_collect_skip(self, worker): worker.testdir.makepyfile(""" import py py.test.skip("hello") """) worker.setup() ev = worker.popevent("collectionstart") assert not ev.kwargs ev = worker.popevent() assert ev.name == "collectreport" ev = worker.popevent() assert ev.name == "collectreport" rep = unserialize_report(ev.name, ev.kwargs['data']) assert rep.skipped ev = worker.popevent("collectionfinish") assert not ev.kwargs['ids'] def test_remote_collect_fail(self, worker): worker.testdir.makepyfile("""aasd qwe""") worker.setup() ev = worker.popevent("collectionstart") assert not ev.kwargs ev = worker.popevent() assert ev.name == "collectreport" ev = worker.popevent() assert ev.name == "collectreport" rep = unserialize_report(ev.name, ev.kwargs['data']) assert rep.failed ev = worker.popevent("collectionfinish") assert not ev.kwargs['ids'] def test_runtests_all(self, worker): worker.testdir.makepyfile(""" def test_func(): pass def test_func2(): pass """) worker.setup() ev = worker.popevent() assert ev.name == "workerready" ev = worker.popevent() assert ev.name == "collectionstart" assert not ev.kwargs ev = worker.popevent("collectionfinish") ids = ev.kwargs['ids'] assert len(ids) == 2 worker.sendcommand("runtests_all", ) worker.sendcommand("shutdown", ) for func in "::test_func", "::test_func2": for i in range(3): # setup/call/teardown ev = worker.popevent("testreport") assert ev.name == "testreport" rep = unserialize_report(ev.name, ev.kwargs['data']) assert rep.nodeid.endswith(func) ev 
= worker.popevent("workerfinished") assert 'workeroutput' in ev.kwargs def test_happy_run_events_converted(self, testdir, worker): py.test.xfail("implement a simple test for event production") assert not worker.use_callback worker.testdir.makepyfile(""" def test_func(): pass """) worker.setup() hookrec = testdir.getreportrecorder(worker.config) for data in worker.slp.channel: worker.slp.process_from_remote(data) worker.slp.process_from_remote(worker.slp.ENDMARK) py.std.pprint.pprint(hookrec.hookrecorder.calls) hookrec.hookrecorder.contains([ ("pytest_collectstart", "collector.fspath == aaa"), ("pytest_pycollect_makeitem", "name == 'test_func'"), ("pytest_collectreport", "report.collector.fspath == aaa"), ("pytest_collectstart", "collector.fspath == bbb"), ("pytest_pycollect_makeitem", "name == 'test_func'"), ("pytest_collectreport", "report.collector.fspath == bbb"), ]) def test_process_from_remote_error_handling(self, worker, capsys): worker.use_callback = True worker.setup() worker.slp.process_from_remote(('', ())) out, err = capsys.readouterr() assert 'INTERNALERROR> ValueError: unknown event: ' in out ev = worker.popevent() assert ev.name == "errordown" def test_remote_env_vars(testdir): testdir.makepyfile(''' import os def test(): assert os.environ['PYTEST_XDIST_WORKER'] in ('gw0', 'gw1') assert os.environ['PYTEST_XDIST_WORKER_COUNT'] == '2' ''') result = testdir.runpytest('-n2', '--max-worker-restart=0') assert result.ret == 0 pytest-xdist-1.22.1/testing/test_slavemanage.py0000644000372000037200000002276013242644556022511 0ustar travistravis00000000000000import py import pytest import execnet from _pytest.pytester import HookRecorder from xdist import workermanage, newhooks from xdist.workermanage import HostRSync, NodeManager pytest_plugins = "pytester" @pytest.fixture def hookrecorder(request, config): hookrecorder = HookRecorder(config.pluginmanager) if hasattr(hookrecorder, "start_recording"): hookrecorder.start_recording(newhooks) request.addfinalizer(hookrecorder.finish_recording) return hookrecorder @pytest.fixture def config(testdir): return testdir.parseconfig() @pytest.fixture def mysetup(tmpdir): class mysetup: source = tmpdir.mkdir("source") dest = tmpdir.mkdir("dest") return mysetup() @pytest.fixture def workercontroller(monkeypatch): class MockController(object): def __init__(self, *args): pass def setup(self): pass monkeypatch.setattr(workermanage, 'WorkerController', MockController) return MockController class TestNodeManagerPopen: def test_popen_no_default_chdir(self, config): gm = NodeManager(config, ["popen"]) assert gm.specs[0].chdir is None def test_default_chdir(self, config): specs = ["ssh=noco", "socket=xyz"] for spec in NodeManager(config, specs).specs: assert spec.chdir == "pyexecnetcache" for spec in NodeManager(config, specs, defaultchdir="abc").specs: assert spec.chdir == "abc" def test_popen_makegateway_events(self, config, hookrecorder, workercontroller): hm = NodeManager(config, ["popen"] * 2) hm.setup_nodes(None) call = hookrecorder.popcall("pytest_xdist_setupnodes") assert len(call.specs) == 2 call = hookrecorder.popcall("pytest_xdist_newgateway") assert call.gateway.spec == execnet.XSpec("popen") assert call.gateway.id == "gw0" call = hookrecorder.popcall("pytest_xdist_newgateway") assert call.gateway.id == "gw1" assert len(hm.group) == 2 hm.teardown_nodes() assert not len(hm.group) def test_popens_rsync(self, config, mysetup, workercontroller): source = mysetup.source hm = NodeManager(config, ["popen"] * 2) hm.setup_nodes(None) assert len(hm.group) 
== 2 for gw in hm.group: class pseudoexec: args = [] def __init__(self, *args): self.args.extend(args) def waitclose(self): pass gw.remote_exec = pseudoexec notifications = [] for gw in hm.group: hm.rsync(gw, source, notify=lambda *args: notifications.append(args)) assert not notifications hm.teardown_nodes() assert not len(hm.group) assert "sys.path.insert" in gw.remote_exec.args[0] def test_rsync_popen_with_path(self, config, mysetup, workercontroller): source, dest = mysetup.source, mysetup.dest hm = NodeManager(config, ["popen//chdir=%s" % dest] * 1) hm.setup_nodes(None) source.ensure("dir1", "dir2", "hello") notifications = [] for gw in hm.group: hm.rsync(gw, source, notify=lambda *args: notifications.append(args)) assert len(notifications) == 1 assert notifications[0] == ("rsyncrootready", hm.group['gw0'].spec, source) hm.teardown_nodes() dest = dest.join(source.basename) assert dest.join("dir1").check() assert dest.join("dir1", "dir2").check() assert dest.join("dir1", "dir2", 'hello').check() def test_rsync_same_popen_twice(self, config, mysetup, hookrecorder, workercontroller): source, dest = mysetup.source, mysetup.dest hm = NodeManager(config, ["popen//chdir=%s" % dest] * 2) hm.roots = [] hm.setup_nodes(None) source.ensure("dir1", "dir2", "hello") gw = hm.group[0] hm.rsync(gw, source) call = hookrecorder.popcall("pytest_xdist_rsyncstart") assert call.source == source assert len(call.gateways) == 1 assert call.gateways[0] in hm.group call = hookrecorder.popcall("pytest_xdist_rsyncfinish") class TestHRSync: def test_hrsync_filter(self, mysetup): source, _ = mysetup.source, mysetup.dest # noqa source.ensure("dir", "file.txt") source.ensure(".svn", "entries") source.ensure(".somedotfile", "moreentries") source.ensure("somedir", "editfile~") syncer = HostRSync(source, ignores=NodeManager.DEFAULT_IGNORES) files = list(source.visit(rec=syncer.filter, fil=syncer.filter)) assert len(files) == 3 basenames = [x.basename for x in files] assert 'dir' in basenames assert 'file.txt' in basenames assert 'somedir' in basenames def test_hrsync_one_host(self, mysetup): source, dest = mysetup.source, mysetup.dest gw = execnet.makegateway("popen//chdir=%s" % dest) finished = [] rsync = HostRSync(source) rsync.add_target_host(gw, finished=lambda: finished.append(1)) source.join("hello.py").write("world") rsync.send() gw.exit() assert dest.join(source.basename, "hello.py").check() assert len(finished) == 1 class TestNodeManager: @py.test.mark.xfail(run=False) def test_rsync_roots_no_roots(self, testdir, mysetup): mysetup.source.ensure("dir1", "file1").write("hello") config = testdir.parseconfig(mysetup.source) nodemanager = NodeManager(config, ["popen//chdir=%s" % mysetup.dest]) # assert nodemanager.config.topdir == source == config.topdir nodemanager.makegateways() nodemanager.rsync_roots() p, = nodemanager.gwmanager.multi_exec( "import os ; channel.send(os.getcwd())").receive_each() p = py.path.local(p) py.builtin.print_("remote curdir", p) assert p == mysetup.dest.join(config.topdir.basename) assert p.join("dir1").check() assert p.join("dir1", "file1").check() def test_popen_rsync_subdir(self, testdir, mysetup, workercontroller): source, dest = mysetup.source, mysetup.dest dir1 = mysetup.source.mkdir("dir1") dir2 = dir1.mkdir("dir2") dir2.ensure("hello") for rsyncroot in (dir1, source): dest.remove() nodemanager = NodeManager(testdir.parseconfig( "--tx", "popen//chdir=%s" % dest, "--rsyncdir", rsyncroot, source, )) nodemanager.setup_nodes(None) # calls .rsync_roots() if rsyncroot == source: dest = 
dest.join("source") assert dest.join("dir1").check() assert dest.join("dir1", "dir2").check() assert dest.join("dir1", "dir2", 'hello').check() nodemanager.teardown_nodes() def test_init_rsync_roots(self, testdir, mysetup, workercontroller): source, dest = mysetup.source, mysetup.dest dir2 = source.ensure("dir1", "dir2", dir=1) source.ensure("dir1", "somefile", dir=1) dir2.ensure("hello") source.ensure("bogusdir", "file") source.join("tox.ini").write(py.std.textwrap.dedent(""" [pytest] rsyncdirs=dir1/dir2 """)) config = testdir.parseconfig(source) nodemanager = NodeManager(config, ["popen//chdir=%s" % dest]) nodemanager.setup_nodes(None) # calls .rsync_roots() assert dest.join("dir2").check() assert not dest.join("dir1").check() assert not dest.join("bogus").check() def test_rsyncignore(self, testdir, mysetup, workercontroller): source, dest = mysetup.source, mysetup.dest dir2 = source.ensure("dir1", "dir2", dir=1) source.ensure("dir5", "dir6", "bogus") source.ensure("dir5", "file") dir2.ensure("hello") source.ensure("foo", "bar") source.ensure("bar", "foo") source.join("tox.ini").write(py.std.textwrap.dedent(""" [pytest] rsyncdirs = dir1 dir5 rsyncignore = dir1/dir2 dir5/dir6 foo* """)) config = testdir.parseconfig(source) config.option.rsyncignore = ['bar'] nodemanager = NodeManager(config, ["popen//chdir=%s" % dest]) nodemanager.setup_nodes(None) # calls .rsync_roots() assert dest.join("dir1").check() assert not dest.join("dir1", "dir2").check() assert dest.join("dir5", "file").check() assert not dest.join("dir6").check() assert not dest.join('foo').check() assert not dest.join('bar').check() def test_optimise_popen(self, testdir, mysetup, workercontroller): source = mysetup.source specs = ["popen"] * 3 source.join("conftest.py").write("rsyncdirs = ['a']") source.ensure('a', dir=1) config = testdir.parseconfig(source) nodemanager = NodeManager(config, specs) nodemanager.setup_nodes(None) # calls .rysnc_roots() for gwspec in nodemanager.specs: assert gwspec._samefilesystem() assert not gwspec.chdir def test_ssh_setup_nodes(self, specssh, testdir): testdir.makepyfile(__init__="", test_x=""" def test_one(): pass """) reprec = testdir.inline_run("-d", "--rsyncdir=%s" % testdir.tmpdir, "--tx", specssh, testdir.tmpdir) rep, = reprec.getreports("pytest_runtest_logreport") assert rep.passed pytest-xdist-1.22.1/xdist/0000755000372000037200000000000013242644607016261 5ustar travistravis00000000000000pytest-xdist-1.22.1/xdist/scheduler/0000755000372000037200000000000013242644607020237 5ustar travistravis00000000000000pytest-xdist-1.22.1/xdist/scheduler/__init__.py0000644000372000037200000000036313242644556022355 0ustar travistravis00000000000000from xdist.scheduler.each import EachScheduling # noqa from xdist.scheduler.load import LoadScheduling # noqa from xdist.scheduler.loadscope import LoadScopeScheduling # noqa from xdist.scheduler.filescope import LoadFileScheduling # noqa pytest-xdist-1.22.1/xdist/scheduler/each.py0000644000372000037200000001146013242644556021516 0ustar travistravis00000000000000from py.log import Producer from xdist.workermanage import parse_spec_config from xdist.report import report_collection_diff class EachScheduling: """Implement scheduling of test items on all nodes If a node gets added after the test run is started then it is assumed to replace a node which got removed before it finished its collection. In this case it will only be used if a node with the same spec got removed earlier. 
Any nodes added after the run is started will only get items assigned if a node with a matching spec was removed before it finished all its pending items. The new node will then be assigned the remaining items from the removed node. """ def __init__(self, config, log=None): self.config = config self.numnodes = len(parse_spec_config(config)) self.node2collection = {} self.node2pending = {} self._started = [] self._removed2pending = {} if log is None: self.log = Producer("eachsched") else: self.log = log.eachsched self.collection_is_completed = False @property def nodes(self): """A list of all nodes in the scheduler.""" return list(self.node2pending.keys()) @property def tests_finished(self): if not self.collection_is_completed: return False if self._removed2pending: return False for pending in self.node2pending.values(): if len(pending) >= 2: return False return True @property def has_pending(self): """Return True if there are pending test items This indicates that collection has finished and nodes are still processing test items, so this can be thought of as "the scheduler is active". """ for pending in self.node2pending.values(): if pending: return True return False def add_node(self, node): assert node not in self.node2pending self.node2pending[node] = [] def add_node_collection(self, node, collection): """Add the collected test items from a node Collection is complete once all nodes have submitted their collection. In this case its pending list is set to an empty list. When the collection is already completed this submission is from a node which was restarted to replace a dead node. In this case we already assign the pending items here. In either case ``.schedule()`` will instruct the node to start running the required tests. """ assert node in self.node2pending if not self.collection_is_completed: self.node2collection[node] = list(collection) self.node2pending[node] = [] if len(self.node2collection) >= self.numnodes: self.collection_is_completed = True elif self._removed2pending: for deadnode in self._removed2pending: if deadnode.gateway.spec == node.gateway.spec: dead_collection = self.node2collection[deadnode] if collection != dead_collection: msg = report_collection_diff(dead_collection, collection, deadnode.gateway.id, node.gateway.id) self.log(msg) return pending = self._removed2pending.pop(deadnode) self.node2pending[node] = pending break def mark_test_complete(self, node, item_index, duration=0): self.node2pending[node].remove(item_index) def remove_node(self, node): # KeyError if we didn't get an add_node() yet pending = self.node2pending.pop(node) if not pending: return crashitem = self.node2collection[node][pending.pop(0)] if pending: self._removed2pending[node] = pending return crashitem def schedule(self): """Schedule the test items on the nodes If the node's pending list is empty it is a new node which needs to run all the tests. If the pending list is already populated (by ``.add_node_collection()``) then it replaces a dead node and we only need to run those tests. """ assert self.collection_is_completed for node, pending in self.node2pending.items(): if node in self._started: continue if not pending: pending[:] = range(len(self.node2collection[node])) node.send_runtest_all() else: node.send_runtest_some(pending) self._started.append(node) pytest-xdist-1.22.1/xdist/scheduler/filescope.py0000644000372000037200000000420313242644556022564 0ustar travistravis00000000000000from . 
import LoadScopeScheduling from py.log import Producer class LoadFileScheduling(LoadScopeScheduling): """Implement load scheduling across nodes, but grouping tests by test file. This distributes the tests collected across all nodes so each test is run just once. All nodes collect and submit the list of tests and when all collections are received it is verified they are identical collections. Then the collection gets divided up in work units, grouped by test file, and those work units get submitted to nodes. Whenever a node finishes an item, it calls ``.mark_test_complete()`` which will trigger the scheduler to assign more work units if the number of pending tests for the node falls below a low-watermark. When created, ``numnodes`` defines how many nodes are expected to submit a collection. This is used to know when all nodes have finished collection. This class behaves very much like LoadScopeScheduling, but with a file-level scope. """ def __init__(self, config, log=None): super(LoadFileScheduling, self).__init__(config, log) if log is None: self.log = Producer('loadfilesched') else: self.log = log.loadfilesched def _split_scope(self, nodeid): """Determine the scope (grouping) of a nodeid. There are usually 3 cases for a nodeid:: example/loadsuite/test/test_beta.py::test_beta0 example/loadsuite/test/test_delta.py::Delta1::test_delta0 example/loadsuite/epsilon/__init__.py::epsilon.epsilon #. Function in a test module. #. Method of a class in a test module. #. Doctest in a function in a package. This function will group tests with the scope determined by splitting the first ``::`` from the left. That is, tests will be grouped in a single work unit when they reside in the same file. In the above example, scopes will be:: example/loadsuite/test/test_beta.py example/loadsuite/test/test_delta.py example/loadsuite/epsilon/__init__.py """ return nodeid.split('::', 1)[0] pytest-xdist-1.22.1/xdist/scheduler/load.py0000644000372000037200000002524713242644556021535 0ustar travistravis00000000000000from itertools import cycle from py.log import Producer from _pytest.runner import CollectReport from xdist.workermanage import parse_spec_config from xdist.report import report_collection_diff class LoadScheduling: """Implement load scheduling across nodes. This distributes the tests collected across all nodes so each test is run just once. All nodes collect and submit the test suite and when all collections are received it is verified they are identical collections. Then the collection gets divided up in chunks and chunks get submitted to nodes. Whenever a node finishes an item, it calls ``.mark_test_complete()`` which will trigger the scheduler to assign more tests if the number of pending tests for the node falls below a low-watermark. When created, ``numnodes`` defines how many nodes are expected to submit a collection. This is used to know when all nodes have finished collection and how large the chunks need to be. Attributes: :numnodes: The expected number of nodes taking part. The actual number of nodes will vary during the scheduler's lifetime as nodes are added by the DSession as they are brought up and removed either because of a dead node or normal shutdown. This number is primarily used to know when the initial collection is completed. :node2collection: Map of nodes and their test collection. All collections should always be identical. :node2pending: Map of nodes and the indices of their pending tests.
The indices are an index into ``.pending`` (which is identical to their own collection stored in ``.node2collection``). :collection: The one collection once it is validated to be identical between all the nodes. It is initialised to None until ``.schedule()`` is called. :pending: List of indices of globally pending tests. These are tests which have not yet been allocated to a chunk for a node to process. :log: A py.log.Producer instance. :config: Config object, used for handling hooks. """ def __init__(self, config, log=None): self.numnodes = len(parse_spec_config(config)) self.node2collection = {} self.node2pending = {} self.pending = [] self.collection = None if log is None: self.log = Producer("loadsched") else: self.log = log.loadsched self.config = config @property def nodes(self): """A list of all nodes in the scheduler.""" return list(self.node2pending.keys()) @property def collection_is_completed(self): """Boolean indicating whether the initial test collection is complete. This is a boolean indicating all initial participating nodes have finished collection. The required number of initial nodes is defined by ``.numnodes``. """ return len(self.node2collection) >= self.numnodes @property def tests_finished(self): """Return True if all tests have been executed by the nodes.""" if not self.collection_is_completed: return False if self.pending: return False for pending in self.node2pending.values(): if len(pending) >= 2: return False return True @property def has_pending(self): """Return True if there are pending test items This indicates that collection has finished and nodes are still processing test items, so this can be thought of as "the scheduler is active". """ if self.pending: return True for pending in self.node2pending.values(): if pending: return True return False def add_node(self, node): """Add a new node to the scheduler. From now on the node will be allocated chunks of tests to execute. Called by the ``DSession.worker_workerready`` hook when it successfully bootstraps a new node. """ assert node not in self.node2pending self.node2pending[node] = [] def add_node_collection(self, node, collection): """Add the collected test items from a node The collection is stored in the ``.node2collection`` map. Called by the ``DSession.worker_collectionfinish`` hook. """ assert node in self.node2pending if self.collection_is_completed: # A new node has been added later, perhaps an original one died. # .schedule() should have # been called by now assert self.collection if collection != self.collection: other_node = next(iter(self.node2collection.keys())) msg = report_collection_diff(self.collection, collection, other_node.gateway.id, node.gateway.id) self.log(msg) return self.node2collection[node] = list(collection) def mark_test_complete(self, node, item_index, duration=0): """Mark test item as completed by node The duration it took to execute the item is used as a hint to the scheduler. This is called by the ``DSession.worker_testreport`` hook. """ self.node2pending[node].remove(item_index) self.check_schedule(node, duration=duration) def check_schedule(self, node, duration=0): """Maybe schedule new items on the node If there are any globally pending tests left then this will check if the given node should be given any more tests. The ``duration`` of the last test is optionally used as a heuristic to influence how many tests the node is assigned. """ if node.shutting_down: return if self.pending: # how many nodes do we have?
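# Illustration (hypothetical figures): with 100 globally pending tests # and 4 nodes, items_per_node_min == max(2, 100 // 4 // 4) == 6 and # items_per_node_max == max(2, 100 // 4 // 2) == 12, so a node is # topped up to 12 pending items once it drops below 6.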
num_nodes = len(self.node2pending) # if our node goes below a heuristic minimum, fill it out to # heuristic maximum items_per_node_min = max(2, len(self.pending) // num_nodes // 4) items_per_node_max = max(2, len(self.pending) // num_nodes // 2) node_pending = self.node2pending[node] if len(node_pending) < items_per_node_min: if duration >= 0.1 and len(node_pending) >= 2: # seems the node is doing long-running tests # and has enough items to continue # so let's rather wait with sending new items return num_send = items_per_node_max - len(node_pending) self._send_tests(node, num_send) self.log("num items waiting for node:", len(self.pending)) def remove_node(self, node): """Remove a node from the scheduler This should be called either when the node crashed or at shutdown time. In the former case any pending items assigned to the node will be re-scheduled. Called by the ``DSession.worker_workerfinished`` and ``DSession.worker_errordown`` hooks. Return the item which was being executed while the node crashed or None if the node has no more pending items. """ pending = self.node2pending.pop(node) if not pending: return # The node crashed, reassign pending items crashitem = self.collection[pending.pop(0)] self.pending.extend(pending) for node in self.node2pending: self.check_schedule(node) return crashitem def schedule(self): """Initiate distribution of the test collection Initiate scheduling of the items across the nodes. If this gets called again later it behaves the same as calling ``.check_schedule()`` on all nodes so that newly added nodes will start to be used. This is called by the ``DSession.worker_collectionfinish`` hook if ``.collection_is_completed`` is True. """ assert self.collection_is_completed # Initial distribution already happened, reschedule on all nodes if self.collection is not None: for node in self.nodes: self.check_schedule(node) return # XXX allow nodes to have different collections if not self._check_nodes_have_same_collection(): self.log('**Different tests collected, aborting run**') return # Collections are identical, create the index of pending items. self.collection = list(self.node2collection.values())[0] self.pending[:] = range(len(self.collection)) if not self.collection: return # Send a batch of tests to run. If we don't have at least two # tests per node, we have to send them all so that we can send # shutdown signals and get all nodes working. initial_batch = max(len(self.pending) // 4, 2 * len(self.nodes)) # distribute tests round-robin up to the batch size # (or until we run out) nodes = cycle(self.nodes) for i in range(initial_batch): self._send_tests(next(nodes), 1) if not self.pending: # initial distribution sent all tests, start node shutdown for node in self.nodes: node.shutdown() def _send_tests(self, node, num): tests_per_node = self.pending[:num] if tests_per_node: del self.pending[:num] self.node2pending[node].extend(tests_per_node) node.send_runtest_some(tests_per_node) def _check_nodes_have_same_collection(self): """Return True if all nodes have collected the same items. If collections differ, this method returns False while logging the collection differences and posting collection errors to pytest_collectreport hook.
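For example, should one node collect an extra test id that the first node did not, the resulting diff is logged and a failed CollectReport is posted for that node.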
""" node_collection_items = list(self.node2collection.items()) first_node, col = node_collection_items[0] same_collection = True for node, collection in node_collection_items[1:]: msg = report_collection_diff( col, collection, first_node.gateway.id, node.gateway.id, ) if msg: same_collection = False self.log(msg) if self.config is not None: rep = CollectReport( node.gateway.id, 'failed', longrepr=msg, result=[]) self.config.hook.pytest_collectreport(report=rep) return same_collection pytest-xdist-1.22.1/xdist/scheduler/loadscope.py0000644000372000037200000003407713242644556022600 0ustar travistravis00000000000000from collections import OrderedDict from _pytest.runner import CollectReport from py.log import Producer from xdist.report import report_collection_diff from xdist.workermanage import parse_spec_config class LoadScopeScheduling: """Implement load scheduling across nodes, but grouping test by scope. This distributes the tests collected across all nodes so each test is run just once. All nodes collect and submit the list of tests and when all collections are received it is verified they are identical collections. Then the collection gets divided up in work units, grouped by test scope, and those work units get submitted to nodes. Whenever a node finishes an item, it calls ``.mark_test_complete()`` which will trigger the scheduler to assign more work units if the number of pending tests for the node falls below a low-watermark. When created, ``numnodes`` defines how many nodes are expected to submit a collection. This is used to know when all nodes have finished collection. Attributes: :numnodes: The expected number of nodes taking part. The actual number of nodes will vary during the scheduler's lifetime as nodes are added by the DSession as they are brought up and removed either because of a dead node or normal shutdown. This number is primarily used to know when the initial collection is completed. :collection: The final list of tests collected by all nodes once it is validated to be identical between all the nodes. It is initialised to None until ``.schedule()`` is called. :workqueue: Ordered dictionary that maps all available scopes with their associated tests (nodeid). Nodeids are in turn associated with their completion status. One entry of the workqueue is called a work unit. In turn, a collection of work unit is called a workload. :: workqueue = { '///test_module.py': { '///test_module.py::test_case1': False, '///test_module.py::test_case2': False, (...) }, (...) } :assigned_work: Ordered dictionary that maps worker nodes with their assigned work units. :: assigned_work = { '': { '///test_module.py': { '///test_module.py::test_case1': False, '///test_module.py::test_case2': False, (...) }, (...) }, (...) } :registered_collections: Ordered dictionary that maps worker nodes with their collection of tests gathered during test discovery. :: registered_collections = { '': [ '///test_module.py::test_case1', '///test_module.py::test_case2', ], (...) } :log: A py.log.Producer instance. :config: Config object, used for handling hooks. 
""" def __init__(self, config, log=None): self.numnodes = len(parse_spec_config(config)) self.collection = None self.workqueue = OrderedDict() self.assigned_work = OrderedDict() self.registered_collections = OrderedDict() if log is None: self.log = Producer('loadscopesched') else: self.log = log.loadscopesched self.config = config @property def nodes(self): """A list of all active nodes in the scheduler.""" return list(self.assigned_work.keys()) @property def collection_is_completed(self): """Boolean indication initial test collection is complete. This is a boolean indicating all initial participating nodes have finished collection. The required number of initial nodes is defined by ``.numnodes``. """ return len(self.registered_collections) >= self.numnodes @property def tests_finished(self): """Return True if all tests have been executed by the nodes.""" if not self.collection_is_completed: return False if self.workqueue: return False for assigned_unit in self.assigned_work.values(): if self._pending_of(assigned_unit) >= 2: return False return True @property def has_pending(self): """Return True if there are pending test items. This indicates that collection has finished and nodes are still processing test items, so this can be thought of as "the scheduler is active". """ if self.workqueue: return True for assigned_unit in self.assigned_work.values(): if self._pending_of(assigned_unit) > 0: return True return False def add_node(self, node): """Add a new node to the scheduler. From now on the node will be assigned work units to be executed. Called by the ``DSession.worker_workerready`` hook when it successfully bootstraps a new node. """ assert node not in self.assigned_work self.assigned_work[node] = OrderedDict() def remove_node(self, node): """Remove a node from the scheduler. This should be called either when the node crashed or at shutdown time. In the former case any pending items assigned to the node will be re-scheduled. Called by the hooks: - ``DSession.worker_workerfinished``. - ``DSession.worker_errordown``. Return the item being executed while the node crashed or None if the node has no more pending items. """ workload = self.assigned_work.pop(node) if not self._pending_of(workload): return None # The node crashed, identify test that crashed for work_unit in workload.values(): for nodeid, completed in work_unit.items(): if not completed: crashitem = nodeid break else: continue break else: raise RuntimeError( 'Unable to identify crashitem on a workload with ' 'pending items' ) # Made uncompleted work unit available again self.workqueue.update(workload) for node in self.assigned_work: self._reschedule(node) return crashitem def add_node_collection(self, node, collection): """Add the collected test items from a node. The collection is stored in the ``.registered_collections`` dictionary. Called by the hook: - ``DSession.worker_collectionfinish``. """ # Check that add_node() was called on the node before assert node in self.assigned_work # A new node has been added later, perhaps an original one died. 
if self.collection_is_completed: # Assert that .schedule() should have been called by now assert self.collection # Check that the new collection matches the official collection if collection != self.collection: other_node = next(iter(self.registered_collections.keys())) msg = report_collection_diff( self.collection, collection, other_node.gateway.id, node.gateway.id ) self.log(msg) return self.registered_collections[node] = list(collection) def mark_test_complete(self, node, item_index, duration=0): """Mark test item as completed by node. Called by the hook: - ``DSession.worker_testreport``. """ nodeid = self.registered_collections[node][item_index] scope = self._split_scope(nodeid) self.assigned_work[node][scope][nodeid] = True self._reschedule(node) def _assign_work_unit(self, node): """Assign a work unit to a node.""" assert self.workqueue # Grab a unit of work scope, work_unit = self.workqueue.popitem(last=False) # Keep track of the assigned work assigned_to_node = self.assigned_work.setdefault( node, default=OrderedDict() ) assigned_to_node[scope] = work_unit # Ask the node to execute the workload worker_collection = self.registered_collections[node] nodeids_indexes = [ worker_collection.index(nodeid) for nodeid, completed in work_unit.items() if not completed ] node.send_runtest_some(nodeids_indexes) def _split_scope(self, nodeid): """Determine the scope (grouping) of a nodeid. There are usually 3 cases for a nodeid:: example/loadsuite/test/test_beta.py::test_beta0 example/loadsuite/test/test_delta.py::Delta1::test_delta0 example/loadsuite/epsilon/__init__.py::epsilon.epsilon #. Function in a test module. #. Method of a class in a test module. #. Doctest in a function in a package. This function will group tests with the scope determined by splitting the first ``::`` from the right. That is, classes will be grouped in a single work unit, and functions from a test module will be grouped by their module. In the above example, scopes will be:: example/loadsuite/test/test_beta.py example/loadsuite/test/test_delta.py::Delta1 example/loadsuite/epsilon/__init__.py """ return nodeid.rsplit('::', 1)[0] def _pending_of(self, workload): """Return the number of pending tests in a workload.""" pending = sum( list(scope.values()).count(False) for scope in workload.values() ) return pending def _reschedule(self, node): """Maybe schedule new items on the node. If there are any globally pending work units left then this will check if the given node should be given any more tests. """ # Do not add more work to a node shutting down if node.shutting_down: return # Check that more work is available if not self.workqueue: return self.log('Number of units waiting for node:', len(self.workqueue)) # Check that the node is almost depleted of work # 2: Heuristic of minimum tests to enqueue more work if self._pending_of(self.assigned_work[node]) > 2: return # Pop one unit of work and assign it self._assign_work_unit(node) def schedule(self): """Initiate distribution of the test collection. Initiate scheduling of the items across the nodes. If this gets called again later it behaves the same as calling ``._reschedule()`` on all nodes so that newly added nodes will start to be used. If ``.collection_is_completed`` is True, this is called by the hook: - ``DSession.worker_collectionfinish``. 
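In outline, the steps below are: validate that the registered collections are identical, build the workqueue by grouping nodeids per scope, shut down surplus workers, then hand every remaining node an initial work unit plus, when available, a second one (#277).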
""" assert self.collection_is_completed # Initial distribution already happened, reschedule on all nodes if self.collection is not None: for node in self.nodes: self._reschedule(node) return # Check that all nodes collected the same tests if not self._check_nodes_have_same_collection(): self.log('**Different tests collected, aborting run**') return # Collections are identical, create the final list of items self.collection = list( next(iter(self.registered_collections.values())) ) if not self.collection: return # Determine chunks of work (scopes) for nodeid in self.collection: scope = self._split_scope(nodeid) work_unit = self.workqueue.setdefault(scope, default=OrderedDict()) work_unit[nodeid] = False # Avoid having more workers than work extra_nodes = len(self.nodes) - len(self.workqueue) if extra_nodes > 0: self.log('Shuting down {0} nodes'.format(extra_nodes)) for _ in range(extra_nodes): unused_node, assigned = self.assigned_work.popitem(last=True) self.log('Shuting down unused node {0}'.format(unused_node)) unused_node.shutdown() # Assign initial workload for node in self.nodes: self._assign_work_unit(node) # Ensure nodes start with at least two work units if possible (#277) for node in self.nodes: self._reschedule(node) # Initial distribution sent all tests, start node shutdown if not self.workqueue: for node in self.nodes: node.shutdown() def _check_nodes_have_same_collection(self): """Return True if all nodes have collected the same items. If collections differ, this method returns False while logging the collection differences and posting collection errors to pytest_collectreport hook. """ node_collection_items = list(self.registered_collections.items()) first_node, col = node_collection_items[0] same_collection = True for node, collection in node_collection_items[1:]: msg = report_collection_diff( col, collection, first_node.gateway.id, node.gateway.id, ) if not msg: continue same_collection = False self.log(msg) if self.config is None: continue rep = CollectReport( node.gateway.id, 'failed', longrepr=msg, result=[] ) self.config.hook.pytest_collectreport(report=rep) return same_collection pytest-xdist-1.22.1/xdist/__init__.py0000644000372000037200000000011513242644556020372 0ustar travistravis00000000000000from xdist._version import version as __version__ __all__ = ['__version__'] pytest-xdist-1.22.1/xdist/_version.py0000644000372000037200000000016513242644606020460 0ustar travistravis00000000000000# coding: utf-8 # file generated by setuptools_scm # don't change, don't track in version control version = '1.22.1' pytest-xdist-1.22.1/xdist/dsession.py0000644000372000037200000003460713242644556020477 0ustar travistravis00000000000000import py import pytest from xdist.workermanage import NodeManager from xdist.scheduler import ( EachScheduling, LoadScheduling, LoadScopeScheduling, LoadFileScheduling, ) queue = py.builtin._tryimport('queue', 'Queue') class Interrupted(KeyboardInterrupt): """ signals an immediate interruption. """ class DSession: """A py.test plugin which runs a distributed test session At the beginning of the test session this creates a NodeManager instance which creates and starts all nodes. Nodes then emit events processed in the pytest_runtestloop hook using the worker_* methods. Once a node is started it will automatically start running the py.test mainloop with some custom hooks. This means a node automatically starts collecting tests. Once tests are collected it will wait for instructions. 
""" def __init__(self, config): self.config = config self.log = py.log.Producer("dsession") if not config.option.debug: py.log.setconsumer(self.log._keywords, None) self.nodemanager = None self.sched = None self.shuttingdown = False self.countfailures = 0 self.maxfail = config.getvalue("maxfail") self.queue = queue.Queue() self._session = None self._failed_collection_errors = {} self._active_nodes = set() self._failed_nodes_count = 0 self._max_worker_restart = self.config.option.maxworkerrestart if self._max_worker_restart is not None: self._max_worker_restart = int(self._max_worker_restart) try: self.terminal = config.pluginmanager.getplugin("terminalreporter") except KeyError: self.terminal = None else: self.trdist = TerminalDistReporter(config) config.pluginmanager.register(self.trdist, "terminaldistreporter") @property def session_finished(self): """Return True if the distributed session has finished This means all nodes have executed all test items. This is used by pytest_runtestloop to break out of its loop. """ return bool(self.shuttingdown and not self._active_nodes) def report_line(self, line): if self.terminal and self.config.option.verbose >= 0: self.terminal.write_line(line) @pytest.mark.trylast def pytest_sessionstart(self, session): """Creates and starts the nodes. The nodes are setup to put their events onto self.queue. As soon as nodes start they will emit the worker_workerready event. """ self.nodemanager = NodeManager(self.config) nodes = self.nodemanager.setup_nodes(putevent=self.queue.put) self._active_nodes.update(nodes) self._session = session def pytest_sessionfinish(self, session): """Shutdown all nodes.""" nm = getattr(self, 'nodemanager', None) # if not fully initialized if nm is not None: nm.teardown_nodes() self._session = None def pytest_collection(self): # prohibit collection of test items in master process return True @pytest.mark.trylast def pytest_xdist_make_scheduler(self, config, log): dist = config.getvalue('dist') schedulers = { 'each': EachScheduling, 'load': LoadScheduling, 'loadscope': LoadScopeScheduling, 'loadfile': LoadFileScheduling, } return schedulers[dist](config, log) def pytest_runtestloop(self): self.sched = self.config.hook.pytest_xdist_make_scheduler( config=self.config, log=self.log ) assert self.sched is not None self.shouldstop = False while not self.session_finished: self.loop_once() if self.shouldstop: self.triggershutdown() raise Interrupted(str(self.shouldstop)) return True def loop_once(self): """Process one callback from one of the workers.""" while 1: if not self._active_nodes: # If everything has died stop looping self.triggershutdown() raise RuntimeError("Unexpectedly no active workers available") try: eventcall = self.queue.get(timeout=2.0) break except queue.Empty: continue callname, kwargs = eventcall assert callname, kwargs method = "worker_" + callname call = getattr(self, method) self.log("calling method", method, kwargs) call(**kwargs) if self.sched.tests_finished: self.triggershutdown() # # callbacks for processing events from workers # def worker_workerready(self, node, workerinfo): """Emitted when a node first starts up. This adds the node to the scheduler, nodes continue with collection without any further input. """ node.workerinfo = workerinfo node.workerinfo['id'] = node.gateway.id node.workerinfo['spec'] = node.gateway.spec # TODO: (#234 task) needs this for pytest. 
Remove when refactor in pytest repo node.slaveinfo = node.workerinfo self.config.hook.pytest_testnodeready(node=node) if self.shuttingdown: node.shutdown() else: self.sched.add_node(node) def worker_workerfinished(self, node): """Emitted when node executes its pytest_sessionfinish hook. Removes the node from the scheduler. The node might not be in the scheduler if it had not emitted workerready before shutdown was triggered. """ self.config.hook.pytest_testnodedown(node=node, error=None) if node.workeroutput['exitstatus'] == 2: # keyboard-interrupt self.shouldstop = "%s received keyboard-interrupt" % (node,) self.worker_errordown(node, "keyboard-interrupt") return if node in self.sched.nodes: crashitem = self.sched.remove_node(node) assert not crashitem, (crashitem, node) self._active_nodes.remove(node) def worker_errordown(self, node, error): """Emitted by the WorkerController when a node dies.""" self.config.hook.pytest_testnodedown(node=node, error=error) try: crashitem = self.sched.remove_node(node) except KeyError: pass else: if crashitem: self.handle_crashitem(crashitem, node) self._failed_nodes_count += 1 maximum_reached = (self._max_worker_restart is not None and self._failed_nodes_count > self._max_worker_restart) if maximum_reached: if self._max_worker_restart == 0: msg = 'Worker restarting disabled' else: msg = "Maximum crashed workers reached: %d" % \ self._max_worker_restart self.report_line(msg) else: self.report_line("Replacing crashed worker %s" % node.gateway.id) self._clone_node(node) self._active_nodes.remove(node) def worker_collectionfinish(self, node, ids): """worker has finished test collection. This adds the collection for this node to the scheduler. If the scheduler indicates collection is finished (i.e. all initial nodes have submitted their collections), then tells the scheduler to schedule the collected items. When initiating scheduling the first time it logs which scheduler is in use. """ if self.shuttingdown: return self.config.hook.pytest_xdist_node_collection_finished(node=node, ids=ids) # tell session which items were effectively collected otherwise # the master node will finish the session with EXIT_NOTESTSCOLLECTED self._session.testscollected = len(ids) self.sched.add_node_collection(node, ids) if self.terminal: self.trdist.setstatus(node.gateway.spec, "[%d]" % (len(ids))) if self.sched.collection_is_completed: if self.terminal and not self.sched.has_pending: self.trdist.ensure_show_status() self.terminal.write_line("") self.terminal.write_line("scheduling tests via %s" % ( self.sched.__class__.__name__)) self.sched.schedule() def worker_logstart(self, node, nodeid, location): """Emitted when a node calls the pytest_runtest_logstart hook.""" self.config.hook.pytest_runtest_logstart( nodeid=nodeid, location=location) def worker_logfinish(self, node, nodeid, location): """Emitted when a node calls the pytest_runtest_logfinish hook.""" self.config.hook.pytest_runtest_logfinish( nodeid=nodeid, location=location) def worker_testreport(self, node, rep): """Emitted when a node calls the pytest_runtest_logreport hook.""" rep.node = node self.config.hook.pytest_runtest_logreport(report=rep) self._handlefailures(rep) def worker_runtest_protocol_complete(self, node, item_index, duration): """ Emitted when a node fires the 'runtest_protocol_complete' event, signalling that a test has completed the runtestprotocol and should be removed from the pending list in the scheduler. 
""" self.sched.mark_test_complete(node, item_index, duration) def worker_collectreport(self, node, rep): """Emitted when a node calls the pytest_collectreport hook.""" if rep.failed: self._failed_worker_collectreport(node, rep) def worker_logwarning(self, message, code, nodeid, fslocation): """Emitted when a node calls the pytest_logwarning hook.""" kwargs = dict(message=message, code=code, nodeid=nodeid, fslocation=fslocation) self.config.hook.pytest_logwarning.call_historic(kwargs=kwargs) def _clone_node(self, node): """Return new node based on an existing one. This is normally for when a node dies, this will copy the spec of the existing node and create a new one with a new id. The new node will have been setup so it will start calling the "worker_*" hooks and do work soon. """ spec = node.gateway.spec spec.id = None self.nodemanager.group.allocate_id(spec) node = self.nodemanager.setup_node(spec, self.queue.put) self._active_nodes.add(node) return node def _failed_worker_collectreport(self, node, rep): # Check we haven't already seen this report (from # another worker). if rep.longrepr not in self._failed_collection_errors: self._failed_collection_errors[rep.longrepr] = True self.config.hook.pytest_collectreport(report=rep) self._handlefailures(rep) def _handlefailures(self, rep): if rep.failed: self.countfailures += 1 if self.maxfail and self.countfailures >= self.maxfail: self.shouldstop = "stopping after %d failures" % ( self.countfailures) def triggershutdown(self): self.log("triggering shutdown") self.shuttingdown = True for node in self.sched.nodes: node.shutdown() def handle_crashitem(self, nodeid, worker): # XXX get more reporting info by recording pytest_runtest_logstart? # XXX count no of failures and retry N times runner = self.config.pluginmanager.getplugin("runner") fspath = nodeid.split("::")[0] msg = "Worker %r crashed while running %r" % (worker.gateway.id, nodeid) rep = runner.TestReport(nodeid, (fspath, None, fspath), (), "failed", msg, "???") rep.node = worker self.config.hook.pytest_runtest_logreport(report=rep) class TerminalDistReporter: def __init__(self, config): self.config = config self.tr = config.pluginmanager.getplugin("terminalreporter") self._status = {} self._lastlen = 0 self._isatty = getattr(self.tr, 'isatty', self.tr.hasmarkup) def write_line(self, msg): self.tr.write_line(msg) def ensure_show_status(self): if not self._isatty: self.write_line(self.getstatus()) def setstatus(self, spec, status, show=True): self._status[spec.id] = status if show and self._isatty: self.rewrite(self.getstatus()) def getstatus(self): parts = ["%s %s" % (spec.id, self._status[spec.id]) for spec in self._specs] return " / ".join(parts) def rewrite(self, line, newline=False): pline = line + " " * max(self._lastlen - len(line), 0) if newline: self._lastlen = 0 pline += "\n" else: self._lastlen = len(line) self.tr.rewrite(pline, bold=True) def pytest_xdist_setupnodes(self, specs): self._specs = specs for spec in specs: self.setstatus(spec, "I", show=False) self.setstatus(spec, "I", show=True) self.ensure_show_status() def pytest_xdist_newgateway(self, gateway): if self.config.option.verbose > 0: rinfo = gateway._rinfo() version = "%s.%s.%s" % rinfo.version_info[:3] self.rewrite("[%s] %s Python %s cwd: %s" % ( gateway.id, rinfo.platform, version, rinfo.cwd), newline=True) self.setstatus(gateway.spec, "C") def pytest_testnodeready(self, node): if self.config.option.verbose > 0: d = node.workerinfo infoline = "[%s] Python %s" % ( d['id'], d['version'].replace('\n', ' -- '),) 
self.rewrite(infoline, newline=True) self.setstatus(node.gateway.spec, "ok") def pytest_testnodedown(self, node, error): if not error: return self.write_line("[%s] node down: %s" % (node.gateway.id, error)) # def pytest_xdist_rsyncstart(self, source, gateways): # targets = ",".join([gw.id for gw in gateways]) # msg = "[%s] rsyncing: %s" %(targets, source) # self.write_line(msg) # def pytest_xdist_rsyncfinish(self, source, gateways): # targets = ", ".join(["[%s]" % gw.id for gw in gateways]) # self.write_line("rsyncfinish: %s -> %s" %(source, targets)) pytest-xdist-1.22.1/xdist/looponfail.py0000644000372000037200000002102113242644556020774 0ustar travistravis00000000000000""" Implement -f aka looponfailing for py.test. NOTE that we try to avoid loading and depending on application modules within the controlling process (the one that repeatedly starts test processes); otherwise changes to source code can crash the controlling process, which ideally should never happen. """ import py import pytest import sys import execnet def pytest_addoption(parser): group = parser.getgroup("xdist", "distributed and subprocess testing") group._addoption( '-f', '--looponfail', action="store_true", dest="looponfail", default=False, help="run tests in subprocess, wait for modified files " "and re-run failing test set until all pass.") def pytest_cmdline_main(config): if config.getoption("looponfail"): usepdb = config.getoption('usepdb') # a core option if usepdb: raise pytest.UsageError( "--pdb incompatible with --looponfail.") looponfail_main(config) return 2 # looponfail can only be stopped with ctrl-C anyway def looponfail_main(config): remotecontrol = RemoteControl(config) rootdirs = config.getini("looponfailroots") statrecorder = StatRecorder(rootdirs) try: while 1: remotecontrol.loop_once() if not remotecontrol.failures and remotecontrol.wasfailing: # the last failures passed, let's immediately rerun all continue repr_pytest_looponfailinfo( failreports=remotecontrol.failures, rootdirs=rootdirs) statrecorder.waitonchange(checkinterval=2.0) except KeyboardInterrupt: print() class RemoteControl(object): def __init__(self, config): self.config = config self.failures = [] def trace(self, *args): if self.config.option.debug: msg = " ".join([str(x) for x in args]) py.builtin.print_("RemoteControl:", msg) def initgateway(self): return execnet.makegateway("popen") def setup(self, out=None): if out is None: out = py.io.TerminalWriter() if hasattr(self, 'gateway'): raise ValueError("already have gateway %r" % self.gateway) self.trace("setting up worker session") self.gateway = self.initgateway() self.channel = channel = self.gateway.remote_exec( init_worker_session, args=self.config.args, option_dict=vars(self.config.option), ) remote_outchannel = channel.receive() def write(s): out._file.write(s) out._file.flush() remote_outchannel.setcallback(write) def ensure_teardown(self): if hasattr(self, 'channel'): if not self.channel.isclosed(): self.trace("closing", self.channel) self.channel.close() del self.channel if hasattr(self, 'gateway'): self.trace("exiting", self.gateway) self.gateway.exit() del self.gateway def runsession(self): try: self.trace("sending", self.failures) self.channel.send(self.failures) try: return self.channel.receive() except self.channel.RemoteError: e = sys.exc_info()[1] self.trace("ERROR", e) raise finally: self.ensure_teardown() def loop_once(self): self.setup() self.wasfailing = self.failures and len(self.failures) result = self.runsession() failures, reports, collection_failed = result if
collection_failed: pass # "Collection failed, keeping previous failure set" else: uniq_failures = [] for failure in failures: if failure not in uniq_failures: uniq_failures.append(failure) self.failures = uniq_failures def repr_pytest_looponfailinfo(failreports, rootdirs): tr = py.io.TerminalWriter() if failreports: tr.sep("#", "LOOPONFAILING", bold=True) for report in failreports: if report: tr.line(report, red=True) tr.sep("#", "waiting for changes", bold=True) for rootdir in rootdirs: tr.line("### Watching: %s" % (rootdir,), bold=True) def init_worker_session(channel, args, option_dict): import os import sys outchannel = channel.gateway.newchannel() sys.stdout = sys.stderr = outchannel.makefile('w') channel.send(outchannel) # prune sys.path to not contain relative paths newpaths = [] for p in sys.path: if p: if not os.path.isabs(p): p = os.path.abspath(p) newpaths.append(p) sys.path[:] = newpaths # fullwidth, hasmarkup = channel.receive() from _pytest.config import Config config = Config.fromdictargs(option_dict, list(args)) config.args = args from xdist.looponfail import WorkerFailSession WorkerFailSession(config, channel).main() class WorkerFailSession: def __init__(self, config, channel): self.config = config self.channel = channel self.recorded_failures = [] self.collection_failed = False config.pluginmanager.register(self) config.option.looponfail = False config.option.usepdb = False def DEBUG(self, *args): if self.config.option.debug: print(" ".join(map(str, args))) def pytest_collection(self, session): self.session = session self.trails = self.current_command hook = self.session.ihook try: items = session.perform_collect(self.trails or None) except pytest.UsageError: items = session.perform_collect(None) hook.pytest_collection_modifyitems( session=session, config=session.config, items=items) hook.pytest_collection_finish(session=session) return True def pytest_runtest_logreport(self, report): if report.failed: self.recorded_failures.append(report) def pytest_collectreport(self, report): if report.failed: self.recorded_failures.append(report) self.collection_failed = True def main(self): self.DEBUG("WORKER: received configuration, waiting for command trails") try: command = self.channel.receive() except KeyboardInterrupt: return # in the worker we can't do much about this self.DEBUG("received", command) self.current_command = command self.config.hook.pytest_cmdline_main(config=self.config) trails, failreports = [], [] for rep in self.recorded_failures: trails.append(rep.nodeid) loc = rep.longrepr loc = str(getattr(loc, 'reprcrash', loc)) failreports.append(loc) self.channel.send((trails, failreports, self.collection_failed)) class StatRecorder: def __init__(self, rootdirlist): self.rootdirlist = rootdirlist self.statcache = {} self.check() # snapshot state def fil(self, p): return p.check(file=1, dotfile=0) and p.ext != ".pyc" def rec(self, p): return p.check(dotfile=0) def waitonchange(self, checkinterval=1.0): while 1: changed = self.check() if changed: return py.std.time.sleep(checkinterval) def check(self, removepycfiles=True): # noqa, too complex changed = False statcache = self.statcache newstat = {} for rootdir in self.rootdirlist: for path in rootdir.visit(self.fil, self.rec): oldstat = statcache.pop(path, None) try: newstat[path] = curstat = path.stat() except py.error.ENOENT: if oldstat: changed = True else: if oldstat: if oldstat.mtime != curstat.mtime or \ oldstat.size != curstat.size: changed = True py.builtin.print_("# MODIFIED", path) if removepycfiles and path.ext 
== ".py": pycfile = path + "c" if pycfile.check(): pycfile.remove() else: changed = True if statcache: changed = True self.statcache = newstat return changed pytest-xdist-1.22.1/xdist/newhooks.py0000644000372000037200000000263513242644556020501 0ustar travistravis00000000000000""" xdist hooks. Additionally, pytest-xdist will also decorate a few other hooks with the worker instance that executed the hook originally: ``pytest_runtest_logreport``: ``rep`` parameter has a ``node`` attribute. You can use this hooks just as you would use normal pytest hooks, but some care must be taken in plugins in case ``xdist`` is not installed. Please see: http://pytest.org/en/latest/writing_plugins.html#optionally-using-hooks-from-3rd-party-plugins """ import pytest def pytest_xdist_setupnodes(config, specs): """ called before any remote node is set up. """ def pytest_xdist_newgateway(gateway): """ called on new raw gateway creation. """ def pytest_xdist_rsyncstart(source, gateways): """ called before rsyncing a directory to remote gateways takes place. """ def pytest_xdist_rsyncfinish(source, gateways): """ called after rsyncing a directory to remote gateways takes place. """ def pytest_configure_node(node): """ configure node information before it gets instantiated. """ def pytest_testnodeready(node): """ Test Node is ready to operate. """ def pytest_testnodedown(node, error): """ Test Node is down. """ def pytest_xdist_node_collection_finished(node, ids): """called by the master node when a node finishes collecting. """ @pytest.mark.firstresult def pytest_xdist_make_scheduler(config, log): """ return a node scheduler implementation """ pytest-xdist-1.22.1/xdist/plugin.py0000644000372000037200000001255113242644556020140 0ustar travistravis00000000000000import py import pytest def parse_numprocesses(s): if s == 'auto': try: from os import cpu_count except ImportError: from multiprocessing import cpu_count try: n = cpu_count() except NotImplementedError: return 1 return n if n else 1 else: return int(s) def pytest_addoption(parser): group = parser.getgroup("xdist", "distributed and subprocess testing") group._addoption( '-n', '--numprocesses', dest="numprocesses", metavar="numprocesses", action="store", type=parse_numprocesses, help="shortcut for '--dist=load --tx=NUM*popen', " "you can use 'auto' here for auto detection CPUs number on " "host system") group.addoption('--max-worker-restart', '--max-slave-restart', action="store", default=None, dest="maxworkerrestart", help="maximum number of workers that can be restarted " "when crashed (set to zero to disable this feature)\n" "'--max-slave-restart' option is deprecated and will be removed in " "a future release") group.addoption( '--dist', metavar="distmode", action="store", choices=['each', 'load', 'loadscope', 'loadfile', 'no'], dest="dist", default="no", help=("set mode for distributing tests to exec environments.\n\n" "each: send each test to all available environments.\n\n" "load: load balance by sending any pending test to any" " available environment.\n\n" "loadscope: load balance by sending pending groups of tests in" " the same scope to any available environment.\n\n" "loadfile: load balance by sending test grouped by file" " to any available environment.\n\n" "(default) no: run tests inprocess, don't distribute.")) group.addoption( '--tx', dest="tx", action="append", default=[], metavar="xspec", help=("add a test execution environment. 
some examples: " "--tx popen//python=python2.5 --tx socket=192.168.1.102:8888 " "--tx ssh=user@codespeak.net//chdir=testcache")) group._addoption( '-d', action="store_true", dest="distload", default=False, help="load-balance tests. shortcut for '--dist=load'") group.addoption( '--rsyncdir', action="append", default=[], metavar="DIR", help="add directory for rsyncing to remote tx nodes.") group.addoption( '--rsyncignore', action="append", default=[], metavar="GLOB", help="add expression for ignores when rsyncing to remote tx nodes.") group.addoption( "--boxed", action="store_true", help="backward compatibility alias for pytest-forked --forked") parser.addini( 'rsyncdirs', 'list of (relative) paths to be rsynced for' ' remote distributed testing.', type="pathlist") parser.addini( 'rsyncignore', 'list of (relative) glob-style paths to be ignored ' 'for rsyncing.', type="pathlist") parser.addini( "looponfailroots", type="pathlist", help="directories to check for changes", default=[py.path.local()]) # ------------------------------------------------------------------------- # distributed testing hooks # ------------------------------------------------------------------------- def pytest_addhooks(pluginmanager): from xdist import newhooks # avoid warnings with pytest-2.8 method = getattr(pluginmanager, "add_hookspecs", None) if method is None: method = pluginmanager.addhooks method(newhooks) # ------------------------------------------------------------------------- # distributed testing initialization # ------------------------------------------------------------------------- @pytest.mark.trylast def pytest_configure(config): if config.getoption("dist") != "no" and not config.getvalue("collectonly"): from xdist.dsession import DSession session = DSession(config) config.pluginmanager.register(session, "dsession") tr = config.pluginmanager.getplugin("terminalreporter") tr.showfspath = False if config.getoption("boxed"): config.option.forked = True @pytest.mark.tryfirst def pytest_cmdline_main(config): if config.option.numprocesses: if config.option.dist == 'no': config.option.dist = "load" config.option.tx = ['popen'] * config.option.numprocesses if config.option.distload: config.option.dist = "load" val = config.getvalue if not val("collectonly"): usepdb = config.getoption('usepdb') # a core option if val("dist") != "no": if usepdb: raise pytest.UsageError( "--pdb is incompatible with distributing tests; try using -n0.") # noqa: E501 # ------------------------------------------------------------------------- # fixtures # ------------------------------------------------------------------------- @pytest.fixture(scope="session") def worker_id(request): """Return the id of the current worker ('gw0', 'gw1', etc) or 'master' if running on the master node. """ if hasattr(request.config, 'workerinput'): return request.config.workerinput['workerid'] else: return 'master' pytest-xdist-1.22.1/xdist/remote.py0000644000372000037200000001564113242644556020140 0ustar travistravis00000000000000""" This module is executed in remote subprocesses and helps to control a remote testing session and relay back information. It assumes that 'py' is importable and does not have dependencies on the rest of the xdist code. This means that the xdist-plugin needs not to be installed in remote environments. 
""" import sys import os import time import _pytest.hookspec import pytest class WorkerInteractor: def __init__(self, config, channel): self.config = config self.workerid = config.workerinput.get('workerid', "?") self.log = py.log.Producer("worker-%s" % self.workerid) if not config.option.debug: py.log.setconsumer(self.log._keywords, None) self.channel = channel config.pluginmanager.register(self) def sendevent(self, name, **kwargs): self.log("sending", name, kwargs) self.channel.send((name, kwargs)) def pytest_internalerror(self, excrepr): for line in str(excrepr).split("\n"): self.log("IERROR>", line) def pytest_sessionstart(self, session): self.session = session workerinfo = getinfodict() self.sendevent("workerready", workerinfo=workerinfo) @pytest.hookimpl(hookwrapper=True) def pytest_sessionfinish(self, exitstatus): self.config.workeroutput['exitstatus'] = exitstatus yield self.sendevent("workerfinished", workeroutput=self.config.workeroutput) def pytest_collection(self, session): self.sendevent("collectionstart") def pytest_runtestloop(self, session): self.log("entering main loop") torun = [] while 1: try: name, kwargs = self.channel.receive() except EOFError: return True self.log("received command", name, kwargs) if name == "runtests": torun.extend(kwargs['indices']) elif name == "runtests_all": torun.extend(range(len(session.items))) self.log("items to run:", torun) # only run if we have an item and a next item while len(torun) >= 2: self.run_one_test(torun) if name == "shutdown": if torun: self.run_one_test(torun) break return True def run_one_test(self, torun): items = self.session.items self.item_index = torun.pop(0) item = items[self.item_index] if torun: nextitem = items[torun[0]] else: nextitem = None start = time.time() self.config.hook.pytest_runtest_protocol( item=item, nextitem=nextitem) duration = time.time() - start self.sendevent("runtest_protocol_complete", item_index=self.item_index, duration=duration) def pytest_collection_finish(self, session): self.sendevent( "collectionfinish", topdir=str(session.fspath), ids=[item.nodeid for item in session.items]) def pytest_runtest_logstart(self, nodeid, location): self.sendevent("logstart", nodeid=nodeid, location=location) # the pytest_runtest_logfinish hook was introduced in pytest 3.4 if hasattr(_pytest.hookspec, 'pytest_runtest_logfinish'): def pytest_runtest_logfinish(self, nodeid, location): self.sendevent("logfinish", nodeid=nodeid, location=location) def pytest_runtest_logreport(self, report): data = serialize_report(report) data["item_index"] = self.item_index data["worker_id"] = self.workerid assert self.session.items[self.item_index].nodeid == report.nodeid self.sendevent("testreport", data=data) def pytest_collectreport(self, report): data = serialize_report(report) self.sendevent("collectreport", data=data) def pytest_logwarning(self, message, code, nodeid, fslocation): self.sendevent("logwarning", message=message, code=code, nodeid=nodeid, fslocation=str(fslocation)) def serialize_report(rep): def disassembled_report(rep): reprtraceback = rep.longrepr.reprtraceback.__dict__.copy() reprcrash = rep.longrepr.reprcrash.__dict__.copy() new_entries = [] for entry in reprtraceback['reprentries']: entry_data = { 'type': type(entry).__name__, 'data': entry.__dict__.copy(), } for key, value in entry_data['data'].items(): if hasattr(value, '__dict__'): entry_data['data'][key] = value.__dict__.copy() new_entries.append(entry_data) reprtraceback['reprentries'] = new_entries return { 'reprcrash': reprcrash, 'reprtraceback': 
reprtraceback, 'sections': rep.longrepr.sections } import py d = rep.__dict__.copy() if hasattr(rep.longrepr, 'toterminal'): if hasattr(rep.longrepr, 'reprtraceback') \ and hasattr(rep.longrepr, 'reprcrash'): d['longrepr'] = disassembled_report(rep) else: d['longrepr'] = str(rep.longrepr) else: d['longrepr'] = rep.longrepr for name in d: if isinstance(d[name], py.path.local): d[name] = str(d[name]) elif name == "result": d[name] = None # for now return d def getinfodict(): import platform return dict( version=sys.version, version_info=tuple(sys.version_info), sysplatform=sys.platform, platform=platform.platform(), executable=sys.executable, cwd=os.getcwd(), ) def remote_initconfig(option_dict, args): from _pytest.config import Config option_dict['plugins'].append("no:terminal") config = Config.fromdictargs(option_dict, args) config.option.looponfail = False config.option.usepdb = False config.option.dist = "no" config.option.distload = False config.option.numprocesses = None config.args = args return config if __name__ == '__channelexec__': channel = channel # noqa workerinput, args, option_dict = channel.receive() importpath = os.getcwd() sys.path.insert(0, importpath) # XXX only for remote situations os.environ['PYTHONPATH'] = ( importpath + os.pathsep + os.environ.get('PYTHONPATH', '')) os.environ['PYTEST_XDIST_WORKER'] = workerinput['workerid'] os.environ['PYTEST_XDIST_WORKER_COUNT'] = str(workerinput['workercount']) # os.environ['PYTHONPATH'] = importpath import py config = remote_initconfig(option_dict, args) config.workerinput = workerinput config.workeroutput = {} # TODO: deprecated name, backward compatibility only. Remove it in future config.slaveinput = config.workerinput config.slaveoutput = config.workeroutput interactor = WorkerInteractor(config, channel) config.hook.pytest_cmdline_main(config=config) pytest-xdist-1.22.1/xdist/report.py0000644000372000037200000000145313242644556020154 0ustar travistravis00000000000000import py from difflib import unified_diff def report_collection_diff(from_collection, to_collection, from_id, to_id): """Report the collected test difference between two nodes. :returns: detailed message describing the difference between the given collections, or None if they are equal. """ if from_collection == to_collection: return None diff = unified_diff( from_collection, to_collection, fromfile=from_id, tofile=to_id, ) error_message = py.builtin._totext( 'Different tests were collected between {from_id} and {to_id}. 
' 'The difference is:\n' '{diff}' ).format(from_id=from_id, to_id=to_id, diff='\n'.join(diff)) msg = "\n".join([x.rstrip() for x in error_message.split("\n")]) return msg pytest-xdist-1.22.1/xdist/workermanage.py0000644000372000037200000003734413242644556021333 0ustar travistravis00000000000000import fnmatch import os import re import py import pytest import execnet import xdist.remote from _pytest import runner # XXX load dynamically def parse_spec_config(config): xspeclist = [] for xspec in config.getvalue("tx"): i = xspec.find("*") try: num = int(xspec[:i]) except ValueError: xspeclist.append(xspec) else: xspeclist.extend([xspec[i + 1:]] * num) if not xspeclist: raise pytest.UsageError( "MISSING test execution (tx) nodes: please specify --tx") return xspeclist class NodeManager(object): EXIT_TIMEOUT = 10 DEFAULT_IGNORES = ['.*', '*.pyc', '*.pyo', '*~'] def __init__(self, config, specs=None, defaultchdir="pyexecnetcache"): self.config = config self._nodesready = py.std.threading.Event() self.trace = self.config.trace.get("nodemanager") self.group = execnet.Group() if specs is None: specs = self._getxspecs() self.specs = [] for spec in specs: if not isinstance(spec, execnet.XSpec): spec = execnet.XSpec(spec) if not spec.chdir and not spec.popen: spec.chdir = defaultchdir self.group.allocate_id(spec) self.specs.append(spec) self.roots = self._getrsyncdirs() self.rsyncoptions = self._getrsyncoptions() self._rsynced_specs = py.builtin.set() def rsync_roots(self, gateway): """Rsync the set of roots to the node's gateway cwd.""" if self.roots: for root in self.roots: self.rsync(gateway, root, **self.rsyncoptions) def setup_nodes(self, putevent): self.config.hook.pytest_xdist_setupnodes(config=self.config, specs=self.specs) self.trace("setting up nodes") nodes = [] for spec in self.specs: nodes.append(self.setup_node(spec, putevent)) return nodes def setup_node(self, spec, putevent): gw = self.group.makegateway(spec) self.config.hook.pytest_xdist_newgateway(gateway=gw) self.rsync_roots(gw) node = WorkerController(self, gw, self.config, putevent) gw.node = node # keep the node alive node.setup() self.trace("started node %r" % node) return node def teardown_nodes(self): self.group.terminate(self.EXIT_TIMEOUT) def _getxspecs(self): return [execnet.XSpec(x) for x in parse_spec_config(self.config)] def _getrsyncdirs(self): for spec in self.specs: if not spec.popen or spec.chdir: break else: return [] import pytest import _pytest pytestpath = pytest.__file__.rstrip("co") pytestdir = py.path.local(_pytest.__file__).dirpath() config = self.config candidates = [py._pydir, pytestpath, pytestdir] candidates += config.option.rsyncdir rsyncroots = config.getini("rsyncdirs") if rsyncroots: candidates.extend(rsyncroots) roots = [] for root in candidates: root = py.path.local(root).realpath() if not root.check(): raise pytest.UsageError("rsyncdir doesn't exist: %r" % (root,)) if root not in roots: roots.append(root) return roots def _getrsyncoptions(self): """Get options to be passed for rsync.""" ignores = list(self.DEFAULT_IGNORES) ignores += self.config.option.rsyncignore ignores += self.config.getini("rsyncignore") return { 'ignores': ignores, 'verbose': self.config.option.verbose, } def rsync(self, gateway, source, notify=None, verbose=False, ignores=None): """Perform rsync to remote hosts for node.""" # XXX This changes the calling behaviour of # pytest_xdist_rsyncstart and pytest_xdist_rsyncfinish to # be called once per rsync target. 
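# For local popen workers that run in place (no chdir), no file
# transfer is needed: the branch just below only makes the source's
# parent directory importable in the worker process and returns early,
# skipping the HostRSync machinery entirely.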
rsync = HostRSync(source, verbose=verbose, ignores=ignores) spec = gateway.spec if spec.popen and not spec.chdir: # XXX This assumes that sources are python-packages # and that adding the basedir does not hurt. gateway.remote_exec(""" import sys ; sys.path.insert(0, %r) """ % os.path.dirname(str(source))).waitclose() return if (spec, source) in self._rsynced_specs: return def finished(): if notify: notify("rsyncrootready", spec, source) rsync.add_target_host(gateway, finished=finished) self._rsynced_specs.add((spec, source)) self.config.hook.pytest_xdist_rsyncstart( source=source, gateways=[gateway], ) rsync.send() self.config.hook.pytest_xdist_rsyncfinish( source=source, gateways=[gateway], ) class HostRSync(execnet.RSync): """ RSyncer that filters out common files """ def __init__(self, sourcedir, *args, **kwargs): self._synced = {} self._ignores = [] ignores = kwargs.pop('ignores', None) or [] for x in ignores: x = getattr(x, 'strpath', x) self._ignores.append(re.compile(fnmatch.translate(x))) super(HostRSync, self).__init__(sourcedir=sourcedir, **kwargs) def filter(self, path): path = py.path.local(path) for cre in self._ignores: if cre.match(path.basename) or cre.match(path.strpath): return False else: return True def add_target_host(self, gateway, finished=None): remotepath = os.path.basename(self._sourcedir) super(HostRSync, self).add_target(gateway, remotepath, finishedcallback=finished, delete=True,) def _report_send_file(self, gateway, modified_rel_path): if self._verbose: path = os.path.basename(self._sourcedir) + "/" + modified_rel_path remotepath = gateway.spec.chdir py.builtin.print_('%s:%s <= %s' % (gateway.spec, remotepath, path)) def make_reltoroot(roots, args): # XXX introduce/use public API for splitting py.test args splitcode = "::" result = [] for arg in args: parts = arg.split(splitcode) fspath = py.path.local(parts[0]) for root in roots: x = fspath.relto(root) if x or fspath == root: parts[0] = root.basename + "/" + x break else: raise ValueError("arg %s not relative to an rsync root" % (arg,)) result.append(splitcode.join(parts)) return result class WorkerController(object): ENDMARK = -1 def __init__(self, nodemanager, gateway, config, putevent): self.nodemanager = nodemanager self.putevent = putevent self.gateway = gateway self.config = config self.workerinput = {'workerid': gateway.id, 'workercount': len(nodemanager.specs), 'slaveid': gateway.id, 'slavecount': len(nodemanager.specs) } # TODO: deprecated name, backward compatibility only. 
Remove it in future self.slaveinput = self.workerinput self._down = False self._shutdown_sent = False self.log = py.log.Producer("workerctl-%s" % gateway.id) if not self.config.option.debug: py.log.setconsumer(self.log._keywords, None) def __repr__(self): return "<%s %s>" % (self.__class__.__name__, self.gateway.id,) @property def shutting_down(self): return self._down or self._shutdown_sent def setup(self): self.log("setting up worker session") spec = self.gateway.spec args = self.config.args if not spec.popen or spec.chdir: args = make_reltoroot(self.nodemanager.roots, args) option_dict = vars(self.config.option) if spec.popen: name = "popen-%s" % self.gateway.id if hasattr(self.config, '_tmpdirhandler'): basetemp = self.config._tmpdirhandler.getbasetemp() option_dict['basetemp'] = str(basetemp.join(name)) self.config.hook.pytest_configure_node(node=self) self.channel = self.gateway.remote_exec(xdist.remote) self.channel.send((self.workerinput, args, option_dict)) if self.putevent: self.channel.setcallback( self.process_from_remote, endmarker=self.ENDMARK) def ensure_teardown(self): if hasattr(self, 'channel'): if not self.channel.isclosed(): self.log("closing", self.channel) self.channel.close() # del self.channel if hasattr(self, 'gateway'): self.log("exiting", self.gateway) self.gateway.exit() # del self.gateway def send_runtest_some(self, indices): self.sendcommand("runtests", indices=indices) def send_runtest_all(self): self.sendcommand("runtests_all",) def shutdown(self): if not self._down: try: self.sendcommand("shutdown") except IOError: pass self._shutdown_sent = True def sendcommand(self, name, **kwargs): """ send a named parametrized command to the other side. """ self.log("sending command %s(**%s)" % (name, kwargs)) self.channel.send((name, kwargs)) def notify_inproc(self, eventname, **kwargs): self.log("queuing %s(**%s)" % (eventname, kwargs)) self.putevent((eventname, kwargs)) def process_from_remote(self, eventcall): # noqa too complex """ this gets called for each object we receive from the other side and if the channel closes. Note that channel callbacks run in the receiver thread of execnet gateways - we need to avoid raising exceptions or doing heavy work. """ try: if eventcall == self.ENDMARK: err = self.channel._getremoteerror() if not self._down: if not err or isinstance(err, EOFError): err = "Not properly terminated" # lost connection? 
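# The channel closed without a prior "workerfinished" event: the worker
# most likely crashed or the connection was lost. Reporting this to the
# master's event loop as "errordown" lets the session react, e.g. by
# restarting the crashed worker.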
self.notify_inproc("errordown", node=self, error=err) self._down = True return eventname, kwargs = eventcall if eventname in ("collectionstart",): self.log("ignoring %s(%s)" % (eventname, kwargs)) elif eventname == "workerready": self.notify_inproc(eventname, node=self, **kwargs) elif eventname == "workerfinished": self._down = True self.workeroutput = kwargs['workeroutput'] self.notify_inproc("workerfinished", node=self) elif eventname in ("logstart", "logfinish"): self.notify_inproc(eventname, node=self, **kwargs) elif eventname in ( "testreport", "collectreport", "teardownreport"): item_index = kwargs.pop("item_index", None) rep = unserialize_report(eventname, kwargs['data']) if item_index is not None: rep.item_index = item_index self.notify_inproc(eventname, node=self, rep=rep) elif eventname == "collectionfinish": self.notify_inproc(eventname, node=self, ids=kwargs['ids']) elif eventname == "runtest_protocol_complete": self.notify_inproc(eventname, node=self, **kwargs) elif eventname == "logwarning": self.notify_inproc(eventname, message=kwargs['message'], code=kwargs['code'], nodeid=kwargs['nodeid'], fslocation=kwargs['nodeid']) else: raise ValueError("unknown event: %s" % (eventname,)) except KeyboardInterrupt: # should not land in receiver-thread raise except: # noqa excinfo = py.code.ExceptionInfo() py.builtin.print_("!" * 20, excinfo) self.config.notify_exception(excinfo) self.shutdown() self.notify_inproc("errordown", node=self, error=excinfo) def unserialize_report(name, reportdict): def assembled_report(reportdict): from _pytest._code.code import ( ReprEntry, ReprEntryNative, ReprExceptionInfo, ReprFileLocation, ReprFuncArgs, ReprLocals, ReprTraceback ) if reportdict['longrepr']: if 'reprcrash' in reportdict['longrepr'] and 'reprtraceback' in reportdict['longrepr']: reprtraceback = reportdict['longrepr']['reprtraceback'] reprcrash = reportdict['longrepr']['reprcrash'] unserialized_entries = [] reprentry = None for entry_data in reprtraceback['reprentries']: data = entry_data['data'] entry_type = entry_data['type'] if entry_type == 'ReprEntry': reprfuncargs = None reprfileloc = None reprlocals = None if data['reprfuncargs']: reprfuncargs = ReprFuncArgs( **data['reprfuncargs']) if data['reprfileloc']: reprfileloc = ReprFileLocation( **data['reprfileloc']) if data['reprlocals']: reprlocals = ReprLocals( data['reprlocals']['lines']) reprentry = ReprEntry( lines=data['lines'], reprfuncargs=reprfuncargs, reprlocals=reprlocals, filelocrepr=reprfileloc, style=data['style'] ) elif entry_type == 'ReprEntryNative': reprentry = ReprEntryNative(data['lines']) else: report_unserialization_failure( entry_type, name, reportdict) unserialized_entries.append(reprentry) reprtraceback['reprentries'] = unserialized_entries exception_info = ReprExceptionInfo( reprtraceback=ReprTraceback(**reprtraceback), reprcrash=ReprFileLocation(**reprcrash), ) for section in reportdict['longrepr']['sections']: exception_info.addsection(*section) reportdict['longrepr'] = exception_info return reportdict if name == "testreport": return runner.TestReport(**assembled_report(reportdict)) elif name == "collectreport": return runner.CollectReport(**assembled_report(reportdict)) def report_unserialization_failure(type_name, report_name, reportdict): from pprint import pprint url = 'https://github.com/pytest-dev/pytest-xdist/issues' stream = py.io.TextIO() pprint('-' * 100, stream=stream) pprint('INTERNALERROR: Unknown entry type returned: %s' % type_name, stream=stream) pprint('report_name: %s' % report_name, 
stream=stream) pprint(reportdict, stream=stream) pprint('Please report this bug at %s' % url, stream=stream) pprint('-' * 100, stream=stream) assert 0, stream.getvalue() pytest-xdist-1.22.1/.travis.yml0000644000372000037200000000347613242644556017254 0ustar travistravis00000000000000sudo: false language: python notifications: irc: channels: - 'chat.freenode.net#pytest' on_success: change on_failure: change skip_join: true email: - pytest-commit@python.org python: - '2.7' - '3.4' - '3.5' - '3.6' env: - TOXENV=py-pytest30 - TOXENV=py-pytest31 - TOXENV=py-pytest32 - TOXENV=py-pytest33 install: pip install tox setuptools_scm script: tox jobs: include: - stage: test # python x env above are already included into this stage - python: "2.7" env: TOXENV=py27-pytestmaster - python: "2.7" env: TOXENV=py27-pytestfeatures - python: "3.6" env: TOXENV=py36-pytestmaster - python: "3.6" env: TOXENV=py36-pytestfeatures - python: "3.6" env: TOXENV=flakes - python: "3.6" env: TOXENV=readme - stage: deploy python: '3.6' env: install: pip install -U setuptools setuptools_scm script: skip deploy: provider: pypi user: ronny distributions: sdist bdist_wheel skip_upload_docs: true password: secure: cxmSDho5d+PYKEM4ZCg8ms1P4lzhYkrw6fEOm2HtTcsuCyY6aZMSgImWAnEYbJHSkdzgcxlXK9UKJ9B0YenXmBCkAr7UjdnpNXNmkySr0sYzlH/sfqt/dDATCHFaRKxnkOSOVywaDYhT9n8YudbXI77pXwD12i/CeSSJDbHhsu0JYUfAcb+D6YjRYoA2SEGCnzSzg+gDDfwXZx4ZiODCGLVwieNp1klCg88YROUE1BaYYNuUOONvfXX8+TWowbCF6ChH1WL/bZ49OStEYQNuYxZQZr4yClIqu9VJbchrU8j860K9ott2kkGTgfB/dDrQB/XncBubyIX9ikzCQAmmBXWAI3eyvWLPDk2Jz7kW2l2RT7syct80tCq3JhvQ1qdwr5ap7siocTLgnBW0tF4tkHSTFN3510fkc43npnp6FThebESQpnI24vqpwJ9hI/kW5mYi014Og2E/cpCXnz2XO8iZPDbqAMQpDsqEQoyhfGNgPTGp4K30TxRtwZBI5hHhDKnnR16fXtRgt1gYPvz/peUQvvpOm4JzIzGXPzluuutpnCBy75v5+oiwT3YRrLL/Meims9FtDDXL3qQubAE/ezIOOpm0N5XXV8DxIom8EN71yq5ab1tqhM+tBX7owRjy4FR4If2Q8feBdmTuh26DIQt/y+qSG8VkB9Sw/JCjc7c= on: tags: true repo: pytest-dev/pytest-xdist pytest-xdist-1.22.1/CHANGELOG.rst0000644000372000037200000003105413242644556017155 0ustar travistravis00000000000000pytest-xdist 1.22.1 (2018-02-19) ================================ Bug Fixes --------- - Fix issue when using ``loadscope`` or ``loadfile`` where tests would fail to start if the first scope had only one test. (`#257 `_) Trivial Changes --------------- - Change terminology used by ``pytest-xdist`` to *master* and *worker* in arguments and messages (for example ``--max-worker-reset``). (`#234 `_) pytest-xdist 1.22.0 (2018-01-11) ================================ Features -------- - Add support for the ``pytest_runtest_logfinish`` hook which will be released in pytest 3.4. (`#266 `_) pytest-xdist 1.21.0 (2017-12-22) ================================ Deprecations and Removals ------------------------- - Drop support for EOL Python 2.6. (`#259 `_) Features -------- - New ``--dist=loadfile`` option which load-distributes test to workers grouped by the file the tests live in. (`#242 `_) Bug Fixes --------- - Fix accidental mutation of test report during serialization causing longrepr string-ification to break. 
(`#241 `_) pytest-xdist 1.20.1 (2017-10-05) ================================ Bug Fixes --------- - Fix hang when all worker nodes crash and restart limit is reached (`#45 `_) - Fix issue where the -n option would still run distributed tests when pytest was run with the --collect-only option (`#5 `_) pytest-xdist 1.20.0 (2017-08-17) ================================ Features -------- - ``xdist`` now supports tests to log results multiple times, improving integration with plugins which require it like `pytest-rerunfailures `_ and `flaky `_. (`#206 `_) Bug Fixes --------- - Fix issue where tests were being incorrectly identified if a worker crashed during the ``teardown`` stage of the test. (`#124 `_) pytest-xdist 1.19.1 (2017-08-10) ================================ Bug Fixes --------- - Fix crash when transferring internal pytest warnings from workers to the master node. (`#214 `_) pytest-xdist 1.19.0 (2017-08-09) ================================ Deprecations and Removals ------------------------- - ``--boxed`` functionality has been moved to a separate plugin, `pytest-forked `_. This release now depends on `` pytest-forked`` and provides ``--boxed`` as a backward compatibility option. (`#1 `_) Features -------- - New ``--dist=loadscope`` option: sends group of related tests to the same worker. Tests are grouped by module for test functions and by class for test methods. See ``README.rst`` for more information. (`#191 `_) - Warnings are now properly transferred from workers to the master node. (`#92 `_) Bug Fixes --------- - Fix serialization of native tracebacks (``--tb=native``). (`#196 `_) pytest-xdist 1.18.2 (2017-07-28) ================================ Bug Fixes --------- - Removal of unnecessary dependency on incorrect version of py. (`#105 `_) - Fix bug in internal event-loop error handler in the master node. This bug would shadow the original errors making extremely hard/impossible for users to diagnose the problem properly. (`#175 `_) pytest-xdist 1.18.1 (2017-07-05) ================================ Bug Fixes --------- - Fixed serialization of ``longrepr.sections`` during error reporting from workers. (`#171 `_) - Fix ``ReprLocal`` not being unserialized breaking --showlocals usages. (`#176 `_) pytest-xdist 1.18.0 (2017-06-26) ================================ - ``pytest-xdist`` now requires ``pytest>=3.0.0``. Features -------- - Add long option `--numprocesses` as alternative for `-n`. (#168) Bug Fixes --------- - Fix serialization and deserialization dropping longrepr details. (#133) pytest-xdist 1.17.1 (2017-06-10) ================================ Bug Fixes --------- - Hot fix release reverting the change introduced by #124, unfortunately it broke a number of test suites so we are reversing this change while we investigate the problem. (#157) Improved Documentation ---------------------- - Introduced ``towncrier`` for ``CHANGELOG`` management. (#154) - Added ``HOWTORELEASE`` documentation. (#155) .. You should *NOT* be adding new change log entries to this file, this file is managed by towncrier. You *may* edit previous change logs to fix problems like typo corrections or such. To add a new change log entry, please see https://pip.pypa.io/en/latest/development/#adding-a-news-entry We named the news folder ``changelog`` .. towncrier release notes start 1.17.0 ------ - fix #124: xdist would mark test as complete after 'call' step. As a result, xdist could identify the wrong test as failing when test crashes at teardown. 
To address this issue, xdist now marks test as complete at teardown. 1.16.0 ------ - ``pytest-xdist`` now requires pytest 2.7 or later. - Add ``worker_id`` attribute in the TestReport - new hook: ``pytest_xdist_make_scheduler(config, log)``, can return custom tests items distribution logic implementation. You can take a look at built-in ``LoadScheduling`` and ``EachScheduling`` implementations. Note that required scheduler class public API may change in next ``pytest-xdist`` versions. 1.15.0 ------ - new ``worker_id`` fixture, returns the id of the worker in a test or fixture. Thanks Jared Hellman for the PR. - display progress during collection only when in a terminal, similar to pytest #1397 issue. Thanks Bruno Oliveira for the PR. - fix internal error message when ``--maxfail`` is used (#62, #65). Thanks Collin RM Stocks and Bryan A. Jones for reports and Bruno Oliveira for the PR. 1.14 ---- - new hook: ``pytest_xdist_node_collection_finished(node, ids)``, called when a worker has finished collection. Thanks Omer Katz for the request and Bruno Oliveira for the PR. - fix README display on pypi - fix #22: xdist now works if the internal tmpdir plugin is disabled. Thanks Bruno Oliveira for the PR. - fix #32: xdist now works if looponfail or boxed are disabled. Thanks Bruno Oliveira for the PR. 1.13.1 ------- - fix a regression -n 0 now disables xdist again 1.13 ------------------------- - extended the tox matrix with the supported py.test versions - split up the plugin into 3 plugin's to prepare the departure of boxed and looponfail. looponfail will be a part of core and forked boxed will be replaced with a more reliable primitive based on xdist - conforming with new pytest-2.8 behavior of returning non-zero when all tests were skipped or deselected. - new "--max-slave-restart" option that can be used to control maximum number of times pytest-xdist can restart slaves due to crashes. Thanks to Anatoly Bubenkov for the report and Bruno Oliveira for the PR. - release as wheel - "-n" option now can be set to "auto" for automatic detection of number of cpus in the host system. Thanks Suloev Dmitry for the PR. 1.12 ------------------------- - fix issue594: properly report errors when the test collection is random. Thanks Bruno Oliveira. - some internal test suite adaptation (to become forward compatible with the upcoming pytest-2.8) 1.11 ------------------------- - fix pytest/xdist issue485 (also depends on py-1.4.22): attach stdout/stderr on --boxed processes that die. - fix pytest/xdist issue503: make sure that a node has usually two items to execute to avoid scoped fixtures to be torn down pre-maturely (fixture teardown/setup is "nextitem" sensitive). Thanks to Andreas Pelme for bug analysis and failing test. - restart crashed nodes by internally refactoring setup handling of nodes. Also includes better code documentation. Many thanks to Floris Bruynooghe for the complete PR. 1.10 ------------------------- - add glob support for rsyncignores, add command line option to pass additional rsyncignores. Thanks Anatoly Bubenkov. - fix pytest issue382 - produce "pytest_runtest_logstart" event again in master. Thanks Aron Curzon. - fix pytest issue419 by sending/receiving indices into the test collection instead of node ids (which are not necessarily unique for functions parametrized with duplicate values) - send multiple "to test" indices in one network message to a slave and improve heuristics for sending chunks where the chunksize depends on the number of remaining tests rather than fixed numbers. 
This reduces the number of master -> node messages (but not the reverse direction)

1.9
-------------------------

- changed LICENSE to MIT
- fix duplicate reported test ids with --looponfailing (thanks Jeremy Thurgood)
- fix pytest issue41: re-run tests on all file changes, not just randomly selected ones like .py/.c.
- fix pytest issue347: slaves running on top of Python3.2 will set PYTHONDONTWRITEBYTECODE to 1 to avoid import concurrency bugs.

1.8
-------------------------

- fix pytest-issue93 - use the refined pytest-2.2.1 runtestprotocol interface to perform eager teardowns for test items.

1.7
-------------------------

- fix incompatibilities with pytest-2.2.0 (allow multiple pytest_runtest_logreport reports for a test item)

1.6
-------------------------

- terser collection reporting
- fix issue34 - distributed testing with -p plugin now works correctly
- fix race condition in looponfail mode where a concurrent file removal could cause a crash

1.5
-------------------------

- adapt to and require pytest-2.0 changes, rsyncdirs and rsyncignore can now only be specified in [pytest] sections of ini files, see "py.test -h" for details.
- major internal refactoring to match the pytest-2.0 event refactoring
- perform test collection always at slave side instead of at the master
- make python2/python3 bridging work, remove usage of pickling
- improve initial reporting by using line-rewriting
- remove all trailing whitespace from source

1.4
-------------------------

- perform distributed testing related reporting in the plugin rather than having dist-related code in the generic py.test distribution
- depend on execnet-1.0.7 which adds "env1:NAME=value" keys to gateway specification strings.
- show detailed gateway setup and platform information only when "-v" or "--verbose" is specified.

1.3
-------------------------

- fix --looponfailing - it would not actually run against the fully changed source tree when initial conftest files load application state.
- adapt for py-1.3.1's new --maxfailure option

1.2
-------------------------

- fix issue79: sessionfinish/teardown hooks are now called systematically on the slave side
- introduce a new data input/output mechanism to allow the master side to send and receive data from a slave.
- fix race condition in underlying pickling/unpickling handling
- use and require new register hooks facility of py.test>=1.3.0
- require improved execnet>=1.0.6 because of various race conditions that can arise in xdist testing modes.
- fix some python3-related pickling race conditions
- fix PyPI description

1.1
-------------------------

- fix an indefinite hang which would wait for events although no events are pending - this happened if items arrived very quickly while the "reschedule-event" unconditionally avoided a busy-loop and did not schedule new work.

1.0
-------------------------

- moved code out of py-1.1.1 into its own plugin
- use a new, faster and more sensible model to do load-balancing of tests - now no magic "MAXITEMSPERHOST" is needed and load-testing works effectively even with very few tests.
- cleaned up termination handling
- make -x cause hard killing of test nodes to decrease wait time until the traceback shows up on first failure
pytest-xdist-1.22.1/HOWTORELEASE.rst0000644000372000037200000000236013242644556017565 0ustar travistravis00000000000000======================
Releasing pytest-xdist
======================

This document describes the steps to make a new ``pytest-xdist`` release.
Version ------- ``master`` should always be green and a potential release candidate. ``pytest-xdist`` follows semantic versioning, so given that the current version is ``X.Y.Z``, to find the next version number one needs to look at the ``changelog`` folder: - If there is any file named ``*.feature``, then we must make a new **minor** release: next release will be ``X.Y+1.0``. - Otherwise it is just a **bug fix** release: ``X.Y.Z+1``. Steps ----- To publish a new release ``X.Y.Z``, the steps are as follows: #. Create a new branch named ``release-X.Y.Z`` from the latest ``master``. #. Install ``pytest-xdist`` and dev requirements in a virtualenv:: $ pip install -e . -U -r dev-requirements.txt #. Update ``CHANGELOG.rst`` file by running:: $ towncrier --version X.Y.Z --yes #. Commit and push the branch for review. #. Once PR is **green** and **approved**, create and push a tag:: $ export VERSION=X.Y.Z $ git tag v$VERSION release-$VERSION $ git push git@github.com:pytest-dev/pytest-xdist.git v$VERSION That will build the package and publish it on ``PyPI`` automatically. pytest-xdist-1.22.1/ISSUES.txt0000644000372000037200000000144513242644556016711 0ustar travistravis00000000000000next release critical ----------------------------------------------- tag: bug miserably fails: --dist=each --tx popen --tx socket=... rename / hooks ----------------------------------------------- tag: bug node -> slave transition for hooks? configure_node -> configure_slave allow to remotely run xdist tests with xdist ----------------------------------------------- tag: feature allow to run xdist own tests using its own mechanism. currently this doesn't work because the remote side has no py.test plugin. How to configure/do register "xdist.plugin" on the remote side? see to avoid any "from _pytest" internal imports ----------------------------------------------- tag: feature currently tests and even xdist core code imports names from the internal _pytest namespace. See to avoid it. pytest-xdist-1.22.1/LICENSE0000644000372000037200000000204513242644556016137 0ustar travistravis00000000000000 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. pytest-xdist-1.22.1/OVERVIEW.md0000644000372000037200000001055013242644556016722 0ustar travistravis00000000000000# Overview # `xdist` works by spawning one or more **workers**, which are controlled by the **master**. Each **worker** is responsible for performing a full test collection and afterwards running tests as dictated by the **master**. The execution flow is: 1. 
**master** spawns one or more **workers** at the beginning of the test session. The communication between **master** and **worker** nodes makes use of [execnet](http://codespeak.net/execnet/) and its [gateways](http://codespeak.net/execnet/basics.html#gateways-bootstrapping-python-interpreters). The actual interpreters executing the code for the **workers** might be remote or local.

1. Each **worker** itself is a mini pytest runner. **workers** at this point perform a full test collection, sending the collected test-ids back to the **master**, which does not perform any collection itself.

1. The **master** receives the result of the collection from all nodes. At this point the **master** performs a sanity check to ensure that all **workers** collected the same tests (including order), bailing out otherwise. If all is well, it converts the list of test-ids into a list of simple indexes, where each index corresponds to the position of that test in the original collection list. This works because all nodes have the same collection list, and saves bandwidth because the **master** can now tell one of the workers to just *execute test index 3* instead of passing the full test id.

1. If **dist-mode** is **each**: the **master** just sends the full list of test indexes to each node at this moment.

1. If **dist-mode** is **load**: the **master** takes around 25% of the tests and sends them one by one to each **worker** in a round robin fashion. The rest of the tests will be distributed later as **workers** finish tests (see below).

1. Note that the `pytest_xdist_make_scheduler` hook can be used to implement custom test distribution logic.

1. **workers** re-implement `pytest_runtestloop`: pytest's default implementation basically loops over all collected items in the `session` object and executes the `pytest_runtest_protocol` for each test item, but in xdist **workers** sit idly waiting for the **master** to send tests for execution. As tests are received by **workers**, `pytest_runtest_protocol` is executed for each test. Here it is worth noting an implementation detail: **workers** must always keep at least one test item on their queue due to how the `pytest_runtest_protocol(item, nextitem)` hook is defined: in order to pass the `nextitem` to the hook, the worker must wait for more instructions from the master before executing that remaining test. If it receives more tests, then it can safely call `pytest_runtest_protocol` because it knows what the `nextitem` parameter will be. If it receives a "shutdown" signal, then it can execute the hook passing `nextitem` as `None`. A minimal sketch of this index-based protocol follows this list.

1. As tests are started and completed at the **workers**, the results are sent back to the **master**, which then just forwards the results to the appropriate pytest hooks: `pytest_runtest_logstart` and `pytest_runtest_logreport`. This way other plugins (for example `junitxml`) can work normally. The **master** (when in dist-mode **load**) decides to send more tests to a node when a test completes, using some heuristics such as test durations and how many tests each **worker** still has to run.

1. When the **master** has no more pending tests it will send a "shutdown" signal to all **workers**, which will then run their remaining tests to completion and shut down. At this point the **master** will sit waiting for **workers** to shut down, still processing events such as `pytest_runtest_logreport`.
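To make the `item`/`nextitem` bookkeeping above concrete, here is a minimal, self-contained sketch of the index-based protocol. It is illustrative only, *not* xdist's actual code: `ToyWorker` and `run_one` are invented names, and real workers receive indexes over an execnet channel rather than via a direct method call.

```python
# Toy model of the master/worker index protocol described above --
# an illustrative sketch, *not* xdist's real implementation.
from collections import deque


class ToyWorker:
    def __init__(self, collected_ids):
        self.collected = collected_ids  # each worker collects the full suite
        self.pending = deque()          # test *indexes* received from master

    def receive(self, indexes):
        self.pending.extend(indexes)
        # Run only while a "nextitem" is known, mirroring the
        # pytest_runtest_protocol(item, nextitem) constraint.
        while len(self.pending) >= 2:
            self.run_one()

    def shutdown(self):
        while self.pending:  # the last item runs with nextitem=None
            self.run_one()

    def run_one(self):
        index = self.pending.popleft()
        nextitem = self.collected[self.pending[0]] if self.pending else None
        print("running %s (nextitem=%s)" % (self.collected[index], nextitem))


ids = ["test_a", "test_b", "test_c", "test_d"]
workers = [ToyWorker(ids), ToyWorker(ids)]
# Master-side sanity check: all workers must have collected the same ids.
assert all(w.collected == ids for w in workers)
# Send an initial chunk of integer indexes round-robin; indexes are cheap
# to serialize compared to full test ids.
workers[0].receive([0, 1])
workers[1].receive([2, 3])
for w in workers:
    w.shutdown()  # master has no more pending tests
```

Note how each `ToyWorker` holds back its final pending index until `shutdown`, which is exactly the behaviour described in the list above.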
## FAQ ## > Why does each worker do its own collection, as opposed to having the master collect once and distribute from that collection to the workers? If collection was performed by master then it would have to serialize collected items to send them through the wire, as workers live in another process. The problem is that test items are not easily (impossible?) to serialize, as they contain references to the test functions, fixture managers, config objects, etc. Even if one manages to serialize it, it seems it would be very hard to get it right and easy to break by any small change in pytest. pytest-xdist-1.22.1/README.rst0000644000372000037200000002201713242644556016622 0ustar travistravis00000000000000 .. image:: http://img.shields.io/pypi/v/pytest-xdist.svg :alt: PyPI version :target: https://pypi.python.org/pypi/pytest-xdist .. image:: https://img.shields.io/pypi/pyversions/pytest-xdist.svg :alt: Python versions :target: https://pypi.python.org/pypi/pytest-xdist .. image:: https://anaconda.org/conda-forge/pytest-xdist/badges/version.svg :alt: Anaconda version :target: https://anaconda.org/conda-forge/pytest-xdist .. image:: https://travis-ci.org/pytest-dev/pytest-xdist.svg?branch=master :alt: Travis CI build status :target: https://travis-ci.org/pytest-dev/pytest-xdist .. image:: https://ci.appveyor.com/api/projects/status/56eq1a1avd4sdd7e/branch/master?svg=true :alt: AppVeyor build status :target: https://ci.appveyor.com/project/pytestbot/pytest-xdist xdist: pytest distributed testing plugin ======================================== The `pytest-xdist`_ plugin extends py.test with some unique test execution modes: * test run parallelization_: if you have multiple CPUs or hosts you can use those for a combined test run. This allows to speed up development or to use special resources of `remote machines`_. * ``--looponfail``: run your tests repeatedly in a subprocess. After each run py.test waits until a file in your project changes and then re-runs the previously failing tests. This is repeated until all tests pass after which again a full run is performed. * `Multi-Platform`_ coverage: you can specify different Python interpreters or different platforms and run tests in parallel on all of them. Before running tests remotely, ``py.test`` efficiently "rsyncs" your program source code to the remote place. All test results are reported back and displayed to your local terminal. You may specify different Python versions and interpreters. If you would like to know how pytest-xdist works under the covers, checkout `OVERVIEW `_. Installation ------------ Install the plugin with:: pip install pytest-xdist or use the package in develop/in-place mode with a checkout of the `pytest-xdist repository`_ :: pip install --editable . .. _parallelization: Speed up test runs by sending tests to multiple CPUs ---------------------------------------------------- To send tests to multiple CPUs, type:: py.test -n NUM Especially for longer running tests or tests requiring a lot of I/O this can lead to considerable speed ups. This option can also be set to ``auto`` for automatic detection of the number of CPUs. If a test crashes the interpreter, pytest-xdist will automatically restart that worker and report the failure as usual. You can use the ``--max-worker-restart`` option to limit the number of workers that can be restarted, or disable restarting altogether using ``--max-worker-restart=0``. 
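For instance, the following illustrative invocation (the exact restart limit is arbitrary) uses one worker per detected CPU and allows at most three crashed workers to be restarted::

    py.test -n auto --max-worker-restart=3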
By default, the ``-n`` option will send pending tests to any worker that is available, without any guaranteed order, but you can control this with these options: * ``--dist=loadscope``: tests will be grouped by **module** for *test functions* and by **class** for *test methods*, then each group will be sent to an available worker, guaranteeing that all tests in a group run in the same process. This can be useful if you have expensive module-level or class-level fixtures. Currently the groupings can't be customized, with grouping by class takes priority over grouping by module. This feature was added in version ``1.19``. * ``--dist=loadfile``: tests will be grouped by file name, and then will be sent to an available worker, guaranteeing that all tests in a group run in the same worker. This feature was added in version ``1.21``. Running tests in a Python subprocess ------------------------------------ To instantiate a python3.5 subprocess and send tests to it, you may type:: py.test -d --tx popen//python=python3.5 This will start a subprocess which is run with the ``python3.5`` Python interpreter, found in your system binary lookup path. If you prefix the --tx option value like this:: --tx 3*popen//python=python3.5 then three subprocesses would be created and tests will be load-balanced across these three processes. .. _boxed: Running tests in a boxed subprocess ----------------------------------- This functionality has been moved to the `pytest-forked `_ plugin, but the ``--boxed`` option is still kept for backward compatibility. .. _`remote machines`: Sending tests to remote SSH accounts ------------------------------------ Suppose you have a package ``mypkg`` which contains some tests that you can successfully run locally. And you have a ssh-reachable machine ``myhost``. Then you can ad-hoc distribute your tests by typing:: py.test -d --tx ssh=myhostpopen --rsyncdir mypkg mypkg This will synchronize your :code:`mypkg` package directory to an remote ssh account and then locally collect tests and send them to remote places for execution. You can specify multiple :code:`--rsyncdir` directories to be sent to the remote side. .. note:: For py.test to collect and send tests correctly you not only need to make sure all code and tests directories are rsynced, but that any test (sub) directory also has an :code:`__init__.py` file because internally py.test references tests as a fully qualified python module path. **You will otherwise get strange errors** during setup of the remote side. You can specify multiple :code:`--rsyncignore` glob patterns to be ignored when file are sent to the remote side. There are also internal ignores: :code:`.*, *.pyc, *.pyo, *~` Those you cannot override using rsyncignore command-line or ini-file option(s). Sending tests to remote Socket Servers -------------------------------------- Download the single-module `socketserver.py`_ Python program and run it like this:: python socketserver.py It will tell you that it starts listening on the default port. You can now on your home machine specify this new socket host with something like this:: py.test -d --tx socket=192.168.1.102:8888 --rsyncdir mypkg mypkg .. _`atonce`: .. 
_`Multi-Platform`: Running tests on many platforms at once --------------------------------------- The basic command to run tests on multiple platforms is:: py.test --dist=each --tx=spec1 --tx=spec2 If you specify a windows host, an OSX host and a Linux environment this command will send each tests to all platforms - and report back failures from all platforms at once. The specifications strings use the `xspec syntax`_. .. _`xspec syntax`: http://codespeak.net/execnet/basics.html#xspec .. _`socketserver.py`: http://bitbucket.org/hpk42/execnet/raw/2af991418160/execnet/script/socketserver.py .. _`execnet`: http://codespeak.net/execnet Identifying the worker process during a test -------------------------------------------- *New in version 1.15.* If you need to determine the identity of a worker process in a test or fixture, you may use the ``worker_id`` fixture to do so: .. code-block:: python @pytest.fixture() def user_account(worker_id): """ use a different account in each xdist worker """ return "account_%s" % worker_id When ``xdist`` is disabled (running with ``-n0`` for example), then ``worker_id`` will return ``"master"``. Additionally, worker processes have the following environment variables defined: * ``PYTEST_XDIST_WORKER``: the name of the worker, e.g., ``"gw2"``. * ``PYTEST_XDIST_WORKER_COUNT``: the total number of workers in this session, e.g., ``"4"`` when ``-n 4`` is given in the command-line. The information about the worker_id in a test is stored in the ``TestReport`` as well, under the ``worker_id`` attribute. Specifying test exec environments in an ini file ------------------------------------------------ You can use pytest's ini file configuration to avoid typing common options. You can for example make running with three subprocesses your default like this: .. code-block:: ini [pytest] addopts = -n3 You can also add default environments like this: .. code-block:: ini [pytest] addopts = --tx ssh=myhost//python=python3.5 --tx ssh=myhost//python=python3.6 and then just type:: py.test --dist=each to run tests in each of the environments. Specifying "rsync" dirs in an ini-file -------------------------------------- In a ``tox.ini`` or ``setup.cfg`` file in your root project directory you may specify directories to include or to exclude in synchronisation: .. code-block:: ini [pytest] rsyncdirs = . mypkg helperpkg rsyncignore = .hg These directory specifications are relative to the directory where the configuration file was found. .. _`pytest-xdist`: http://pypi.python.org/pypi/pytest-xdist .. _`pytest-xdist repository`: https://github.com/pytest-dev/pytest-xdist .. _`pytest`: http://pytest.org pytest-xdist-1.22.1/appveyor.yml0000644000372000037200000000077313242644556017530 0ustar travistravis00000000000000environment: matrix: # note: please use "tox --listenvs" to populate the build matrix - TOXENV: "py27-pytest33" - TOXENV: "py34-pytest33" - TOXENV: "py35-pytest33" - TOXENV: "py36-pytest33" - TOXENV: "py27-pytest33-pexpect" - TOXENV: "py36-pytest33-pexpect" - TOXENV: "flakes" - TOXENV: "readme" install: - C:\Python35\python -m pip install -U tox setuptools_scm pip build: false # Not a C# project, build stuff at the test step instead. 
test_script:
  - C:\Python35\python -m tox

pytest-xdist-1.22.1/dev-requirements.txt0000644000372000037200000000001213242644556021162 0ustar  travistravis00000000000000towncrier

pytest-xdist-1.22.1/pyproject.toml0000644000372000037200000000141013242644556020041 0ustar  travistravis00000000000000[tool.towncrier]
package = "xdist"
filename = "CHANGELOG.rst"
directory = "changelog/"
title_format = "pytest-xdist {version} ({project_date})"
template = "changelog/_template.rst"

[[tool.towncrier.type]]
directory = "removal"
name = "Deprecations and Removals"
showcontent = true

[[tool.towncrier.type]]
directory = "feature"
name = "Features"
showcontent = true

[[tool.towncrier.type]]
directory = "bugfix"
name = "Bug Fixes"
showcontent = true

[[tool.towncrier.type]]
directory = "vendor"
name = "Vendored Libraries"
showcontent = true

[[tool.towncrier.type]]
directory = "doc"
name = "Improved Documentation"
showcontent = true

[[tool.towncrier.type]]
directory = "trivial"
name = "Trivial Changes"
showcontent = true

pytest-xdist-1.22.1/setup.cfg0000644000372000037200000000020613242644607016745 0ustar  travistravis00000000000000[bdist_wheel]
universal = 1

[metadata]
license_file = LICENSE

[flake8]
max-line-length = 100

[egg_info]
tag_build =
tag_date = 0

pytest-xdist-1.22.1/setup.py0000644000372000037200000000335113242644556016645 0ustar  travistravis00000000000000from setuptools import setup, find_packages

install_requires = ['execnet>=1.1', 'pytest>=3.0.0', 'pytest-forked']

setup(
    name="pytest-xdist",
    use_scm_version={'write_to': 'xdist/_version.py'},
    description='py.test xdist plugin for distributed testing'
                ' and loop-on-failing modes',
    long_description=open('README.rst').read(),
    license='MIT',
    author='holger krekel and contributors',
    author_email='pytest-dev@python.org,holger@merlinux.eu',
    url='https://github.com/pytest-dev/pytest-xdist',
    platforms=['linux', 'osx', 'win32'],
    packages=find_packages(exclude=['testing', 'example']),
    entry_points={
        'pytest11': [
            'xdist = xdist.plugin',
            'xdist.looponfail = xdist.looponfail',
        ],
    },
    zip_safe=False,
    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
    install_requires=install_requires,
    setup_requires=['setuptools_scm'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Framework :: Pytest',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: MacOS :: MacOS X',
        'Topic :: Software Development :: Testing',
        'Topic :: Software Development :: Quality Assurance',
        'Topic :: Utilities',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
)

pytest-xdist-1.22.1/tox.ini0000644000372000037200000000167713242644556016453 0ustar  travistravis00000000000000[tox]
# if you change the envlist, please update .travis.yml file as well
envlist=
    py{27,34,35,36}-pytest{30,31,32,33}
    py{27,36}-pytest{30,31,32,33}-pexpect
    py{27,36}-pytest{master,features}
    flakes
    readme

[testenv]
changedir=testing
passenv = USER USERNAME
deps =
    pycmd  # to avoid .eggs
    setuptools_scm
    pytest30: pytest~=3.0.5
    pytest31: pytest~=3.1.0
    pytest32: pytest~=3.2.0
    pytest33: pytest~=3.3.0
    pytestmaster: git+https://github.com/pytest-dev/pytest.git@master
    pytestfeatures: git+https://github.com/pytest-dev/pytest.git@features
    pexpect: pexpect
platform=
    pexpect: linux|darwin
commands=
    # always clean to avoid code unmarshal mismatch on old python/pytest
    py.cleanup -aq
    py.test {posargs}

[testenv:flakes]
changedir=
deps = flake8
commands = flake8 setup.py testing xdist

[testenv:readme]
changedir =
deps = readme
skip_install = true
commands = python setup.py check -r -s

[pytest]
addopts = -rsfxX

pytest-xdist-1.22.1/PKG-INFO0000644000372000037200000003011013242644607016220 0ustar  travistravis00000000000000Metadata-Version: 1.2
Name: pytest-xdist
Version: 1.22.1
Summary: py.test xdist plugin for distributed testing and loop-on-failing modes
Home-page: https://github.com/pytest-dev/pytest-xdist
Author: holger krekel and contributors
Author-email: pytest-dev@python.org,holger@merlinux.eu
License: MIT
Description-Content-Type: UNKNOWN
Description:

.. image:: http://img.shields.io/pypi/v/pytest-xdist.svg
    :alt: PyPI version
    :target: https://pypi.python.org/pypi/pytest-xdist

.. image:: https://img.shields.io/pypi/pyversions/pytest-xdist.svg
    :alt: Python versions
    :target: https://pypi.python.org/pypi/pytest-xdist

.. image:: https://anaconda.org/conda-forge/pytest-xdist/badges/version.svg
    :alt: Anaconda version
    :target: https://anaconda.org/conda-forge/pytest-xdist

.. image:: https://travis-ci.org/pytest-dev/pytest-xdist.svg?branch=master
    :alt: Travis CI build status
    :target: https://travis-ci.org/pytest-dev/pytest-xdist

.. image:: https://ci.appveyor.com/api/projects/status/56eq1a1avd4sdd7e/branch/master?svg=true
    :alt: AppVeyor build status
    :target: https://ci.appveyor.com/project/pytestbot/pytest-xdist

xdist: pytest distributed testing plugin
========================================

The `pytest-xdist`_ plugin extends py.test with some unique test execution modes:

* test run parallelization_: if you have multiple CPUs or hosts you can
  use those for a combined test run. This allows you to speed up
  development or to use the special resources of `remote machines`_.

* ``--looponfail``: run your tests repeatedly in a subprocess. After each
  run py.test waits until a file in your project changes and then re-runs
  the previously failing tests. This is repeated until all tests pass,
  after which a full run is performed again.

* `Multi-Platform`_ coverage: you can specify different Python
  interpreters or different platforms and run tests in parallel on
  all of them.

Before running tests remotely, ``py.test`` efficiently "rsyncs" your
program source code to the remote place. All test results are reported
back and displayed to your local terminal. You may specify different
Python versions and interpreters.

If you would like to know how pytest-xdist works under the covers, check
out `OVERVIEW <https://github.com/pytest-dev/pytest-xdist/blob/master/OVERVIEW.md>`_.

Installation
------------

Install the plugin with::

    pip install pytest-xdist

or use the package in develop/in-place mode with
a checkout of the `pytest-xdist repository`_ ::

    pip install --editable .

.. _parallelization:

Speed up test runs by sending tests to multiple CPUs
----------------------------------------------------

To send tests to multiple CPUs, type::

    py.test -n NUM

Especially for longer running tests or tests requiring a lot of I/O this
can lead to considerable speed ups. This option can also be set to
``auto`` for automatic detection of the number of CPUs.

If a test crashes the interpreter, pytest-xdist will automatically restart
that worker and report the failure as usual. You can use the
``--max-worker-restart`` option to limit the number of workers that can be
restarted, or disable restarting altogether using ``--max-worker-restart=0``.
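For example, to run the suite on four CPUs but abort the session once two
workers have had to be restarted (the numbers here are illustrative)::

    py.test -n 4 --max-worker-restart=2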
By default, the ``-n`` option will send pending tests to any worker that
is available, without any guaranteed order, but you can control this with
these options:

* ``--dist=loadscope``: tests will be grouped by **module** for *test
  functions* and by **class** for *test methods*. Each group will be sent
  to an available worker, guaranteeing that all tests in a group run in
  the same process. This can be useful if you have expensive module-level
  or class-level fixtures. Currently the groupings can't be customized,
  and grouping by class takes priority over grouping by module. This
  feature was added in version ``1.19``.

* ``--dist=loadfile``: tests will be grouped by file name, and each group
  will be sent to an available worker, guaranteeing that all tests in a
  group run in the same worker. This feature was added in version ``1.21``.

Running tests in a Python subprocess
------------------------------------

To instantiate a ``python3.5`` subprocess and send tests to it, you may type::

    py.test -d --tx popen//python=python3.5

This will start a subprocess which is run with the ``python3.5``
Python interpreter, found in your system binary lookup path.

If you prefix the ``--tx`` option value like this::

    --tx 3*popen//python=python3.5

then three subprocesses will be created and tests will be load-balanced
across them.

.. _boxed:

Running tests in a boxed subprocess
-----------------------------------

This functionality has been moved to the
`pytest-forked <https://github.com/pytest-dev/pytest-forked>`_ plugin,
but the ``--boxed`` option is still kept for backward compatibility.

.. _`remote machines`:

Sending tests to remote SSH accounts
------------------------------------

Suppose you have a package ``mypkg`` which contains some tests that you
can successfully run locally, and you have an ssh-reachable machine
``myhost``. Then you can ad-hoc distribute your tests by typing::

    py.test -d --tx ssh=myhost --rsyncdir mypkg mypkg

This will synchronize your :code:`mypkg` package directory to a remote
ssh account and then locally collect tests and send them to the remote
side for execution.

You can specify multiple :code:`--rsyncdir` directories
to be sent to the remote side.

.. note::

   For py.test to collect and send tests correctly you not only need to
   make sure all code and tests directories are rsynced, but also that
   every test (sub) directory has an :code:`__init__.py` file, because
   internally py.test references tests by their fully qualified python
   module path. **You will otherwise get strange errors** during setup
   of the remote side.

You can specify multiple :code:`--rsyncignore` glob patterns to be
ignored when files are sent to the remote side. There are also internal
ignores: :code:`.*, *.pyc, *.pyo, *~`. These cannot be overridden via the
``rsyncignore`` command-line or ini-file options.

Sending tests to remote Socket Servers
--------------------------------------

Download the single-module `socketserver.py`_ Python program
and run it like this::

    python socketserver.py

It will tell you that it starts listening on the default port. From your
local machine you can now point py.test at this socket host with
something like this::

    py.test -d --tx socket=192.168.1.102:8888 --rsyncdir mypkg mypkg
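If several machines are running the socket server, you can give each of
them its own ``--tx`` option and tests will be distributed across all of
the listed hosts (the addresses below are placeholders)::

    py.test -d --tx socket=192.168.1.102:8888 --tx socket=192.168.1.103:8888 --rsyncdir mypkg mypkg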
.. _`atonce`:
.. _`Multi-Platform`:

Running tests on many platforms at once
---------------------------------------

The basic command to run tests on multiple platforms is::

    py.test --dist=each --tx=spec1 --tx=spec2

If you specify a Windows host, an OSX host and a Linux environment, this
command will send each test to all platforms - and report back failures
from all platforms at once. The specification strings use the
`xspec syntax`_.

.. _`xspec syntax`: http://codespeak.net/execnet/basics.html#xspec

.. _`socketserver.py`: http://bitbucket.org/hpk42/execnet/raw/2af991418160/execnet/script/socketserver.py

.. _`execnet`: http://codespeak.net/execnet

Identifying the worker process during a test
--------------------------------------------

*New in version 1.15.*

If you need to determine the identity of a worker process in
a test or fixture, you may use the ``worker_id`` fixture to do so:

.. code-block:: python

    @pytest.fixture()
    def user_account(worker_id):
        """ use a different account in each xdist worker """
        return "account_%s" % worker_id

When ``xdist`` is disabled (running with ``-n0`` for example),
``worker_id`` will return ``"master"``.

Additionally, worker processes have the following environment variables
defined:

* ``PYTEST_XDIST_WORKER``: the name of the worker, e.g., ``"gw2"``.
* ``PYTEST_XDIST_WORKER_COUNT``: the total number of workers in this
  session, e.g., ``"4"`` when ``-n 4`` is given on the command line.

The ``worker_id`` of a test is stored in the ``TestReport`` as well,
under the ``worker_id`` attribute.

Specifying test exec environments in an ini file
------------------------------------------------

You can use pytest's ini file configuration to avoid typing common
options. For example, you can make running with three subprocesses
your default like this:

.. code-block:: ini

    [pytest]
    addopts = -n3

You can also add default environments like this:

.. code-block:: ini

    [pytest]
    addopts = --tx ssh=myhost//python=python3.5 --tx ssh=myhost//python=python3.6

and then just type::

    py.test --dist=each

to run tests in each of the environments.

Specifying "rsync" dirs in an ini-file
--------------------------------------

In a ``tox.ini`` or ``setup.cfg`` file in your root project directory
you may specify directories to include in or to exclude from
synchronisation:

.. code-block:: ini

    [pytest]
    rsyncdirs = . mypkg helperpkg
    rsyncignore = .hg

These directory specifications are relative to the directory
where the configuration file was found.
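Combining the two ini-file sections above, a minimal configuration for a
remote setup might look like this (the host and package names are
placeholders):

.. code-block:: ini

    [pytest]
    addopts = --dist=each --tx ssh=myhost//python=python3.5
    rsyncdirs = . mypkg
    rsyncignore = .hg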
.. _`pytest-xdist`: http://pypi.python.org/pypi/pytest-xdist
.. _`pytest-xdist repository`: https://github.com/pytest-dev/pytest-xdist
.. _`pytest`: http://pytest.org

Platform: linux
Platform: osx
Platform: win32
Classifier: Development Status :: 5 - Production/Stable
Classifier: Framework :: Pytest
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: POSIX
Classifier: Operating System :: Microsoft :: Windows
Classifier: Operating System :: MacOS :: MacOS X
Classifier: Topic :: Software Development :: Testing
Classifier: Topic :: Software Development :: Quality Assurance
Classifier: Topic :: Utilities
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*