#!/usr/bin/env python2
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import argparse
import copy
import io
import os
import subprocess
import sys
import warnings

import pbr.version
import six.moves
from stestr import commands
from subunit import run as subunit_run
from testtools import run as testtools_run

from os_testr import regex_builder as rb


__version__ = pbr.version.VersionInfo('os_testr').version_string()


def get_parser(args):
    """Build the ostestr CLI parser and parse the given arguments.

    :param args: list of command line arguments (excluding the program name)
    :returns: tuple of (parsed argparse.Namespace, list of unrecognized args)
    """
    parser = argparse.ArgumentParser(
        description='Tool to run openstack tests')
    parser.add_argument('--version', action='version',
                        version='%s' % __version__)
    parser.add_argument('--blacklist-file', '-b', '--blacklist_file',
                        help='Path to a blacklist file, this file '
                             'contains a separate regex exclude on each '
                             'newline')
    parser.add_argument('--whitelist-file', '-w', '--whitelist_file',
                        help='Path to a whitelist file, this file '
                             'contains a separate regex on each newline.')
    # --regex, --path and --no-discover are mutually exclusive ways of
    # selecting what to run.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--regex', '-r',
                       help='A normal testr selection regex.')
    group.add_argument('--path', metavar='FILE_OR_DIRECTORY',
                       help='A file name or directory of tests to run.')
    group.add_argument('--no-discover', '-n', metavar='TEST_ID',
                       help="Takes in a single test to bypasses test "
                            "discover and just execute the test specified. "
                            "A file name may be used in place of a test "
                            "name.")
    # NOTE(review): help text fixed — it previously read "you do need to
    # edit a file", contradicting the point of the option.
    parser.add_argument('--black-regex', '-B',
                        help='Test rejection regex. If a test cases name '
                             'matches on re.search() operation , '
                             'it will be removed from the final test list. '
                             'Effectively the black-regex is added to '
                             ' black regex list, but you do not need to '
                             'edit a file. '
                             'The black filtering happens after the initial '
                             ' white selection, which by default is '
                             'everything.')
    pretty = parser.add_mutually_exclusive_group()
    pretty.add_argument('--pretty', '-p', dest='pretty', action='store_true',
                        help='Print pretty output from subunit-trace. This is '
                             'mutually exclusive with --subunit')
    pretty.add_argument('--no-pretty', dest='pretty', action='store_false',
                        help='Disable the pretty output with subunit-trace')
    parser.add_argument('--subunit', '-s', action='store_true',
                        help='output the raw subunit v2 from the test run '
                             'this is mutually exclusive with --pretty')
    parser.add_argument('--list', '-l', action='store_true',
                        help='List all the tests which will be run.')
    parser.add_argument('--color', action='store_true',
                        help='Use color in the pretty output')
    slowest = parser.add_mutually_exclusive_group()
    slowest.add_argument('--slowest', dest='slowest', action='store_true',
                         help="after the test run print the slowest tests")
    slowest.add_argument('--no-slowest', dest='slowest', action='store_false',
                         help="after the test run don't print the slowest "
                              "tests")
    parser.add_argument('--pdb', metavar='TEST_ID',
                        help='Run a single test that has pdb traces added')
    parallel = parser.add_mutually_exclusive_group()
    parallel.add_argument('--parallel', dest='parallel', action='store_true',
                          help='Run tests in parallel (this is the default)')
    parallel.add_argument('--serial', dest='parallel', action='store_false',
                          help='Run tests serially')
    parser.add_argument('--concurrency', '-c', type=int, metavar='WORKERS',
                        help='The number of workers to use when running in '
                             'parallel. By default this is the number of '
                             'cpus')
    parser.add_argument('--until-failure', action='store_true',
                        help='Run the tests in a loop until a failure is '
                             'encountered. Running with subunit or pretty'
                             'output enable will force the loop to run tests'
                             'serially')
    parser.add_argument('--print-exclude', action='store_true',
                        help='If an exclude file is used this option will '
                             'prints the comment from the same line and all '
                             'skipped tests before the test run')
    parser.set_defaults(pretty=True, slowest=True, parallel=True)
    return parser.parse_known_args(args)


def call_testr(regex, subunit, pretty, list_tests, slowest, parallel, concur,
               until_failure, color, others=None, blacklist_file=None,
               whitelist_file=None, black_regex=None, load_list=None):
    """Run (or list) tests via stestr with the given selection options.

    :returns: the stestr command's return code
    """
    # Handle missing .stestr.conf from users from before stestr migration:
    # derive test_path/top_dir/group_regex from the legacy .testr.conf.
    test_dir = None
    top_dir = None
    group_regex = None
    if not os.path.isfile('.stestr.conf') and os.path.isfile('.testr.conf'):
        # (fixed duplicated word "to to" in the original warning text)
        msg = ('No .stestr.conf file found in the CWD. Please create one '
               'to replace the .testr.conf. You can find a script to do this '
               'in the stestr git repository.')
        warnings.warn(msg)
        with open('.testr.conf', 'r') as testr_conf_file:
            config = six.moves.configparser.ConfigParser()
            config.readfp(testr_conf_file)
            test_command = config.get('DEFAULT', 'test_command')
            group_regex = None
            if config.has_option('DEFAULT', 'group_regex'):
                group_regex = config.get('DEFAULT', 'group_regex')
        # Pull the discovery paths out of the legacy test command line.
        for line in test_command.split('\n'):
            if 'subunit.run discover' in line:
                command_parts = line.split(' ')
                top_dir_present = '-t' in line
                for idx, val in enumerate(command_parts):
                    if top_dir_present:
                        if val == '-t':
                            top_dir = command_parts[idx + 1]
                            test_dir = command_parts[idx + 2]
                    else:
                        if val == 'discover':
                            test_dir = command_parts[idx + 2]
    elif not os.path.isfile(
            '.testr.conf') and not os.path.isfile('.stestr.conf'):
        msg = ('No .stestr.conf found, please create one.')
        print(msg)
        sys.exit(1)

    regexes = None
    if regex:
        regexes = regex.split()
    serial = not parallel
    if list_tests:
        # TODO(mtreinish): remove init call after list command detects and
        # autocreates the repository
        if not os.path.isdir('.stestr'):
            commands.init_command()
        return commands.list_command(filters=regexes)

    return_code = commands.run_command(
        filters=regexes, subunit_out=subunit, concurrency=concur,
        test_path=test_dir, top_dir=top_dir, group_regex=group_regex,
        until_failure=until_failure, serial=serial, pretty_out=pretty,
        load_list=load_list, blacklist_file=blacklist_file,
        whitelist_file=whitelist_file, black_regex=black_regex)
    if slowest:
        sys.stdout.write("\nSlowest Tests:\n")
        commands.slowest_command()
    return return_code


def call_subunit_run(test_id, pretty, subunit):
    """Run a single test id directly (bypassing discovery).

    :param test_id: the test id to run
    :param pretty: pipe the result through subunit-trace
    :param subunit: emit raw subunit and store the result via testr
    :returns: the exit status of the run where one is available
    """
    env = copy.deepcopy(os.environ)
    cmd_save_results = ['testr', 'load', '--subunit']
    if pretty:
        # Use subunit run module
        cmd = ['python', '-m', 'subunit.run', test_id]
        ps = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE)
        # Save subunit results via testr
        pfile = subprocess.Popen(cmd_save_results, env=env,
                                 stdin=ps.stdout, stdout=subprocess.PIPE)
        ps.stdout.close()
        # Transform output via subunit-trace
        proc = subprocess.Popen(['subunit-trace', '--no-failure-debug', '-f'],
                                env=env, stdin=pfile.stdout)
        pfile.stdout.close()
        proc.communicate()
        return proc.returncode
    elif subunit:
        sstdout = io.BytesIO()
        subunit_run.main([sys.argv[0], test_id], sstdout)
        pfile = subprocess.Popen(cmd_save_results, env=env,
                                 stdin=subprocess.PIPE)
        pfile.communicate(input=sstdout.getvalue())
        # Bug fix: the exit status was previously discarded here, so a
        # failing --subunit run still exited 0.
        return pfile.returncode
    else:
        testtools_run.main([sys.argv[0], test_id], sys.stdout)


def _select_and_call_runner(opts, exclude_regex, others):
    # Dispatch to stestr-based discovery or a direct single-test run.
    ec = 1
    if not opts.no_discover and not opts.pdb:
        ec = call_testr(exclude_regex, opts.subunit, opts.pretty, opts.list,
                        opts.slowest, opts.parallel, opts.concurrency,
                        opts.until_failure, opts.color, others,
                        blacklist_file=opts.blacklist_file,
                        whitelist_file=opts.whitelist_file,
                        black_regex=opts.black_regex)
    else:
        if others:
            print('Unexpected arguments: ' + ' '.join(others))
            return 2
        test_to_run = opts.no_discover or opts.pdb
        if test_to_run.find('/') != -1:
            test_to_run = rb.path_to_regex(test_to_run)
        ec = call_subunit_run(test_to_run, opts.pretty, opts.subunit)
    return ec


def ostestr(args):
    """Entry point logic: validate option combinations and run the tests.

    :returns: process exit code (2-7 for invalid option combinations)
    """
    opts, others = get_parser(args)
    if opts.pretty and opts.subunit:
        msg = ('Subunit output and pretty output cannot be specified at the '
               'same time')
        print(msg)
        return 2
    if opts.list and opts.no_discover:
        msg = ('you can not list tests when you are bypassing discovery to '
               'run a single test')
        print(msg)
        return 3
    if not opts.parallel and opts.concurrency:
        msg = "You can't specify a concurrency to use when running serially"
        print(msg)
        return 4
    if (opts.pdb or opts.no_discover) and opts.until_failure:
        msg = "You can not use until_failure mode with pdb or no-discover"
        print(msg)
        return 5
    if ((opts.pdb or opts.no_discover)
            and (opts.blacklist_file or opts.whitelist_file)):
        msg = "You can not use blacklist or whitelist with pdb or no-discover"
        print(msg)
        return 6
    if ((opts.pdb or opts.no_discover) and (opts.black_regex)):
        msg = "You can not use black-regex with pdb or no-discover"
        print(msg)
        return 7

    if opts.path:
        regex = rb.path_to_regex(opts.path)
    else:
        regex = opts.regex
    return _select_and_call_runner(opts, regex, others)


def main():
    # NOTE: the builtin exit() is used (not sys.exit) — the unit tests
    # patch os_testr.ostestr.exit directly.
    exit(ostestr(sys.argv[1:]))


if __name__ == '__main__':
    main()
import copy
import os
import subprocess


def _get_test_list(regex, env=None):
    """Ask ``stestr list`` for the test ids matching *regex*.

    Environment noise and discovery chatter is filtered out of the
    subprocess output before it is returned.

    :param regex: selection regex passed to ``stestr list`` (may be falsy)
    :param env: optional environment mapping; defaults to a copy of os.environ
    :returns: list of test id strings
    """
    run_env = env or copy.deepcopy(os.environ)
    cmd = ['stestr', 'list']
    if regex:
        cmd.append(regex)
    proc = subprocess.Popen(cmd, env=run_env, stdout=subprocess.PIPE,
                            universal_newlines=True)
    output = proc.communicate()[0]
    # Lines containing any of these tokens are stestr/env noise, not tests.
    noise = ('OS_', 'CAPTURE', 'TEST_TIMEOUT', 'PYTHON',
             'subunit.run discover')
    return [line for line in output.split('\n')
            if line and not any(token in line for token in noise)]


def print_skips(regex, message):
    """Print the tests matched by *regex*, prefixed by *message*.

    Falls back to a generic header naming the regex when no message is
    supplied. Prints nothing when the regex matches no tests.
    """
    matched = _get_test_list(regex)
    if not matched:
        return
    print(message if message else 'Skipped because of regex %s:' % regex)
    for test in matched:
        print(test)
    # Extra whitespace to separate
    print('\n')


def path_to_regex(path):
    """Convert a file path into a dotted test-selection regex.

    The extension is dropped and path separators become dots, e.g.
    ``tests/unit/test_foo.py`` -> ``tests.unit.test_foo``.
    """
    stem, _ext = os.path.splitext(path)
    return stem.replace('/', '.')


def get_regex_from_whitelist_file(file_path):
    """Join the regexes in a whitelist file into a single '|' expression.

    Anything after a ``#`` on a line is treated as a comment; blank and
    comment-only lines are skipped.
    """
    patterns = []
    with open(file_path) as white_file:
        for raw in white_file.read().splitlines():
            # Only the part before the first '#' is the regex.
            pattern = raw.strip().split('#')[0].strip()
            if pattern:
                patterns.append(pattern)
    return '|'.join(patterns)


def construct_regex(blacklist_file, whitelist_file, regex, print_exclude):
    """Deprecated, please use testlist_builder.construct_list instead."""
    exclude_regex = ''
    if blacklist_file:
        with open(blacklist_file, 'r') as black_file:
            for raw_line in black_file:
                parts = raw_line.strip().split('#')
                # Before the '#' is the regex; after it an optional comment.
                pattern = parts[0].strip()
                comment = parts[1].strip() if len(parts) > 1 else ''
                if not pattern:
                    continue
                if print_exclude:
                    print_skips(pattern, comment)
                # Newer lines are prepended, matching the historic order.
                exclude_regex = ('|'.join([pattern, exclude_regex])
                                 if exclude_regex else pattern)
        if exclude_regex:
            # Negative lookahead rejecting anything the blacklist matched.
            exclude_regex = "^((?!" + exclude_regex + ").)*$"
    if regex:
        exclude_regex += regex
    if whitelist_file:
        exclude_regex += '%s' % get_regex_from_whitelist_file(whitelist_file)
    return exclude_regex
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import sys


class AnsiColorizer(object):
    """A colorizer is an object that loosely wraps around a stream

    allowing callers to write text to the stream in a particular color.

    Colorizer classes must implement C{supported()} and C{write(text, color)}.
    """
    # ANSI SGR color codes keyed by color name.
    _colors = dict(black=30, red=31, green=32, yellow=33,
                   blue=34, magenta=35, cyan=36, white=37)

    def __init__(self, stream):
        self.stream = stream

    @classmethod
    def supported(cls, stream=sys.stdout):
        """Check the current platform supports coloring terminal output

        A class method that returns True if the current platform supports
        coloring terminal output using this method. Returns False otherwise.
        """
        if not stream.isatty():
            return False  # auto color only on TTYs
        try:
            import curses
        except ImportError:
            return False
        else:
            try:
                try:
                    return curses.tigetnum("colors") > 2
                except curses.error:
                    # terminfo not initialized yet; set it up and retry.
                    curses.setupterm()
                    return curses.tigetnum("colors") > 2
            except Exception:
                # guess false in case of error
                return False

    def write(self, text, color):
        """Write the given text to the stream in the given color.

        @param text: Text to be written to the stream.
        @param color: A string label for a color. e.g. 'red', 'white'.
        """
        color = self._colors[color]
        self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text))


class NullColorizer(object):
    """See _AnsiColorizer docstring."""

    def __init__(self, stream):
        self.stream = stream

    @classmethod
    def supported(cls, stream=sys.stdout):
        return True

    def write(self, text, color):
        # Color is intentionally ignored; plain text passthrough.
        self.stream.write(text)
# Copyright 2016 RedHat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import re

from os_testr import regex_builder


def black_reader(blacklist_file):
    """Parse a blacklist file into filtering records.

    :param blacklist_file: path to the blacklist file; each line holds a
        regex, optionally followed by ``# comment``
    :returns: list of (compiled_regex, message, skipped_test_list) tuples;
        the skipped list starts empty and is filled in by construct_list
    """
    regex_comment_lst = []  # tuple of (regex_compiled, msg, skipped_lst)
    # Bug fix: the file handle was previously opened without ever being
    # closed; a context manager guarantees cleanup.
    with open(blacklist_file, 'r') as black_file:
        for line in black_file:
            raw_line = line.strip()
            split_line = raw_line.split('#')
            # Before the # is the regex
            line_regex = split_line[0].strip()
            if len(split_line) > 1:
                # After the # is a comment
                comment = ''.join(split_line[1:]).strip()
            else:
                comment = 'Skipped because of regex %s:' % line_regex
            if not line_regex:
                continue
            regex_comment_lst.append((re.compile(line_regex), comment, []))
    return regex_comment_lst


def print_skips(regex, message, test_list):
    """Print the tests skipped by a blacklist entry, with its reason.

    :param regex: the compiled regex that caused the skips (kept for
        interface compatibility)
    :param message: header explaining why the tests were skipped
    :param test_list: the test ids that were removed
    """
    # Bug fix: 'message' was previously ignored, so the skip reasons built
    # by black_reader/construct_list were never shown to the user.
    if message:
        print(message)
    for test in test_list:
        print(test)
    # Extra whitespace to separate
    print('\n')


def construct_list(blacklist_file, whitelist_file, regex, black_regex,
                   print_exclude):
    """Filters the discovered test cases

    :return: iterable of strings. The strings are full
        test cases names, including tags like.:
        "project.api.TestClass.test_case[positive]"
    """
    if not regex:
        regex = ''  # handle the other false things

    if whitelist_file:
        white_re = regex_builder.get_regex_from_whitelist_file(whitelist_file)
    else:
        white_re = ''
    if not regex and white_re:
        regex = white_re
    elif regex and white_re:
        regex = '|'.join((regex, white_re))

    if blacklist_file:
        black_data = black_reader(blacklist_file)
    else:
        black_data = None

    if black_regex:
        msg = "Skipped because of regex provided as a command line argument:"
        record = (re.compile(black_regex), msg, [])
        if black_data:
            black_data.append(record)
        else:
            black_data = [record]

    search_filter = re.compile(regex)

    # NOTE(afazekas): we do not want to pass a giant re
    # to an external application due to the arg length limitations
    list_of_test_cases = [test_case for test_case in
                          regex_builder._get_test_list('')
                          if search_filter.search(test_case)]
    set_of_test_cases = set(list_of_test_cases)

    if not black_data:
        return set_of_test_cases

    # NOTE(afazekas): We might use a faster logic when the
    # print option is not requested
    for (rex, msg, s_list) in black_data:
        for test_case in list_of_test_cases:
            if rex.search(test_case):
                # NOTE(mtreinish): In the case of overlapping regex the test
                # case might have already been removed from the set of tests
                if test_case in set_of_test_cases:
                    set_of_test_cases.remove(test_case)
                    s_list.append(test_case)

    if print_exclude:
        for (rex, msg, s_list) in black_data:
            if s_list:
                print_skips(rex, msg, s_list)
    return set_of_test_cases
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utility to convert a subunit stream to an html results file. Code is adapted from the pyunit Html test runner at http://tungwaiyip.info/software/HTMLTestRunner.html Takes two arguments. First argument is path to subunit log file, second argument is path of desired output file. Second argument is optional, defaults to 'results.html'. Original HTMLTestRunner License: ------------------------------------------------------------------------ Copyright (c) 2004-2007, Wai Yip Tung All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name Wai Yip Tung nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import collections import datetime import io import sys import traceback from xml.sax import saxutils import pbr.version import subunit import testtools __version__ = pbr.version.VersionInfo('os_testr').version_string() class TemplateData(object): """Define a HTML template for report customerization and generation. Overall structure of an HTML report HTML +------------------------+ | | | | | | | STYLESHEET | | +----------------+ | | | | | | +----------------+ | | | | | | | | | | | | HEADING | | +----------------+ | | | | | | +----------------+ | | | | REPORT | | +----------------+ | | | | | | +----------------+ | | | | ENDING | | +----------------+ | | | | | | +----------------+ | | | | | | | +------------------------+ """ STATUS = { 0: 'pass', 1: 'fail', 2: 'error', 3: 'skip', } DEFAULT_TITLE = 'Unit Test Report' DEFAULT_DESCRIPTION = '' # ------------------------------------------------------------------------ # HTML Template HTML_TMPL = r""" %(title)s %(stylesheet)s %(heading)s %(report)s %(ending)s """ # variables: (title, generator, stylesheet, heading, report, ending) # ------------------------------------------------------------------------ # Stylesheet # # alternatively use a for external style sheet, e.g. # STYLESHEET_TMPL = """ """ # ------------------------------------------------------------------------ # Heading # HEADING_TMPL = """

%(title)s

%(parameters)s

%(description)s

""" # variables: (title, parameters, description) HEADING_ATTRIBUTE_TMPL = """

%(name)s: %(value)s

""" # variables: (name, value) # ------------------------------------------------------------------------ # Report # REPORT_TMPL = """

Show Summary Failed All

%(test_list)s
Test Group/Test case Count Pass Fail Error Skip View
Total %(count)s %(Pass)s %(fail)s %(error)s %(skip)s    
""" # variables: (test_list, count, Pass, fail, error) REPORT_CLASS_TMPL = r""" %(desc)s %(count)s %(Pass)s %(fail)s %(error)s %(skip)s Detail """ # variables: (style, desc, count, Pass, fail, error, cid) REPORT_TEST_WITH_OUTPUT_TMPL = r"""
%(desc)s
%(status)s """ # variables: (tid, Class, style, desc, status) REPORT_TEST_NO_OUTPUT_TMPL = r"""
%(desc)s
%(status)s """ # variables: (tid, Class, style, desc, status) REPORT_TEST_OUTPUT_TMPL = r""" %(id)s: %(output)s """ # variables: (id, output) # ------------------------------------------------------------------------ # ENDING # ENDING_TMPL = """
 
""" # -------------------- The end of the Template class ------------------- class ClassInfoWrapper(object): def __init__(self, name, mod): self.name = name self.mod = mod def __repr__(self): return "%s" % (self.name) class HtmlOutput(testtools.TestResult): """Output test results in html.""" def __init__(self, html_file='result.html'): super(HtmlOutput, self).__init__() self.success_count = 0 self.failure_count = 0 self.error_count = 0 self.skip_count = 0 self.result = [] self.html_file = html_file def addSuccess(self, test): self.success_count += 1 output = test.shortDescription() if output is None: output = test.id() self.result.append((0, test, output, '')) def addSkip(self, test, err): output = test.shortDescription() if output is None: output = test.id() self.skip_count += 1 self.result.append((3, test, output, '')) def addError(self, test, err): output = test.shortDescription() if output is None: output = test.id() # Skipped tests are handled by SkipTest Exceptions. # if err[0] == SkipTest: # self.skip_count += 1 # self.result.append((3, test, output, '')) else: self.error_count += 1 _exc_str = self.formatErr(err) self.result.append((2, test, output, _exc_str)) def addFailure(self, test, err): print(test) self.failure_count += 1 _exc_str = self.formatErr(err) output = test.shortDescription() if output is None: output = test.id() self.result.append((1, test, output, _exc_str)) def formatErr(self, err): exctype, value, tb = err return ''.join(traceback.format_exception(exctype, value, tb)) def stopTestRun(self): super(HtmlOutput, self).stopTestRun() self.stopTime = datetime.datetime.now() report_attrs = self._getReportAttributes() generator = 'subunit2html %s' % __version__ heading = self._generate_heading(report_attrs) report = self._generate_report() ending = self._generate_ending() output = TemplateData.HTML_TMPL % dict( title=saxutils.escape(TemplateData.DEFAULT_TITLE), generator=generator, stylesheet=TemplateData.STYLESHEET_TMPL, heading=heading, 
report=report, ending=ending, ) if self.html_file: with open(self.html_file, 'wb') as html_file: html_file.write(output.encode('utf8')) def _getReportAttributes(self): """Return report attributes as a list of (name, value).""" status = [] if self.success_count: status.append('Pass %s' % self.success_count) if self.failure_count: status.append('Failure %s' % self.failure_count) if self.error_count: status.append('Error %s' % self.error_count) if self.skip_count: status.append('Skip %s' % self.skip_count) if status: status = ' '.join(status) else: status = 'none' return [ ('Status', status), ] def _generate_heading(self, report_attrs): a_lines = [] for name, value in report_attrs: line = TemplateData.HEADING_ATTRIBUTE_TMPL % dict( name=saxutils.escape(name), value=saxutils.escape(value), ) a_lines.append(line) heading = TemplateData.HEADING_TMPL % dict( title=saxutils.escape(TemplateData.DEFAULT_TITLE), parameters=''.join(a_lines), description=saxutils.escape(TemplateData.DEFAULT_DESCRIPTION), ) return heading def _generate_report(self): rows = [] sortedResult = self._sortResult(self.result) for cid, (cls, cls_results) in enumerate(sortedResult): # subtotal for a class np = nf = ne = ns = 0 for n, t, o, e in cls_results: if n == 0: np += 1 elif n == 1: nf += 1 elif n == 2: ne += 1 else: ns += 1 # format class description if cls.mod == "__main__": name = cls.name else: name = "%s" % (cls.name) doc = cls.__doc__ and cls.__doc__.split("\n")[0] or "" desc = doc and '%s: %s' % (name, doc) or name row = TemplateData.REPORT_CLASS_TMPL % dict( style=(ne > 0 and 'errorClass' or nf > 0 and 'failClass' or 'passClass'), desc = desc, count = np + nf + ne + ns, Pass = np, fail = nf, error = ne, skip = ns, cid = 'c%s' % (cid + 1), ) rows.append(row) for tid, (n, t, o, e) in enumerate(cls_results): self._generate_report_test(rows, cid, tid, n, t, o, e) report = TemplateData.REPORT_TMPL % dict( test_list=''.join(rows), count=str(self.success_count + self.failure_count + 
self.error_count + self.skip_count), Pass=str(self.success_count), fail=str(self.failure_count), error=str(self.error_count), skip=str(self.skip_count), ) return report def _sortResult(self, result_list): # unittest does not seems to run in any particular order. # Here at least we want to group them together by class. rmap = {} classes = [] # Differentiate between classes that have test failures so we can sort # them at the top of the html page for easier troubleshooting clsmap_has_failure = collections.defaultdict(bool) def track_has_failure(name, n): if n == 1 or n == 2: clsmap_has_failure[name] = True for n, t, o, e in result_list: if hasattr(t, '_tests'): for inner_test in t._tests: name = self._add_cls(rmap, classes, inner_test, (n, inner_test, o, e)) track_has_failure(name, n) else: name = self._add_cls(rmap, classes, t, (n, t, o, e)) track_has_failure(name, n) failclasses = [] passclasses = [] for cls in classes: append_to = (failclasses if clsmap_has_failure[str(cls)] else passclasses) append_to.append(cls) classort = lambda s: str(s) sortedfailclasses = sorted(failclasses, key=classort) sortedpassclasses = sorted(passclasses, key=classort) sortedclasses = sortedfailclasses + sortedpassclasses r = [(cls, rmap[str(cls)]) for cls in sortedclasses] return r def _add_cls(self, rmap, classes, test, data_tuple): if hasattr(test, 'test'): test = test.test if test.__class__ == subunit.RemotedTestCase: cl = test._RemotedTestCase__description.rsplit('.', 1)[0] else: cl = test.id().rsplit('.', 1)[0] mod = cl.rsplit('.', 1)[0] cls = ClassInfoWrapper(cl, mod) if not str(cls) in rmap: rmap[str(cls)] = [] classes.append(cls) rmap[str(cls)].append(data_tuple) return str(cls) def _generate_report_test(self, rows, cid, tid, n, t, o, e): # e.g. 'pt1.1', 'ft1.1', etc # ptx.x for passed/skipped tests and ftx.x for failed/errored tests. 
has_output = bool(o or e) tid = ((n == 0 or n == 3) and 'p' or 'f') + 't%s.%s' % (cid + 1, tid + 1) name = t.id().split('.')[-1] # if shortDescription is not the function name, use it if t.shortDescription().find(name) == -1: doc = t.shortDescription() else: doc = None desc = doc and ('%s: %s' % (name, doc)) or name tmpl = (has_output and TemplateData.REPORT_TEST_WITH_OUTPUT_TMPL or TemplateData.REPORT_TEST_NO_OUTPUT_TMPL) script = TemplateData.REPORT_TEST_OUTPUT_TMPL % dict( id=tid, output=saxutils.escape(o + e), ) row = tmpl % dict( tid=tid, Class=((n == 0 or n == 3) and 'hiddenRow' or 'none'), style=(n == 2 and 'errorCase' or (n == 1 and 'failCase' or 'none')), desc=desc, script=script, status=TemplateData.STATUS[n], ) rows.append(row) if not has_output: return def _generate_ending(self): return TemplateData.ENDING_TMPL def startTestRun(self): super(HtmlOutput, self).startTestRun() class FileAccumulator(testtools.StreamResult): def __init__(self): super(FileAccumulator, self).__init__() self.route_codes = collections.defaultdict(io.BytesIO) def status(self, **kwargs): if kwargs.get('file_name') != 'stdout': return file_bytes = kwargs.get('file_bytes') if not file_bytes: return route_code = kwargs.get('route_code') stream = self.route_codes[route_code] stream.write(file_bytes) def main(): if '--version' in sys.argv: print(__version__) exit(0) if len(sys.argv) < 2: print("Need at least one argument: path to subunit log.") exit(1) subunit_file = sys.argv[1] if len(sys.argv) > 2: html_file = sys.argv[2] else: html_file = 'results.html' html_result = HtmlOutput(html_file) stream = open(subunit_file, 'rb') # Feed the subunit stream through both a V1 and V2 parser. # Depends on having the v2 capable libraries installed. # First V2. # Non-v2 content and captured non-test output will be presented as file # segments called stdout. suite = subunit.ByteStreamToStreamResult(stream, non_subunit_name='stdout') # The HTML output code is in legacy mode. 
result = testtools.StreamToExtendedDecorator(html_result) # Divert non-test output accumulator = FileAccumulator() result = testtools.StreamResultRouter(result) result.add_rule(accumulator, 'test_id', test_id=None) result.startTestRun() suite.run(result) # Now reprocess any found stdout content as V1 subunit for bytes_io in accumulator.route_codes.values(): bytes_io.seek(0) suite = subunit.ProtocolTestCase(bytes_io) suite.run(html_result) result.stopTestRun() if __name__ == '__main__': main() os-testr-1.0.0/os_testr/tests/0000775000175000017500000000000013154262453017427 5ustar jenkinsjenkins00000000000000os-testr-1.0.0/os_testr/tests/test_ostestr.py0000664000175000017500000001570713154262145022553 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_os_testr ---------------------------------- Tests for `os_testr` module. 
""" import io import mock from os_testr import ostestr as os_testr from os_testr.tests import base class TestGetParser(base.TestCase): def test_pretty(self): namespace = os_testr.get_parser(['--pretty']) self.assertEqual(True, namespace[0].pretty) namespace = os_testr.get_parser(['--no-pretty']) self.assertEqual(False, namespace[0].pretty) self.assertRaises(SystemExit, os_testr.get_parser, ['--no-pretty', '--pretty']) def test_slowest(self): namespace = os_testr.get_parser(['--slowest']) self.assertEqual(True, namespace[0].slowest) namespace = os_testr.get_parser(['--no-slowest']) self.assertEqual(False, namespace[0].slowest) self.assertRaises(SystemExit, os_testr.get_parser, ['--no-slowest', '--slowest']) def test_parallel(self): namespace = os_testr.get_parser(['--parallel']) self.assertEqual(True, namespace[0].parallel) namespace = os_testr.get_parser(['--serial']) self.assertEqual(False, namespace[0].parallel) self.assertRaises(SystemExit, os_testr.get_parser, ['--parallel', '--serial']) class TestCallers(base.TestCase): def test_no_discover(self): namespace = os_testr.get_parser(['-n', 'project.tests.foo']) def _fake_exit(arg): self.assertTrue(arg) def _fake_run(*args, **kwargs): return 'project.tests.foo' in args with mock.patch.object(os_testr, 'exit', side_effect=_fake_exit), \ mock.patch.object(os_testr, 'get_parser', return_value=namespace), \ mock.patch.object(os_testr, 'call_subunit_run', side_effect=_fake_run): os_testr.main() def test_no_discover_path(self): namespace = os_testr.get_parser(['-n', 'project/tests/foo']) def _fake_exit(arg): self.assertTrue(arg) def _fake_run(*args, **kwargs): return 'project.tests.foo' in args with mock.patch.object(os_testr, 'exit', side_effect=_fake_exit), \ mock.patch.object(os_testr, 'get_parser', return_value=namespace), \ mock.patch.object(os_testr, 'call_subunit_run', side_effect=_fake_run): os_testr.main() def test_pdb(self): namespace = os_testr.get_parser(['--pdb', 'project.tests.foo']) def _fake_exit(arg): 
self.assertTrue(arg) def _fake_run(*args, **kwargs): return 'project.tests.foo' in args with mock.patch.object(os_testr, 'exit', side_effect=_fake_exit), \ mock.patch.object(os_testr, 'get_parser', return_value=namespace), \ mock.patch.object(os_testr, 'call_subunit_run', side_effect=_fake_run): os_testr.main() def test_pdb_path(self): namespace = os_testr.get_parser(['--pdb', 'project/tests/foo']) def _fake_exit(arg): self.assertTrue(arg) def _fake_run(*args, **kwargs): return 'project.tests.foo' in args with mock.patch.object(os_testr, 'exit', side_effect=_fake_exit), \ mock.patch.object(os_testr, 'get_parser', return_value=namespace), \ mock.patch.object(os_testr, 'call_subunit_run', side_effect=_fake_run): os_testr.main() def test_call_subunit_run_pretty(self): '''Test call_subunit_run Test ostestr call_subunit_run function when: Pretty is True ''' pretty = True subunit = False with mock.patch('subprocess.Popen', autospec=True) as mock_popen: mock_popen.return_value.returncode = 0 mock_popen.return_value.stdout = io.BytesIO() os_testr.call_subunit_run('project.tests.foo', pretty, subunit) # Validate Popen was called three times self.assertTrue(mock_popen.called, 'Popen was never called') count = mock_popen.call_count self.assertEqual(3, count, 'Popen was called %s' ' instead of 3 times' % count) # Validate Popen called the right functions called = mock_popen.call_args_list msg = "Function %s not called" function = ['python', '-m', 'subunit.run', 'project.tests.foo'] self.assertIn(function, called[0][0], msg % 'subunit.run') function = ['testr', 'load', '--subunit'] self.assertIn(function, called[1][0], msg % 'testr load') function = ['subunit-trace', '--no-failure-debug', '-f'] self.assertIn(function, called[2][0], msg % 'subunit-trace') def test_call_subunit_run_sub(self): '''Test call_subunit run Test ostestr call_subunit_run function when: Pretty is False and Subunit is True ''' pretty = False subunit = True with mock.patch('subprocess.Popen', autospec=True) 
as mock_popen: os_testr.call_subunit_run('project.tests.foo', pretty, subunit) # Validate Popen was called once self.assertTrue(mock_popen.called, 'Popen was never called') count = mock_popen.call_count self.assertEqual(1, count, 'Popen was called more than once') # Validate Popen called the right function called = mock_popen.call_args function = ['testr', 'load', '--subunit'] self.assertIn(function, called[0], "testr load not called") def test_call_subunit_run_testtools(self): '''Test call_subunit_run Test ostestr call_subunit_run function when: Pretty is False and Subunit is False ''' pretty = False subunit = False with mock.patch('testtools.run.main', autospec=True) as mock_run: os_testr.call_subunit_run('project.tests.foo', pretty, subunit) # Validate testtool.run was called once self.assertTrue(mock_run.called, 'testtools.run was never called') count = mock_run.call_count self.assertEqual(1, count, 'testtools.run called more than once') os-testr-1.0.0/os_testr/tests/test_return_codes.py0000664000175000017500000001011113154262145023524 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import shutil import subprocess import tempfile import testtools from os_testr.tests import base from six import StringIO DEVNULL = open(os.devnull, 'wb') class TestReturnCodes(base.TestCase): def setUp(self): super(TestReturnCodes, self).setUp() # Setup test dirs self.directory = tempfile.mkdtemp(prefix='ostestr-unit') self.addCleanup(shutil.rmtree, self.directory) self.test_dir = os.path.join(self.directory, 'tests') os.mkdir(self.test_dir) # Setup Test files self.testr_conf_file = os.path.join(self.directory, '.stestr.conf') self.setup_cfg_file = os.path.join(self.directory, 'setup.cfg') self.passing_file = os.path.join(self.test_dir, 'test_passing.py') self.failing_file = os.path.join(self.test_dir, 'test_failing.py') self.init_file = os.path.join(self.test_dir, '__init__.py') self.setup_py = os.path.join(self.directory, 'setup.py') shutil.copy('os_testr/tests/files/stestr-conf', self.testr_conf_file) shutil.copy('os_testr/tests/files/passing-tests', self.passing_file) shutil.copy('os_testr/tests/files/failing-tests', self.failing_file) shutil.copy('setup.py', self.setup_py) shutil.copy('os_testr/tests/files/setup.cfg', self.setup_cfg_file) shutil.copy('os_testr/tests/files/__init__.py', self.init_file) self.stdout = StringIO() self.stderr = StringIO() # Change directory, run wrapper and check result self.addCleanup(os.chdir, os.path.abspath(os.curdir)) os.chdir(self.directory) def assertRunExit(self, cmd, expected, subunit=False): p = subprocess.Popen( "%s" % cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() if not subunit: self.assertEqual( p.returncode, expected, "Stdout: %s; Stderr: %s" % (out, err)) else: self.assertEqual(p.returncode, expected, "Expected return code: %s doesn't match actual " "return code of: %s" % (expected, p.returncode)) def test_default_passing(self): self.assertRunExit('ostestr --regex passing', 0) def test_default_fails(self): self.assertRunExit('ostestr', 1) def 
test_default_passing_no_slowest(self): self.assertRunExit('ostestr --no-slowest --regex passing', 0) def test_default_fails_no_slowest(self): self.assertRunExit('ostestr --no-slowest', 1) def test_default_serial_passing(self): self.assertRunExit('ostestr --serial --regex passing', 0) def test_default_serial_fails(self): self.assertRunExit('ostestr --serial', 1) def test_testr_subunit_passing(self): self.assertRunExit('ostestr --no-pretty --subunit --regex passing', 0, subunit=True) @testtools.skip('Skipped because of testrepository lp bug #1411804') def test_testr_subunit_fails(self): self.assertRunExit('ostestr --no-pretty --subunit', 1, subunit=True) def test_testr_no_pretty_passing(self): self.assertRunExit('ostestr --no-pretty --regex passing', 0) def test_testr_no_pretty_fails(self): self.assertRunExit('ostestr --no-pretty', 1) def test_list(self): self.assertRunExit('ostestr --list', 0) def test_no_test(self): self.assertRunExit('ostestr --regex a --black-regex a', 1) os-testr-1.0.0/os_testr/tests/sample_streams/0000775000175000017500000000000013154262453022446 5ustar jenkinsjenkins00000000000000os-testr-1.0.0/os_testr/tests/sample_streams/successful.subunit0000664000175000017500000003042313154262145026240 0ustar jenkinsjenkins00000000000000³+@JV&`Þ‹¸8os_testr.tests.test_os_testr.TestGetParser.test_parallelJ”½Þ³+pBV&`Þœ³P8os_testr.tests.test_os_testr.TestGetParser.test_paralleltext/plain;charset="utf8"stderrA²usage: run.py [-h] [--blacklist_file BLACKLIST_FILE] [--regex REGEX | --path FILE_OR_DIRECTORY | --no-discover TEST_ID] [--pretty | --no-pretty] [--subunit] [--list] [--slowest | --no-slowest] [--pdb TEST_ID] [--parallel | --serial] [--concurrency WORKERS] [--until-failure] [--print-exclude] run.py: error: argument --serial: not allowed with argument --parallel 
ܱW?³+p@lV&`Þœ³P8os_testr.tests.test_os_testr.TestGetParser.test_paralleltext/plain;charset="utf8"stdoutÞ³+ƒ@VV&bŪl˜:os_testr.tests.test_return_codes.TestReturnCodes.test_listworker-6´W9'³+@^V&aã°@Kos_testr.tests.test_return_codes.TestReturnCodes.test_testr_subunit_passingàö ‹³+p@€V&bÎGˆh@Kos_testr.tests.test_return_codes.TestReturnCodes.test_testr_subunit_passingtext/plain;charset="utf8"stderrÆœ/J³+p@€V&bÎGˆh@Kos_testr.tests.test_return_codes.TestReturnCodes.test_testr_subunit_passingtext/plain;charset="utf8"stdoutú4~-³+ƒ@hV&bÎGˆh@Kos_testr.tests.test_return_codes.TestReturnCodes.test_testr_subunit_passingworker-7‰‰W–os-testr-1.0.0/os_testr/tests/sample_streams/all_skips.subunit0000664000175000017500000000462513154262145026047 0ustar jenkinsjenkins00000000000000³+@\VK¬ÎÛ§X€@IsetUpClass (tempest.api.data_processing.test_data_sources.DataSourceTest)1`£’³+p@–VK¬ÎÛ§X€@IsetUpClass (tempest.api.data_processing.test_data_sources.DataSourceTest)text/plain;charset=utf8reasonSahara support is requiredûkùγ+…@fVK¬ÎÛ§X€@IsetUpClass (tempest.api.data_processing.test_data_sources.DataSourceTest)worker-1ð°+ê³+@LVK¬ÎÛ®æ:setUpClass (tempest.api.data_processing.test_jobs.JobTest)D-T³+p@†VK¬ÎÛ®æ:setUpClass (tempest.api.data_processing.test_jobs.JobTest)text/plain;charset=utf8reasonSahara support is required[×lK³+…@VVK¬ÎÛ®æ:setUpClass (tempest.api.data_processing.test_jobs.JobTest)worker-1Ï›™È³+@fVK¬ÎçÚªÐ@SsetUpClass (tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest)„‘³+p@ VK¬ÎçÚªÐ@SsetUpClass (tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest)text/plain;charset=utf8reasonSahara support is required¯=wÞ³+…@pVK¬ÎçÚªÐ@SsetUpClass (tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest)worker-3“Ü8!³+@kVK¬Îõš+@XsetUpClass (tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest)Ôsh³+p@¥VK¬Îõš+@XsetUpClass 
(tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest)text/plain;charset=utf8reasonSahara support is requiredÁV¤³+…@uVK¬Îõš+@XsetUpClass (tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest)worker-2>öÿ³+@kVK¬Îõ£¨°@XsetUpClass (tempest.api.data_processing.test_node_group_templates.NodeGroupTemplateTest)sZU³+p@¥VK¬Îõ£¨°@XsetUpClass (tempest.api.data_processing.test_node_group_templates.NodeGroupTemplateTest)text/plain;charset=utf8reasonSahara support is requiredq\aZ³+…@uVK¬Îõ£¨°@XsetUpClass (tempest.api.data_processing.test_node_group_templates.NodeGroupTemplateTest)worker-2×Ïãê³+@[VK¬Ðò È@HsetUpClass (tempest.api.data_processing.test_job_binaries.JobBinaryTest),y³+p@•VK¬Ðò È@HsetUpClass (tempest.api.data_processing.test_job_binaries.JobBinaryTest)text/plain;charset=utf8reasonSahara support is requiredM[¾³+…@eVK¬Ðò È@HsetUpClass (tempest.api.data_processing.test_job_binaries.JobBinaryTest)worker-0T§ F³+@TVK¬Ðò®¸@AsetUpClass (tempest.api.data_processing.test_plugins.PluginsTest)]Q$þ³+p@ŽVK¬Ðò®¸@AsetUpClass (tempest.api.data_processing.test_plugins.PluginsTest)text/plain;charset=utf8reasonSahara support is requiredƒj9æ³+…@^VK¬Ðò®¸@AsetUpClass (tempest.api.data_processing.test_plugins.PluginsTest)worker-0YЮÒos-testr-1.0.0/os_testr/tests/__init__.py0000664000175000017500000000000013154262145021524 0ustar jenkinsjenkins00000000000000os-testr-1.0.0/os_testr/tests/test_regex_builder.py0000664000175000017500000002031513154262145023657 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import io import mock from os_testr import regex_builder as os_testr from os_testr.tests import base class TestPathToRegex(base.TestCase): def test_file_name(self): result = os_testr.path_to_regex("tests/network/v2/test_net.py") self.assertEqual("tests.network.v2.test_net", result) result = os_testr.path_to_regex("openstack/tests/network/v2") self.assertEqual("openstack.tests.network.v2", result) class TestConstructRegex(base.TestCase): def test_regex_passthrough(self): result = os_testr.construct_regex(None, None, 'fake_regex', False) self.assertEqual(result, 'fake_regex') def test_blacklist_regex_with_comments(self): with io.StringIO() as blacklist_file: for i in range(4): blacklist_file.write(u'fake_regex_%s # A Comment\n' % i) blacklist_file.seek(0) with mock.patch('six.moves.builtins.open', return_value=blacklist_file): result = os_testr.construct_regex( 'fake_path', None, None, False) self.assertEqual(result, "^((?!fake_regex_3|fake_regex_2|" "fake_regex_1|fake_regex_0).)*$") def test_whitelist_regex_with_comments(self): with io.StringIO() as whitelist_file: for i in range(4): whitelist_file.write(u'fake_regex_%s # A Comment\n' % i) whitelist_file.seek(0) with mock.patch('six.moves.builtins.open', return_value=whitelist_file): result = os_testr.construct_regex( None, 'fake_path', None, False) self.assertEqual( result, "fake_regex_0|fake_regex_1|fake_regex_2|fake_regex_3") def test_blacklist_regex_without_comments(self): with io.StringIO() as blacklist_file: for i in range(4): blacklist_file.write(u'fake_regex_%s\n' % i) blacklist_file.seek(0) with mock.patch('six.moves.builtins.open', return_value=blacklist_file): result = os_testr.construct_regex( 'fake_path', None, None, False) self.assertEqual(result, "^((?!fake_regex_3|fake_regex_2|" "fake_regex_1|fake_regex_0).)*$") def test_blacklist_regex_with_comments_and_regex(self): with io.StringIO() as 
blacklist_file: for i in range(4): blacklist_file.write(u'fake_regex_%s # Comments\n' % i) blacklist_file.seek(0) with mock.patch('six.moves.builtins.open', return_value=blacklist_file): result = os_testr.construct_regex('fake_path', None, 'fake_regex', False) expected_regex = ( "^((?!fake_regex_3|fake_regex_2|fake_regex_1|" "fake_regex_0).)*$fake_regex") self.assertEqual(result, expected_regex) def test_blacklist_regex_without_comments_and_regex(self): with io.StringIO() as blacklist_file: for i in range(4): blacklist_file.write(u'fake_regex_%s\n' % i) blacklist_file.seek(0) with mock.patch('six.moves.builtins.open', return_value=blacklist_file): result = os_testr.construct_regex('fake_path', None, 'fake_regex', False) expected_regex = ( "^((?!fake_regex_3|fake_regex_2|fake_regex_1|" "fake_regex_0).)*$fake_regex") self.assertEqual(result, expected_regex) @mock.patch.object(os_testr, 'print_skips') def test_blacklist_regex_with_comment_print_skips(self, print_mock): with io.StringIO() as blacklist_file: for i in range(4): blacklist_file.write(u'fake_regex_%s # Comment\n' % i) blacklist_file.seek(0) with mock.patch('six.moves.builtins.open', return_value=blacklist_file): result = os_testr.construct_regex('fake_path', None, None, True) expected_regex = ("^((?!fake_regex_3|fake_regex_2|fake_regex_1|" "fake_regex_0).)*$") self.assertEqual(result, expected_regex) calls = print_mock.mock_calls self.assertEqual(len(calls), 4) args = list(map(lambda x: x[1], calls)) self.assertIn(('fake_regex_0', 'Comment'), args) self.assertIn(('fake_regex_1', 'Comment'), args) self.assertIn(('fake_regex_2', 'Comment'), args) self.assertIn(('fake_regex_3', 'Comment'), args) @mock.patch.object(os_testr, 'print_skips') def test_blacklist_regex_without_comment_print_skips(self, print_mock): with io.StringIO() as blacklist_file: for i in range(4): blacklist_file.write(u'fake_regex_%s\n' % i) blacklist_file.seek(0) with mock.patch('six.moves.builtins.open', return_value=blacklist_file): result 
= os_testr.construct_regex('fake_path', None, None, True) expected_regex = ("^((?!fake_regex_3|fake_regex_2|" "fake_regex_1|fake_regex_0).)*$") self.assertEqual(result, expected_regex) calls = print_mock.mock_calls self.assertEqual(len(calls), 4) args = list(map(lambda x: x[1], calls)) self.assertIn(('fake_regex_0', ''), args) self.assertIn(('fake_regex_1', ''), args) self.assertIn(('fake_regex_2', ''), args) self.assertIn(('fake_regex_3', ''), args) class TestWhitelistFile(base.TestCase): def test_read_whitelist_file(self): file_contents = u"""regex_a regex_b""" with io.StringIO() as whitelist_file: whitelist_file.write(file_contents) whitelist_file.seek(0) with mock.patch('six.moves.builtins.open', return_value=whitelist_file): regex = os_testr.get_regex_from_whitelist_file( '/path/to/not_used') self.assertEqual('regex_a|regex_b', regex) def test_whitelist_regex_without_comments_and_regex(self): file_contents = u"""regex_a regex_b""" with io.StringIO() as whitelist_file: whitelist_file.write(file_contents) whitelist_file.seek(0) with mock.patch('six.moves.builtins.open', return_value=whitelist_file): result = os_testr.construct_regex(None, 'fake_path', None, False) expected_regex = 'regex_a|regex_b' self.assertEqual(result, expected_regex) class TestGetTestList(base.TestCase): def test__get_test_list(self): test_list = os_testr._get_test_list('test__get_test_list') self.assertIn('test__get_test_list', test_list[0]) def test__get_test_list_regex_is_empty(self): test_list = os_testr._get_test_list('') self.assertIn('', test_list[0]) def test__get_test_list_regex_is_none(self): test_list = os_testr._get_test_list(None) # NOTE(masayukig): We should get all of the tests. So we should have # more than one test case. self.assertGreater(len(test_list), 1) self.assertIn('os_testr.tests.test_regex_builder.' 
'TestGetTestList.test__get_test_list_regex_is_none', test_list) os-testr-1.0.0/os_testr/tests/base.py0000664000175000017500000000143213154262145020711 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Copyright 2010-2011 OpenStack Foundation # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslotest import base class TestCase(base.BaseTestCase): """Test case base class for all unit tests.""" os-testr-1.0.0/os_testr/tests/utils/0000775000175000017500000000000013154262453020567 5ustar jenkinsjenkins00000000000000os-testr-1.0.0/os_testr/tests/utils/test_colorizer.py0000664000175000017500000000474613154262145024221 0ustar jenkinsjenkins00000000000000# Copyright 2016 Hewlett Packard Enterprise Development LP # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import six import sys from ddt import data from ddt import ddt from ddt import unpack from os_testr.tests import base from os_testr.utils import colorizer @ddt class TestNullColorizer(base.TestCase): @data(None, "foo", sys.stdout, ) def test_supported_always_true(self, stream): self.assertTrue(colorizer.NullColorizer.supported(stream)) @data(("foo", "red"), ("foo", "bar")) @unpack def test_write_string_ignore_color(self, text, color): output = six.StringIO() c = colorizer.NullColorizer(output) c.write(text, color) self.assertEqual(text, output.getvalue()) @data((None, "red"), (None, None)) @unpack def test_write_none_exception(self, text, color): c = colorizer.NullColorizer(sys.stdout) self.assertRaises(TypeError, c.write, text, color) @ddt class TestAnsiColorizer(base.TestCase): def test_supported_false(self): # NOTE(masayukig): This returns False because our unittest env isn't # interactive self.assertFalse(colorizer.AnsiColorizer.supported(sys.stdout)) @data(None, "foo") def test_supported_error(self, stream): self.assertRaises(AttributeError, colorizer.AnsiColorizer.supported, stream) @data(("foo", "red", "31"), ("foo", "blue", "34")) @unpack def test_write_string_valid_color(self, text, color, color_code): output = six.StringIO() c = colorizer.AnsiColorizer(output) c.write(text, color) self.assertIn(text, output.getvalue()) self.assertIn(color_code, output.getvalue()) @data(("foo", None), ("foo", "invalid_color")) @unpack def test_write_string_invalid_color(self, text, color): output = six.StringIO() c = colorizer.AnsiColorizer(output) self.assertRaises(KeyError, c.write, text, color) os-testr-1.0.0/os_testr/tests/utils/__init__.py0000664000175000017500000000000013154262145022664 0ustar jenkinsjenkins00000000000000os-testr-1.0.0/os_testr/tests/testlist_builder.py0000664000175000017500000001473713154262145023374 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this 
file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import re import six from os_testr import testlist_builder as list_builder from os_testr.tests import base class TestBlackReader(base.TestCase): def test_black_reader(self): blacklist_file = six.StringIO() for i in range(4): blacklist_file.write('fake_regex_%s\n' % i) blacklist_file.write('fake_regex_with_note_%s # note\n' % i) blacklist_file.seek(0) with mock.patch('six.moves.builtins.open', return_value=blacklist_file): result = list_builder.black_reader('fake_path') self.assertEqual(2 * 4, len(result)) note_cnt = 0 # not assuming ordering, mainly just testing the type for r in result: self.assertEqual(r[2], []) if r[1] == 'note': note_cnt += 1 self.assertIn('search', dir(r[0])) # like a compiled regex self.assertEqual(note_cnt, 4) class TestConstructList(base.TestCase): def test_simple_re(self): test_lists = ['fake_test(scen)[tag,bar])', 'fake_test(scen)[egg,foo])'] with mock.patch('os_testr.regex_builder._get_test_list', return_value=test_lists): result = list_builder.construct_list(None, None, 'foo', None, False) self.assertEqual(list(result), ['fake_test(scen)[egg,foo])']) def test_simple_black_re(self): test_lists = ['fake_test(scen)[tag,bar])', 'fake_test(scen)[egg,foo])'] with mock.patch('os_testr.regex_builder._get_test_list', return_value=test_lists): result = list_builder.construct_list(None, None, None, 'foo', False) self.assertEqual(list(result), ['fake_test(scen)[tag,bar])']) def test_blacklist(self): black_list = [(re.compile('foo'), 'foo not liked', [])] test_lists = 
['fake_test(scen)[tag,bar])', 'fake_test(scen)[egg,foo])'] with mock.patch('os_testr.regex_builder._get_test_list', return_value=test_lists): with mock.patch('os_testr.testlist_builder.black_reader', return_value=black_list): result = list_builder.construct_list('file', None, 'fake_test', None, False) self.assertEqual(list(result), ['fake_test(scen)[tag,bar])']) def test_whitelist(self): white_list = 'fake_test1|fake_test2' test_lists = ['fake_test1[tg]', 'fake_test2[tg]', 'fake_test3[tg]'] white_getter = 'os_testr.regex_builder.get_regex_from_whitelist_file' with mock.patch('os_testr.regex_builder._get_test_list', return_value=test_lists): with mock.patch(white_getter, return_value=white_list): result = list_builder.construct_list(None, 'file', None, None, False) self.assertEqual(set(result), set(('fake_test1[tg]', 'fake_test2[tg]'))) def test_whitelist_blacklist_re(self): white_list = 'fake_test1|fake_test2' test_lists = ['fake_test1[tg]', 'fake_test2[spam]', 'fake_test3[tg,foo]', 'fake_test4[spam]'] black_list = [(re.compile('spam'), 'spam not liked', [])] white_getter = 'os_testr.regex_builder.get_regex_from_whitelist_file' with mock.patch('os_testr.regex_builder._get_test_list', return_value=test_lists): with mock.patch(white_getter, return_value=white_list): with mock.patch('os_testr.testlist_builder.black_reader', return_value=black_list): result = list_builder.construct_list('black_file', 'white_file', 'foo', None, False) self.assertEqual(set(result), set(('fake_test1[tg]', 'fake_test3[tg,foo]'))) def test_overlapping_black_regex(self): black_list = [(re.compile('compute.test_keypairs.KeypairsTestV210'), '', []), (re.compile('compute.test_keypairs.KeypairsTestV21'), '', [])] test_lists = [ 'compute.test_keypairs.KeypairsTestV210.test_create_keypair', 'compute.test_keypairs.KeypairsTestV21.test_create_keypair', 'compute.test_fake.FakeTest.test_fake_test'] with mock.patch('os_testr.regex_builder._get_test_list', return_value=test_lists): with 
mock.patch('os_testr.testlist_builder.black_reader', return_value=black_list): result = list_builder.construct_list('file', None, 'fake_test', None, False) self.assertEqual( list(result), ['compute.test_fake.FakeTest.test_fake_test']) os-testr-1.0.0/os_testr/tests/files/0000775000175000017500000000000013154262453020531 5ustar jenkinsjenkins00000000000000os-testr-1.0.0/os_testr/tests/files/failing-tests0000664000175000017500000000147413154262145023231 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import testtools class FakeTestClass(testtools.TestCase): def test_pass(self): self.assertTrue(False) def test_pass_list(self): test_list = ['test', 'a', 'b'] self.assertIn('fail', test_list) os-testr-1.0.0/os_testr/tests/files/stestr-conf0000664000175000017500000000006413154262145022721 0ustar jenkinsjenkins00000000000000[DEFAULT] test_path=./tests group_regex=([^\.]*\.)* os-testr-1.0.0/os_testr/tests/files/__init__.py0000664000175000017500000000000013154262145022626 0ustar jenkinsjenkins00000000000000os-testr-1.0.0/os_testr/tests/files/passing-tests0000664000175000017500000000147313154262145023263 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import testtools class FakeTestClass(testtools.TestCase): def test_pass(self): self.assertTrue(True) def test_pass_list(self): test_list = ['test', 'a', 'b'] self.assertIn('test', test_list) os-testr-1.0.0/os_testr/tests/files/setup.cfg0000664000175000017500000000114213154262145022346 0ustar jenkinsjenkins00000000000000[metadata] name = tempest_unit_tests version = 1 summary = Fake Project for testing wrapper scripts author = OpenStack author-email = openstack-dev@lists.openstack.org home-page = http://www.openstack.org/ classifier = Intended Audience :: Information Technology Intended Audience :: System Administrators Intended Audience :: Developers License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 [global] setup-hooks = pbr.hooks.setup_hook os-testr-1.0.0/os_testr/tests/test_subunit_trace.py0000664000175000017500000000640013154262145023705 0ustar jenkinsjenkins00000000000000# Copyright 2015 SUSE Linux GmbH # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from datetime import datetime as dt import io import os import subprocess import sys from ddt import data from ddt import ddt from ddt import unpack from mock import patch import six from os_testr import subunit_trace from os_testr.tests import base @ddt class TestSubunitTrace(base.TestCase): @data(([dt(2015, 4, 17, 22, 23, 14, 111111), dt(2015, 4, 17, 22, 23, 14, 111111)], "0.000000s"), ([dt(2015, 4, 17, 22, 23, 14, 111111), dt(2015, 4, 17, 22, 23, 15, 111111)], "1.000000s"), ([dt(2015, 4, 17, 22, 23, 14, 111111), None], "")) @unpack def test_get_durating(self, timestamps, expected_result): self.assertEqual(subunit_trace.get_duration(timestamps), expected_result) @data(([dt(2015, 4, 17, 22, 23, 14, 111111), dt(2015, 4, 17, 22, 23, 14, 111111)], 0.0), ([dt(2015, 4, 17, 22, 23, 14, 111111), dt(2015, 4, 17, 22, 23, 15, 111111)], 1.0), ([dt(2015, 4, 17, 22, 23, 14, 111111), None], 0.0)) @unpack def test_run_time(self, timestamps, expected_result): patched_res = { 0: [ {'timestamps': timestamps} ] } with patch.dict(subunit_trace.RESULTS, patched_res, clear=True): self.assertEqual(subunit_trace.run_time(), expected_result) def test_return_code_all_skips(self): skips_stream = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'sample_streams/all_skips.subunit') p = subprocess.Popen(['subunit-trace'], stdin=subprocess.PIPE) with open(skips_stream, 'rb') as stream: p.communicate(stream.read()) self.assertEqual(1, p.returncode) def test_return_code_normal_run(self): regular_stream = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'sample_streams/successful.subunit') p = subprocess.Popen(['subunit-trace'], stdin=subprocess.PIPE) with open(regular_stream, 'rb') as stream: p.communicate(stream.read()) self.assertEqual(0, p.returncode) def test_trace(self): regular_stream = os.path.join( os.path.dirname(os.path.abspath(__file__)), 
'sample_streams/successful.subunit') bytes_ = io.BytesIO() with open(regular_stream, 'rb') as stream: bytes_.write(six.binary_type(stream.read())) bytes_.seek(0) stdin = io.TextIOWrapper(io.BufferedReader(bytes_)) returncode = subunit_trace.trace(stdin, sys.stdout) self.assertEqual(0, returncode) os-testr-1.0.0/os_testr/tests/test_subunit2html.py0000664000175000017500000000613113154262145023477 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from ddt import data from ddt import ddt from subunit import RemotedTestCase from testtools import PlaceHolder from os_testr import subunit2html from os_testr.tests import base @ddt class TestSubunit2html(base.TestCase): @data(RemotedTestCase, PlaceHolder) def test_class_parsing(self, test_cls): """Tests that the class paths are parsed for v1 & v2 tests""" test_ = test_cls("example.path.to.test.method") obj_ = subunit2html.HtmlOutput() cls_ = [] obj_._add_cls({}, cls_, test_, ()) self.assertEqual("example.path.to.test", cls_[0].name) @data(RemotedTestCase, PlaceHolder) def test_result_sorting(self, test_cls): tests = [] for i in range(9): tests.append(test_cls('example.path.to.test%d.method' % i)) # addFailure, addError, and addSkip need the real exc_info try: raise Exception('fake') except Exception: err = sys.exc_info() obj = subunit2html.HtmlOutput() obj.addSuccess(tests[3]) obj.addSuccess(tests[1]) # example.path.to.test2 has a failure obj.addFailure(tests[2], err) obj.addSkip(tests[0], err) 
obj.addSuccess(tests[8]) # example.path.to.test5 has a failure (error) obj.addError(tests[5], err) # example.path.to.test4 has a failure obj.addFailure(tests[4], err) obj.addSuccess(tests[7]) # example.path.to.test6 has a success, a failure, and a success obj.addSuccess(tests[6]) obj.addFailure(tests[6], err) obj.addSuccess(tests[6]) sorted_result = obj._sortResult(obj.result) # _sortResult returns a list of results of format: # [(class, [test_result_tuple, ...]), ...] # sorted by str(class) # # Classes with failures (2, 4, 5, and 6) should be sorted separately # at the top. The rest of the classes should be in sorted order after. expected_class_order = ['example.path.to.test2', 'example.path.to.test4', 'example.path.to.test5', 'example.path.to.test6', 'example.path.to.test0', 'example.path.to.test1', 'example.path.to.test3', 'example.path.to.test7', 'example.path.to.test8'] for i, r in enumerate(sorted_result): self.assertEqual(expected_class_order[i], str(r[0])) os-testr-1.0.0/os_testr/subunit_trace.py0000775000175000017500000003453713154262145021523 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2014 Hewlett-Packard Development Company, L.P. # Copyright 2014 Samsung Electronics # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Trace a subunit stream in reasonable detail and high accuracy.""" from __future__ import absolute_import import argparse import datetime import functools import os import re import sys import pbr.version import subunit import testtools from os_testr.utils import colorizer # NOTE(mtreinish) on python3 anydbm was renamed dbm and the python2 dbm module # was renamed to dbm.ndbm, this block takes that into account try: import anydbm as dbm except ImportError: import dbm DAY_SECONDS = 60 * 60 * 24 FAILS = [] RESULTS = {} def total_seconds(timedelta): # NOTE(mtreinish): This method is built-in to the timedelta class in # python >= 2.7 it is here to enable it's use on older versions return ((timedelta.days * DAY_SECONDS + timedelta.seconds) * 10 ** 6 + timedelta.microseconds) / 10 ** 6 def cleanup_test_name(name, strip_tags=True, strip_scenarios=False): """Clean up the test name for display. By default we strip out the tags in the test because they don't help us in identifying the test that is run to it's result. Make it possible to strip out the testscenarios information (not to be confused with tempest scenarios) however that's often needed to indentify generated negative tests. """ if strip_tags: tags_start = name.find('[') tags_end = name.find(']') if tags_start > 0 and tags_end > tags_start: newname = name[:tags_start] newname += name[tags_end + 1:] name = newname if strip_scenarios: tags_start = name.find('(') tags_end = name.find(')') if tags_start > 0 and tags_end > tags_start: newname = name[:tags_start] newname += name[tags_end + 1:] name = newname return name def get_duration(timestamps): start, end = timestamps if not start or not end: duration = '' else: delta = end - start duration = '%d.%06ds' % ( delta.days * DAY_SECONDS + delta.seconds, delta.microseconds) return duration def find_worker(test): """Get the worker number. If there are no workers because we aren't in a concurrent environment, assume the worker number is 0. 
""" for tag in test['tags']: if tag.startswith('worker-'): return int(tag[7:]) return 0 # Print out stdout/stderr if it exists, always def print_attachments(stream, test, all_channels=False): """Print out subunit attachments. Print out subunit attachments that contain content. This runs in 2 modes, one for successes where we print out just stdout and stderr, and an override that dumps all the attachments. """ channels = ('stdout', 'stderr') for name, detail in test['details'].items(): # NOTE(sdague): the subunit names are a little crazy, and actually # are in the form pythonlogging:'' (with the colon and quotes) name = name.split(':')[0] if detail.content_type.type == 'test': detail.content_type.type = 'text' if (all_channels or name in channels) and detail.as_text(): title = "Captured %s:" % name stream.write("\n%s\n%s\n" % (title, ('~' * len(title)))) # indent attachment lines 4 spaces to make them visually # offset for line in detail.as_text().split('\n'): line = line.encode('utf8') stream.write(" %s\n" % line) def find_test_run_time_diff(test_id, run_time): times_db_path = os.path.join(os.path.join(os.getcwd(), '.testrepository'), 'times.dbm') if os.path.isfile(times_db_path): try: test_times = dbm.open(times_db_path) except Exception: return False try: avg_runtime = float(test_times.get(str(test_id), False)) except Exception: try: avg_runtime = float(test_times[str(test_id)]) except Exception: avg_runtime = False if avg_runtime and avg_runtime > 0: run_time = float(run_time.rstrip('s')) perc_diff = ((run_time - avg_runtime) / avg_runtime) * 100 return perc_diff return False def show_outcome(stream, test, print_failures=False, failonly=False, enable_diff=False, threshold='0', abbreviate=False, enable_color=False): global RESULTS status = test['status'] # TODO(sdague): ask lifeless why on this? 
if status == 'exists': return worker = find_worker(test) name = cleanup_test_name(test['id']) duration = get_duration(test['timestamps']) if worker not in RESULTS: RESULTS[worker] = [] RESULTS[worker].append(test) # don't count the end of the return code as a fail if name == 'process-returncode': return for color in [colorizer.AnsiColorizer, colorizer.NullColorizer]: if not enable_color: color = colorizer.NullColorizer(stream) break if color.supported(): color = color(stream) break if status == 'fail' or status == 'uxsuccess': FAILS.append(test) if abbreviate: color.write('F', 'red') else: stream.write('{%s} %s [%s] ... ' % ( worker, name, duration)) color.write('FAILED', 'red') stream.write('\n') if not print_failures: print_attachments(stream, test, all_channels=True) elif not failonly: if status == 'success' or status == 'xfail': if abbreviate: color.write('.', 'green') else: out_string = '{%s} %s [%s' % (worker, name, duration) perc_diff = find_test_run_time_diff(test['id'], duration) if enable_diff: if perc_diff and abs(perc_diff) >= abs(float(threshold)): if perc_diff > 0: out_string = out_string + ' +%.2f%%' % perc_diff else: out_string = out_string + ' %.2f%%' % perc_diff stream.write(out_string + '] ... ') color.write('ok', 'green') stream.write('\n') print_attachments(stream, test) elif status == 'skip': if abbreviate: color.write('S', 'blue') else: reason = test['details'].get('reason', '') if reason: reason = ': ' + reason.as_text() stream.write('{%s} %s ... ' % ( worker, name)) color.write('SKIPPED', 'blue') stream.write('%s' % (reason)) stream.write('\n') else: if abbreviate: stream.write('%s' % test['status'][0]) else: stream.write('{%s} %s [%s] ... %s\n' % ( worker, name, duration, test['status'])) if not print_failures: print_attachments(stream, test, all_channels=True) stream.flush() def print_fails(stream): """Print summary failure report. Currently unused, however there remains debate on inline vs. 
at end reporting, so leave the utility function for later use. """ if not FAILS: return stream.write("\n==============================\n") stream.write("Failed %s tests - output below:" % len(FAILS)) stream.write("\n==============================\n") for f in FAILS: stream.write("\n%s\n" % f['id']) stream.write("%s\n" % ('-' * len(f['id']))) print_attachments(stream, f, all_channels=True) stream.write('\n') def count_tests(key, value): count = 0 for k, v in RESULTS.items(): for item in v: if key in item: if re.search(value, item[key]): count += 1 return count def run_time(): runtime = 0.0 for k, v in RESULTS.items(): for test in v: test_dur = get_duration(test['timestamps']).strip('s') # NOTE(toabctl): get_duration() can return an empty string # which leads to a ValueError when casting to float if test_dur: runtime += float(test_dur) return runtime def worker_stats(worker): tests = RESULTS[worker] num_tests = len(tests) stop_time = tests[-1]['timestamps'][1] start_time = tests[0]['timestamps'][0] if not start_time or not stop_time: delta = 'N/A' else: delta = stop_time - start_time return num_tests, str(delta) def print_summary(stream, elapsed_time): stream.write("\n======\nTotals\n======\n") stream.write("Ran: %s tests in %.4f sec.\n" % ( count_tests('status', '.*'), total_seconds(elapsed_time))) stream.write(" - Passed: %s\n" % count_tests('status', '^success$')) stream.write(" - Skipped: %s\n" % count_tests('status', '^skip$')) stream.write(" - Expected Fail: %s\n" % count_tests('status', '^xfail$')) stream.write(" - Unexpected Success: %s\n" % count_tests('status', '^uxsuccess$')) stream.write(" - Failed: %s\n" % count_tests('status', '^fail$')) stream.write("Sum of execute time for each test: %.4f sec.\n" % run_time()) # we could have no results, especially as we filter out the process-codes if RESULTS: stream.write("\n==============\nWorker Balance\n==============\n") for w in range(max(RESULTS.keys()) + 1): if w not in RESULTS: stream.write( " - WARNING: 
missing Worker %s! " "Race in testr accounting.\n" % w) else: num, time = worker_stats(w) out_str = " - Worker %s (%s tests) => %s" % (w, num, time) if time.isdigit(): out_str += 's' out_str += '\n' stream.write(out_str) __version__ = pbr.version.VersionInfo('os_testr').version_string() def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('--version', action='version', version='%s' % __version__) parser.add_argument('--no-failure-debug', '-n', action='store_true', dest='print_failures', help='Disable printing failure ' 'debug information in realtime') parser.add_argument('--fails', '-f', action='store_true', dest='post_fails', help='Print failure debug ' 'information after the stream is proccesed') parser.add_argument('--failonly', action='store_true', dest='failonly', help="Don't print success items", default=( os.environ.get('TRACE_FAILONLY', False) is not False)) parser.add_argument('--abbreviate', '-a', action='store_true', dest='abbreviate', help='Print one character status' 'for each test') parser.add_argument('--perc-diff', '-d', action='store_true', dest='enable_diff', help="Print percent change in run time on each test ") parser.add_argument('--diff-threshold', '-t', dest='threshold', help="Threshold to use for displaying percent change " "from the avg run time. 
If one is not specified " "the percent change will always be displayed") parser.add_argument('--no-summary', action='store_true', help="Don't print the summary of the test run after " " completes") parser.add_argument('--color', action='store_true', help="Print results with colors") return parser.parse_args() def trace(stdin, stdout, print_failures=False, failonly=False, enable_diff=False, abbreviate=False, color=False, post_fails=False, no_summary=False): stream = subunit.ByteStreamToStreamResult( stdin, non_subunit_name='stdout') outcomes = testtools.StreamToDict( functools.partial(show_outcome, stdout, print_failures=print_failures, failonly=failonly, enable_diff=enable_diff, abbreviate=abbreviate, enable_color=color)) summary = testtools.StreamSummary() result = testtools.CopyStreamResult([outcomes, summary]) result = testtools.StreamResultRouter(result) cat = subunit.test_results.CatFiles(stdout) result.add_rule(cat, 'test_id', test_id=None) start_time = datetime.datetime.utcnow() result.startTestRun() try: stream.run(result) finally: result.stopTestRun() stop_time = datetime.datetime.utcnow() elapsed_time = stop_time - start_time if count_tests('status', '.*') == 0: print("The test run didn't actually run any tests") return 1 if post_fails: print_fails(stdout) if not no_summary: print_summary(stdout, elapsed_time) # NOTE(mtreinish): Ideally this should live in testtools streamSummary # this is just in place until the behavior lands there (if it ever does) if count_tests('status', '^success$') == 0: print("\nNo tests were successful during the run") return 1 return 0 if summary.wasSuccessful() else 1 def main(): args = parse_args() exit(trace(sys.stdin, sys.stdout, args.print_failures, args.failonly, args.enable_diff, args.abbreviate, args.color, args.post_fails, args.no_summary)) if __name__ == '__main__': main() os-testr-1.0.0/os_testr/generate_subunit.py0000775000175000017500000000312713154262145022206 0ustar jenkinsjenkins00000000000000#!/usr/bin/env 
python2 # Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import sys import pbr.version import subunit from subunit import iso8601 __version__ = pbr.version.VersionInfo('os_testr').version_string() def main(): if '--version' in sys.argv: print(__version__) exit(0) start_time = datetime.datetime.fromtimestamp(float(sys.argv[1])).replace( tzinfo=iso8601.UTC) elapsed_time = datetime.timedelta(seconds=int(sys.argv[2])) stop_time = start_time + elapsed_time if len(sys.argv) > 3: status = sys.argv[3] else: status = 'success' if len(sys.argv) > 4: test_id = sys.argv[4] else: test_id = 'devstack' # Write the subunit test output = subunit.v2.StreamResultToBytes(sys.stdout) output.startTestRun() output.status(timestamp=start_time, test_id=test_id) # Write the end of the test output.status(test_status=status, timestamp=stop_time, test_id=test_id) output.stopTestRun() if __name__ == '__main__': main() os-testr-1.0.0/requirements.txt0000664000175000017500000000051513154262145017706 0ustar jenkinsjenkins00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
pbr!=2.1.0,>=2.0.0 # Apache-2.0 stestr>=1.0.0 # Apache-2.0 python-subunit>=0.0.18 # Apache-2.0/BSD testtools>=1.4.0 # MIT os-testr-1.0.0/os_testr.egg-info/0000775000175000017500000000000013154262453017757 5ustar jenkinsjenkins00000000000000os-testr-1.0.0/os_testr.egg-info/top_level.txt0000664000175000017500000000001113154262452022500 0ustar jenkinsjenkins00000000000000os_testr os-testr-1.0.0/os_testr.egg-info/pbr.json0000664000175000017500000000005613154262452021435 0ustar jenkinsjenkins00000000000000{"git_version": "7dd678e", "is_release": true}os-testr-1.0.0/os_testr.egg-info/PKG-INFO0000664000175000017500000000400313154262452021050 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: os-testr Version: 1.0.0 Summary: A testr wrapper to provide functionality for OpenStack projects Home-page: http://docs.openstack.org/developer/os-testr/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description-Content-Type: UNKNOWN Description: ======== os-testr ======== .. image:: https://img.shields.io/pypi/v/os-testr.svg :target: https://pypi.python.org/pypi/os-testr/ :alt: Latest Version .. image:: https://img.shields.io/pypi/dm/os-testr.svg :target: https://pypi.python.org/pypi/os-testr/ :alt: Downloads A testr wrapper to provide functionality for OpenStack projects. 
* Free software: Apache license * Documentation: http://docs.openstack.org/os-testr/ * Source: http://git.openstack.org/cgit/openstack/os-testr * Bugs: http://bugs.launchpad.net/os-testr Features -------- * ``ostestr``: a testr wrapper that uses subunit-trace for output and builds some helpful extra functionality around testr * ``subunit-trace``: an output filter for a subunit stream which provides useful information about the run * ``subunit2html``: generates a test results html page from a subunit stream * ``generate-subunit``: generate a subunit stream for a single test Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 os-testr-1.0.0/os_testr.egg-info/SOURCES.txt0000664000175000017500000000305113154262453021642 0ustar jenkinsjenkins00000000000000.coveragerc .mailmap .stestr.conf AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE MANIFEST.in README.rst babel.cfg requirements.txt setup.cfg setup.py test-requirements.txt tox.ini doc/source/conf.py doc/source/index.rst doc/source/contributor/index.rst doc/source/install/index.rst doc/source/user/generate_subunit.rst doc/source/user/history.rst doc/source/user/index.rst doc/source/user/ostestr.rst doc/source/user/subunit2html.rst doc/source/user/subunit_trace.rst os_testr/__init__.py os_testr/generate_subunit.py os_testr/ostestr.py os_testr/regex_builder.py os_testr/subunit2html.py os_testr/subunit_trace.py os_testr/testlist_builder.py os_testr.egg-info/PKG-INFO os_testr.egg-info/SOURCES.txt 
os_testr.egg-info/dependency_links.txt os_testr.egg-info/entry_points.txt os_testr.egg-info/not-zip-safe os_testr.egg-info/pbr.json os_testr.egg-info/requires.txt os_testr.egg-info/top_level.txt os_testr/tests/__init__.py os_testr/tests/base.py os_testr/tests/test_ostestr.py os_testr/tests/test_regex_builder.py os_testr/tests/test_return_codes.py os_testr/tests/test_subunit2html.py os_testr/tests/test_subunit_trace.py os_testr/tests/testlist_builder.py os_testr/tests/files/__init__.py os_testr/tests/files/failing-tests os_testr/tests/files/passing-tests os_testr/tests/files/setup.cfg os_testr/tests/files/stestr-conf os_testr/tests/sample_streams/all_skips.subunit os_testr/tests/sample_streams/successful.subunit os_testr/tests/utils/__init__.py os_testr/tests/utils/test_colorizer.py os_testr/utils/__init__.py os_testr/utils/colorizer.py tools/tox_install.shos-testr-1.0.0/os_testr.egg-info/entry_points.txt0000664000175000017500000000027313154262452023256 0ustar jenkinsjenkins00000000000000[console_scripts] generate-subunit = os_testr.generate_subunit:main ostestr = os_testr.ostestr:main subunit-trace = os_testr.subunit_trace:main subunit2html = os_testr.subunit2html:main os-testr-1.0.0/os_testr.egg-info/requires.txt0000664000175000017500000000011113154262452022347 0ustar jenkinsjenkins00000000000000pbr!=2.1.0,>=2.0.0 stestr>=1.0.0 python-subunit>=0.0.18 testtools>=1.4.0 os-testr-1.0.0/os_testr.egg-info/dependency_links.txt0000664000175000017500000000000113154262452024024 0ustar jenkinsjenkins00000000000000 os-testr-1.0.0/os_testr.egg-info/not-zip-safe0000664000175000017500000000000113154262445022206 0ustar jenkinsjenkins00000000000000 os-testr-1.0.0/AUTHORS0000664000175000017500000000203713154262452015474 0ustar jenkinsjenkins00000000000000Andreas Jaeger Assaf Muller Attila Fazekas Christian Berendt Davanum Srinivas Divyansh Acharya Dong Ma Doug Hellmann Jake Yip James Page John Griffith Kun Huang Luz Cazares Masayuki Igawa Masayuki Igawa Masayuki Igawa Matthew 
Treinish Monty Taylor Shu Muto TerryHowe Thomas Bechtold Tony Breeds Yushiro FURUKAWA guo yunxian janonymous melanie witt ricolin step6829 os-testr-1.0.0/.mailmap0000664000175000017500000000013113154262145016035 0ustar jenkinsjenkins00000000000000# Format is: # # os-testr-1.0.0/PKG-INFO0000664000175000017500000000400313154262453015515 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: os-testr Version: 1.0.0 Summary: A testr wrapper to provide functionality for OpenStack projects Home-page: http://docs.openstack.org/developer/os-testr/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description-Content-Type: UNKNOWN Description: ======== os-testr ======== .. image:: https://img.shields.io/pypi/v/os-testr.svg :target: https://pypi.python.org/pypi/os-testr/ :alt: Latest Version .. image:: https://img.shields.io/pypi/dm/os-testr.svg :target: https://pypi.python.org/pypi/os-testr/ :alt: Downloads A testr wrapper to provide functionality for OpenStack projects. 
* Free software: Apache license * Documentation: http://docs.openstack.org/os-testr/ * Source: http://git.openstack.org/cgit/openstack/os-testr * Bugs: http://bugs.launchpad.net/os-testr Features -------- * ``ostestr``: a testr wrapper that uses subunit-trace for output and builds some helpful extra functionality around testr * ``subunit-trace``: an output filter for a subunit stream which provides useful information about the run * ``subunit2html``: generates a test results html page from a subunit stream * ``generate-subunit``: generate a subunit stream for a single test Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 os-testr-1.0.0/MANIFEST.in0000664000175000017500000000013613154262145016157 0ustar jenkinsjenkins00000000000000include AUTHORS include ChangeLog exclude .gitignore exclude .gitreview global-exclude *.pyc os-testr-1.0.0/babel.cfg0000664000175000017500000000002113154262145016140 0ustar jenkinsjenkins00000000000000[python: **.py] os-testr-1.0.0/setup.py0000664000175000017500000000200613154262145016131 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. # solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) os-testr-1.0.0/.stestr.conf0000664000175000017500000000005613154262145016673 0ustar jenkinsjenkins00000000000000[DEFAULT] test_path=os_testr/tests top_dir=./ os-testr-1.0.0/test-requirements.txt0000664000175000017500000000070113154262145020660 0ustar jenkinsjenkins00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 coverage!=4.4,>=4.0 # Apache-2.0 sphinx>=1.6.2 # BSD openstackdocstheme>=1.16.0 # Apache-2.0 oslotest>=1.10.0 # Apache-2.0 testscenarios>=0.4 # Apache-2.0/BSD ddt>=1.0.1 # MIT six>=1.9.0 # MIT os-testr-1.0.0/ChangeLog0000664000175000017500000001116513154262452016200 0ustar jenkinsjenkins00000000000000CHANGES ======= 1.0.0 ----- * Switch to stestr under the covers * Just changed the opening of the file * Updated from global requirements * Switch from oslosphinx to openstackdocstheme * update doc URL to the new location in the readme * rearrange content to fit the new standard layout * Turn on warning-is-error in sphinx build * Fail when no test case selected * Updated from global requirements 0.8.2 ----- * Updated from global requirements * Remove unused Babel setup 0.8.1 ----- * Updated from global requirements * [Fix gate]Update test requirement * Fix error when without --regex option * Updated from global requirements * Add Constraints support * Fix typo and change regexp to regex * Track failures during sorting of test results 0.8.0 ----- * Updated from global requirements * Handle overlapping black regexes * Updated from global requirements * Update home-page url * Add ostestr as a function * Error on invalid list parameter combination * Use dash instead of underscore in args * Add --black-regex/-B option * Allow to specifiy black/white list at the same time * Make the listbuilder the default strategy * Construct a list of test cases instead of passing a regexp * Add support for Python versions * Delete openstack/common in flake8 exclude list * Save subunit results file when using --no-discover * Simplify logic in \_get\_test\_list * Remove discover from test-requirements * Make subprocess outputs to text for Python 3 * Fix doc warnings and disable smarty-pants * Updated from global requirements * Remove the TODO file * Make unexpected success as fail * Add documentation for generate-subunit * Sort failed tests at the 
top on HTML result page 0.7.0 ----- * Add whitelist file to ostestr docs * Seperate regex builder logic into a seperate module * Split functionality out of main * Updated from global requirements * Fix docs typos * Updated from global requirements * Fix coverage option and execution * Add version option for ostestr and subunit-trace * Remove openstack-common.conf * Add pypi download + version badges into README.rst * remove python 3.3 trove classifier * Treat xfail output like success * Updated from global requirements * correct typo * Enable testr run passthrough arguments * Updated from global requirements * Add unit test for colorizer 0.6.0 ----- * Support comments in whitelist files * Add tool to create a subunit stream * py26/py33 are no longer supported by Infra's CI * remove python 2.6 trove classifier 0.5.0 ----- * Add support to ostestr to use subunit-trace color * Add subunit\_trace --color option's doc * Fix documentation typos * Fix coverage section in tox.ini * Add delete \*.pyc command before executing ostestr * Change to always parsing classes from test\_id * Add colored output feature to subunit-trace * Add \*.egg\* to .gitignore * Fail if no tests were successfully executed * Fix syntax of the README file 0.4.2 ----- * Force utf8 encoding on subunit attachments output 0.4.1 ----- * Better blacklist - tested with Nova 0.4.0 ----- * Add whitelist file support * Fix issues with the blacklist file regex generation * Use test\_to\_run var in no-discover * Minor refactoring to make os\_testr more testable * Switch to using autogenerated ChangeLog in docs * Change ignore-errors to ignore\_errors * Handle a skipped test without a reason message * Minimize output when --abbreviate is used * Make use of argparse groups and add some tests 0.3.0 ----- * Convert file names to regular expressions * Handle incomplete subunit streams * Set min pbr version in setup\_requires * update requirements * Add TODO entry for moving away from subprocess in ostestr * 
Improved docs for os-testr commands 0.2.0 ----- * Dogfood things for unit tests * Disable printing percent change on run time by default * Misc Python 3 compatibility fixes * Catch exception trying to extract test time * Fix ValueError in subunit\_trace * Add support for having comments in the exclude file * Add TODO file to os-testr 0.1.0 ----- * Fix pep8 issues and add apache header to subunit2html * Flush out the readme in preparation for the first release * Add subunit2html from jenkins slave scripts * Ensure failure is printed with --until-failure in pretty mode * Add --until-failure option to ostestr * Add basic unit tests to check ostestr return codes * Add percent change to duration on subunit-trace output * Use python APIs to call run modules in ostestr * Add ostestr options to control parallelism * Fix the testr init subprocess call * Improve the arguments for ostestr * Fix pep8 issues * Fix return code on test run failure * Add --pdb flag and fix --no-discover flag * Add subunit-trace and ostestr * Initial Cookiecutter Commit os-testr-1.0.0/tox.ini0000664000175000017500000000226313154262145015737 0ustar jenkinsjenkins00000000000000[tox] minversion = 2.0 envlist = py35,py34,py27,pypy,pep8 skipsdist = True [testenv] usedevelop = True install_command = {toxinidir}/tools/tox_install.sh {env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages} setenv = VIRTUAL_ENV={envdir} BRANCH_NAME=master CLIENT_NAME=os-testr OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_TEST_TIMEOUT=500 whitelist_externals = find deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = find . 
-type f -name "*.pyc" -delete ostestr {posargs} [testenv:pep8] commands = flake8 [testenv:venv] commands = {posargs} [testenv:cover] setenv = VIRTUAL_ENV={envdir} PYTHON=coverage run --source os_testr --parallel-mode commands = ostestr {posargs} coverage combine coverage html -d cover coverage xml -o cover/coverage.xml [testenv:docs] commands = python setup.py build_sphinx [testenv:debug] commands = oslo_debug_helper {posargs} [flake8] # E123, E125 skipped as they are invalid PEP-8. show-source = True ignore = E123,E125 builtins = _ exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build os-testr-1.0.0/LICENSE0000664000175000017500000002363713154262145015441 0ustar jenkinsjenkins00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. 
Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. os-testr-1.0.0/HACKING.rst0000664000175000017500000000024013154262145016213 0ustar jenkinsjenkins00000000000000os-testr Style Commandments =============================================== Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/ os-testr-1.0.0/tools/0000775000175000017500000000000013154262453015563 5ustar jenkinsjenkins00000000000000os-testr-1.0.0/tools/tox_install.sh0000775000175000017500000000203613154262145020461 0ustar jenkinsjenkins00000000000000#!/usr/bin/env bash # Client constraint file contains this client version pin that is in conflict # with installing the client from source. 
We should remove the version pin in # the constraints file before applying it for from-source installation. CONSTRAINTS_FILE="$1" shift 1 set -e # NOTE(tonyb): Place this in the tox enviroment's log dir so it will get # published to logs.openstack.org for easy debugging. localfile="$VIRTUAL_ENV/log/upper-constraints.txt" if [[ "$CONSTRAINTS_FILE" != http* ]]; then CONSTRAINTS_FILE="file://$CONSTRAINTS_FILE" fi # NOTE(tonyb): need to add curl to bindep.txt if the project supports bindep curl "$CONSTRAINTS_FILE" --insecure --progress-bar --output "$localfile" pip install -c"$localfile" openstack-requirements # This is the main purpose of the script: Allow local installation of # the current repo. It is listed in constraints file and thus any # install will be constrained and we need to unconstrain it. edit-constraints "$localfile" -- "$CLIENT_NAME" pip install -c"$localfile" -U "$@" exit $? os-testr-1.0.0/doc/0000775000175000017500000000000013154262453015170 5ustar jenkinsjenkins00000000000000os-testr-1.0.0/doc/source/0000775000175000017500000000000013154262453016470 5ustar jenkinsjenkins00000000000000os-testr-1.0.0/doc/source/conf.py0000775000175000017500000000621013154262145017767 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import os import sys sys.path.insert(0, os.path.abspath('../..')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', # 'sphinx.ext.intersphinx', 'openstackdocstheme' ] # openstackdocstheme options repository_name = 'openstack/os-testr' bug_project = 'os-testr' bug_tag = '' # Must set this variable to include year, month, day, hours, and minutes. html_last_updated_fmt = '%Y-%m-%d %H:%M' # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'os-testr' copyright = u'2015, Matthew Treinish' # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] html_theme = 'openstackdocs' # html_static_path = ['static'] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. html_use_smartypants = False # Output file base name for HTML help builder. htmlhelp_basename = '%sdoc' % project # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). 
latex_documents = [ ('index', '%s.tex' % project, u'%s Documentation' % project, u'Matthew Treinish', 'manual'), ] man_pages = [('ostestr', 'ostestr', 'tooling to run OpenStack tests', ['Matthew Treinish'], 1), ('subunit_trace', 'subunit-trace', 'pretty output filter for ' 'subunit streams', ['Matthew Treinish'], 1), ('subunit2html', 'subunit2html', 'generate a html results page ' 'from a subunit stream', ['Matthew Treinish'], 1)] # Example configuration for intersphinx: refer to the Python standard library. # intersphinx_mapping = {'http://docs.python.org/': None} os-testr-1.0.0/doc/source/contributor/0000775000175000017500000000000013154262453021042 5ustar jenkinsjenkins00000000000000os-testr-1.0.0/doc/source/contributor/index.rst0000664000175000017500000000011613154262145022677 0ustar jenkinsjenkins00000000000000============ Contributing ============ .. include:: ../../../CONTRIBUTING.rst os-testr-1.0.0/doc/source/index.rst0000664000175000017500000000145413154262145020333 0ustar jenkinsjenkins00000000000000==================================== Welcome to os-testr's documentation! ==================================== .. image:: https://img.shields.io/pypi/v/os-testr.svg :target: https://pypi.python.org/pypi/os-testr/ :alt: Latest Version .. image:: https://img.shields.io/pypi/dm/os-testr.svg :target: https://pypi.python.org/pypi/os-testr/ :alt: Downloads A testr wrapper to provide functionality for OpenStack projects. * Free software: Apache license * Documentation: http://docs.openstack.org/os-testr/ * Source: http://git.openstack.org/cgit/openstack/os-testr * Bugs: http://bugs.launchpad.net/os-testr Contents: .. 
toctree:: :maxdepth: 2 install/index contributor/index user/index Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` os-testr-1.0.0/doc/source/install/0000775000175000017500000000000013154262453020136 5ustar jenkinsjenkins00000000000000os-testr-1.0.0/doc/source/install/index.rst0000664000175000017500000000030113154262145021767 0ustar jenkinsjenkins00000000000000============ Installation ============ At the command line:: $ pip install os-testr Or, if you have virtualenvwrapper installed:: $ mkvirtualenv os-testr $ pip install os-testr os-testr-1.0.0/doc/source/user/0000775000175000017500000000000013154262453017446 5ustar jenkinsjenkins00000000000000os-testr-1.0.0/doc/source/user/subunit_trace.rst0000664000175000017500000001102713154262145023046 0ustar jenkinsjenkins00000000000000.. _subunit_trace: subunit-trace ============= subunit-trace is an output filter for subunit streams. It is often used in conjunction with test runners that emit subunit to enable a consistent and useful realtime output from a test run. Summary ------- subunit-trace [--fails|-f] [--failonly] [--perc-diff|-d] [--no-summary] [--diff-threshold|-t ] [--color] Options ------- --no-failure-debug, -n Disable printing failure debug information in realtime --fails, -f Print failure debug information after the stream is processed --failonly Don't print success items --perc-diff, -d Print percent change in run time on each test --diff-threshold THRESHOLD, -t THRESHOLD Threshold to use for displaying percent change from the avg run time. If one is not specified the percent change will always be displayed. --no-summary Don't print the summary of the test run after completes --color Print result with colors Usage ----- subunit-trace will take a subunit stream in via STDIN. This is the only input into the tool. It will then print on STDOUT the formatted test result output for the test run information contained in the stream. 
A subunit v2 stream must be passed into subunit-trace. If only a subunit v1 stream is available you must use the subunit-1to2 utility to convert it before passing the stream into subunit-trace. For example this can be done by chaining pipes:: $ cat subunit_v1 | subunit-1to2 | subunit-trace Adjusting per test output ^^^^^^^^^^^^^^^^^^^^^^^^^ subunit-trace provides several options to customize it's output. This allows users to customize the output from subunit-trace to suit their needs. The output from subunit-trace basically comes in 2 parts, the per test output, and the summary at the end. By default subunit-trace will print failure messages during the per test output, meaning when a test fails it will also print the message and any traceback and other attachments at that time. However this can be disabled by using --no-failure-debug, -n. For example:: $ testr run --subunit | subunit-trace --no-failure-debug Here is also the option to print all failures together at the end of the test run before the summary view. This is done using the --fails/-f option. For example:: $ testr run --subunit | subunit-trace --fails Often the --fails and --no-failure-debug options are used in conjunction to only print failures at the end of a test run. This is useful for large test suites where an error message might be lost in the noise. To do this :: $ testr run --subunit | subunit-trace --fails --no-failure-debug By default subunit-trace will print a line for each test after it completes with the test status. However, if you only want to see the run time output for failures and not any other test status you can use the --failonly option. For example:: $ testr run --subunit | subunit-trace --failonly The last output option provided by subunit-trace is to disable the summary view of the test run which is normally displayed at the end of a run. You can do this using the --no-summary option. 
For example:: $ testr run --subunit | subunit-trace --no-summary Show per test run time percent change ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ subunit-trace provides an option to display the percent change in run time from the previous run. To do this subunit-trace leverages the testr internals a bit. It uses the times.dbm database which, the file repository type in testrepository will create, to get the previous run time for a test. If testr hasn't ever been used before or for whatever reason subunit-trace is unable to find the times.dbm file from testr no percentages will be displayed even if it's enabled. Additionally, if a test is run which does not have an entry in the times.dbm file will not have a percentage printed for it. To enable this feature you use --perc-diff/-d, for example:: $ testr run --subunit | subunit-trace --perc-diff There is also the option to set a threshold value for this option. If used it acts as an absolute value and only percentage changes that exceed it will be printed. Use the --diff-threshold/-t option to set a threshold, for example:: $ testr run --subunit | subunit-trace --perc-diff --threshold 45 This will only display percent differences when the change in run time is either >=45% faster or <=45% slower. os-testr-1.0.0/doc/source/user/generate_subunit.rst0000664000175000017500000000401513154262145023541 0ustar jenkinsjenkins00000000000000.. generate_subunit: generate-subunit ================ generate-subunit is a simple tool to, as its name implies, generate a subunit stream. It will generate a stream with a single test result to STDOUT. The subunit protocol lets you concatenate multiple streams together so if you want to generate a stream with multiple just append the output of multiple executions of generate-subunit. Summary ------- generate-subunit timestamp secs [status] [test_id] Usage ----- generate-subunit has 2 mandatory arguments. These are needed to specify when the "test" started running and how long it took. 
The first argument is a POSIX timestamp (which can returned by the date util using ``date +%s``) for when it started running. The second argument is the number of seconds it took for the execution to finish. For example:: $ generate-subunit $(date +%s) 42 will generate a stream with the test_id 'devstack' successfully running for 42 secs starting when the command was executed. This leads into the 2 optional arguments. The first optional argument is for specifying the status. This must be the 3rd argument when calling generate-subunit. Valid status options can be found in the `testtools documentation`_. If status is not specified it will default to success. For example:: $ generate-subunit $(date +%s) 42 fail will be the same as the previous example except that it marks the test as failing. .. _testtools documentation: http://testtools.readthedocs.io/en/latest/api.html#testtools.StreamResult.status The other optional argument is the test_id (aka test name) and is used to identify the "test" being run. For better or worse this defaults to *devstack*. (which is an artifact of why this tool was originally created) Note, this must be the 4th argument when calling generate-subunit. This means you also must specify a status if you want to set your own test_id. For example:: $ generate-subunit %(date +%s) 42 fail my_little_test will generate a subunit stream as before except instead the test will be named my_little_test. os-testr-1.0.0/doc/source/user/ostestr.rst0000664000175000017500000002406113154262145021704 0ustar jenkinsjenkins00000000000000.. _ostestr: ostestr ======= The ostestr command provides a wrapper around the testr command included in the testrepository package. It's designed to build on the functionality included in testr and workaround several UI bugs in the short term. By default it also has output that is much more useful for OpenStack's test suites which are lengthy in both runtime and number of tests. 
Please note that the CLI semantics are still a work in progress as the project is quite young, so default behavior might change in future version. Summary ------- ostestr [-b|--blacklist-file ] [-r|--regex REGEX] [-w|--whitelist-file ] [-p|--pretty] [--no-pretty] [-s|--subunit] [-l|--list] [-n|--no-discover ] [--slowest] [--no-slowest] [--pdb ] [--parallel] [--serial] [-c|--concurrency ] [--until-failure] [--print-exclude] Options ------- --blacklist-file BLACKLIST_FILE, -b BLACKLIST_FILE Path to a blacklist file, this file contains a separate regex exclude on each newline --whitelist-file WHITELIST_FILE, -w WHITELIST_FILE Path to a whitelist file, this file contains a separate regex on each newline --regex REGEX, -r REGEX A normal testr selection regex. --black-regex BLACK_REGEX, -B BLACK_REGEX Test rejection regex. If the test cases durring a search opration matches, it will be removed from the final test list. --pretty, -p Print pretty output from subunit-trace. This is mutually exclusive with --subunit --no-pretty Disable the pretty output with subunit-trace --subunit, -s output the raw subunit v2 from the test run this is mutually exclusive with --pretty --list, -l List all the tests which will be run. --no-discover TEST_ID, -n TEST_ID Takes in a single test to bypasses test discover and just execute the test specified --slowest After the test run print the slowest tests --no-slowest After the test run don't print the slowest tests --pdb TEST_ID Run a single test that has pdb traces added --parallel Run tests in parallel (this is the default) --serial Run tests serially --concurrency WORKERS, -c WORKERS The number of workers to use when running in parallel. By default this is the number of cpus --until-failure Run the tests in a loop until a failure is encountered. 
Running with subunit or prettyoutput enable will force the loop to run testsserially --print-exclude If an exclude file is used this option will prints the comment from the same line and all skipped tests before the test run Running Tests ------------- os-testr is primarily for running tests at it's basic level you just invoke ostestr to run a test suite for a project. (assuming it's setup to run tests using testr already) For example:: $ ostestr This will run tests in parallel (with the number of workers matching the number of CPUs) and with subunit-trace output. If you need to run tests in serial you can use the serial option:: $ ostestr --serial Or if you need to adjust the concurrency but still run in parallel you can use -c/--concurrency:: $ ostestr --concurrency 2 If you only want to run an individual test module or more specific (a single class, or test) and parallel execution doesn't matter, you can use the -n/--no-discover to skip test discovery and just directly calls subunit.run on the tests under the covers. Bypassing discovery is desirable when running a small subset of tests in a larger test suite because the discovery time can often far exceed the total run time of the tests. For example:: $ ostestr --no-discover test.test_thing.TestThing.test_thing_method Additionally, if you need to run a single test module, class, or single test with pdb enabled you can use --pdb to directly call testtools.run under the covers which works with pdb. For example:: $ ostestr --pdb tests.test_thing.TestThing.test_thing_method Test Selection -------------- ostestr intially designed to build on top of the test selection in testr. testr only exposed a regex option to select tests. This functionality is exposed via the --regex option. For example:: $ ostestr --regex 'magic\.regex' This will do a straight passthrough of the provided regex to testr. 
When ostestr is asked to do more complex test selection than a sinlge regex, it will ask testr for a full list of tests than passing the filtered test list back to testr. ostestr allows you do to do simple test exclusion via apssing rejection/black regex:: $ ostestr --black-regex 'slow_tests|bad_tests' ostestr also allow you to combine these argumants:: $ ostestr --regex ui\.interface --black-regex 'slow_tests|bad_tests' Here first we selected all tests which matches to 'ui\.interface', than we are dropping all test which matches 'slow_tests|bad_tests' from the final list. ostestr also allows you to specify a blacklist file to define a set of regexes to exclude. You can specify a blacklist file with the --blacklist_file/-b option, for example:: $ ostestr --blacklist_file $path_to_file The format for the file is line separated regex, with '#' used to signify the start of a comment on a line. For example:: # Blacklist File ^regex1 # Excludes these tests .*regex2 # exclude those tests The regex used in the blacklist File or passed as argument, will be used to drop tests from the initial selection list. Will generate a list which will exclude both any tests matching '^regex1' and '.*regex2'. If a blacklist file is used in conjunction with the --regex option the regex specified with --regex will be used for the intial test selection. Also it's worth noting that the regex test selection options can not be used in conjunction with the --no-discover or --pdb options described in the previous section. This is because the regex selection requires using testr under the covers to actually do the filtering, and those 2 options do not use testr. The dual of the blacklist file is the whitelist file which altering the initial test selection regex, by joining the white list elements by '|'. 
You can specify the path to the file with --whitelist_file/-w, for example:: $ ostestr --whitelist_file $path_to_file The format for the file is more or less identical to the blacklist file:: # Whitelist File ^regex1 # Include these tests .*regex2 # include those tests However, instead of excluding the matches it will include them. It's also worth noting that you can use the test list option to dry run any selection arguments you are using. You just need to use --list/-l with your selection options to do this, for example:: $ ostestr --regex 'regex3.*' --blacklist_file blacklist.txt --list This will list all the tests which will be run by ostestr using that combination of arguments. Please not that all of this selection functionality will be expanded on in the future and a default grammar for selecting multiple tests will be chosen in a future release. However as of right now all current arguments (which have guarantees on always remaining in place) are still required to perform any selection logic while this functionality is still under development. Output Options -------------- By default ostestr will use subunit-trace as the output filter on the test run. It will also print the slowest tests from the run after the run is concluded. You can disable the printing the slowest tests with the --no-slowest flag, for example:: $ ostestr --no-slowest If you'd like to disable the subunit-trace output you can do this using --no-pretty:: $ ostestr --no-pretty ostestr also provides the option to just output the raw subunit stream on STDOUT with --subunit/-s. Note if you want to use this you also have to specify --no-pretty as the subunit-trace output and the raw subunit output are mutually exclusive. For example, to get raw subunit output the arguments would be:: $ ostestr --no-pretty --subunit An additional option on top of the blacklist file is --print-exclude option. 
When this option is specified when using a blacklist file, before the tests are run ostestr will print all the tests it will be excluding from the blacklist file. If a line in the blacklist file has a comment that will be printed before listing the tests which will be excluded by that line's regex. If no comment is present on a line the regex from that line will be used instead. For example, if you were using the example blacklist file from the previous section the output before the regular test run output would be:: $ ostestr -b blacklist-file blacklist.txt --print-exclude Excludes these tests regex1_match regex1_exclude exclude those tests regex2_match regex2_exclude ... Notes for running with tox -------------------------- If you use `tox`_ for running your tests and call ostestr as the test command it's recommended that you set a posargs following ostestr on the commands stanza. For example:: [testenv] commands = ostestr {posargs} .. _tox: https://tox.readthedocs.org/en/latest/ this will enable end users to pass args to configure the output, use the selection logic, or any other options directly from the tox cli. This will let tox take care of the venv management and the environment separation but enable direct access to all of the ostestr options to easily customize your test run. For example, assuming the above posargs usage you would be able to do:: $ tox -epy34 -- --regex ^regex1 or to skip discovery:: $ tox -epy34 -- -n test.test_thing.TestThing.test_thing_method os-testr-1.0.0/doc/source/user/history.rst0000664000175000017500000000004013154262145021671 0ustar jenkinsjenkins00000000000000.. include:: ../../../ChangeLog os-testr-1.0.0/doc/source/user/subunit2html.rst0000664000175000017500000000162113154262145022636 0ustar jenkinsjenkins00000000000000.. 
_subunit2html: subunit2html ============ subunit2html is a tool that takes in a subunit stream file and will output an html page Summary ------- subunit2html subunit_stream [output] Usage ----- subunit2html takes in 1 mandatory argument. This is used to specify the location of the subunit stream file. For example:: $ subunit2html subunit_stream By default subunit2html will store the generated html results file at results.html in the current working directory. An optional second argument can be provided to set the output path of the html results file that is generated. If it is provided this will be the output path for saving the generated file, otherwise results.html in the current working directory will be used. For example:: $ subunit2html subunit_stream test_results.html will write the generated html results file to test_results.html in the current working directory os-testr-1.0.0/doc/source/user/index.rst0000664000175000017500000000031413154262145021303 0ustar jenkinsjenkins00000000000000===== Usage ===== This section contains the documentation for each of the tools packaged in os-testr .. 
toctree:: :maxdepth: 2 ostestr subunit_trace subunit2html generate_subunit history os-testr-1.0.0/setup.cfg0000664000175000017500000000213613154262453016246 0ustar jenkinsjenkins00000000000000[metadata] name = os-testr summary = A testr wrapper to provide functionality for OpenStack projects description-file = README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org home-page = http://docs.openstack.org/developer/os-testr/ classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 Programming Language :: Python :: 3 Programming Language :: Python :: 3.4 Programming Language :: Python :: 3.5 [files] packages = os_testr [entry_points] console_scripts = subunit-trace = os_testr.subunit_trace:main ostestr = os_testr.ostestr:main subunit2html = os_testr.subunit2html:main generate-subunit = os_testr.generate_subunit:main [build_sphinx] source-dir = doc/source build-dir = doc/build all_files = 1 warning-is-error = 1 [upload_sphinx] upload-dir = doc/build/html [egg_info] tag_build = tag_date = 0 os-testr-1.0.0/README.rst0000664000175000017500000000165413154262145016116 0ustar jenkinsjenkins00000000000000======== os-testr ======== .. image:: https://img.shields.io/pypi/v/os-testr.svg :target: https://pypi.python.org/pypi/os-testr/ :alt: Latest Version .. image:: https://img.shields.io/pypi/dm/os-testr.svg :target: https://pypi.python.org/pypi/os-testr/ :alt: Downloads A testr wrapper to provide functionality for OpenStack projects. 
* Free software: Apache license * Documentation: http://docs.openstack.org/os-testr/ * Source: http://git.openstack.org/cgit/openstack/os-testr * Bugs: http://bugs.launchpad.net/os-testr Features -------- * ``ostestr``: a testr wrapper that uses subunit-trace for output and builds some helpful extra functionality around testr * ``subunit-trace``: an output filter for a subunit stream which provides useful information about the run * ``subunit2html``: generates a test results html page from a subunit stream * ``generate-subunit``: generate a subunit stream for a single test os-testr-1.0.0/CONTRIBUTING.rst0000664000175000017500000000103313154262145017057 0ustar jenkinsjenkins00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps in this page: http://docs.openstack.org/infra/manual/developers.html Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: http://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/os-testr os-testr-1.0.0/.coveragerc0000664000175000017500000000016213154262145016541 0ustar jenkinsjenkins00000000000000[run] branch = True source = os_testr omit = os_testr/tests/*,os_testr/openstack/* [report] ignore_errors = True