noseofyeti-2.4.7/pytest.ini

[pytest]
addopts = "--tb=short"
testpaths = tests
console_output_style = classic

noseofyeti-2.4.7/run.sh

#!/bin/bash

# Bash does not make it easy to find where this file is
# Here I'm making it so it doesn't matter what directory you are in
# when you execute this script. And it doesn't matter whether you're
# executing a symlink to this script
# Note the `-h` in the while loop asks if this path is a symlink

pushd . >'/dev/null'
SCRIPT_PATH="${BASH_SOURCE[0]:-$0}"

while [ -h "$SCRIPT_PATH" ]; do
    cd "$(dirname -- "$SCRIPT_PATH")"
    SCRIPT_PATH="$(readlink -f -- "$SCRIPT_PATH")"
done
cd "$(dirname -- "$SCRIPT_PATH")" >'/dev/null'

# We use noseOfYeti here, so let's make black compatible with it
export NOSE_OF_YETI_BLACK_COMPAT=true
export NOSE_OF_YETI_IT_RETURN_TYPE=true

HANDLED=0

# Special case activate to make the virtualenv active in this session
if [[ "$0" != "$BASH_SOURCE" ]]; then
    HANDLED=1
    if [[ "activate" == "$1" ]]; then
        VENVSTARTER_ONLY_MAKE_VENV=1 ./tools/venv
        source ./tools/.python/bin/activate
    else
        echo "only say \`source run.sh activate\`"
    fi
fi

if [[ $HANDLED != 1 ]]; then
    if [[ "$#" == "1" && "$1" == "activate" ]]; then
        if [[ "$0" = "$BASH_SOURCE" ]]; then
            echo "You need to run as 'source ./run.sh $1'"
            exit 1
        fi
    fi
    exec ./tools/venv "$@"
fi

noseofyeti-2.4.7/test.sh

#!/bin/bash

set -e

cd "$(git rev-parse --show-toplevel)"
exec ./tools/venv tests "$@"

noseofyeti-2.4.7/example/converted.test.py

# coding: spec

# You ensure the file has the coding: spec comment as the first line as above
# and that nose-of-yeti has registered the spec codec
# The codec will then turn what you have written into python code that can be
# executed.
# The test can then be specified using describes and its

from unittest import TestCase

def test_is_possible_to_add_numbers ():
    assert 1 +1 ==2

def test_is_possible_to_add_the_number_three (three =3 ):
    # Contrived example of default arguments
    assert 1 +three ==4

class TestPythonMathematics :
    # ^^ is replaced with "class TestPythonMathematics:"

    def test_is_able_to_add_two_numbers (self ):
        # ^^ is replaced with "def test_is_able_to_add_two_numbers(self):"
        assert 2 +3 ==5
        assert 2 +0 ==2

    def test_cant_divide_by_zero (self ):
        try :
            2 /0
            assert False ,"Expected an error"
        except ZeroDivisionError :
            pass

# We can also define a class for the describes
# Either when we create the tokeniser and register it
# Or inside the spec file itself, per describe
class DifferentBase (TestCase ):
    def x (self ):
        return 5

class TestInheritance (DifferentBase ):
    def test_has_an_x_equal_to_5 (self ):
        self .assertEqual (self .x (),5 )

# You can even nest describes
class TestNumbers (TestCase ):
    def setUp (self ):
        __import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_before_each ();self .number1 =1
        self .number2 =2

    def test_has_number1_as_1 (self ):
        self .assertEqual (self .number1 ,1 )

class TestNumbers_TestingNumber3 (TestNumbers ):
    def setUp (self ):
        __import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_before_each ();self .number3 =3

    def test_has_number1_from_the_lower_level_describe (self ):
        self .assertEqual (self .number1 ,1 )

    def test_also_has_number3 (self ):
        self .assertEqual (self .number3 ,3 )

class TestNumbers_TestingNumber3_LetsChangeANumber (TestNumbers_TestingNumber3 ):
    def setUp (self ):
        __import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_before_each ();self .number1 =4

    def test_changed_number1_but_kept_others (self ):
        self .assertEqual (self .number1 ,4 )
        self .assertEqual (self .number2 ,2 )
        self .assertEqual (self .number3 ,3 )

TestPythonMathematics .is_noy_spec =True # type: ignore
TestInheritance .is_noy_spec =True # type: ignore
TestNumbers .is_noy_spec =True # type: ignore
TestNumbers_TestingNumber3 .is_noy_spec =True # type: ignore
TestNumbers_TestingNumber3_LetsChangeANumber .is_noy_spec =True # type: ignore

TestPythonMathematics .test_cant_divide_by_zero .__testname__ ="can't divide by zero" # type: ignore

noseofyeti-2.4.7/example/test.py

# coding: spec

# You ensure the file has the coding: spec comment as the first line as above
# and that nose-of-yeti has registered the spec codec
# The codec will then turn what you have written into python code that can be
# executed.
# The test can then be specified using describes and its

from unittest import TestCase

it "is possible to add numbers":
    assert 1 + 1 == 2

it "is possible to add the number three", three=3:
    # Contrived example of default arguments
    assert 1 + three == 4

describe "Python Mathematics":
    # ^^ is replaced with "class TestPythonMathematics:"

    it 'is able to add two numbers':
        # ^^ is replaced with "def test_is_able_to_add_two_numbers(self):"
        assert 2 + 3 == 5
        assert 2 + 0 == 2

    it "can't divide by zero":
        try:
            2 / 0
            assert False, "Expected an error"
        except ZeroDivisionError:
            pass

# We can also define a class for the describes
# Either when we create the tokeniser and register it
# Or inside the spec file itself, per describe
class DifferentBase(TestCase):
    def x(self):
        return 5

describe DifferentBase 'Inheritance':
    it 'has an x equal to 5':
        self.assertEqual(self.x(), 5)

# You can even nest describes
describe TestCase, 'numbers':
    before_each:
        self.number1 = 1
        self.number2 = 2

    it 'has number1 as 1':
        self.assertEqual(self.number1, 1)

    describe 'testing number 3':
        before_each:
            self.number3 = 3

        it 'has number1 from the lower level describe':
            self.assertEqual(self.number1, 1)

        it 'also has number3':
            self.assertEqual(self.number3, 3)

        describe "let's change a number":
            before_each:
                self.number1 = 4

            it 'changed number1 but kept others':
                self.assertEqual(self.number1, 4)
                self.assertEqual(self.number2, 2)
                self.assertEqual(self.number3, 3)
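# A minimal usage sketch (not part of the package) of driving the codec by
# hand, using only the public helpers defined in
# noseOfYeti/tokeniser/spec_codec.py below: register() installs the "spec"
# codec so files like example/test.py import normally, and codec().translate()
# performs the same rewrite in memory.

from noseOfYeti.tokeniser.spec_codec import codec, register

register(transform=True)

source = '# coding: spec\nit "is possible to add numbers":\n    assert 1 + 1 == 2\n'

# Prints python source roughly like the converted example above,
# i.e. a "def test_is_possible_to_add_numbers():" function
print(codec().translate(source))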
noseofyeti-2.4.7/noseOfYeti/__init__.py

noseofyeti-2.4.7/noseOfYeti/version.py

VERSION = "2.4.7"

noseofyeti-2.4.7/noseOfYeti/black/Grammar.noy.txt

# Grammar for 2to3. This grammar supports Python 2.x and 3.x.

# NOTE WELL: You should also follow all the steps listed at
# https://devguide.python.org/grammar/

# Start symbols for the grammar:
#       file_input is a module or sequence of commands read from an input file;
#       single_input is a single interactive statement;
#       eval_input is the input for the eval() and input() functions.
# NB: compound_stmt in single_input is followed by extra NEWLINE!
file_input: (NEWLINE | stmt)* ENDMARKER
single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
eval_input: testlist NEWLINE* ENDMARKER

decorator: '@' namedexpr_test NEWLINE
decorators: decorator+
decorated: decorators (classdef | funcdef | async_funcdef | noy_stmt)
async_funcdef: ASYNC (funcdef | it_stmt | setup_teardown_stmts)
funcdef: 'def' NAME parameters ['->' test] ':' suite
parameters: '(' [typedargslist] ')'

# The following definition for typedarglist is equivalent to this set of rules:
#
#     arguments = argument (',' argument)*
#     argument = tfpdef ['=' test]
#     kwargs = '**' tname [',']
#     args = '*' [tname_star]
#     kwonly_kwargs = (',' argument)* [',' [kwargs]]
#     args_kwonly_kwargs = args kwonly_kwargs | kwargs
#     poskeyword_args_kwonly_kwargs = arguments [',' [args_kwonly_kwargs]]
#     typedargslist_no_posonly = poskeyword_args_kwonly_kwargs | args_kwonly_kwargs
#     typedarglist = arguments ',' '/' [',' [typedargslist_no_posonly]])|(typedargslist_no_posonly)"
#
# It needs to be fully expanded to allow our LL(1) parser to work on it.

typedargslist: tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [
                     ',' [((tfpdef ['=' test] ',')* ('*' [tname_star] (',' tname ['=' test])* [',' ['**' tname [',']]]
                     | '**' tname [','])
                     | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])]
                ] | ((tfpdef ['=' test] ',')* ('*' [tname_star] (',' tname ['=' test])* [',' ['**' tname [',']]]
                     | '**' tname [','])
                     | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])

tname: NAME [':' test]
tname_star: NAME [':' (test|star_expr)]
tfpdef: tname | '(' tfplist ')'
tfplist: tfpdef (',' tfpdef)* [',']

# The following definition for varargslist is equivalent to this set of rules:
#
#     arguments = argument (',' argument )*
#     argument = vfpdef ['=' test]
#     kwargs = '**' vname [',']
#     args = '*' [vname]
#     kwonly_kwargs = (',' argument )* [',' [kwargs]]
#     args_kwonly_kwargs = args kwonly_kwargs | kwargs
#     poskeyword_args_kwonly_kwargs = arguments [',' [args_kwonly_kwargs]]
#     vararglist_no_posonly = poskeyword_args_kwonly_kwargs | args_kwonly_kwargs
#     varargslist = arguments ',' '/' [','[(vararglist_no_posonly)]] | (vararglist_no_posonly)
#
# It needs to be fully expanded to allow our LL(1) parser to work on it.

varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [
                     ((vfpdef ['=' test] ',')* ('*' [vname] (',' vname ['=' test])* [',' ['**' vname [',']]]
                     | '**' vname [','])
                     | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
                     ]] | ((vfpdef ['=' test] ',')* ('*' [vname] (',' vname ['=' test])* [',' ['**' vname [',']]]
                     | '**' vname [','])
                     | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])

vname: NAME
vfpdef: vname | '(' vfplist ')'
vfplist: vfpdef (',' vfpdef)* [',']

stmt: simple_stmt | compound_stmt
simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
small_stmt: (expr_stmt | print_stmt  | del_stmt | pass_stmt | flow_stmt |
             import_stmt | global_stmt | exec_stmt | assert_stmt)
expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
                     ('=' (yield_expr|testlist_star_expr))*)
annassign: ':' test ['=' (yield_expr|testlist_star_expr)]
testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
            '<<=' | '>>=' | '**=' | '//=')
# For normal and annotated assignments, additional restrictions enforced by the interpreter
print_stmt: 'print' ( [ test (',' test)* [','] ] |
                      '>>' test [ (',' test)+ [','] ] )
del_stmt: 'del' exprlist
pass_stmt: 'pass'
flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
break_stmt: 'break'
continue_stmt: 'continue'
return_stmt: 'return' [testlist_star_expr]
yield_stmt: yield_expr
raise_stmt: 'raise' [test ['from' test | ',' test [',' test]]]
import_stmt: import_name | import_from
import_name: 'import' dotted_as_names
import_from: ('from' ('.'* dotted_name | '.'+)
              'import' ('*' | '(' import_as_names ')' | import_as_names))
import_as_name: NAME ['as' NAME]
dotted_as_name: dotted_name ['as' NAME]
import_as_names: import_as_name (',' import_as_name)* [',']
dotted_as_names: dotted_as_name (',' dotted_as_name)*
dotted_name: NAME ('.' NAME)*
global_stmt: ('global' | 'nonlocal') NAME (',' NAME)*
exec_stmt: 'exec' expr ['in' test [',' test]]
assert_stmt: 'assert' test [',' test]

compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt | match_stmt | noy_stmt
async_stmt: ASYNC (funcdef | with_stmt | for_stmt | it_stmt | setup_teardown_stmts)
if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite]
while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite]
for_stmt: 'for' exprlist 'in' testlist_star_expr ':' suite ['else' ':' suite]
try_stmt: ('try' ':' suite
           ((except_clause ':' suite)+ ['else' ':' suite]
            ['finally' ':' suite] | 'finally' ':' suite))
with_stmt: 'with' asexpr_test (',' asexpr_test)* ':' suite

# NB compile.c makes sure that the default except clause is last
except_clause: 'except' ['*'] [test [(',' | 'as') test]]
suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT

# Backward compatibility cruft to support:
# [ x for x in lambda: True, lambda: False if x() ]
# even while also allowing:
# lambda x: 5 if x else 2
# (But not a mix of the two)
testlist_safe: old_test [(',' old_test)+ [',']]
old_test: or_test | old_lambdef
old_lambdef: 'lambda' [varargslist] ':' old_test

namedexpr_test: asexpr_test [':=' asexpr_test]

# This is actually not a real rule, though since the parser is very
# limited in terms of the strategy about match/case rules, we are inserting
# a virtual case (<expr> as <expr>) as a valid expression. Unless a better
# approach is thought, the only side effect of this seem to be just allowing
# more stuff to be parser (which would fail on the ast).
asexpr_test: test ['as' test]

test: or_test ['if' or_test 'else' test] | lambdef
or_test: and_test ('or' and_test)*
and_test: not_test ('and' not_test)*
not_test: 'not' not_test | comparison
comparison: expr (comp_op expr)*
comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
star_expr: '*' expr
expr: xor_expr ('|' xor_expr)*
xor_expr: and_expr ('^' and_expr)*
and_expr: shift_expr ('&' shift_expr)*
shift_expr: arith_expr (('<<'|'>>') arith_expr)*
arith_expr: term (('+'|'-') term)*
term: factor (('*'|'@'|'/'|'%'|'//') factor)*
factor: ('+'|'-'|'~') factor | power
power: [AWAIT] atom trailer* ['**' factor]
atom: ('(' [yield_expr|testlist_gexp] ')' |
       '[' [listmaker] ']' |
       '{' [dictsetmaker] '}' |
       '`' testlist1 '`' |
       NAME | NUMBER | STRING+ | '.' '.' '.')
listmaker: (namedexpr_test|star_expr) ( old_comp_for | (',' (namedexpr_test|star_expr))* [','] )
testlist_gexp: (namedexpr_test|star_expr) ( old_comp_for | (',' (namedexpr_test|star_expr))* [','] )
lambdef: 'lambda' [varargslist] ':' test
trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
subscriptlist: (subscript|star_expr) (',' (subscript|star_expr))* [',']
subscript: test [':=' test] | [test] ':' [test] [sliceop]
sliceop: ':' [test]
exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
testlist: test (',' test)* [',']
dictsetmaker: ( ((test ':' asexpr_test | '**' expr)
                 (comp_for | (',' (test ':' asexpr_test | '**' expr))* [','])) |
                ((test [':=' test] | star_expr)
                 (comp_for | (',' (test [':=' test] | star_expr))* [','])) )

classdef: 'class' NAME ['(' [arglist] ')'] ':' suite

arglist: argument (',' argument)* [',']

# "test '=' test" is really "keyword '=' test", but we have no such token.
# These need to be in a single rule to avoid grammar that is ambiguous
# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr,
# we explicitly match '*' here, too, to give it proper precedence.
# Illegal combinations and orderings are blocked in ast.c:
# multiple (test comp_for) arguments are blocked; keyword unpackings
# that precede iterable unpackings are blocked; etc.
argument: ( test [comp_for] |
            test ':=' test [comp_for] |
            test 'as' test |
            test '=' asexpr_test |
            '**' test |
            '*' test )

comp_iter: comp_for | comp_if
comp_for: [ASYNC] 'for' exprlist 'in' or_test [comp_iter]
comp_if: 'if' old_test [comp_iter]

# As noted above, testlist_safe extends the syntax allowed in list
# comprehensions and generators. We can't use it indiscriminately in all
# derivations using a comp_for-like pattern because the testlist_safe derivation
# contains comma which clashes with trailing comma in arglist.
#
# This was an issue because the parser would not follow the correct derivation
# when parsing syntactically valid Python code. Since testlist_safe was created
# specifically to handle list comprehensions and generator expressions enclosed
# with parentheses, it's safe to only use it in those. That avoids the issue; we
# can parse code like set(x for x in [],).
#
# The syntax supported by this set of rules is not a valid Python 3 syntax,
# hence the prefix "old".
#
# See https://bugs.python.org/issue27494
old_comp_iter: old_comp_for | old_comp_if
old_comp_for: [ASYNC] 'for' exprlist 'in' testlist_safe [old_comp_iter]
old_comp_if: 'if' old_test [old_comp_iter]

testlist1: test (',' test)*

# not used in grammar, but may appear in "node" passed from Parser to Compiler
encoding_decl: NAME

yield_expr: 'yield' [yield_arg]
yield_arg: 'from' test | testlist_star_expr

# 3.10 match statement definition

# PS: normally the grammar is much much more restricted, but
# at this moment for not trying to bother much with encoding the
# exact same DSL in a LL(1) parser, we will just accept an expression
# and let the ast.parse() step of the safe mode to reject invalid
# grammar.

# The reason why it is more restricted is that, patterns are some
# sort of a DSL (more advanced than our LHS on assignments, but
# still in a very limited python subset). They are not really
# expressions, but who cares. If we can parse them, that is enough
# to reformat them.

match_stmt: "match" subject_expr ':' NEWLINE INDENT case_block+ DEDENT

# This is more permissive than the actual version. For example it
# accepts `match *something:`, even though single-item starred expressions
# are forbidden.
subject_expr: (namedexpr_test|star_expr) (',' (namedexpr_test|star_expr))* [',']

# cases
case_block: "case" patterns [guard] ':' suite
guard: 'if' namedexpr_test
patterns: pattern (',' pattern)* [',']
pattern: (expr|star_expr)

it_stmt: ('it' | 'ignore') STRING (',' NAME [':' [test]])* ':' ['->' test] NEWLINE INDENT stmt+ DEDENT
setup_teardown_stmts: ('before_each' | 'after_each') ':' suite
describe_stmt: ('context' | 'describe') [dotted_name ','] STRING ':' suite
noy_stmt: setup_teardown_stmts | it_stmt | describe_stmt['as' expr]
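
# The four productions above (it_stmt, setup_teardown_stmts, describe_stmt and
# noy_stmt) are the noseOfYeti additions to the stock blib2to3 grammar; they
# are wired in via decorated, compound_stmt and async_stmt earlier in this
# file. They let the parser accept `it "name":`, `ignore "name":`,
# `before_each:`, `after_each:` and `describe [SuperKls,] "name":` blocks, so
# black can format spec files without translating them first.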
""" if not is_supported_black_version(): return import blib2to3 from blib2to3 import pygram if not str(pygram.__loader__.path).startswith(str(Path(blib2to3.__file__).parent)): return del pygram del blib2to3 if spec_codec is None: from noseOfYeti.tokeniser.spec_codec import codec, register spec_codec = codec() register(transform=False) replace_pygram() import black from black.linegen import LineGenerator from blib2to3.pgen2 import parse as blibparse token = blibparse.token Parser = blibparse.Parser class ModifiedLineGenerator(LineGenerator): def visit_setup_teardown_stmts(self, node): yield from self.line() yield from self.visit_default(node) def visit_describe_stmt(self, node): yield from self.line() yield from self.visit_default(node) def visit_it_stmt(self, node): yield from self.line() yield from self.visit_default(node) class ModifiedParser(Parser): def classify(self, type, value, context): special = [ "it", "ignore", "context", "describe", "before_each", "after_each", ] if type == token.NAME and value in special: dfa, state, node = self.stack[-1] if node and node[-1]: if node[-1][-1].type < 256 and node[-1][-1].type not in ( token.INDENT, token.DEDENT, token.NEWLINE, token.ASYNC, ): return [self.grammar.tokens.get(token.NAME)] return super().classify(type, value, context) original_assert_equivalent = black.assert_equivalent def assert_equivalent(src, dst): if src and re.match(r"#\s*coding\s*:\s*spec", src.split("\n")[0]): src = spec_codec.translate(src, transform=True) dst = spec_codec.translate(dst, transform=True) original_assert_equivalent(src, dst) blibparse.Parser = ModifiedParser black.LineGenerator = ModifiedLineGenerator black.assert_equivalent = assert_equivalent noseofyeti-2.4.7/noseOfYeti/plugins/mypy.py0000644000000000000000000000030713615410400015745 0ustar00from mypy.plugin import Plugin from noseOfYeti.tokeniser.spec_codec import register class NoyPlugin(Plugin): pass def plugin(version: str): register(transform=True) return NoyPlugin noseofyeti-2.4.7/noseOfYeti/plugins/nosetests.py0000644000000000000000000000172513615410400017003 0ustar00from nose.plugins import Plugin from noseOfYeti.tokeniser.chooser import TestChooser from noseOfYeti.tokeniser.spec_codec import register class Plugin(Plugin): name = "noseOfYeti" def __init__(self, *args, **kwargs): self.test_chooser = TestChooser() super(Plugin, self).__init__(*args, **kwargs) def options(self, parser, env={}): super(Plugin, self).options(parser, env) parser.add_option( "--with-noy", default=False, action="store_true", dest="enabled", help="Enable nose of yeti", ) def wantModule(self, mod): self.test_chooser.new_module() def wantMethod(self, method): return self.test_chooser.consider(method) def configure(self, options, conf): super(Plugin, self).configure(options, conf) if options.enabled: self.done = {} self.enabled = True register(transform=True) noseofyeti-2.4.7/noseOfYeti/plugins/pylama.py0000644000000000000000000000321513615410400016233 0ustar00import codecs import re import pylama.context from pylama.config import LOGGER from pylama.errors import Error from pylama.lint import Linter as BaseLinter from noseOfYeti.tokeniser.spec_codec import codec, register spec_codec = codec() regexes = { "position": re.compile(r"line (\d+), column (\d+)"), "encoding": re.compile(r"#\s*coding\s*:\s*spec"), } def translate(path: str, contents: str) -> str: lines = contents.split("\n") pattern = "%(filename)s:%(lnum)s:%(col)s: [%(type)s] %(text)s" if contents and regexes["encoding"].match(lines[0]): contents = 
noseofyeti-2.4.7/noseOfYeti/plugins/mypy.py

from mypy.plugin import Plugin

from noseOfYeti.tokeniser.spec_codec import register


class NoyPlugin(Plugin):
    pass


def plugin(version: str):
    register(transform=True)
    return NoyPlugin

noseofyeti-2.4.7/noseOfYeti/plugins/nosetests.py

from nose.plugins import Plugin

from noseOfYeti.tokeniser.chooser import TestChooser
from noseOfYeti.tokeniser.spec_codec import register


class Plugin(Plugin):
    name = "noseOfYeti"

    def __init__(self, *args, **kwargs):
        self.test_chooser = TestChooser()
        super(Plugin, self).__init__(*args, **kwargs)

    def options(self, parser, env={}):
        super(Plugin, self).options(parser, env)
        parser.add_option(
            "--with-noy",
            default=False,
            action="store_true",
            dest="enabled",
            help="Enable nose of yeti",
        )

    def wantModule(self, mod):
        self.test_chooser.new_module()

    def wantMethod(self, method):
        return self.test_chooser.consider(method)

    def configure(self, options, conf):
        super(Plugin, self).configure(options, conf)
        if options.enabled:
            self.done = {}
            self.enabled = True
            register(transform=True)

noseofyeti-2.4.7/noseOfYeti/plugins/pylama.py

import codecs
import re

import pylama.context
from pylama.config import LOGGER
from pylama.errors import Error
from pylama.lint import Linter as BaseLinter

from noseOfYeti.tokeniser.spec_codec import codec, register

spec_codec = codec()

regexes = {
    "position": re.compile(r"line (\d+), column (\d+)"),
    "encoding": re.compile(r"#\s*coding\s*:\s*spec"),
}


def translate(path: str, contents: str) -> str:
    lines = contents.split("\n")
    pattern = "%(filename)s:%(lnum)s:%(col)s: [%(type)s] %(text)s"

    if contents and regexes["encoding"].match(lines[0]):
        contents = spec_codec.translate(contents)

        # Inspect the translated source, not the original, for codec errors
        lines = contents.split("\n")

        if "--- internal spec codec error ---" in lines[-1]:
            found = []
            useful = False
            for line in lines[1:-2]:
                if useful or "SyntaxError:" in line:
                    found.append(line)
                    useful = True

            lnum = "0"
            column = "1"
            for line in found:
                m = regexes["position"].search(line)
                if m:
                    lnum, column = m.groups()
                    break

            msg = "\t".join(found)
            err = Error(linter="pylama", col=int(column), lnum=int(lnum), text=msg, filename=path)
            LOGGER.warning(pattern, err._info)

    return contents


def better_read(filename: str) -> str:
    with codecs.open(filename) as fle:
        return translate(filename, fle.read())


def setup():
    register(transform=True)
    pylama.context.read = better_read


class Linter(BaseLinter):
    def allow(self, path):
        return False

    def run(self, path, **meta):
        return []


setup()

noseofyeti-2.4.7/noseOfYeti/plugins/pyls.py

import re

from pyls import hookimpl

from noseOfYeti.tokeniser.spec_codec import codec, register

spec_codec = codec()

regexes = {
    "encoding": re.compile(r"#\s*coding\s*:\s*spec"),
    "first_whitespace": re.compile(r"^(\s*)"),
}


@hookimpl(hookwrapper=True)
def pyls_initialize(config, workspace):
    register(transform=True)
    yield


@hookimpl(hookwrapper=True)
def pyls_document_did_open(config, workspace, document):
    contents = document._source
    lines = contents.split("\n")
    if contents and regexes["encoding"].match(lines[0]):
        translated = spec_codec.translate(contents)
        translated = translated.split("\n")[: len(lines)]

        replacement = []
        for orig, new in zip(lines, translated):
            if new.startswith("class"):
                # We still need the root classes to be at the start of the document
                # So that their parent classes exist at that scope
                replacement.append(new)
            else:
                # Everything else however needs to be at their original indentation
                # So that pyls doesn't get confused by columns
                or_space = regexes["first_whitespace"].search(orig).groups()[0]
                tr_space = regexes["first_whitespace"].search(new).groups()[0]
                replacement.append(f"{or_space}{new[len(tr_space):]}")

        document._source = "\n".join(replacement)
    yield

noseofyeti-2.4.7/noseOfYeti/plugins/pytest.py

"""
Modified pytest discovery classes.

This is so you can use noseOfYeti to have nested describes without
running the same tests multiple times.
"""

import inspect
from unittest import mock

import pytest
from _pytest.unittest import UnitTestCase

from noseOfYeti.tokeniser.spec_codec import register

pytest_has_instance_class = int(pytest.__version__.split(".")[0]) < 7


def pytest_configure():
    register(transform=True)


def change_item(res, obj):
    original_item_collect = res.collect

    if isinstance(res, UnitTestCase):

        def collect():
            yield from filter_collection(original_item_collect(), obj)

    else:

        def collect():
            res.session._fixturemanager.parsefactories(res)
            yield from filter_collection(original_item_collect(), obj)

    mock.patch.object(res, "collect", collect).start()


@pytest.hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem(collector, name, obj):
    """Make sure we can have nested noseOfYeti describes"""
    outcome = yield
    res = outcome.get_result()
    if not isinstance(res, pytest.Class):
        return

    change_item(res, obj)


def filter_collection(collected, obj):
    for thing in collected:
        if pytest_has_instance_class and isinstance(thing, pytest.Instance):
            if getattr(thing.obj, "is_noy_spec", False):
                change_item(thing, thing.obj)

        if not isinstance(thing, pytest.Function):
            yield thing
            continue

        try:
            isclass = inspect.isclass(obj)
            if not isclass:
                obj = obj.__class__
        except Exception:
            pass

        if obj.__dict__.get("__only_run_tests_in_children__"):
            # Only run these tests in the children, not in this class itself
            continue

        if thing.obj.__name__ in obj.__dict__:
            yield thing
        else:
            method_passed_down = any(
                thing.obj.__name__ in superkls.__dict__
                and getattr(superkls, "__only_run_tests_in_children__", False)
                for superkls in obj.__bases__
            )
            if method_passed_down:
                yield thing
noseofyeti-2.4.7/noseOfYeti/tokeniser/__init__.py

from noseOfYeti.tokeniser.spec_codec import TokeniserCodec, register
from noseOfYeti.tokeniser.support import TestSetup
from noseOfYeti.tokeniser.tokeniser import Tokeniser

__all__ = [
    "register",
    "Tokeniser",
    "TestSetup",
    "TokeniserCodec",
]

noseofyeti-2.4.7/noseOfYeti/tokeniser/chooser.py

from inspect import getmembers


class TestChooser:
    def __init__(self):
        self.new_module()

    def new_module(self):
        """Tells TestChooser that a new module has been entered"""
        self.done = {}

    def already_visited(self, kls, name):
        """Determine if a method has already been accepted for this module"""
        key = f"{kls}.{name}"
        if key not in self.done:
            self.done[key] = True
            return False
        else:
            return True

    def consider(self, method):
        """
        Determines whether a method should be considered a Test

        Returns False if it believes it isn't a test
        Will return True otherwise
        """
        if method.__name__.startswith("ignore__"):
            # Method wants to be ignored
            return False

        if hasattr(method, "__test__") and not method.__test__:
            # Method doesn't want to be tested
            return False

        kls = None
        if getattr(method, "__self__", None):
            kls = method.__self__.__class__

        if not kls:
            # im_class seems to be None in pypy
            for k, v in getmembers(method):
                if k == "im_self" and v:
                    kls = v.__class__
                    break
                elif k == "im_class" and v:
                    kls = v
                    break

        if not hasattr(kls, "is_noy_spec"):
            # Kls not a noy_spec, we don't care if it runs or not
            return None

        if kls.__dict__.get("__only_run_tests_in_children__"):
            # Only run these tests in the children, not in this class itself
            return False

        method_in_kls = method.__name__ in kls.__dict__
        method_is_test = method.__name__.startswith("test_")
        method_passed_down = any(
            method.__name__ in superkls.__dict__
            and getattr(superkls, "__only_run_tests_in_children__", False)
            for superkls in kls.__bases__
        )

        if (method_passed_down or method_in_kls) and method_is_test:
            if not self.already_visited(kls.__name__, method.__name__):
                return True

        # Is a noy_spec method but not a valid test, refuse it
        return False

noseofyeti-2.4.7/noseOfYeti/tokeniser/containers.py

import re
from tokenize import COMMENT, NAME

regexes = {
    "joins": re.compile(r"[- /]"),
    "repeated_underscore": re.compile(r"_{2,}"),
    "invalid_variable_name_start": re.compile(r"^[^a-zA-Z_]"),
    "invalid_variable_name_characters": re.compile(r"[^0-9a-zA-Z_]"),
}


def acceptable(name, capitalize=False):
    """Convert a string into something that can be used as a valid python variable name"""
    # Convert space and dashes into underscores
    name = regexes["joins"].sub("_", name)

    # Remove invalid characters
    name = regexes["invalid_variable_name_characters"].sub("", name)

    # Remove leading characters until we find a letter or underscore
    name = regexes["invalid_variable_name_start"].sub("", name)

    # Clean up irregularities in underscores.
    name = regexes["repeated_underscore"].sub("_", name.strip("_"))

    if capitalize:
        # We don't use python's built in capitalize method here because it
        # turns all upper chars into lower chars if not at the start of
        # the string and we only want to change the first character.
        name_parts = []
        for word in name.split("_"):
            name_parts.append(word[0].upper())
            if len(word) > 1:
                name_parts.append(word[1:])
        name = "".join(name_parts)

    return name
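
# For example, matching the names generated in example/converted.test.py:
#
#   acceptable("is possible to add numbers")   # -> "is_possible_to_add_numbers"
#   acceptable("can't divide by zero")         # -> "cant_divide_by_zero"
#   acceptable("let's change a number", True)  # -> "LetsChangeANumber"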

class TokenDetails:
    """Container for current token"""

    def __init__(self, tokenum=None, value=None, srow=0, scol=0):
        self.set(tokenum, value, srow, scol)

    def set(self, tokenum, value, srow, scol):
        self.srow = srow
        self.scol = scol
        self.value = value
        self.tokenum = tokenum

    def transfer(self, details):
        details.set(*self.values())

    def values(self):
        return self.tokenum, self.value, self.srow, self.scol


class Single:
    """Container for a single block (i.e. it or ignore block)"""

    def __init__(self, srow, scol, group, typ=None, indent=0):
        self.typ = typ
        self.group = group
        self.indent = indent

        self.args = []
        self.comments = []
        self.return_type = None

        self._name = None
        self.english = None
        self.skipped = False

        self.started_arg = False
        self.starting_arg = False

        if not self.group.root:
            self.args.append((NAME, "self", srow, scol))
        else:
            self.args.append((None, None, srow, scol))

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = acceptable(value)
        self.english = value

    @property
    def python_name(self):
        if self.typ == "it":
            return f"test_{self.name}"
        else:
            return f"ignore__{self.name}"

    @property
    def identifier(self):
        if self.group.root:
            return self.python_name
        else:
            return f"{self.group.kls_name}.{self.python_name}"

    def add_to_arg(self, tokenum, value, srow, scol):
        if value == "," and len(self.args) == 1 and self.args[0][0] is None:
            return

        if value == "->" and self.return_type is None:
            self.return_type = [(tokenum, value, srow, scol)]
            return

        if self.comments or tokenum is COMMENT:
            self.comments.append((tokenum, value))
        elif self.return_type is not None:
            self.return_type.append((tokenum, value, srow, scol))
        else:
            self.args.append((tokenum, value, srow, scol))


class Group:
    """Container for describe blocks"""

    def __init__(self, name=None, root=False, parent=None, level=0, typ=None):
        self.kls = None
        self.typ = typ
        self.name = name
        self.root = root
        self.empty = True
        self.level = level
        self.parent = parent
        self.singles = []

        self.has_after_each = False
        self.async_after_each = False

        self.has_before_each = False
        self.async_before_each = False

        # Default whether this group is starting anything
        self.starting_single = False

        if root:
            # Root technically isn't a group, so it doesn't have a signature to start
            self.starting_group = False
        else:
            # Group is created before we have all information
            # Hence its signature is being created
            self.starting_group = True

    def __repr__(self):
        if self.root:
            return "<Group(root)>"
        else:
            return f"<Group({self.name})>"

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value
        self.english = value
        if value:
            self._name = acceptable(value, True)

    @property
    def starting_signature(self):
        """Determine if this group is starting itself or anything belonging to it"""
        return self.starting_group or self.starting_single

    @property
    def kls_name(self):
        """Determine python name for group"""
        # Determine kls for group
        if not self.parent or not self.parent.name:
            return f"Test{self.name}"
        else:
            use = self.parent.kls_name
            if use.startswith("Test"):
                use = use[4:]
            return f"Test{use}_{self.name}"

    @property
    def super_kls(self):
        """
        Determine what kls this group inherits from

        If default kls should be used, then None is returned
        """
        if not self.kls and self.parent and self.parent.name:
            return self.parent.kls_name
        return self.kls

    def start_group(self, scol, typ):
        """Start a new group"""
        return Group(parent=self, level=scol, typ=typ)

    def start_single(self, typ, srow, scol):
        """Start a new single"""
        self.starting_single = True
        single = self.single = Single(srow, scol, self, typ=typ, indent=(scol - self.level))
        self.singles.append(single)
        return single

    def finish_signature(self):
        """Tell group it isn't starting anything anymore"""
        self.starting_group = False
        self.starting_single = False

    def modify_kls(self, name):
        """Add a part to what will end up being the kls' superclass"""
        if self.kls is None:
            self.kls = name
        else:
            self.kls += name
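
# A minimal sketch of how describe names become class names, matching the
# classes seen in example/converted.test.py:

from noseOfYeti.tokeniser.containers import Group

root = Group(root=True)
outer = root.start_group(0, "describe")
outer.name = "numbers"
inner = outer.start_group(4, "describe")
inner.name = "testing number 3"

# A nested group strips the parent's "Test" prefix before composing its own
assert outer.kls_name == "TestNumbers"
assert inner.kls_name == "TestNumbers_TestingNumber3"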
noseofyeti-2.4.7/noseOfYeti/tokeniser/spec_codec.py

import codecs
import encodings
import re
import sys
import traceback
from encodings import utf_8
from io import StringIO
from tokenize import untokenize

from noseOfYeti.tokeniser.tokeniser import Tokeniser

regexes = {
    "whitespace": re.compile(r"\s*"),
    "only_whitespace": re.compile(r"^\s*$"),
    "encoding_matcher": re.compile(r"#\s*coding\s*:\s*spec"),
    "leading_whitespace": re.compile(r"^(\s*)[^\s]"),
}


class TokeniserCodec:
    """Class to register the spec codec"""

    def __init__(self, tokeniser):
        self.tokeniser = tokeniser
        self.transform = True
        self.codec = self.get_codec()

    def translate(self, value, transform=None):
        if isinstance(value, str):
            value = value.encode()
        return self.codec.decode(
            value, return_tuple=False, transform=self.transform if transform is None else transform
        )

    def register(self):
        def search_function(s):
            """Determine if a file is of spec encoding and return special CodecInfo if it is"""
            if s != "spec":
                return None
            return self.codec

        # Do the register
        codecs.register(search_function)

    def get_codec(self):
        """Register spec codec"""
        # Assume utf8 encoding
        utf8 = encodings.search_function("utf8")

        class StreamReader(utf_8.StreamReader):
            """Used by cPython to deal with a spec file"""

            def __init__(sr, stream, *args, **kwargs):
                codecs.StreamReader.__init__(sr, stream, *args, **kwargs)
                if self.transform:
                    data = self.dealwith(sr.stream.readline)
                    sr.stream = StringIO(data)

        def _decode(text, *args, transform=None, **kwargs):
            transform = self.transform if transform is None else transform
            if not transform:
                return utf8.decode(text, *args, **kwargs)

            if hasattr(text, "tobytes"):
                text = text.tobytes().decode()
            else:
                text = text.decode()

            reader = StringIO(text)

            # Determine if we need to have imports for this string
            # It may be a fragment of the file
            has_spec = regexes["encoding_matcher"].search(reader.readline())
            no_imports = not has_spec
            reader.seek(0)

            data = self.dealwith(reader.readline, no_imports=no_imports)

            # If nothing was changed, then we want to use the original file/line
            # Also have to replace indentation of original line with indentation of new line
            # To take into account nested describes
            if text and not regexes["only_whitespace"].match(text):
                if regexes["whitespace"].sub("", text) == regexes["whitespace"].sub("", data):
                    bad_indentation = regexes["leading_whitespace"].search(text).groups()[0]
                    good_indentation = regexes["leading_whitespace"].search(data).groups()[0]
                    data = f"{good_indentation}{text[len(bad_indentation) :]}"

            # If text is empty and data isn't, then we should return text
            if len(text) == 0 and len(data) == 1:
                return "", 0

            # Return translated version and its length
            return data, len(data)

        def decode(text, *args, return_tuple=True, transform=None, **kwargs):
            ret = _decode(text, *args, transform=transform, **kwargs)
            if return_tuple:
                return ret
            else:
                return ret[0]

        class incrementaldecoder(utf8.incrementaldecoder):
            def decode(s, obj, final, **kwargs):
                if not self.transform:
                    return super().decode(obj, final, **kwargs)

                lines = obj.split("\n".encode("utf-8"))
                if re.match(r"#\s*coding:\s*spec", lines[0].decode("utf-8", "replace")) and final:
                    kwargs["return_tuple"] = False
                    return decode(obj, final, **kwargs)
                else:
                    return super().decode(obj, final, **kwargs)

        return codecs.CodecInfo(
            name="spec",
            encode=utf8.encode,
            decode=decode,
            streamreader=StreamReader,
            streamwriter=utf8.streamwriter,
            incrementalencoder=utf8.incrementalencoder,
            incrementaldecoder=incrementaldecoder,
        )

    def dealwith(self, readline, **kwargs):
        """
        Replace the contents of spec file with the translated version

        readline should be a callable object,
        which provides the same interface as the readline() method of
        built-in file objects
        """
        data = []
        try:
            # We pass in the data variable as an argument so that we
            # get partial output even in the case of an exception.
            self.tokeniser.translate(readline, data, **kwargs)
        except:
            lines = ['msg = r"""']
            for line in traceback.format_exception(*sys.exc_info()):
                lines.append(line.strip())
            lines.append('"""')
            lines.append(r'raise Exception(f"--- internal spec codec error --- \n{msg}")')
            data = "\n".join(lines)
        else:
            # At this point, data is a list of tokens
            data = untokenize(data)

            # python3.9 requires a newline at the end
            data += "\n"

        return data

    def output_for_debugging(self, stream, data):
        """It will write the translated version of the file"""
        with open(f"{stream.name}.spec.out", "w") as f:
            f.write(str(data))


########################
### CODEC REGISTER
########################

_spec_codec = None


def codec():
    """Return the codec used to translate a file"""
    global _spec_codec
    if _spec_codec is None:
        _spec_codec = TokeniserCodec(Tokeniser())
    return _spec_codec


def register(transform=True):
    """Get a codec and register it in python"""
    do_register = False
    try:
        codecs.lookup("spec")
    except LookupError:
        do_register = True

    cdc = codec()
    cdc.transform = transform

    if do_register:
        cdc.register()
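
# A minimal sketch (with a hypothetical file name) of what registering the
# codec enables: a module declaring `# coding: spec` can be imported like any
# other python module once register() has run.

import importlib
import pathlib
import sys

from noseOfYeti.tokeniser.spec_codec import register

register(transform=True)

pathlib.Path("example_spec.py").write_text(
    '# coding: spec\n\nit "adds":\n    assert 1 + 1 == 2\n'
)
sys.path.insert(0, ".")

mod = importlib.import_module("example_spec")
mod.test_adds()  # the `it` block was translated into a plain test function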
noseofyeti-2.4.7/noseOfYeti/tokeniser/support.py

"""
A helper module to access the superclass' setUp() and tearDown()
methods of generated test classes.
"""

import asyncio


class TestSetup:
    def __init__(self, sup):
        self.sup = sup

    @property
    def setup(self):
        if hasattr(self.sup, "setup"):
            return self.sup.setup
        if hasattr(self.sup, "setUp"):
            return self.sup.setUp

    @property
    def teardown(self):
        if hasattr(self.sup, "teardown"):
            return self.sup.teardown
        if hasattr(self.sup, "tearDown"):
            return self.sup.tearDown

    def sync_before_each(self):
        setup = self.setup
        if setup:
            return setup()

    def sync_after_each(self):
        teardown = self.teardown
        if teardown:
            return teardown()

    async def async_before_each(self):
        res = self.sync_before_each()
        if res and asyncio.iscoroutine(res):
            return await res
        else:
            return res

    async def async_after_each(self):
        res = self.sync_after_each()
        if res and asyncio.iscoroutine(res):
            return await res
        else:
            return res
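
# A small sketch of what the generated setUp bodies rely on: TestSetup finds
# setUp/tearDown (or setup/teardown) on the superclass so a before_each block
# can chain into unittest's hooks, exactly as in example/converted.test.py.

from unittest import TestCase


class Base(TestCase):
    def setUp(self):
        self.number1 = 1


class Child(Base):
    def setUp(self):
        # This is the line the tokeniser generates for `before_each:`
        __import__("noseOfYeti").tokeniser.TestSetup(super()).sync_before_each()
        self.number2 = 2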
noseofyeti-2.4.7/noseOfYeti/tokeniser/tokeniser.py

import os
from tokenize import generate_tokens

from noseOfYeti.tokeniser.tokens import Tokens
from noseOfYeti.tokeniser.tracker import Tracker

WITH_IT_RETURN_TYPE_ENV_NAME = "NOSE_OF_YETI_IT_RETURN_TYPE"


class Tokeniser:
    """Endpoint for tokenising a file"""

    def __init__(self, with_describe_attrs=True, with_it_return_type=None):
        if with_it_return_type is None:
            self.with_it_return_type = os.environ.get(
                WITH_IT_RETURN_TYPE_ENV_NAME, ""
            ).lower() not in ("", "false", "0")
        else:
            self.with_it_return_type = with_it_return_type

        self.with_describe_attrs = with_describe_attrs

    def translate(self, readline, result=None, no_imports=None):
        # Tracker to keep track of information as the file is processed
        self.tokens = Tokens()
        self.tracker = Tracker(result, self.tokens, with_it_return_type=self.with_it_return_type)

        try:
            # Looking at all the tokens
            with self.tracker.add_phase() as tracker:
                for tokenum, value, (srow, scol), _, _ in generate_tokens(readline):
                    tracker.next_token(tokenum, value, srow, scol)
        finally:
            # Complain about mismatched brackets
            self.tracker.raise_about_open_containers()

        # Add attributes to our Describes so that the plugin can handle some nesting issues
        # Where we have tests in upper level describes being run in lower level describes
        if self.with_describe_attrs:
            self.tracker.make_describe_attrs()

        # Add lines to bottom of file to add __testname__ attributes
        self.tracker.make_method_names()

        # Return translated list of tokens
        return self.tracker.result
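
# A sketch of using Tokeniser directly, the same way dealwith() in
# spec_codec.py drives it: feed it a readline callable and untokenize the
# resulting token list.

from io import StringIO
from tokenize import untokenize

from noseOfYeti.tokeniser.tokeniser import Tokeniser

source = 'describe "Maths":\n    it "adds":\n        assert 1 + 1 == 2\n'
tokens = Tokeniser().translate(StringIO(source).readline)

# Roughly: class TestMaths with a test_adds(self) method, plus the
# is_noy_spec attribute assignment appended at the bottom
print(untokenize(tokens))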
(NAME, "__testname__"), (OP, "="), (STRING, english), (COMMENT, " # type: ignore"), ] ) return result noseofyeti-2.4.7/noseOfYeti/tokeniser/tracker.py0000644000000000000000000005241013615410400016726 0ustar00import re from contextlib import contextmanager from tokenize import ( COMMENT, DEDENT, ENDMARKER, ERRORTOKEN, INDENT, NAME, NEWLINE, OP, STRING, ) from noseOfYeti.tokeniser.containers import Group, TokenDetails try: from tokenize import FSTRING_END, FSTRING_START except ImportError: FSTRING_START, FSTRING_END = None, None # Regex for matching whitespace regexes = {"whitespace": re.compile(r"\s+")} class WildCard: """Used to determine if tokens should be inserted until ignored token""" def __repr__(self): return "" class Tracker: """Keep track of what each next token should mean""" def __init__(self, result, tokens, with_it_return_type=False): if result is None: self.result = [] else: self.result = result self.with_it_return_type = with_it_return_type self.single = None self.tokens = tokens self.groups = Group(root=True) self.current = TokenDetails() self.all_groups = [self.groups] self.in_container = False self.containers = [] self.ignore_next = [] self.indent_amounts = [] self.adjust_indent_at = [] self.indent_type = " " self.insert_till = None self.after_space = True self.inserted_line = False self.after_an_async = False self.f_string_level = 0 self.just_ended_container = False self.just_started_container = False @contextmanager def add_phase(self): """Context manager for when adding all the tokens""" # add stuff yield self # Make sure we output eveything self.finish_hanging() # Remove trailing indents and dedents while len(self.result) > 1 and self.result[-2][0] in (INDENT, ERRORTOKEN, NEWLINE): self.result.pop(-2) def next_token(self, tokenum, value, srow, scol): """Determine what to do with the next token""" # Make self.current reflect these values self.current.set(tokenum, value, srow, scol) # Determine indent_type based on this token if self.current.tokenum == INDENT and self.current.value: self.indent_type = self.current.value[0] # Only proceed if we shouldn't ignore this token if not self.ignore_token(): # Determining the f string level self.determine_f_string_level() # Determining if this token is whitespace self.determine_if_whitespace() # Determine if inside a container self.determine_inside_container() # Change indentation as necessary self.determine_indentation() # See if we are force inserting this token if self.forced_insert(): return # If we have a newline after an inserted line, then we don't need to worry about semicolons if self.inserted_line and self.current.tokenum == NEWLINE: self.inserted_line = False # If we have a non space, non comment after an inserted line, then insert a semicolon if self.result and not self.is_space and self.inserted_line: if self.current.tokenum != COMMENT: self.result.append((OP, ";")) self.inserted_line = False # Progress the tracker self.progress() # Add a newline if we just skipped a single if self.single and self.single.skipped: self.single.skipped = False self.result.append((NEWLINE, "\n")) # Set after_space so next line knows if it is after space self.after_space = self.is_space def raise_about_open_containers(self): if self.containers: val, srow, scol = self.containers[-1] where = f"line {srow}, column {scol}" raise SyntaxError(f"Found an open '{val}' ({where}) that wasn't closed") ######################## ### PROGRESS ######################## def progress(self): """ Deal with next token Used to create, fillout and end groups and 
noseofyeti-2.4.7/noseOfYeti/tokeniser/tracker.py

import re
from contextlib import contextmanager
from tokenize import (
    COMMENT,
    DEDENT,
    ENDMARKER,
    ERRORTOKEN,
    INDENT,
    NAME,
    NEWLINE,
    OP,
    STRING,
)

from noseOfYeti.tokeniser.containers import Group, TokenDetails

try:
    from tokenize import FSTRING_END, FSTRING_START
except ImportError:
    FSTRING_START, FSTRING_END = None, None

# Regex for matching whitespace
regexes = {"whitespace": re.compile(r"\s+")}


class WildCard:
    """Used to determine if tokens should be inserted until ignored token"""

    def __repr__(self):
        return "<WildCard>"


class Tracker:
    """Keep track of what each next token should mean"""

    def __init__(self, result, tokens, with_it_return_type=False):
        if result is None:
            self.result = []
        else:
            self.result = result

        self.with_it_return_type = with_it_return_type

        self.single = None
        self.tokens = tokens
        self.groups = Group(root=True)
        self.current = TokenDetails()
        self.all_groups = [self.groups]

        self.in_container = False
        self.containers = []

        self.ignore_next = []
        self.indent_amounts = []
        self.adjust_indent_at = []

        self.indent_type = " "
        self.insert_till = None
        self.after_space = True
        self.inserted_line = False
        self.after_an_async = False
        self.f_string_level = 0
        self.just_ended_container = False
        self.just_started_container = False

    @contextmanager
    def add_phase(self):
        """Context manager for when adding all the tokens"""
        # add stuff
        yield self

        # Make sure we output everything
        self.finish_hanging()

        # Remove trailing indents and dedents
        while len(self.result) > 1 and self.result[-2][0] in (INDENT, ERRORTOKEN, NEWLINE):
            self.result.pop(-2)

    def next_token(self, tokenum, value, srow, scol):
        """Determine what to do with the next token"""

        # Make self.current reflect these values
        self.current.set(tokenum, value, srow, scol)

        # Determine indent_type based on this token
        if self.current.tokenum == INDENT and self.current.value:
            self.indent_type = self.current.value[0]

        # Only proceed if we shouldn't ignore this token
        if not self.ignore_token():
            # Determining the f string level
            self.determine_f_string_level()

            # Determining if this token is whitespace
            self.determine_if_whitespace()

            # Determine if inside a container
            self.determine_inside_container()

            # Change indentation as necessary
            self.determine_indentation()

            # See if we are force inserting this token
            if self.forced_insert():
                return

            # If we have a newline after an inserted line, then we don't need to worry about semicolons
            if self.inserted_line and self.current.tokenum == NEWLINE:
                self.inserted_line = False

            # If we have a non space, non comment after an inserted line, then insert a semicolon
            if self.result and not self.is_space and self.inserted_line:
                if self.current.tokenum != COMMENT:
                    self.result.append((OP, ";"))
                self.inserted_line = False

            # Progress the tracker
            self.progress()

            # Add a newline if we just skipped a single
            if self.single and self.single.skipped:
                self.single.skipped = False
                self.result.append((NEWLINE, "\n"))

        # Set after_space so next line knows if it is after space
        self.after_space = self.is_space

    def raise_about_open_containers(self):
        if self.containers:
            val, srow, scol = self.containers[-1]
            where = f"line {srow}, column {scol}"
            raise SyntaxError(f"Found an open '{val}' ({where}) that wasn't closed")

    ########################
    ### PROGRESS
    ########################

    def progress(self):
        """
        Deal with next token

        Used to create, fillout and end groups and singles
        As well as just append everything else
        """
        tokenum, value, srow, scol = self.current.values()

        # Default to not appending anything
        just_append = False

        # Prevent from group having automatic pass given to it
        # If it already has a pass
        if tokenum == NAME and value == "pass":
            self.groups.empty = False

        # Set variables to be used later on to determine if this will likely make group not empty
        created_group = False
        found_content = False
        if not self.groups.starting_group and not self.is_space:
            found_content = True

        if self.groups.starting_group:
            # Inside a group signature, add to it
            if tokenum == STRING:
                self.groups.name = value

            elif tokenum == NAME or (tokenum == OP and value == "."):
                # Modify super class for group
                self.groups.modify_kls(value)

            elif tokenum == NEWLINE:
                # Premature end of group
                self.add_tokens_for_group(with_pass=True)

            elif tokenum == OP and value == ":":
                # Proper end of group
                self.add_tokens_for_group()

        elif self.groups.starting_single:
            # Inside single signature, add to it
            if tokenum == STRING and not self.single.name:
                self.single.name = value
                self.single.args[0] = (
                    self.single.args[0][0],
                    self.single.args[0][1],
                    srow,
                    scol + len(value),
                )

            elif tokenum == NEWLINE and not self.in_container:
                self.add_tokens_for_single()
                self.result.append((tokenum, value))

            elif value and self.single.name:
                # Only want to add args after the name for the single has been specified
                self.single.add_to_arg(tokenum, value, srow, scol)

        elif self.after_space or self.after_an_async or scol == 0 and tokenum == NAME:
            # set after_an_async if we found an async by itself
            # So that we can just have that prepended and still be able to interpret our special blocks
            with_async = self.after_an_async
            if not self.after_an_async and value == "async":
                self.after_an_async = True
            else:
                self.after_an_async = False

            if value == "describe":
                created_group = True

                # add pass to previous group if nothing added between then and now
                if self.groups.empty and not self.groups.root:
                    self.add_tokens_for_pass()

                # Start new group
                self.groups = self.groups.start_group(scol, value)
                self.all_groups.append(self.groups)

            elif value in ("it", "ignore"):
                self.single = self.groups.start_single(value, srow, scol)

            elif value in ("before_each", "after_each"):
                setattr(self.groups, f"has_{value}", True)
                if with_async:
                    setattr(self.groups, f"async_{value}", True)
                self.add_tokens_for_test_helpers(value, with_async=with_async)

            else:
                just_append = True

        else:
            # Don't care about it, append!
            just_append = True

        # Found something that isn't whitespace or a new group
        # Hence current group isn't empty!
        if found_content and not created_group:
            self.groups.empty = False

        # Just append if token should be
        if just_append:
            # Make sure comments are indented appropriately
            add_dedent = False
            if tokenum == COMMENT and not self.in_container:
                indent = self.indent_type * (self.current.scol - self.groups.level)
                self.result.append((INDENT, indent))
                add_dedent = True

            self.result.append([tokenum, value])

            if add_dedent:
                self.result.append((DEDENT, ""))

    ########################
    ### UTILITY
    ########################

    def add_tokens(self, tokens):
        """Add tokens to result"""
        self.result.extend([d for d in tokens])

    def reset_indentation(self, amount):
        """Replace previous indentation with desired amount"""
        while self.result and self.result[-1][0] == INDENT:
            self.result.pop()
        self.result.append((INDENT, amount))

    def ignore_token(self):
        """Determine if we should ignore current token"""

        def get_next_ignore(remove=False):
            """Get next ignore from ignore_next and remove from ignore_next"""
            next_ignore = self.ignore_next

            # Just want to return it, don't want to remove yet
            if not remove:
                if type(self.ignore_next) in (list, tuple):
                    next_ignore = self.ignore_next[0]
                return next_ignore

            # Want to remove it from ignore_next
            if type(next_ignore) in (list, tuple) and next_ignore:
                next_ignore = self.ignore_next.pop(0)
            elif not next_ignore:
                self.ignore_next = None
                next_ignore = None
            else:
                self.ignore_next = None

            return next_ignore

        # If we have tokens to be ignored and we're not just inserting till some token
        if not self.insert_till and self.ignore_next:
            # Determine what the next ignore is
            next_ignore = get_next_ignore()

            if next_ignore == (self.current.tokenum, self.current.value):
                # Found the next ignore token, remove it from the stack
                # So that the next ignorable token can be considered
                get_next_ignore(remove=True)
                return True
            else:
                # If not a wildcard, then return now
                if type(next_ignore) is not WildCard:
                    return False

                # Go through tokens until we find one that isn't a wildcard
                while type(next_ignore) == WildCard:
                    next_ignore = get_next_ignore(remove=True)

                # If the next token is next ignore then we're done here!
                if next_ignore == (self.current.tokenum, self.current.value):
                    get_next_ignore(remove=True)
                    return True
                else:
                    # If there is another token to ignore, then consider the wildcard
                    # And keep inserting till we reach this next ignorable token
                    if next_ignore:
                        self.insert_till = next_ignore
                    return False

    def make_method_names(self):
        """Create tokens for setting __testname__ on functions"""
        lst = [(NEWLINE, "\n"), (INDENT, "")]
        added = False
        for group in self.all_groups:
            for single in group.singles:
                name, english = single.name, single.english
                if english[1:-1] != name.replace("_", " "):
                    lst.extend(self.tokens.make_name_modifier(single.identifier, english))
                    added = True

        if not added:
            return

        endmarker = False
        if not all(l[0] == DEDENT for l in lst):
            if self.result and self.result[-1][0] is ENDMARKER:
                endmarker = True
                self.result.pop()

        self.result.extend(lst)
        if endmarker:
            self.result.append((ENDMARKER, ""))

    def make_describe_attrs(self):
        """Create tokens for setting is_noy_spec on describes"""
        if self.all_groups:
            self.result.append((NEWLINE, "\n"))
            self.result.append((INDENT, ""))

            for group in self.all_groups:
                if group.name:
                    self.result.extend(self.tokens.make_describe_attr(group.kls_name))

    def forced_insert(self):
        """
        Insert tokens if self.insert_till hasn't been reached yet

        Will respect self.inserted_line and make sure token is inserted before it

        Returns True if it appends anything or if it reached the insert_till token
        """
        # If we have any tokens we are waiting for
        if self.insert_till:
            # Determine where to append this token
            append_at = -1
            if self.inserted_line:
                append_at = -self.inserted_line + 1

            # Reset insert_till if we found it
            if (
                self.current.tokenum == self.insert_till[0]
                and self.current.value == self.insert_till[1]
            ):
                self.insert_till = None
            else:
                # Adjust self.adjust_indent_at to take into account the new token
                for index, value in enumerate(self.adjust_indent_at):
                    if value < len(self.result) - append_at:
                        self.adjust_indent_at[index] = value + 1

            # Insert the new token
            self.result.insert(append_at, (self.current.tokenum, self.current.value))

            # We appended the token
            return True

    ########################
    ### ADD TOKENS
    ########################

    def add_tokens_for_pass(self):
        """Add tokens for a pass to result"""
        # Make sure pass not added to group again
        self.groups.empty = False

        # Remove existing newline/indentation
        while self.result[-1][0] in (INDENT, NEWLINE):
            self.result.pop()

        # Add pass and indentation
        self.add_tokens(
            [(NAME, "pass"), (NEWLINE, "\n"), (INDENT, self.indent_type * self.current.scol)]
        )

    def add_tokens_for_test_helpers(self, value, with_async=False):
        """Add setup/teardown function to group"""
        # Add tokens for this block
        tokens = getattr(self.tokens, value)
        self.result.extend(tokens)

        # Add super call if we're inside a class
        if not self.groups.root:
            # We need to adjust the indent before the super call later on
            self.adjust_indent_at.append(len(self.result) + 2)

            # Add tokens for super call
            tokens_for_super = self.tokens.make_super(
                self.indent_type * self.current.scol,
                self.groups.kls_name,
                value,
                with_async=with_async,
            )
            self.result.extend(tokens_for_super)

            # Tell the machine we inserted a line
            self.inserted_line = len(tokens_for_super)

            # Make sure colon and newline are ignored
            # Already added as part of making super
            self.ignore_next = [(OP, ":"), WildCard(), (NEWLINE, "\n")]

    def add_tokens_for_group(self, with_pass=False):
        """Add the tokens for the group signature"""
        kls = self.groups.super_kls
        name = self.groups.kls_name

        # Reset indentation to beginning and add signature
        self.reset_indentation("")
        self.result.extend(self.tokens.make_describe(kls, name))

        # Add pass if necessary
        if with_pass:
            self.add_tokens_for_pass()

        self.groups.finish_signature()

    def add_tokens_for_single(self):
        """Add the tokens for the single signature"""
        args = self.single.args
        name = self.single.python_name
        comments = self.single.comments
        return_type = self.single.return_type

        if return_type is None and self.with_it_return_type:
            return_type = True

        # Reset indentation to proper amount
        if not self.result or self.result[-1][0] != NAME:
            self.reset_indentation(self.indent_type * self.single.indent)

        # And add signature
        self.result.extend(self.tokens.make_single(name, args, comments, return_type))

        self.groups.finish_signature()

    def finish_hanging(self):
        """Add tokens for hanging signature if any"""
        if self.groups.starting_signature:
            if self.groups.starting_group:
                self.add_tokens_for_group(with_pass=True)
            elif self.groups.starting_single:
                self.add_tokens_for_single()

    ########################
    ### DETERMINE INFORMATION
    ########################

    def determine_if_whitespace(self):
        """
        Set is_space if current token is whitespace

        Is space if value is:
        * Newline
        * Empty String
        * Something that matches regexes['whitespace']
        """
        value = self.current.value
        if value == "\n":
            self.is_space = True
        else:
            self.is_space = False
            if value == "" or regexes["whitespace"].match(value):
                if self.f_string_level == 0:
                    self.is_space = True

    def determine_f_string_level(self):
        """
        Set self.f_string_level depending on FSTRING_{START,END}
        """
        if self.current.tokenum == FSTRING_START:
            self.f_string_level += 1
        if self.current.tokenum == FSTRING_END:
            if self.f_string_level > 0:
                self.f_string_level -= 1

    def determine_inside_container(self):
        """
        Set self.in_container if we're inside a container

        * Inside container
        * Current token starts a new container
        * Current token ends all containers
        """
        tokenum, value = self.current.tokenum, self.current.value
        ending_container = False
        starting_container = False

        if tokenum == OP:
            srow = self.current.srow
            scol = self.current.scol

            # Record when we're inside a container of some sort (tuple, list, dictionary)
            # So that we can care about that when determining what to do with whitespace
            if value in ["(", "[", "{"]:
                # add to the stack because we started a list
                self.containers.append((value, srow, scol))
                starting_container = True

            elif value in [")", "]", "}"]:
                # not necessary to check for correctness
                if not self.containers:
                    raise SyntaxError(f"Found a hanging '{value}' on line {srow}, column {scol}")

                v, sr, sc = self.containers.pop()
                if v != {")": "(", "]": "[", "}": "{"}[value]:
                    found_at = f"line {srow}, column {scol}"
                    found_last = f"line {sr}, column {sc}"
                    msg = "Trying to close the wrong type of bracket"
                    msg = f"{msg}. Found '{value}' ({found_at}) instead of closing a '{v}' ({found_last})"
                    raise SyntaxError(msg)

                ending_container = True

        self.just_ended_container = not len(self.containers) and ending_container
        self.just_started_container = len(self.containers) == 1 and starting_container
        self.in_container = (
            len(self.containers) or self.just_ended_container or self.just_started_container
        )

    def determine_indentation(self):
        """Reset indentation for current token and in self.result to be consistent and normalized"""
        # Ensuring NEWLINE tokens are actually specified as such
        if self.current.tokenum != NEWLINE and self.current.value == "\n":
            self.current.tokenum = NEWLINE

        # I want to change dedents into indents, because they seem to screw nesting up
        if self.current.tokenum == DEDENT:
            self.current.tokenum, self.current.value = self.convert_dedent()

        if (
            self.after_space
            and not self.is_space
            and (not self.in_container or self.just_started_container)
        ):
            # Record current indentation level
            if not self.indent_amounts or self.current.scol > self.indent_amounts[-1]:
                self.indent_amounts.append(self.current.scol)

            # Adjust indent as necessary
            while self.adjust_indent_at:
                self.result[self.adjust_indent_at.pop()] = (
                    INDENT,
                    self.indent_type * (self.current.scol - self.groups.level),
                )

        # Roll back groups as necessary
        if not self.is_space and not self.in_container:
            while not self.groups.root and self.groups.level >= self.current.scol:
                self.finish_hanging()
                self.groups = self.groups.parent

        # Reset indentation to deal with nesting
        if self.current.tokenum == INDENT and not self.groups.root:
            self.current.value = self.current.value[self.groups.level :]

    def convert_dedent(self):
        """Convert a dedent into an indent"""
        # Dedent means go back to last indentation
        if self.indent_amounts:
            self.indent_amounts.pop()

        # Change the token
        tokenum = INDENT

        # Get last indent amount
        last_indent = 0
        if self.indent_amounts:
            last_indent = self.indent_amounts[-1]

        # Make sure we don't have multiple indents in a row
        while self.result[-1][0] == INDENT:
            self.result.pop()

        value = self.indent_type * last_indent
        return tokenum, value
Found '{value}' ({found_at}) instead of closing a '{v}' ({found_last})" raise SyntaxError(msg) ending_container = True self.just_ended_container = not len(self.containers) and ending_container self.just_started_container = len(self.containers) == 1 and starting_container self.in_container = ( len(self.containers) or self.just_ended_container or self.just_started_container ) def determine_indentation(self): """Reset indentation for current token and in self.result to be consistent and normalized""" # Ensuring NEWLINE tokens are actually specified as such if self.current.tokenum != NEWLINE and self.current.value == "\n": self.current.tokenum = NEWLINE # I want to change dedents into indents, because they seem to screw nesting up if self.current.tokenum == DEDENT: self.current.tokenum, self.current.value = self.convert_dedent() if ( self.after_space and not self.is_space and (not self.in_container or self.just_started_container) ): # Record current indentation level if not self.indent_amounts or self.current.scol > self.indent_amounts[-1]: self.indent_amounts.append(self.current.scol) # Adjust indent as necessary while self.adjust_indent_at: self.result[self.adjust_indent_at.pop()] = ( INDENT, self.indent_type * (self.current.scol - self.groups.level), ) # Roll back groups as necessary if not self.is_space and not self.in_container: while not self.groups.root and self.groups.level >= self.current.scol: self.finish_hanging() self.groups = self.groups.parent # Reset indentation to deal with nesting if self.current.tokenum == INDENT and not self.groups.root: self.current.value = self.current.value[self.groups.level :] def convert_dedent(self): """Convert a dedent into an indent""" # Dedent means go back to last indentation if self.indent_amounts: self.indent_amounts.pop() # Change the token tokenum = INDENT # Get last indent amount last_indent = 0 if self.indent_amounts: last_indent = self.indent_amounts[-1] # Make sure we don't have multiple indents in a row while self.result[-1][0] == INDENT: self.result.pop() value = self.indent_type * last_indent return tokenum, value noseofyeti-2.4.7/tests/__init__.py0000644000000000000000000000000013615410400014071 0ustar00noseofyeti-2.4.7/tests/conftest.py0000644000000000000000000001101013615410400014162 0ustar00import os import re import shutil import tempfile from contextlib import contextmanager from io import StringIO from textwrap import dedent from tokenize import untokenize import pytest from noseOfYeti.tokeniser.tokeniser import WITH_IT_RETURN_TYPE_ENV_NAME @pytest.fixture(autouse=True) def remove_with_it_return_type_env(monkeypatch): monkeypatch.delenv(WITH_IT_RETURN_TYPE_ENV_NAME, raising=False) @pytest.fixture() def a_temp_file(): @contextmanager def a_temp_file(contents): tempfle = None try: tempfle = tempfile.NamedTemporaryFile(delete=False) with open(tempfle.name, "w") as fle: fle.write(dedent(contents)) yield tempfle.name finally: if tempfle: if os.path.exists(tempfle.name): os.remove(tempfle.name) return a_temp_file @pytest.fixture() def a_temp_dir(): @contextmanager def a_temp_dir(): tempdir = None try: tempdir = tempfile.mkdtemp() yield tempdir finally: if tempdir: if os.path.exists(tempdir): shutil.rmtree(tempdir) return a_temp_dir def pytest_configure(): @pytest.helpers.register def assert_regex_lines(got_lines, want_lines, lstrip=True, rstrip=False): __tracebackhide__ = True got_lines = dedent(got_lines) want_lines = dedent(want_lines) if lstrip: got_lines = got_lines.lstrip() want_lines = want_lines.lstrip() if rstrip: got_lines = 
got_lines.rstrip() want_lines = want_lines.rstrip() print("GOT LINES\n=========") print(got_lines) print("\n\n\nWANT LINES\n==========") print(want_lines) for i, (wl, gl) in enumerate(zip(want_lines.split("\n"), got_lines.split("\n"))): try: m = re.match(wl, gl) except re.error as error: pytest.fail(f"Failed to turn line into a regex\nCONVERTING: {wl}\nERROR: {error}") if not m: pytest.fail(f"line {i} does not match. Wanted:\n{wl}\n\nGot:\n{gl}") @pytest.helpers.register def assert_lines(got_lines, want_lines, lstrip=True, rstrip=False, rstrip_got_lines=True): __tracebackhide__ = True got_lines = dedent(got_lines) want_lines = dedent(want_lines) if lstrip: got_lines = got_lines.lstrip() want_lines = want_lines.lstrip() if rstrip: got_lines = got_lines.rstrip() want_lines = want_lines.rstrip() print("GOT LINES\n=========") print(got_lines) print("\n\n\nWANT LINES\n==========") print(want_lines) for i, (wl, gl) in enumerate(zip(want_lines.split("\n"), got_lines.split("\n"))): if rstrip_got_lines: gl = gl.rstrip() if wl != gl: pytest.fail(f"line {i} does not match. Wanted:\n{wl}\n\nGot:\n{gl}") @pytest.helpers.register def assert_conversion( original, want_lines, *, tokeniser=None, regex=False, lstrip=True, rstrip=False ): __tracebackhide__ = True from noseOfYeti.tokeniser import Tokeniser original = dedent(original) if lstrip: original = original.lstrip() if rstrip: original = original.rstrip() if tokeniser is None: tokeniser = {} for give_return_types, ret in ((False, ""), (True, "->None ")): orig = original.replace("$RET", ret) want = want_lines.replace("$RET", ret) if give_return_types: kwargs = {**tokeniser, "with_it_return_type": True} else: kwargs = {**tokeniser, "with_it_return_type": False} s = StringIO(orig) tok = Tokeniser(**kwargs) got_lines = untokenize(tok.translate(s.readline)) if regex: pytest.helpers.assert_regex_lines(got_lines, want, lstrip=lstrip, rstrip=rstrip) else: pytest.helpers.assert_lines(got_lines, want, lstrip=lstrip, rstrip=rstrip) @pytest.helpers.register def assert_example(example, convert_to_tabs=False, **kwargs): __tracebackhide__ = True original, desired = example if convert_to_tabs: original = original.replace(" ", "\t") desired = desired.replace(" ", "\t") pytest.helpers.assert_conversion(original, desired, tokeniser=kwargs) noseofyeti-2.4.7/tests/test_chooser_test.py0000644000000000000000000001016113615410400016103 0ustar00import pytest from noseOfYeti.tokeniser.chooser import TestChooser as Chooser @pytest.fixture() def test_chooser(): return Chooser() @pytest.fixture() def Classes(): class Classes: class TestKlsForTest: def ignore__test(self): pass def test_with__test__set(self): pass test_with__test__set.__test__ = False def test_actual(self): pass class TestIgnoredKls: def test_things(self): pass class TestKlsWithInherited(TestKlsForTest): def test_on_subclass(self): pass class TestKlsParent: __only_run_tests_in_children__ = True def test_one(self): pass def test_two(self): pass class TestKlsChild(TestKlsParent): pass class TestKlsGrandChild(TestKlsChild): pass return Classes class Test_TestChooser: def test_it_resets_done_when_told_about_new_module(self, test_chooser): assert test_chooser.done == {} test_chooser.done["a"] = 3 assert test_chooser.done == {"a": 3} test_chooser.new_module() assert test_chooser.done == {} def test_already_visited_puts_kls_name_key_in_done_or_returns_True(self, test_chooser): assert test_chooser.done == {} assert not test_chooser.already_visited("a", "b") assert test_chooser.done == {"a.b": True} assert 
test_chooser.already_visited("a", "b") assert not test_chooser.already_visited("c", "d") assert test_chooser.done == {"a.b": True, "c.d": True} assert test_chooser.already_visited("c", "d") class Test_TestChooser_Consider: def test_it_ignores_if_method_starts_with_ignore(self, test_chooser, Classes): assert not test_chooser.consider(Classes.TestKlsForTest().ignore__test) def test_it_ignores_if_method_has__test__set_to_false(self, test_chooser, Classes): assert not test_chooser.consider(Classes.TestKlsForTest().test_with__test__set) def test_it_returns_None_if_kls_does_not_have_is_noy_test_set(self, test_chooser, Classes): assert test_chooser.consider(Classes.TestKlsForTest().test_actual) is None def test_it_ignores_inherited_tests_if_is_noy_test_is_set_on_kls(self, test_chooser, Classes): assert test_chooser.consider(Classes.TestKlsWithInherited().test_actual) is None Classes.TestKlsWithInherited.is_noy_spec = True assert not test_chooser.consider(Classes.TestKlsWithInherited().test_actual) assert test_chooser.consider(Classes.TestKlsWithInherited().test_on_subclass) def test_it_ignores_functions_already_visited(self, test_chooser, Classes): Classes.TestKlsWithInherited.is_noy_spec = True assert test_chooser.consider(Classes.TestKlsWithInherited().test_on_subclass) assert not test_chooser.consider(Classes.TestKlsWithInherited().test_on_subclass) def test_it_ignores_parent_if_specified_to_only_run_tests_in_children( self, test_chooser, Classes ): Classes.TestKlsParent.is_noy_spec = True assert not test_chooser.consider(Classes.TestKlsParent().test_one) assert not test_chooser.consider(Classes.TestKlsParent().test_two) def test_it_runs_parent_tests_in_child_if_specified_in_parent_to_only_run_tests_in_children( self, test_chooser, Classes ): Classes.TestKlsParent.is_noy_spec = True Classes.TestKlsChild.is_noy_spec = True assert test_chooser.consider(Classes.TestKlsChild().test_one) assert test_chooser.consider(Classes.TestKlsChild().test_two) def test_it_doesnt_run_grandparent_tests_if_specified_in_grandparent_to_only_run_tests_in_children( self, test_chooser, Classes, ): Classes.TestKlsParent.is_noy_spec = True Classes.TestKlsChild.is_noy_spec = True Classes.TestKlsGrandChild.is_noy_spec = True assert not test_chooser.consider(Classes.TestKlsGrandChild().test_one) assert not test_chooser.consider(Classes.TestKlsGrandChild().test_two) noseofyeti-2.4.7/tests/test_complex_tokeniser.py0000644000000000000000000002056513615410400017145 0ustar00import pytest from noseOfYeti.tokeniser.tokeniser import WITH_IT_RETURN_TYPE_ENV_NAME, Tokeniser class Examples: small_example = [ """ describe "This": before_each: self.x = 5 describe "That": before_each: self.y = 6 describe "Meh": after_each: self.y = None describe "Blah":pass describe "async": async before_each: pass async after_each: pass describe "Another": before_each: self.z = 8 """, """ class TestThis : def setUp (self ): __import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_before_each ();self .x =5 class TestThis_That (TestThis ): def setUp (self ): __import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_before_each ();self .y =6 class TestThis_That_Meh (TestThis_That ): def tearDown (self ): __import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_after_each ();self .y =None class TestThis_Blah (TestThis ):pass class TestThis_Async (TestThis ): async def setUp (self ): await __import__ ("noseOfYeti").tokeniser .TestSetup (super ()).async_before_each ();pass async def tearDown (self ): await __import__ ("noseOfYeti").tokeniser 
.TestSetup (super ()).async_after_each ();pass class TestAnother : def setUp (self ): __import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_before_each ();self .z =8 TestThis .is_noy_spec =True # type: ignore TestThis_That .is_noy_spec =True # type: ignore TestThis_That_Meh .is_noy_spec =True # type: ignore TestThis_Blah .is_noy_spec =True # type: ignore TestThis_Async .is_noy_spec =True # type: ignore TestAnother .is_noy_spec =True # type: ignore """, ] big_example = [ """ describe "This": before_each: self.x = 5 it 'should': if x: pass else: x += 9 async it 'supports async its': pass describe "That": before_each: self.y = 6 describe "Meh": after_each: self.y = None it "should set __testname__ for non alpha names ' $^": pass it 'should': if y: pass else: pass it 'should have args', arg1, arg2: blah |should| be_good() describe "Blah":pass ignore "root level $pecial-method*+": pass describe "Another": before_each: self.z = 8 it 'should': if z: if u: print "hello \ there" else: print "no" else: pass async it 'supports level 0 async its': pass """, """ class TestThis : def setUp (self ): __import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_before_each ();self .x =5 def test_should (self )$RET: if x : pass else : x +=9 async def test_supports_async_its (self )$RET: pass class TestThis_That (TestThis ): def setUp (self ): __import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_before_each ();self .y =6 class TestThis_That_Meh (TestThis_That ): def tearDown (self ): __import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_after_each ();self .y =None def test_should_set_testname_for_non_alpha_names (self )$RET: pass def test_should (self )$RET: if y : pass else : pass def test_should_have_args (self ,arg1 ,arg2 )$RET: blah |should |be_good () class TestThis_Blah (TestThis ):pass def ignore__root_level_pecial_method ()$RET: pass class TestAnother : def setUp (self ): __import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_before_each ();self .z =8 def test_should (self )$RET: if z : if u : print "hello \ there" else : print "no" else : pass async def test_supports_level_0_async_its ()$RET: pass TestThis .is_noy_spec =True # type: ignore TestThis_That .is_noy_spec =True # type: ignore TestThis_That_Meh .is_noy_spec =True # type: ignore TestThis_Blah .is_noy_spec =True # type: ignore TestAnother .is_noy_spec =True # type: ignore ignore__root_level_pecial_method .__testname__ ="root level $pecial-method*+" # type: ignore TestThis_That_Meh .test_should_set_testname_for_non_alpha_names .__testname__ ="should set __testname__ for non alpha names ' $^" # type: ignore """, ] comment_example = [ """ assertTileHues( self, tiles[0], 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, # noqa 18.75, 18.75, 18.75, 18.75, 18.75, 18.75, # noqa ) it "things": assertTileHues( self, tiles[1], 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, # noqa 18.75, 18.75, 18.75, 18.75, 18.75, 18.75, # noqa ) expected = { # something ("D2", ">" + "=" * 74) print() print("\n".join(message)) print() print("WANT >>" + "-" * 73) print() print("\n".join(want)) count = 1 while want: line = want[0] if not message: assert False, f"Ran out of lines, stopped at [{count}] '{want[0]}'" if message[0] == line or fnmatch.fnmatch(message[0], line): count += 1 want.pop(0) message.pop(0) if want: assert False, f"Didn't match all the lines, stopped at [{count}] '{want[0]}'" class Test_RegisteringCodec: @pytest.fixture() def directory(self, tmp_path_factory): directory = tmp_path_factory.mktemp("registering_codec") with open(directory / 
"test_code.py", "w") as fle: fle.write( dedent( """ # coding: spec describe "blah": it "should totally work": 1 + 1 |should| equal_to(2) if hasattr(TestBlah, 'test_should_totally_work'): print("test_should_totally_work") """ ).strip() ) with open(directory / "registerer.py", "w") as fle: fle.write( dedent( """ import codecs try: codecs.lookup("spec") except LookupError: pass else: assert False, "A spec was already registered!" from noseOfYeti.tokeniser import register register(transform=True) """ ).strip() ) return directory def test_not_registering_codec_leads_to_error(self, directory): output = assert_run_subprocess( [sys.executable, "-c", "import test_code"], expected_output=None, cwd=directory, env={"NOSE_OF_YETI_BLACK_COMPAT": "false"}, status=1, ) expected_output = "SyntaxError: [Uu]nknown encoding: spec" pytest.helpers.assert_regex_lines(output.split("\n")[-1], expected_output) def test_registering_codec_doesnt_lead_to_error(self, directory): assert_run_subprocess( [sys.executable, "-c", "import registerer; import test_code"], expected_output="test_should_totally_work", cwd=directory, env={"NOSE_OF_YETI_BLACK_COMPAT": "false"}, status=0, ) def test_can_correctly_translate_file(self): with open(os.path.join(example_dir, "test.py")) as fle: original = fle.read() with open(os.path.join(example_dir, "converted.test.py")) as fle: want = fle.read() after = TokeniserCodec(Tokeniser()).translate(original) pytest.helpers.assert_lines(after, want) def test_it_shows_errors(self): get_codec = mock.Mock(name="get_codec") readline = mock.Mock(name="readline") tokeniser = mock.Mock(name="tokeniser") tokeniser.translate.side_effect = Exception("NOPE") with mock.patch.object(TokeniserCodec, "get_codec", get_codec): codec = TokeniserCodec(tokeniser) got = codec.dealwith(readline) tokeniser.translate.assert_called_once_with(readline, []) expect = dedent( r''' msg = r""" Traceback (most recent call last): File "*noseOfYeti*tokeniser*spec_codec.py", line *, in dealwith Exception: NOPE """ raise Exception(f"--- internal spec codec error --- \n{msg}") ''' ) assert_glob_lines(got, expect) with pytest.raises(Exception) as excinfo: exec(got) assert excinfo.exconly().startswith("Exception: --- internal spec codec error ---") noseofyeti-2.4.7/tests/test_translation_tokeniser.py0000644000000000000000000004425513615410400020036 0ustar00import sys from textwrap import dedent import pytest def assert_example(original, desired): tab_original = original.replace(" ", "\t") tab_desired = desired.replace(" ", "\t") options = {"with_describe_attrs": False} for original, desired in ((original, desired), (tab_original, tab_desired)): pytest.helpers.assert_conversion(original, desired, tokeniser=options) # And with newlines original = f"import os\n{dedent(original)}" desired = f"import os\n{dedent(desired)}" pytest.helpers.assert_conversion(original, desired, tokeniser=options) class Test_Tokenisor_translation: def test_translates_a_describe(self): original = 'describe "Something testable"' desired = "class TestSomethingTestable :pass" assert_example(original, desired) def test_translates_an_it(self): original = 'it "should do this thing":' desired = "def test_should_do_this_thing ()$RET:" assert_example(original, desired) ## and with async original = 'async it "should do this thing":' desired = "async def test_should_do_this_thing ()$RET:" assert_example(original, desired) def test_translates_an_it_with_return_type(self): original = 'it "should do this thing" -> None:' desired = "def test_should_do_this_thing ()->None :" 
assert_example(original, desired) ## and with async original = 'async it "should do this thing" -> None:' desired = "async def test_should_do_this_thing ()->None :" assert_example(original, desired) def test_translates_an_it_with_complex_return_type(self): original = 'it "should do this thing" -> tp.Generic[Thing, list[str]]:' desired = "def test_should_do_this_thing ()->tp .Generic [Thing ,list [str ]]:" assert_example(original, desired) ## and with async original = 'async it "should do this thing" -> tp.Generic[Thing, list[str]]:' desired = "async def test_should_do_this_thing ()->tp .Generic [Thing ,list [str ]]:" assert_example(original, desired) def test_translates_an_it_with_return_type_and_args(self): original = 'it "should do this thing", one: str -> None:' desired = "def test_should_do_this_thing (one :str )->None :" assert_example(original, desired) ## and with async original = 'async it "should do this thing", two: int -> None:' desired = "async def test_should_do_this_thing (two :int )->None :" assert_example(original, desired) def test_translates_an_it_with_complex_return_type_and_args(self): original = ( 'it "should do this thing" blah: dict[str, list] -> tp.Generic[Thing, list[str]]:' ) desired = "def test_should_do_this_thing (blah :dict [str ,list ])->tp .Generic [Thing ,list [str ]]:" assert_example(original, desired) ## and with async original = 'async it "should do this thing" item: Item -> tp.Generic[Thing, list[str]]:' desired = ( "async def test_should_do_this_thing (item :Item )->tp .Generic [Thing ,list [str ]]:" ) assert_example(original, desired) def test_adds_arguments_to_its_if_declared_on_same_line(self): original = 'it "should do this thing", blah, meh:' desired = "def test_should_do_this_thing (blah ,meh )$RET:" assert_example(original, desired) def test_adds_arguments_with_default_string_to_its_if_declared_on_same_line(self): original = 'it "should do this thing", blah, meh="hello":' desired = 'def test_should_do_this_thing (blah ,meh ="hello")$RET:' assert_example(original, desired) def test_adds_type_annotations(self): original = 'it "should do this thing", blah:str, meh: Thing | Other:' desired = "def test_should_do_this_thing (blah :str ,meh :Thing |Other )$RET:" assert_example(original, desired) def test_allows_comments_after_it(self): original = 'it "should do this thing", blah:str, meh: Thing | Other: # a comment' desired = "def test_should_do_this_thing (blah :str ,meh :Thing |Other )$RET:# a comment" assert_example(original, desired) original = 'it "should do this thing": # a comment' desired = "def test_should_do_this_thing ()$RET:# a comment" assert_example(original, desired) def test_adds_arguments_to_its_if_declared_on_same_line_and_work_with_skipTest(self): original = 'it "should do this thing", blah, meh: pass' desired = "def test_should_do_this_thing (blah ,meh )$RET:pass" assert_example(original, desired) def test_complains_about_it_that_isnt_a_block(self): with pytest.raises(SyntaxError, match="Found a missing ':' on line 1, column 22"): assert_example('it "should be skipped"\n', "") with pytest.raises(SyntaxError, match="Found a missing ':' on line 1, column 22"): assert_example('it "should be skipped" # blah\n', "") # Same tests, but with newlines in front with pytest.raises(SyntaxError, match="Found a missing ':' on line 3, column 22"): assert_example('import os\n\nit "should be skipped"\n', "") original = 'import os\n\nit "should not be skipped":\n' desired = "import os\n\ndef test_should_not_be_skipped ()$RET:" assert_example(original, 
desired) ## And with async with pytest.raises(SyntaxError, match="Found a missing ':' on line 1, column 28"): assert_example('async it "should be skipped"\n', "") original = 'import os\n\nasync it "should not be skipped":\n' desired = "import os\n\nasync def test_should_not_be_skipped ()$RET:" assert_example(original, desired) # Same tests, but with newlines in front with pytest.raises(SyntaxError, match="Found a missing ':' on line 3, column 28"): assert_example('import os\n\nasync it "should be skipped"\n', "") original = 'import os\n\nasync it "should not be skipped":\n' desired = "import os\n\nasync def test_should_not_be_skipped ()$RET:" assert_example(original, desired) def test_turns_before_each_into_setup(self): assert_example("before_each:", "def setUp (self ):") # And with async assert_example("async before_each:", "async def setUp (self ):") def test_indentation_should_work_regardless_of_crazy_groups(self): original = """ describe 'a': it 'asdf': l = [ True , False , 1 , 2 ] t = (1 , 2 , 3 , 4 , 5, ) d = {'asdf' : True} t2 = (True , False ) it 'asdf2': pass""" desired = """ class TestA : def test_asdf (self )$RET: l =[True ,False ,1 ,2 ] t =(1 ,2 ,3 ,4 ,5 , ) d ={'asdf':True } t2 =(True ,False ) def test_asdf2 (self )$RET: pass """ assert_example(original, desired) def test_complains_if_describe_after_hanging_it(self): original = """ describe 'thing': it 'should be skipped' describe 'that': pass """ with pytest.raises(SyntaxError, match="Found a missing ':' on line 2, column 26"): assert_example(original, "") def test_indentation_should_work_for_inline_python_code(self): original = """ describe 'this': describe 'that': pass class SomeMockObject: def indented_method() """ desired = """ class TestThis :pass class TestThis_That (TestThis ): pass class SomeMockObject : def indented_method () """ assert_example(original, desired) def test_gives_setups_super_call_when_in_describes_that_know_about_await_if_async(self): original = """ describe "Thing": async before_each: self.x = 5 """ desired = """ class TestThing : async def setUp (self ): await __import__ ("noseOfYeti").tokeniser .TestSetup (super ()).async_before_each ();self .x =5 """ assert_example(original, desired) def test_gives_setups_super_call_when_in_describes(self): original = """ describe "Thing": before_each: self.x = 5 """ desired = """ class TestThing : def setUp (self ): __import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_before_each ();self .x =5 """ assert_example(original, desired) def test_turns_after_each_into_teardown(self): assert_example("after_each:", "def tearDown (self ):") def test_gives_teardowns_super_call_that_awaits_when_in_describes_and_async(self): original = """ describe "Thing": async after_each: self.x = 5 """ desired = """ class TestThing : async def tearDown (self ): await __import__ ("noseOfYeti").tokeniser .TestSetup (super ()).async_after_each ();self .x =5 """ assert_example(original, desired) def test_gives_teardowns_super_call_when_in_describes(self): original = """ describe "Thing": after_each: self.x = 5 """ desired = """ class TestThing : def tearDown (self ): __import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_after_each ();self .x =5 """ assert_example(original, desired) def test_no_transform_inside_expression(self): assert_example("variable = before_each", "variable =before_each ") assert_example("variable = after_each", "variable =after_each ") assert_example("variable = describe", "variable =describe ") assert_example("variable = ignore", "variable =ignore ") 
assert_example("variable = it", "variable =it ") def test_sets__testname__on_non_alphanumeric_test_names(self): original = """ it "(root level) should work {well}": 3 |should| be(4) describe "SomeTests": it "doesn't get phased by $special characters": pass describe "NestedDescribe": it "asdf $% asdf": 1 |should| be(2) """ desired = """ def test_root_level_should_work_well ()$RET: 3 |should |be (4 ) class TestSomeTests : def test_doesnt_get_phased_by_special_characters (self )$RET: pass class TestSomeTests_NestedDescribe (TestSomeTests ): def test_asdf_asdf (self )$RET: 1 |should |be (2 ) test_root_level_should_work_well .__testname__ ="(root level) should work {well}" # type: ignore TestSomeTests .test_doesnt_get_phased_by_special_characters .__testname__ ="doesn't get phased by $special characters" # type: ignore TestSomeTests_NestedDescribe .test_asdf_asdf .__testname__ ="asdf $% asdf" # type: ignore """ assert_example(original, desired) def test_it_maintains_line_numbers_when_pass_on_another_line(self): original = """ it "is a function with a pass": pass it "is a function with a pass on another line": pass it "is a function with a pass on another line further below": #comment or something pass describe "block with a pass": pass describe "block with comment and pass": # comment or something pass describe "Nesting and passes": pass # comment describe "Nested": pass describe "More Nesting": # comment pass """ desired = """ def test_is_a_function_with_a_pass ()$RET:pass def test_is_a_function_with_a_pass_on_another_line ()$RET: pass def test_is_a_function_with_a_pass_on_another_line_further_below ()$RET: #comment or something pass class TestBlockWithAPass : pass class TestBlockWithCommentAndPass : # comment or something pass class TestNestingAndPasses :pass # comment class TestNestingAndPasses_Nested (TestNestingAndPasses ): pass class TestNestingAndPasses_Nested_MoreNesting (TestNestingAndPasses_Nested ): # comment pass """ assert_example(original, desired) def test_it_allows_default_arguments_for_its(self): original = """ it "is a test with default arguments", thing=2, other=[3]: pass describe "group": it "has self and default args", blah=None, you=(3, 4, 5, 5): # Test space is respected 1 |should| be(2) """ desired = """ def test_is_a_test_with_default_arguments (thing =2 ,other =[3 ])$RET: pass class TestGroup : def test_has_self_and_default_args (self ,blah =None ,you =(3 ,4 , 5 ,5 ))$RET: # Test space is respected 1 |should |be (2 ) """ assert_example(original, desired) def test_can_properly_dedent_after_block_of_just_containers(self): original = """ it "should ensure askers are None or boolean or string": for val in (None, False, 'asdf', u'asdf', lambda: 1): (lambda : Step(askBeforeAction = val)) |should_not| throw(Problem) (lambda : Step(askDesiredResult = val)) |should_not| throw(Problem) (lambda : Step(blockBeforeGet = val)) |should_not| throw(Problem) for val in (1, True): (lambda : Step(askBeforeAction = val)) |should| throw(Problem) (lambda : Step(askDesiredResult = val)) |should| throw(Problem) (lambda : Step(blockBeforeGet = val)) |should| throw(Problem) 3 |should| be(3) """ desired = """ def test_should_ensure_askers_are_None_or_boolean_or_string ()$RET: for val in (None ,False ,'asdf',u'asdf',lambda :1 ): (lambda :Step (askBeforeAction =val ))|should_not |throw (Problem ) (lambda :Step (askDesiredResult =val ))|should_not |throw (Problem ) (lambda :Step (blockBeforeGet =val ))|should_not |throw (Problem ) for val in (1 ,True ): (lambda :Step (askBeforeAction =val ))|should 
|throw (Problem ) (lambda :Step (askDesiredResult =val ))|should |throw (Problem ) (lambda :Step (blockBeforeGet =val ))|should |throw (Problem ) 3 |should |be (3 ) """ assert_example(original, desired) def test_it_doesnt_add_semicolon_after_noy_setup_if_not_necessary(self): original = """ describe "block with necessary semicolon": before_each: two = 1 + 1 describe "block with unecessary semiclon": before_each: #comment pass after_each: pass """ desired = """ class TestBlockWithNecessarySemicolon : def setUp (self ): __import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_before_each ();two =1 +1 class TestBlockWithUnecessarySemiclon : def setUp (self ): __import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_before_each ()#comment pass def tearDown (self ): __import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_after_each () pass """ assert_example(original, desired) def test_it_keeps_comments_placed_after_setup_and_teardown_methods(self): original = """ describe "Kls": before_each: # Comment one pass after_each: # Comment two pass describe "Kls2": before_each: # Comment three two = 1 + 1 after_each: # Comment four #comment pass """ desired = """ class TestKls : def setUp (self ):# Comment one __import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_before_each () pass def tearDown (self ):# Comment two __import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_after_each () pass class TestKls2 : def setUp (self ):# Comment three __import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_before_each ();two =1 +1 def tearDown (self ):# Comment four __import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_after_each ()#comment pass """ assert_example(original, desired) def test_it_doesnt_mess_up_dedent_from_whitespace_in_fstring(self): original = """ def one( item: object, want: three, /, _register: four ) -> dict | None: f"{item} " def two(value: object, /) -> dict | None: return None """ desired = """ def one ( item :object ,want :three ,/,_register :four )->dict |None : f"{item } " def two (value :object ,/)->dict |None : return None """ if sys.version_info < (3, 12): desired = desired.replace("{item }", "{item}") assert_example(original, desired) noseofyeti-2.4.7/tests/test_using_pytest.py0000644000000000000000000000476513615410400016154 0ustar00import shutil import subprocess import sys from pathlib import Path from textwrap import dedent here = Path(__file__).parent class TestPyTest: def test_it_collects_tests_correctly(self, tmp_path_factory, monkeypatch): directory = tmp_path_factory.mktemp("files") monkeypatch.setenv("NOSE_OF_YETI_BLACK_COMPAT", "false") shutil.copytree(here / "for_pytest_plugin", directory / "tests") want = set( dedent( """ tests/test_one.py::test_one tests/test_one.py::TestTwo::test_three tests/test_one.py::TestTwo::test_four tests/test_one.py::TestTwo_Five::test_six tests/test_one.py::TestTwo_Seven_Ten::test_eight tests/test_one.py::TestTwo_Seven_Ten::test_nine tests/test_one.py::TestTwo_Seven_Eleven::test_eight tests/test_one.py::TestTwo_Seven_Eleven::test_nine tests/test_one.py::TestTwelve::test_thirteen tests/test_two.py::TestOne::test_two tests/test_two.py::TestOne::test_three tests/test_two.py::TestOne::test_four tests/test_two.py::TestOne::test_five tests/test_two.py::TestOne_Six::test_seven tests/test_two.py::TestOne_Eight::test_nine tests/test_two.py::TestOne_Ten::test_eleven """ ) .strip() .split("\n") ) with subprocess.Popen( [ sys.executable, "-m", "pytest", str(directory / "tests" / "test_one.py"), 
str(directory / "tests" / "test_two.py"), "-v", "-o", "console_output_style=short", ], cwd=directory, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, ) as process: try: process.wait(timeout=5) except: process.kill() raise out = process.stdout.read().decode() got = set() for line in out.split("\n"): if "PASSED" in line: got.add(line.split(" ", 1)[0]) print(out) print("WANT:") for line in sorted(want): print(" ", line) print() print("GOT:") for line in sorted(got): print(" ", line) assert "16 passed" in out assert got == want noseofyeti-2.4.7/tests/for_formatting_and_pylama/failing_pylama_spec.py0000644000000000000000000000015213615410400023535 0ustar00# coding: spec import sys describe "TestNup": it "has pylama errors": a = 1 stuff() noseofyeti-2.4.7/tests/for_formatting_and_pylama/formatted_normal.py0000644000000000000000000000025613615410400023111 0ustar00import typing as tp def stuff(): pass def hello(one: int, two: str, *args: str) -> object: ... class Hi(tp.Protocol): async def blah(self): stuff() noseofyeti-2.4.7/tests/for_formatting_and_pylama/formatted_spec.py0000644000000000000000000000052213615410400022547 0ustar00# coding: spec def awesome(a: str) -> bool: return True def hi(twos: int, word: str, b: bool): pass describe "TestThing": async it "totes works yo", one: int, three: str: assert awesome(2) # type: ignore it "is great": assert False, "or is it ?" def with_other_things(): hi(22222, "asdfasdf", True) noseofyeti-2.4.7/tests/for_formatting_and_pylama/unformatted_normal.py0000644000000000000000000000027313615410400023453 0ustar00import typing as tp def stuff( ) : pass def hello(one: int,two:str, *args: str)->object : ... class Hi(tp.Protocol) : async def blah(self ): stuff () noseofyeti-2.4.7/tests/for_formatting_and_pylama/unformatted_spec.py0000644000000000000000000000054413615410400023116 0ustar00# coding: spec def awesome(a: str)-> bool: return True def hi( twos: int, word: str, b: bool) : pass describe "TestThing" : async it "totes works yo", one: int, three: str: assert awesome(2) # type: ignore it "is great": assert False, "or is it ?" 
def with_other_things( ): hi ( 22222, "asdfasdf",True) noseofyeti-2.4.7/tests/for_pytest_plugin/__init__.py0000644000000000000000000000000013615410400017645 0ustar00noseofyeti-2.4.7/tests/for_pytest_plugin/conftest.py0000644000000000000000000000023013615410400017740 0ustar00import os import pytest @pytest.hookimpl() def pytest_ignore_collect(collection_path, path, config): return "INNER_PYTEST_RUN" not in os.environ noseofyeti-2.4.7/tests/for_pytest_plugin/test_one.py0000644000000000000000000000176113615410400017745 0ustar00# coding: spec import unittest import pytest @pytest.fixture() def hi(): return "hi" it "one", hi: assert hi == "hi" describe "two": it "three", hi: assert hi == "hi" it "four", hi: assert hi == "hi" describe "five": it "six", hi: assert hi == "hi" describe "seven": __only_run_tests_in_children__ = True it "eight", hi, expected: assert hi == expected it "nine", hi, expected: assert hi == expected describe "ten": @pytest.fixture() def hi(self): return "hello" @pytest.fixture() def expected(self): return "hello" describe "eleven": @pytest.fixture() def hi(self): return "yo" @pytest.fixture() def expected(self): return "yo" describe unittest.TestCase, "twelve": it "thirteen": assert True noseofyeti-2.4.7/tests/for_pytest_plugin/test_two.py0000644000000000000000000000222413615410400017770 0ustar00# coding: spec import pytest @pytest.fixture() def register(): return 2 describe "one": it "two", register: assert register == 2 it "three", register: assert register == 2 it "four", register: assert register == 2 it "five", register: assert register == 2 describe "six": @pytest.fixture() def collector(self): return 3 async it "seven", register, collector: assert register == 2 assert collector == 3 describe "eight": @pytest.fixture() def collector(self): return 4 async it "nine", register, collector: assert register == 2 assert collector == 4 describe "ten": @pytest.fixture() def collector(self): return 5 @pytest.fixture() def superman(self, collector): return collector * 2 @pytest.fixture() def register(self): return 20 it "eleven", superman, register, collector: assert collector == 5 assert superman == 10 assert register == 20 noseofyeti-2.4.7/tools/bootstrap_venvstarter.py0000644000000000000000000000155313615410400017026 0ustar00import os import runpy import sys from pathlib import Path deps_dir = Path(__file__).parent / "deps" if not deps_dir.exists(): deps_dir.mkdir() if not (deps_dir / "venvstarter.py").exists(): if "PIP_REQUIRE_VIRTUALENV" in os.environ: del os.environ["PIP_REQUIRE_VIRTUALENV"] os.system(f"{sys.executable} -m pip install venvstarter -t {deps_dir}") venvstarter_module = runpy.run_path(str(deps_dir / "venvstarter.py")) wanted_version = "0.12.0" upgrade = False VERSION = venvstarter_module.get("VERSION") if VERSION is None: upgrade = True else: Version = venvstarter_module["Version"] if Version(VERSION) != Version(wanted_version): upgrade = True if upgrade: os.system(f"{sys.executable} -m pip install -U 'venvstarter=={wanted_version}' -t {deps_dir}") manager = runpy.run_path(str(deps_dir / "venvstarter.py"))["manager"] noseofyeti-2.4.7/tools/devtools.py0000644000000000000000000000551713615410400014211 0ustar00import inspect import os import platform import shlex import sys from collections.abc import Callable from pathlib import Path here = Path(__file__).parent if platform.system() == "Windows": import mslex # type:ignore[import] shlex = mslex # noqa class Command: def __call__(self, bin_dir, args) -> None: ... 
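# (Command.__call__ above acts as the reference signature: App's constructor
# below compares inspect.signature of every @command method against it, so
# each command must accept exactly (bin_dir, args).)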
def command(func) -> Callable: func.__is_command__ = True return func def run(*args) -> None: cmd = " ".join(shlex.quote(str(part)) for part in args) print(f"Running '{cmd}'") ret = os.system(cmd) if ret != 0: sys.exit(1) class App: def __init__(self): self.commands = {} compare = inspect.signature(type("C", (Command,), {})().__call__) for name in dir(self): val = getattr(self, name) if getattr(val, "__is_command__", False): assert ( inspect.signature(val) == compare ), f"Expected '{name}' to have correct signature, have {inspect.signature(val)} instead of {compare}" self.commands[name] = val def __call__(self, args) -> None: bin_dir = Path(sys.executable).parent if args and args[0] in self.commands: os.chdir(here.parent) self.commands[args[0]](bin_dir, args[1:]) return sys.exit(f"Unknown command:\nAvailable: {sorted(self.commands)}\nWanted: {args}") @command def format(self, bin_dir, args) -> None: if not args: args = [".", *args] run(bin_dir / "black", *args) run(bin_dir / "isort", *args) @command def lint(self, bin_dir, args) -> None: run(bin_dir / "pylama", *args) @command def tests(self, bin_dir, args) -> None: if "-q" not in args: args = ["-q", *args] env = os.environ env["NOSE_OF_YETI_BLACK_COMPAT"] = "false" files = [] if "TESTS_CHDIR" in env: ags = [] test_dir = Path(env["TESTS_CHDIR"]).absolute() for a in args: test_name = "" if "::" in a: filename, test_name = a.split("::", 1) else: filename = a try: p = Path(filename).absolute() except: ags.append(a) else: if p.exists(): rel = p.relative_to(test_dir) if test_name: files.append(f"{rel}::{test_name}") else: files.append(str(rel)) else: ags.append(a) args = ags os.chdir(test_dir) run(bin_dir / "pytest", *files, *args) app = App() if __name__ == "__main__": app(sys.argv[1:]) noseofyeti-2.4.7/tools/requirements.dev.txt0000644000000000000000000000055713615410400016040 0ustar00pylama-dmypy==0.4 pylama==8.4.1 neovim==0.3.1 tox==4.8.0 isort==5.11.5 python-lsp-server==1.7.4; python_version < '3.10' python-lsp-server==1.9.0; python_version >= '3.10' pylsp-mypy==0.6.7 pyls-isort==0.2.2 python-lsp-black==1.3.0 mslex==1.1.0; sys.platform == 'win32' jedi==0.19.1; python_version >= '3.10' hatch==1.7.0 setuptools>=69.0.3; python_version >= '3.12' noseofyeti-2.4.7/tools/venv0000755000000000000000000000267613615410400012707 0ustar00#!/usr/bin/env python3 from pathlib import Path import typing as tp import subprocess import shutil import runpy import glob import sys import os here = Path(__file__).parent manager = runpy.run_path(str(Path(__file__).parent / "bootstrap_venvstarter.py"))["manager"] def run(venv_location: Path, args: tp.List[str]) -> tp.Union[None, str, tp.List[str]]: os.environ["NOSE_OF_YETI_BLACK_COMPAT"] = "true" for dr in Path(venv_location / "lib").iterdir(): if dr.name.startswith("python"): pth_file = dr / "site-packages" / "noy_black.pth" if not pth_file.exists(): pth_file.symlink_to(here / ".." 
/ "noseOfYeti" / "black" / "noy_black.pth") break devtools_location = Path(__file__).parent / "devtools.py" return ["python", str(devtools_location)] manager = manager(run).named(".python") manager.add_no_binary("black") manager.add_env(NOSE_OF_YETI_BLACK_COMPAT="true") manager.add_local_dep( "{here}", "..", version_file=( "noseOfYeti", "version.py", ), name="noseOfYeti[black,tests]=={version}", ) if "TOX_PYTHON" in os.environ: folder = Path(os.environ["TOX_PYTHON"]).parent.parent manager.place_venv_in(folder.parent) manager.named(folder.name) else: manager.add_no_binary("black") manager.add_requirements_file("{here}", "requirements.dev.txt") manager.add_requirements_file("{here}", "requirements.docs.txt") manager.run() noseofyeti-2.4.7/.gitignore0000644000000000000000000000012113615410400012612 0ustar00*.pyc .dmypy.json __pycache__/ *.egg-info/ .tox pip-wheel-metadata/ build/ dist/ noseofyeti-2.4.7/LICENSE0000644000000000000000000000207113615410400011635 0ustar00The MIT License (MIT) Copyright (c) 2023 Stephen Moore Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. noseofyeti-2.4.7/README.rst0000644000000000000000000000336013615410400012321 0ustar00Nose of Yeti ============ This is a project creates a custom Python ``codec`` that lets the developer write their tests using an `RSpec `_ inspired DSL (i.e. ``describe`` and ``it`` blocks). It uses the fact that python allows the registration of a `codec `_ that can be used to read in source code and modify it at import time. NoseOfYeti uses this technique to find files with a particular coding declaration on the first line of a file to turn the rspec inspired DSL into ordinary python that is then executed as if it were written like that in the first place. The original idea comes from `@fmeyer `_ who wrote this `blog post `_ and a simple `proof of concept `_ before it was picked up and worked on by `@hltbra `_ in `this repo `_. `@delfick `_ discovered this work in 2010 and over the following decade plus has improved on the concept, giving it more features and integration with python tooling. The documentation can be found at http://noseofyeti.readthedocs.io and the code over at https://github.com/delfick/nose-of-yeti. Updating nose-of-yeti version ----------------------------- It is recommended any .pyc files are removed when NoseOfYeti is upgraded. The Python interpreter skips the translation process if it sees a .pyc file (unless the .py file has changed since the .pyc file was created). 
This means that any changes in the translation process won't apply until either the .pyc files are removed or all the .py files have been changed. noseofyeti-2.4.7/pyproject.toml0000644000000000000000000000426413615410400013552 0ustar00[build-system] requires = ["hatchling"] build-backend = "hatchling.build" [project] name = "noseOfYeti" dynamic = ["version"] description = "A custom python codec that provides an RSpec style dsl for python" readme = "README.rst" license = "MIT" authors = [ { name = "Stephen Moore", email = "stephen@delfick.com" }, ] keywords = [ "bdd", "rspec", "spec", ] classifiers = [ "Intended Audience :: Developers", "Programming Language :: Python", "Topic :: Software Development :: Documentation", "Topic :: Software Development :: Testing", ] [project.optional-dependencies] black = [ "black==22.10.0", "importlib-resources==5.10.0", ] tests = [ "alt-pytest-asyncio==0.6.0", "asynctest==0.13.0", "pytest-helpers-namespace==2021.4.29", "pytest>=7.0.1", ] [project.entry-points."nose.plugins"] noseOfYeti = "noseOfYeti.plugins.nosetests:Plugin" [project.entry-points."pylama.linter"] pylama_noy = "noseOfYeti.plugins.pylama:Linter" [project.entry-points.pyls] pyls_noy = "noseOfYeti.plugins.pyls" [project.entry-points.pytest11] nose_of_yeti = "noseOfYeti.plugins.pytest" [project.urls] Homepage = "https://github.com/delfick/nose-of-yeti" [tool.hatch.version] path = "noseOfYeti/version.py" [tool.hatch.build.targets.sdist] include = [ "/noseOfYeti", "/README.rst", "/LICENSE", "/test.sh", "/run.sh", "/pytest.ini", "/example/*", "/tests/**", "/tools/bootstrap_venvstarter.py", "/tools/requirements.dev.txt", "/tools/devtools.py", "/tools/venv" ] [tool.hatch.build.targets.wheel] include = [ "/noseOfYeti", ] [tool.hatch.build.targets.wheel.force-include] "noseOfYeti/black/noy_black.pth" = "noy_black.pth" [tool.black] line-length = 100 include = '(\.py|^venv)$' exclude = ''' /( \.git | \.tox | dist | tools/\.python | docs/_build | example | tests/for_formatting_and_pylama | build )/ ''' [tool.isort] profile = "black" skip_glob = [ ".git/*", ".tox/*", "dist/*", "tools/.python/*", "tools/deps/*", "docs/_build/*", "build/*", "example/*", "tests/for_formatting_and_pylama", ] [tool.mypy] plugins = 'noseOfYeti.plugins.mypy' ignore_missing_imports = true noseofyeti-2.4.7/PKG-INFO0000644000000000000000000000522113615410400011725 0ustar00Metadata-Version: 2.1 Name: noseOfYeti Version: 2.4.7 Summary: A custom python codec that provides an RSpec style dsl for python Project-URL: Homepage, https://github.com/delfick/nose-of-yeti Author-email: Stephen Moore License-Expression: MIT License-File: LICENSE Keywords: bdd,rspec,spec Classifier: Intended Audience :: Developers Classifier: Programming Language :: Python Classifier: Topic :: Software Development :: Documentation Classifier: Topic :: Software Development :: Testing Provides-Extra: black Requires-Dist: black==22.10.0; extra == 'black' Requires-Dist: importlib-resources==5.10.0; extra == 'black' Provides-Extra: tests Requires-Dist: alt-pytest-asyncio==0.6.0; extra == 'tests' Requires-Dist: asynctest==0.13.0; extra == 'tests' Requires-Dist: pytest-helpers-namespace==2021.4.29; extra == 'tests' Requires-Dist: pytest>=7.0.1; extra == 'tests' Description-Content-Type: text/x-rst Nose of Yeti ============ This is a project that creates a custom Python ``codec`` that lets the developer write their tests using an `RSpec `_ inspired DSL (i.e. ``describe`` and ``it`` blocks). 
It uses the fact that python allows the registration of a `codec `_ that can be used to read in source code and modify it at import time. NoseOfYeti uses this technique to find files with a particular coding declaration on the first line of a file to turn the rspec inspired DSL into ordinary python that is then executed as if it were written like that in the first place. The original idea comes from `@fmeyer `_ who wrote this `blog post `_ and a simple `proof of concept `_ before it was picked up and worked on by `@hltbra `_ in `this repo `_. `@delfick `_ discovered this work in 2010 and over the following decade plus has improved on the concept, giving it more features and integration with python tooling. The documentation can be found at http://noseofyeti.readthedocs.io and the code over at https://github.com/delfick/nose-of-yeti. Updating nose-of-yeti version ----------------------------- It is recommended that any .pyc files are removed when NoseOfYeti is upgraded. The Python interpreter skips the translation process if it sees a .pyc file (unless the .py file has changed since the .pyc file was created). This means that any changes in the translation process won't apply until either the .pyc files are removed or all the .py files have been changed.
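As a minimal sketch of the workflow described above (using only the public pieces exercised by this archive's tests -- the register helper and the coding declaration -- with hypothetical file names):

# bootstrap.py -- hypothetical; must run before any spec file is imported,
# mirroring the registerer.py fixture used in the codec registration tests
from noseOfYeti.tokeniser import register

register(transform=True)

A spec file then carries the codec declaration on its first line, as the example test file does:

# coding: spec

describe "Addition":
    it "adds two numbers":
        assert 1 + 1 == 2

Once the codec is registered, importing such a file executes the translated classes and functions, in the style of example/converted.test.py.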