jedi-0.7.0/0000775000175000017500000000000012204171764012637 5ustar daviddavid00000000000000jedi-0.7.0/pytest.ini0000664000175000017500000000053512204171717014671 0ustar daviddavid00000000000000[pytest] addopts = --doctest-modules # Ignore broken files in blackbox test directories norecursedirs = .* docs completion refactor absolute_import namespace_package # Activate `clean_jedi_cache` fixture for all tests. This should be # fine as long as we are using `clean_jedi_cache` as a session scoped # fixture. usefixtures = clean_jedi_cache jedi-0.7.0/sith.py0000775000175000017500000001623012204171717014163 0ustar daviddavid00000000000000#!/usr/bin/env python """ Sith attacks (and helps debugging) Jedi. Randomly search Python files and run Jedi on it. Exception and used arguments are recorded to ``./record.json`` (specified by --record):: ./sith.py random /path/to/sourcecode Redo recorded exception:: ./sith.py redo Show recorded exception:: ./sith.py show Run a specific operation ./sith.py run Where operation is one of completions, goto_assignments, goto_definitions, usages, or call_signatures. Note: Line numbers start at 1; columns start at 0 (this is consistent with many text editors, including Emacs). Usage: sith.py [--pdb|--ipdb|--pudb] [-d] [-n=] [-f] [--record=] random [] sith.py [--pdb|--ipdb|--pudb] [-d] [-f] [--record=] redo sith.py [--pdb|--ipdb|--pudb] [-d] [-f] run sith.py show [--record=] sith.py -h | --help Options: -h --help Show this screen. --record= Exceptions are recorded in here [default: record.json]. -f, --fs-cache By default, file system cache is off for reproducibility. -n, --maxtries= Maximum of random tries [default: 100] -d, --debug Jedi print debugging when an error is raised. --pdb Launch pdb when error is raised. --ipdb Launch ipdb when error is raised. --pudb Launch pudb when error is raised. 
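Example (an illustrative invocation only -- any directory of Python sources can
be given as the search path; the bundled ``tox -e sith`` environment simply
points it at the ``jedi`` package itself)::

    ./sith.py --pdb random jedi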
""" from __future__ import print_function, division, unicode_literals from docopt import docopt import json import os import random import sys import traceback import jedi class SourceFinder(object): _files = None @staticmethod def fetch(file_path): if not os.path.isdir(file_path): yield file_path return for root, dirnames, filenames in os.walk(file_path): for name in filenames: if name.endswith('.py'): yield os.path.join(root, name) @classmethod def files(cls, file_path): if cls._files is None: cls._files = list(cls.fetch(file_path)) return cls._files class TestCase(object): def __init__(self, operation, path, line, column, traceback=None): if operation not in self.operations: raise ValueError("%s is not a valid operation" % operation) # Set other attributes self.operation = operation self.path = path self.line = line self.column = column self.traceback = traceback @classmethod def from_cache(cls, record): with open(record) as f: args = json.load(f) return cls(*args) operations = [ 'completions', 'goto_assignments', 'goto_definitions', 'usages', 'call_signatures'] @classmethod def generate(cls, file_path): operation = random.choice(cls.operations) path = random.choice(SourceFinder.files(file_path)) with open(path) as f: source = f.read() lines = source.splitlines() if not lines: lines = [''] line = random.randint(1, len(lines)) column = random.randint(0, len(lines[line - 1])) return cls(operation, path, line, column) def run(self, debugger, record=None, print_result=False): try: with open(self.path) as f: self.script = jedi.Script(f.read(), self.line, self.column, self.path) self.completions = getattr(self.script, self.operation)() if print_result: self.show_location(self.line, self.column) self.show_operation() except jedi.NotFoundError: pass except Exception: self.traceback = traceback.format_exc() if record is not None: call_args = (self.operation, self.path, self.line, self.column, self.traceback) with open(record, 'w') as f: json.dump(call_args, f) self.show_errors() if debugger: einfo = sys.exc_info() pdb = __import__(debugger) if debugger == 'pudb': pdb.post_mortem(einfo[2], einfo[0], einfo[1]) else: pdb.post_mortem(einfo[2]) exit(1) def show_location(self, lineno, column, show=3): # Three lines ought to be enough lower = lineno - show if lineno - show > 0 else 0 for i, line in enumerate(self.script.source.split('\n')[lower:lineno]): print(lower + i + 1, line) print(' ' * (column + len(str(lineno))), '^') def show_operation(self): print("%s:\n" % self.operation.capitalize()) getattr(self, 'show_' + self.operation)() def show_completions(self): for completion in self.completions: print(completion.name) # TODO: Support showing the location in other files # TODO: Move this printing to the completion objects themselves def show_usages(self): for completion in self.completions: print(completion.description) if os.path.abspath(completion.module_path) == os.path.abspath(self.path): self.show_location(completion.line, completion.column) def show_call_signatures(self): for completion in self.completions: # This is too complicated to print. It really should be # implemented in str() anyway. 
print(completion) # Can't print the location here because we don't have the module path def show_goto_definitions(self): for completion in self.completions: print(completion.desc_with_module) if os.path.abspath(completion.module_path) == os.path.abspath(self.path): self.show_location(completion.line, completion.column) show_goto_assignments = show_goto_definitions def show_errors(self): print(self.traceback) print(("Error with running Script(...).{operation}() with\n" "\tpath: {path}\n" "\tline: {line}\n" "\tcolumn: {column}").format(**self.__dict__)) def main(arguments): debugger = 'pdb' if arguments['--pdb'] else \ 'ipdb' if arguments['--ipdb'] else \ 'pudb' if arguments['--pudb'] else None record = arguments['--record'] jedi.settings.use_filesystem_cache = arguments['--fs-cache'] if arguments['--debug']: jedi.set_debug_function() if arguments['redo'] or arguments['show']: t = TestCase.from_cache(record) if arguments['show']: t.show_errors() else: t.run(debugger) elif arguments['run']: TestCase( arguments[''], arguments[''], int(arguments['']), int(arguments['']) ).run(debugger, print_result=True) else: for _ in range(int(arguments['--maxtries'])): t = TestCase.generate(arguments[''] or '.') t.run(debugger, record) print('.', end='') sys.stdout.flush() print() if __name__ == '__main__': arguments = docopt(__doc__) main(arguments) jedi-0.7.0/AUTHORS.txt0000664000175000017500000000124412204171716014523 0ustar daviddavid00000000000000Main Authors ============ David Halter (@davidhalter) Takafumi Arakaki (@tkf) Code Contributors ================= Danilo Bargen (@dbrgn) Laurens Van Houtven (@lvh) <_@lvh.cc> Aldo Stracquadanio (@Astrac) Jean-Louis Fuchs (@ganwell) tek (@tek) Yasha Borevich (@jjay) Aaron Griffin andviro (@andviro) Mike Gilbert (@floppym) Aaron Meurer (@asmeurer) Lubos Trilety Akinori Hattori (@hattya) srusskih (@srusskih) Note: (@user) means a github user name. jedi-0.7.0/PKG-INFO0000664000175000017500000001562612204171764013746 0ustar daviddavid00000000000000Metadata-Version: 1.1 Name: jedi Version: 0.7.0 Summary: An autocompletion tool for Python that can be used for text editors. Home-page: https://github.com/davidhalter/jedi Author: David Halter Author-email: davidhalter88@gmail.com License: MIT Description: ################################################### Jedi - an awesome autocompletion library for Python ################################################### .. image:: https://secure.travis-ci.org/davidhalter/jedi.png?branch=master :target: http://travis-ci.org/davidhalter/jedi :alt: Travis-CI build status .. image:: https://coveralls.io/repos/davidhalter/jedi/badge.png?branch=master :target: https://coveralls.io/r/davidhalter/jedi :alt: Coverage Status .. image:: https://pypip.in/d/jedi/badge.png :target: https://crate.io/packages/jedi/ Jedi is an autocompletion tool for Python that can be used in IDEs/editors. Jedi works. Jedi is fast. It understands all of the basic Python syntax elements including many builtin functions. Additionaly, Jedi suports two different goto functions and has support for renaming as well as Pydoc support and some other IDE features. Jedi uses a very simple API to connect with IDE's. There's a reference implementation as a `VIM-Plugin `_, which uses Jedi's autocompletion. I encourage you to use Jedi in your IDEs. It's really easy. If there are any problems (also with licensing), just contact me. 
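        As a rough sketch of what that API looks like (the calls below mirror the
        ones used by the bundled ``sith.py`` script and the test suite; the source
        string and file name are made up for illustration)::

            import jedi

            source = "import json; json.lo"
            # line numbers are 1-based, columns 0-based; the cursor sits at the end
            script = jedi.Script(source, 1, len(source), 'example.py')
            for completion in script.completions():
                print(completion.name)

        The "API for IDEs" section below points to the full plugin documentation.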
Jedi can be used with the following editors: - Vim (jedi-vim_, YouCompleteMe_) - Emacs (Jedi.el_) - Sublime Text (SublimeJEDI_ [ST2 + ST3], anaconda_ [only ST3]) And it powers the following projects: - wdb_ Here are some pictures: .. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_complete.png Completion for almost anything (Ctrl+Space). .. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_function.png Display of function/class bodies, docstrings. .. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_pydoc.png Pydoc support (with highlighting, Shift+k). There is also support for goto and renaming. Get the latest version from `github `_ (master branch should always be kind of stable/working). Docs are available at `https://jedi.readthedocs.org/ `_. Pull requests with documentation enhancements and/or fixes are awesome and most welcome. Jedi uses `semantic versioning `_. Installation ============ pip install jedi Note: This just installs the Jedi library, not the editor plugins. For information about how to make it work with your editor, refer to the corresponding documentation. You don't want to use ``pip``? Please refer to the `manual `_. Feature Support and Caveats =========================== Jedi really understands your Python code. For a comprehensive list what Jedi can do, see: `Features `_. A list of caveats can be found on the same page. You can run Jedi on cPython 2.6, 2.7, 3.2 or 3.3, but it should also understand/parse code older than those versions. Tips on how to use Jedi efficiently can be found `here `_. API for IDEs ============ It's very easy to create an editor plugin that uses Jedi. See `Plugin API `_ for more information. Development =========== There's a pretty good and extensive `development documentation `_. Testing ======= The test suite depends on ``tox`` and ``pytest``:: pip install tox pytest To run the tests for all supported Python versions:: tox If you want to test only a specific Python version (e.g. Python 2.7), it's as easy as :: tox -e py27 Tests are also run automatically on `Travis CI `_. For more detailed information visit the `testing documentation `_ .. _jedi-vim: https://github.com/davidhalter/jedi-vim .. _youcompleteme: http://valloric.github.io/YouCompleteMe/ .. _Jedi.el: https://github.com/tkf/emacs-jedi .. _sublimejedi: https://github.com/srusskih/SublimeJEDI .. _anaconda: https://github.com/DamnWidget/anaconda .. _wdb: https://github.com/Kozea/wdb .. :changelog: Changelog --------- 0.7.0 (2013-08-09) ++++++++++++++++++ * switched from LGPL to MIT license * added an Interpreter class to the API to make autocompletion in REPL possible. 
* added autocompletion support for namespace packages * add sith.py, a new random testing method 0.6.0 (2013-05-14) ++++++++++++++++++ * much faster parser with builtin part caching * a test suite, thanks @tkf 0.5 versions (2012) +++++++++++++++++++ * Initial development Keywords: python completion refactoring vim Platform: any Classifier: Development Status :: 4 - Beta Classifier: Environment :: Plugins Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: MIT License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.2 Classifier: Programming Language :: Python :: 3.3 Classifier: Topic :: Software Development :: Libraries :: Python Modules Classifier: Topic :: Text Editors :: Integrated Development Environments (IDE) Classifier: Topic :: Utilities jedi-0.7.0/MANIFEST.in0000664000175000017500000000037012204171716014372 0ustar daviddavid00000000000000include README.rst include CHANGELOG.rst include LICENSE.txt include AUTHORS.txt include .coveragerc include sith.py include conftest.py include pytest.ini include tox.ini include jedi/mixin/*.pym recursive-include test * recursive-exclude * *.pyc jedi-0.7.0/tox.ini0000664000175000017500000000072412204171717014153 0ustar daviddavid00000000000000[tox] envlist = py26, py27, py32, py33 [testenv] deps = https://bitbucket.org/hpk42/pytest/get/c4f58165e0d4.zip # docopt for sith doctests docopt commands = py.test [] [testenv:py26] deps = unittest2 {[testenv]deps} [testenv:cov] deps = coverage {[testenv]deps} commands = coverage run --source jedi -m py.test coverage report [testenv:sith] commands = {envpython} sith.py --record {envtmpdir}/record.json random {posargs:jedi} jedi-0.7.0/test/0000775000175000017500000000000012204171764013616 5ustar daviddavid00000000000000jedi-0.7.0/test/test_namespace_package.py0000664000175000017500000000422112204171717020633 0ustar daviddavid00000000000000import jedi import sys from os.path import dirname, join def test_namespace_package(): sys.path.insert(0, join(dirname(__file__), 'namespace_package/ns1')) sys.path.insert(1, join(dirname(__file__), 'namespace_package/ns2')) try: # goto definition assert jedi.Script('from pkg import ns1_file').goto_definitions() assert jedi.Script('from pkg import ns2_file').goto_definitions() assert not jedi.Script('from pkg import ns3_file').goto_definitions() # goto assignment tests = { 'from pkg.ns2_folder.nested import foo': 'nested!', 'from pkg.ns2_folder import foo': 'ns2_folder!', 'from pkg.ns2_file import foo': 'ns2_file!', 'from pkg.ns1_folder import foo': 'ns1_folder!', 'from pkg.ns1_file import foo': 'ns1_file!', 'from pkg import foo': 'ns1!', } for source, solution in tests.items(): ass = jedi.Script(source).goto_assignments() assert len(ass) == 1 assert ass[0].description == "foo = '%s'" % solution # completion completions = jedi.Script('from pkg import ').completions() names = [str(c.name) for c in completions] # str because of unicode compare = ['foo', 'ns1_file', 'ns1_folder', 'ns2_folder', 'ns2_file'] # must at least contain these items, other items are not important assert not (set(compare) - set(names)) tests = { 'from pkg import ns2_folder as x': 'ns2_folder!', 'from pkg import ns2_file as x': 'ns2_file!', 'from pkg.ns2_folder import nested as x': 'nested!', 'from pkg import ns1_folder as x': 
'ns1_folder!', 'from pkg import ns1_file as x': 'ns1_file!', 'import pkg as x': 'ns1!', } for source, solution in tests.items(): for c in jedi.Script(source + '; x.').completions(): if c.name == 'foo': completion = c solution = "statement: foo = '%s'" % solution assert completion.description == solution finally: sys.path.pop(0) sys.path.pop(0) jedi-0.7.0/test/refactor/0000775000175000017500000000000012204171764015423 5ustar daviddavid00000000000000jedi-0.7.0/test/refactor/extract.py0000664000175000017500000000127112143361723017446 0ustar daviddavid00000000000000# --- simple def test(): #? 35 a return test(100, (30 + b, c) + 1) # +++ def test(): a = (30 + b, c) + 1 return test(100, a) # --- simple #2 def test(): #? 25 a return test(100, (30 + b, c) + 1) # +++ def test(): a = 30 + b return test(100, (a, c) + 1) # --- multiline def test(): #? 30 x return test(1, (30 + b, c) + 1) # +++ def test(): x = ((30 + b, c) + 1) return test(1, x ) # --- multiline #2 def test(): #? 25 x return test(1, (30 + b, c) + 1) # +++ def test(): x = 30 + b return test(1, (x, c) + 1) jedi-0.7.0/test/refactor/rename.py0000664000175000017500000000043712143361723017246 0ustar daviddavid00000000000000""" Test coverage for renaming is mostly being done by testing `Script.usages`. """ # --- simple def test1(): #? 7 blabla test1() AssertionError return test1, test1.not_existing # +++ def blabla(): blabla() AssertionError return blabla, blabla.not_existing jedi-0.7.0/test/refactor/inline.py0000664000175000017500000000036612143361723017256 0ustar daviddavid00000000000000# --- simple def test(): #? 4 a = (30 + b, c) + 1 return test(100, a) # +++ def test(): return test(100, (30 + b, c) + 1) # --- simple if 1: #? 4 a = 1, 2 return test(100, a) # +++ if 1: return test(100, (1, 2)) jedi-0.7.0/test/__init__.py0000664000175000017500000000000012143361723015713 0ustar daviddavid00000000000000jedi-0.7.0/test/namespace_package/0000775000175000017500000000000012204171764017225 5ustar daviddavid00000000000000jedi-0.7.0/test/namespace_package/ns1/0000775000175000017500000000000012204171764017726 5ustar daviddavid00000000000000jedi-0.7.0/test/namespace_package/ns1/pkg/0000775000175000017500000000000012204171764020507 5ustar daviddavid00000000000000jedi-0.7.0/test/namespace_package/ns1/pkg/__init__.py0000664000175000017500000000032612204171717022617 0ustar daviddavid00000000000000foo = 'ns1!' # this is a namespace package try: import pkg_resources pkg_resources.declare_namespace(__name__) except ImportError: import pkgutil __path__ = pkgutil.extend_path(__path__, __name__) jedi-0.7.0/test/namespace_package/ns1/pkg/ns1_folder/0000775000175000017500000000000012204171764022543 5ustar daviddavid00000000000000jedi-0.7.0/test/namespace_package/ns1/pkg/ns1_folder/__init__.py0000664000175000017500000000002412204171717024646 0ustar daviddavid00000000000000foo = 'ns1_folder!' jedi-0.7.0/test/namespace_package/ns1/pkg/ns1_file.py0000664000175000017500000000002212204171717022551 0ustar daviddavid00000000000000foo = 'ns1_file!' 
jedi-0.7.0/test/namespace_package/ns2/0000775000175000017500000000000012204171764017727 5ustar daviddavid00000000000000jedi-0.7.0/test/namespace_package/ns2/pkg/0000775000175000017500000000000012204171764020510 5ustar daviddavid00000000000000jedi-0.7.0/test/namespace_package/ns2/pkg/ns2_folder/0000775000175000017500000000000012204171764022545 5ustar daviddavid00000000000000jedi-0.7.0/test/namespace_package/ns2/pkg/ns2_folder/__init__.py0000664000175000017500000000002412204171717024650 0ustar daviddavid00000000000000foo = 'ns2_folder!' jedi-0.7.0/test/namespace_package/ns2/pkg/ns2_folder/nested/0000775000175000017500000000000012204171764024027 5ustar daviddavid00000000000000jedi-0.7.0/test/namespace_package/ns2/pkg/ns2_folder/nested/__init__.py0000664000175000017500000000002012204171717026126 0ustar daviddavid00000000000000foo = 'nested!' jedi-0.7.0/test/namespace_package/ns2/pkg/ns2_file.py0000664000175000017500000000002212204171717022553 0ustar daviddavid00000000000000foo = 'ns2_file!' jedi-0.7.0/test/test_integration_keyword.py0000664000175000017500000000255712204171717021325 0ustar daviddavid00000000000000""" Test of keywords and ``jedi.keywords`` """ import jedi from jedi import Script, common def test_goto_assignments_keyword(): """ Bug: goto assignments on ``in`` used to raise AttributeError:: 'unicode' object has no attribute 'generate_call_path' """ Script('in').goto_assignments() def test_keyword_doc(): r = list(Script("or", 1, 1).goto_definitions()) assert len(r) == 1 assert len(r[0].doc) > 100 r = list(Script("asfdasfd", 1, 1).goto_definitions()) assert len(r) == 0 k = Script("fro").completions()[0] imp_start = '\nThe ``import' assert k.raw_doc.startswith(imp_start) assert k.doc.startswith(imp_start) def test_keyword(): """ github jedi-vim issue #44 """ defs = Script("print").goto_definitions() assert [d.doc for d in defs] defs = Script("import").goto_definitions() assert len(defs) == 1 and [1 for d in defs if d.doc] # unrelated to #44 defs = Script("import").goto_assignments() assert len(defs) == 0 completions = Script("import", 1,1).completions() assert len(completions) == 0 with common.ignored(jedi.NotFoundError): # TODO shouldn't throw that. defs = Script("assert").goto_definitions() assert len(defs) == 1 def test_lambda(): defs = Script('lambda x: x', column=0).goto_definitions() assert [d.type for d in defs] == ['keyword'] jedi-0.7.0/test/absolute_import/0000775000175000017500000000000012204171764017026 5ustar daviddavid00000000000000jedi-0.7.0/test/absolute_import/unittest.py0000664000175000017500000000066512204171717021264 0ustar daviddavid00000000000000""" This is a module that shadows a builtin (intentionally). It imports a local module, which in turn imports stdlib unittest (the name shadowed by this module). If that is properly resolved, there's no problem. However, if jedi doesn't understand absolute_imports, it will get this module again, causing infinite recursion. """ from local_module import Assertions class TestCase(Assertions): def test(self): self.assertT jedi-0.7.0/test/absolute_import/local_module.py0000664000175000017500000000057712204171717022046 0ustar daviddavid00000000000000""" This is a module that imports the *standard library* unittest, despite there being a local "unittest" module. It specifies that it wants the stdlib one with the ``absolute_import`` __future__ import. The twisted equivalent of this module is ``twisted.trial._synctest``. 
""" from __future__ import absolute_import import unittest class Assertions(unittest.TestCase): pass jedi-0.7.0/test/test_utils.py0000664000175000017500000000635512204171717016376 0ustar daviddavid00000000000000import readline from jedi import utils from .helpers import TestCase, cwd_at class TestSetupReadline(TestCase): class NameSpace(): pass def __init__(self, *args, **kwargs): super(type(self), self).__init__(*args, **kwargs) self.namespace = self.NameSpace() utils.setup_readline(self.namespace) def completions(self, text): completer = readline.get_completer() i = 0 completions = [] while True: completion = completer(text, i) if completion is None: break completions.append(completion) i += 1 return completions def test_simple(self): assert self.completions('list') == ['list'] assert self.completions('importerror') == ['ImportError'] s = "print BaseE" assert self.completions(s) == [s + 'xception'] def test_nested(self): assert self.completions('list.Insert') == ['list.insert'] assert self.completions('list().Insert') == ['list().insert'] def test_magic_methods(self): assert self.completions('list.__getitem__') == ['list.__getitem__'] assert self.completions('list().__getitem__') == ['list().__getitem__'] def test_modules(self): import sys import os self.namespace.sys = sys self.namespace.os = os assert self.completions('os.path.join') == ['os.path.join'] assert self.completions('os.path.join().upper') == ['os.path.join().upper'] c = set(['os.' + d for d in dir(os) if d.startswith('ch')]) assert set(self.completions('os.ch')) == set(c) del self.namespace.sys del self.namespace.os def test_calls(self): s = 'str(bytes' assert self.completions(s) == [s, 'str(BytesWarning'] def test_import(self): s = 'from os.path import a' assert set(self.completions(s)) == set([s + 'ltsep', s + 'bspath']) assert self.completions('import keyword') == ['import keyword'] import os s = 'from os import ' goal = set([s + el for el in dir(os)]) # There are minor differences, e.g. the dir doesn't include deleted # items as well as items that are not only available on linux. assert len(set(self.completions(s)).symmetric_difference(goal)) < 20 @cwd_at('test') def test_local_import(self): s = 'import test_utils' assert self.completions(s) == [s] def test_preexisting_values(self): self.namespace.a = range(10) assert set(self.completions('a.')) == set(['a.' + n for n in dir(range(1))]) del self.namespace.a def test_colorama(self): """ Only test it if colorama library is available. This module is being tested because it uses ``setattr`` at some point, which Jedi doesn't understand, but it should still work in the REPL. """ try: # if colorama is installed import colorama except ImportError: pass else: self.namespace.colorama = colorama assert self.completions('colorama') assert self.completions('colorama.Fore.BLACK') == ['colorama.Fore.BLACK'] del self.namespace.colorama jedi-0.7.0/test/run.py0000775000175000017500000003036412204171717015003 0ustar daviddavid00000000000000#!/usr/bin/env python """ |jedi| is mostly being tested by what I would call "Blackbox Tests". These tests are just testing the interface and do input/output testing. This makes a lot of sense for |jedi|. Jedi supports so many different code structures, that it is just stupid to write 200'000 unittests in the manner of ``regression.py``. Also, it is impossible to do doctests/unittests on most of the internal data structures. That's why |jedi| uses mostly these kind of tests. 
There are different kind of tests: - completions / goto_definitions ``#?`` - goto_assignments: ``#!`` - usages: ``#<`` How to run tests? +++++++++++++++++ Jedi uses pytest_ to run unit and integration tests. To run tests, simply run ``py.test``. You can also use tox_ to run tests for multiple Python versions. .. _pytest: http://pytest.org .. _tox: http://testrun.org/tox Integration test cases are located in ``test/completion`` directory and each test cases are indicated by the comment ``#?`` (completions / definitions), ``#!`` (assignments) and ``#<`` (usages). There is also support for third party libraries. In a normal test run they are not being executed, you have to provide a ``--thirdparty`` option. In addition to standard `-k` and `-m` options in py.test, you can use `-T` (`--test-files`) option to specify integration test cases to run. It takes the format of ``FILE_NAME[:LINE[,LINE[,...]]]`` where ``FILE_NAME`` is a file in ``test/completion`` and ``LINE`` is a line number of the test comment. Here is some recipes: Run tests only in ``basic.py`` and ``imports.py``:: py.test test/test_integration.py -T basic.py -T imports.py Run test at line 4, 6, and 8 in ``basic.py``:: py.test test/test_integration.py -T basic.py:4,6,8 See ``py.test --help`` for more information. If you want to debug a test, just use the --pdb option. Auto-Completion +++++++++++++++ Uses comments to specify a test in the next line. The comment says, which results are expected. The comment always begins with `#?`. The last row symbolizes the cursor. For example:: #? ['real'] a = 3; a.rea Because it follows ``a.rea`` and a is an ``int``, which has a ``real`` property. Goto Definitions ++++++++++++++++ Definition tests use the same symbols like completion tests. This is possible because the completion tests are defined with a list:: #? int() ab = 3; ab Goto Assignments ++++++++++++++++ Tests look like this:: abc = 1 #! ['abc=1'] abc Additionally it is possible to add a number which describes to position of the test (otherwise it's just end of line):: #! 
2 ['abc=1'] abc Usages ++++++ Tests look like this:: abc = 1 #< abc@1,0 abc@3,0 abc """ import os import re from ast import literal_eval import jedi from jedi._compatibility import unicode, reduce, StringIO, is_py3k TEST_COMPLETIONS = 0 TEST_DEFINITIONS = 1 TEST_ASSIGNMENTS = 2 TEST_USAGES = 3 class IntegrationTestCase(object): def __init__(self, test_type, correct, line_nr, column, start, line, path=None): self.test_type = test_type self.correct = correct self.line_nr = line_nr self.column = column self.start = start self.line = line self.path = path self.skip = None @property def module_name(self): return re.sub('.*/|\.py', '', self.path) def __repr__(self): name = os.path.basename(self.path) if self.path else None return '<%s: %s:%s:%s>' % (self.__class__.__name__, name, self.line_nr - 1, self.line.rstrip()) def script(self): return jedi.Script(self.source, self.line_nr, self.column, self.path) def run(self, compare_cb): testers = { TEST_COMPLETIONS: self.run_completion, TEST_DEFINITIONS: self.run_goto_definitions, TEST_ASSIGNMENTS: self.run_goto_assignments, TEST_USAGES: self.run_usages, } return testers[self.test_type](compare_cb) def run_completion(self, compare_cb): completions = self.script().completions() #import cProfile; cProfile.run('script.completions()') comp_str = set([c.name for c in completions]) return compare_cb(self, comp_str, set(literal_eval(self.correct))) def run_goto_definitions(self, compare_cb): def definition(correct, correct_start, path): def defs(line_nr, indent): s = jedi.Script(self.source, line_nr, indent, path) return set(s.goto_definitions()) should_be = set() number = 0 for index in re.finditer('(?: +|$)', correct): if correct == ' ': continue # -1 for the comment, +3 because of the comment start `#? ` start = index.start() number += 1 try: should_be |= defs(self.line_nr - 1, start + correct_start) except Exception: print('could not resolve %s indent %s' % (self.line_nr - 1, start)) raise # because the objects have different ids, `repr`, then compare. 
should_str = set(r.desc_with_module for r in should_be) if len(should_str) < number: raise Exception('Solution @%s not right, ' 'too few test results: %s' % (self.line_nr - 1, should_str)) return should_str script = self.script() should_str = definition(self.correct, self.start, script.path) result = script.goto_definitions() is_str = set(r.desc_with_module for r in result) return compare_cb(self, is_str, should_str) def run_goto_assignments(self, compare_cb): result = self.script().goto_assignments() comp_str = str(sorted(str(r.description) for r in result)) return compare_cb(self, comp_str, self.correct) def run_usages(self, compare_cb): result = self.script().usages() self.correct = self.correct.strip() compare = sorted((r.module_name, r.line, r.column) for r in result) wanted = [] if not self.correct: positions = [] else: positions = literal_eval(self.correct) for pos_tup in positions: if type(pos_tup[0]) == str: # this means that there is a module specified wanted.append(pos_tup) else: wanted.append((self.module_name, self.line_nr + pos_tup[0], pos_tup[1])) return compare_cb(self, compare, sorted(wanted)) def collect_file_tests(lines, lines_to_execute): makecase = lambda t: IntegrationTestCase(t, correct, line_nr, column, start, line) start = None correct = None test_type = None for line_nr, line in enumerate(lines): line_nr += 1 # py2.5 doesn't know about the additional enumerate param if not is_py3k: line = unicode(line, 'UTF-8') if correct: r = re.match('^(\d+)\s*(.*)$', correct) if r: column = int(r.group(1)) correct = r.group(2) start += r.regs[2][0] # second group, start index else: column = len(line) - 1 # -1 for the \n if test_type == '!': yield makecase(TEST_ASSIGNMENTS) elif test_type == '<': yield makecase(TEST_USAGES) elif correct.startswith('['): yield makecase(TEST_COMPLETIONS) else: yield makecase(TEST_DEFINITIONS) correct = None else: try: r = re.search(r'(?:^|(?<=\s))#([?!<])\s*([^\n]+)', line) # test_type is ? for completion and ! for goto_assignments test_type = r.group(1) correct = r.group(2) start = r.start() except AttributeError: correct = None else: # skip the test, if this is not specified test if lines_to_execute and line_nr not in lines_to_execute: correct = None def collect_dir_tests(base_dir, test_files, check_thirdparty=False): for f_name in os.listdir(base_dir): files_to_execute = [a for a in test_files.items() if a[0] in f_name] lines_to_execute = reduce(lambda x, y: x + y[1], files_to_execute, []) if f_name.endswith(".py") and (not test_files or files_to_execute): skip = None if check_thirdparty: lib = f_name.replace('_.py', '') try: # there is always an underline at the end. # It looks like: completion/thirdparty/pylab_.py __import__(lib) except ImportError: skip = 'Thirdparty-Library %s not found.' % lib path = os.path.join(base_dir, f_name) source = open(path).read() for case in collect_file_tests(StringIO(source), lines_to_execute): case.path = path case.source = source if skip: case.skip = skip yield case docoptstr = """ Using run.py to make debugging easier with integration tests. An alternative testing format, which is much more hacky, but very nice to work with. Usage: run.py [--pdb] [--debug] [--thirdparty] [...] run.py --help Options: -h --help Show this screen. --pdb Enable pdb debugging on fail. -d, --debug Enable text output debugging (please install ``colorama``). --thirdparty Also run thirdparty tests (in ``completion/thirdparty``). 
""" if __name__ == '__main__': import docopt arguments = docopt.docopt(docoptstr) import time t_start = time.time() # Sorry I didn't use argparse here. It's because argparse is not in the # stdlib in 2.5. import sys if arguments['--debug']: jedi.set_debug_function() # get test list, that should be executed test_files = {} last = None for arg in arguments['']: if arg.isdigit(): if last is None: continue test_files[last].append(int(arg)) else: test_files[arg] = [] last = arg # completion tests: completion_test_dir = '../test/completion' summary = [] tests_fail = 0 # execute tests cases = list(collect_dir_tests(completion_test_dir, test_files)) if test_files or arguments['--thirdparty']: completion_test_dir += '/thirdparty' cases += collect_dir_tests(completion_test_dir, test_files, True) def file_change(current, tests, fails): if current is not None: current = os.path.basename(current) print('%s \t\t %s tests and %s fails.' % (current, tests, fails)) def report(case, actual, desired): if actual == desired: return 0 else: print("\ttest fail @%d, actual = %s, desired = %s" % (case.line_nr - 1, actual, desired)) return 1 import traceback current = cases[0].path if cases else None count = fails = 0 for c in cases: try: if c.run(report): tests_fail += 1 fails += 1 except Exception: traceback.print_exc() print("\ttest fail @%d" % (c.line_nr - 1)) tests_fail += 1 fails += 1 if arguments['--pdb']: import pdb pdb.post_mortem() count += 1 if current != c.path: file_change(current, count, fails) current = c.path count = fails = 0 file_change(current, count, fails) print('\nSummary: (%s fails of %s tests) in %.3fs' % (tests_fail, len(cases), time.time() - t_start)) for s in summary: print(s) exit_code = 1 if tests_fail else 0 if sys.hexversion < 0x02060000 and tests_fail <= 9: # Python 2.5 has major incompabillities (e.g. no property.setter), # therefore it is not possible to pass all tests. exit_code = 0 sys.exit(exit_code) jedi-0.7.0/test/test_docstring.py0000664000175000017500000000342712204171717017227 0ustar daviddavid00000000000000""" Testing of docstring related issues and especially ``jedi.docstrings``. """ import jedi from .helpers import unittest class TestDocstring(unittest.TestCase): def test_function_doc(self): defs = jedi.Script(""" def func(): '''Docstring of `func`.''' func""").goto_definitions() self.assertEqual(defs[0].raw_doc, 'Docstring of `func`.') @unittest.skip('need evaluator class for that') def test_attribute_docstring(self): defs = jedi.Script(""" x = None '''Docstring of `x`.''' x""").goto_definitions() self.assertEqual(defs[0].raw_doc, 'Docstring of `x`.') @unittest.skip('need evaluator class for that') def test_multiple_docstrings(self): defs = jedi.Script(""" def func(): '''Original docstring.''' x = func '''Docstring of `x`.''' x""").goto_definitions() docs = [d.raw_doc for d in defs] self.assertEqual(docs, ['Original docstring.', 'Docstring of `x`.']) def test_completion(self): assert jedi.Script(''' class DocstringCompletion(): #? 
[] """ asdfas """''').completions() def test_docstrings_type_dotted_import(self): s = """ def func(arg): ''' :type arg: threading.Thread ''' arg.""" names = [c.name for c in jedi.Script(s).completions()] assert 'start' in names def test_docstrings_type_str(self): s = """ def func(arg): ''' :type arg: str ''' arg.""" names = [c.name for c in jedi.Script(s).completions()] assert 'join' in names jedi-0.7.0/test/test_parsing.py0000664000175000017500000000553712204171717016702 0ustar daviddavid00000000000000from jedi.parsing import Parser from jedi import parsing_representation as pr def test_user_statement_on_import(): """github #285""" s = "from datetime import (\n" \ " time)" for pos in [(2, 1), (2, 4)]: u = Parser(s, user_position=pos).user_stmt assert isinstance(u, pr.Import) assert u.defunct == False assert [str(n) for n in u.get_defined_names()] == ['time'] class TestCallAndName(): def get_call(self, source): stmt = Parser(source, no_docstr=True).module.statements[0] return stmt.get_commands()[0] def test_name_and_call_positions(self): call = self.get_call('name\nsomething_else') assert str(call.name) == 'name' assert call.name.start_pos == call.start_pos == (1, 0) assert call.name.end_pos == call.end_pos == (1, 4) call = self.get_call('1.0\n') assert call.name == 1.0 assert call.start_pos == (1, 0) assert call.end_pos == (1, 3) def test_call_type(self): call = self.get_call('hello') assert call.type == pr.Call.NAME assert type(call.name) == pr.Name call = self.get_call('1.0') assert type(call.name) == float assert call.type == pr.Call.NUMBER call = self.get_call('1') assert type(call.name) == int assert call.type == pr.Call.NUMBER call = self.get_call('"hello"') assert call.type == pr.Call.STRING assert call.name == 'hello' class TestSubscopes(): def get_sub(self, source): return Parser(source).module.subscopes[0] def test_subscope_names(self): name = self.get_sub('class Foo: pass').name assert name.start_pos == (1, len('class ')) assert name.end_pos == (1, len('class Foo')) assert str(name) == 'Foo' name = self.get_sub('def foo(): pass').name assert name.start_pos == (1, len('def ')) assert name.end_pos == (1, len('def foo')) assert str(name) == 'foo' class TestImports(): def get_import(self, source): return Parser(source).module.imports[0] def test_import_names(self): imp = self.get_import('import math\n') names = imp.get_defined_names() assert len(names) == 1 assert str(names[0]) == 'math' assert names[0].start_pos == (1, len('import ')) assert names[0].end_pos == (1, len('import math')) assert imp.start_pos == (1, 0) assert imp.end_pos == (1, len('import math')) def test_module(): module = Parser('asdf', 'example.py', no_docstr=True).module name = module.name assert str(name) == 'example' assert name.start_pos == (0, 0) assert name.end_pos == (0, 0) module = Parser('asdf', no_docstr=True).module name = module.name assert str(name) == '' assert name.start_pos == (0, 0) assert name.end_pos == (0, 0) jedi-0.7.0/test/test_fast_parser.py0000664000175000017500000000123212204171717017534 0ustar daviddavid00000000000000import jedi def test_add_to_end(): """ fast_parser doesn't parse everything again. It just updates with the help of caches, this is an example that didn't work. """ a = """ class Abc(): def abc(self): self.x = 3 class Two(Abc): def h(self): self """ # ^ here is the first completion b = " def g(self):\n" \ " self." 
assert jedi.Script(a, 8, 12, 'example.py').completions() assert jedi.Script(a + b, path='example.py').completions() a = a[:-1] + '.\n' assert jedi.Script(a, 8, 13, 'example.py').completions() assert jedi.Script(a + b, path='example.py').completions() jedi-0.7.0/test/test_api_classes.py0000664000175000017500000000415412204171717017517 0ustar daviddavid00000000000000""" Test all things related to the ``jedi.api_classes`` module. """ import textwrap import pytest from jedi import Script import jedi def test_is_keyword(): results = Script('import ', 1, 1, None).goto_definitions() assert len(results) == 1 and results[0].is_keyword == True results = Script('str', 1, 1, None).goto_definitions() assert len(results) == 1 and results[0].is_keyword == False def make_definitions(): """ Return a list of definitions for parametrized tests. :rtype: [jedi.api_classes.BaseDefinition] """ source = textwrap.dedent(""" import sys class C: pass x = C() def f(): pass def g(): yield h = lambda: None """) definitions = [] definitions += jedi.defined_names(source) source += textwrap.dedent(""" variable = sys or C or x or f or g or g() or h""") lines = source.splitlines() script = Script(source, len(lines), len('variable'), None) definitions += script.goto_definitions() script2 = Script(source, 4, len('class C'), None) definitions += script2.usages() source_param = "def f(a): return a" script_param = Script(source_param, 1, len(source_param), None) definitions += script_param.goto_assignments() return definitions @pytest.mark.parametrize('definition', make_definitions()) def test_basedefinition_type(definition): assert definition.type in ('module', 'class', 'instance', 'function', 'generator', 'statement', 'import', 'param') def test_function_call_signature_in_doc(): defs = Script(""" def f(x, y=1, z='a'): pass f""").goto_definitions() doc = defs[0].doc assert "f(x, y = 1, z = 'a')" in doc def test_class_call_signature(): defs = Script(""" class Foo: def __init__(self, x, y=1, z='a'): pass Foo""").goto_definitions() doc = defs[0].doc assert "Foo(self, x, y = 1, z = 'a')" in doc def test_position_none_if_builtin(): gotos = Script('import sys; sys.path').goto_assignments() assert gotos[0].line is None assert gotos[0].column is None jedi-0.7.0/test/test_cache.py0000664000175000017500000000435412204171717016276 0ustar daviddavid00000000000000""" Test all things related to the ``jedi.cache`` module. """ import time import pytest import jedi from jedi import settings, cache from jedi.cache import ParserCacheItem, _ModulePickling ModulePickling = _ModulePickling() def test_modulepickling_change_cache_dir(monkeypatch, tmpdir): """ ModulePickling should not save old cache when cache_directory is changed. 
See: `#168 `_ """ dir_1 = str(tmpdir.mkdir('first')) dir_2 = str(tmpdir.mkdir('second')) item_1 = ParserCacheItem('fake parser 1') item_2 = ParserCacheItem('fake parser 2') path_1 = 'fake path 1' path_2 = 'fake path 2' monkeypatch.setattr(settings, 'cache_directory', dir_1) ModulePickling.save_module(path_1, item_1) cached = load_stored_item(ModulePickling, path_1, item_1) assert cached == item_1.parser monkeypatch.setattr(settings, 'cache_directory', dir_2) ModulePickling.save_module(path_2, item_2) cached = load_stored_item(ModulePickling, path_1, item_1) assert cached is None def load_stored_item(cache, path, item): """Load `item` stored at `path` in `cache`.""" return cache.load_module(path, item.change_time - 1) @pytest.mark.usefixtures("isolated_jedi_cache") def test_modulepickling_delete_incompatible_cache(): item = ParserCacheItem('fake parser') path = 'fake path' cache1 = _ModulePickling() cache1.version = 1 cache1.save_module(path, item) cached1 = load_stored_item(cache1, path, item) assert cached1 == item.parser cache2 = _ModulePickling() cache2.version = 2 cached2 = load_stored_item(cache2, path, item) assert cached2 is None def test_star_import_cache_duration(): new = 0.01 old, jedi.settings.star_import_cache_validity = \ jedi.settings.star_import_cache_validity, new cache.star_import_cache = {} # first empty... # path needs to be not-None (otherwise caching effects are not visible) jedi.Script('', 1, 0, '').completions() time.sleep(2 * new) jedi.Script('', 1, 0, '').completions() # reset values jedi.settings.star_import_cache_validity = old length = len(cache.star_import_cache) cache.star_import_cache = {} assert length == 1 jedi-0.7.0/test/test_integration.py0000664000175000017500000000172512204171717017555 0ustar daviddavid00000000000000import os import pytest from . import helpers def assert_case_equal(case, actual, desired): """ Assert ``actual == desired`` with formatted message. This is not needed for typical py.test use case, but as we need ``--assert=plain`` (see ../pytest.ini) to workaround some issue due to py.test magic, let's format the message by hand. """ assert actual == desired, """ Test %r failed. actual = %s desired = %s """ % (case, actual, desired) def test_integration(case, monkeypatch, pytestconfig): if case.skip is not None: pytest.skip(case.skip) repo_root = helpers.root_dir monkeypatch.chdir(os.path.join(repo_root, 'jedi')) case.run(assert_case_equal) def test_refactor(refactor_case): """ Run refactoring test case. :type refactor_case: :class:`.refactor.RefactoringCase` """ refactor_case.run() assert_case_equal(refactor_case, refactor_case.result, refactor_case.desired) jedi-0.7.0/test/completion/0000775000175000017500000000000012204171764015767 5ustar daviddavid00000000000000jedi-0.7.0/test/completion/descriptors.py0000664000175000017500000000514612204171717020706 0ustar daviddavid00000000000000class RevealAccess(object): """ A data descriptor that sets and returns values normally and prints a message logging their access. """ def __init__(self, initval=None, name='var'): self.val = initval self.name = name def __get__(self, obj, objtype): print('Retrieving', self.name) return self.val def __set__(self, obj, val): print('Updating', self.name) self.val = val def just_a_method(self): pass class C(object): x = RevealAccess(10, 'var "x"') #? RevealAccess() x #? ['just_a_method'] x.just_a_method y = 5.0 def __init__(self): #? int() self.x #? [] self.just_a_method #? [] C.just_a_method m = C() #? int() m.x #? float() m.y #? int() C.x #? 
[] m.just_a_method #? [] C.just_a_method # ----------------- # properties # ----------------- class B(): @property def r(self): return 1 @r.setter def r(self, value): return '' def t(self): return '' p = property(t) #? [] B().r() #? int() B().r #? str() B().p #? [] B().p() class PropClass(): def __init__(self, a): self.a = a @property def ret(self): return self.a @ret.setter def ret(self, value): return 1.0 def ret2(self): return self.a ret2 = property(ret2) @property def nested(self): """ causes recusions in properties, should work """ return self.ret @property def nested2(self): """ causes recusions in properties, should not work """ return self.nested2 @property def join1(self): """ mutual recusion """ return self.join2 @property def join2(self): """ mutual recusion """ return self.join1 #? str() PropClass("").ret #? [] PropClass().ret. #? str() PropClass("").ret2 #? PropClass().ret2 #? int() PropClass(1).nested #? [] PropClass().nested. #? PropClass(1).nested2 #? [] PropClass().nested2. #? PropClass(1).join1 # ----------------- # staticmethod/classmethod # ----------------- class E(object): a = '' def __init__(self, a): self.a = a def f(x): return x f = staticmethod(f) @staticmethod def g(x): return x def s(cls, x): return x s = classmethod(s) @classmethod def t(cls, x): return x @classmethod def u(cls, x): return cls.a e = E(1) #? int() e.f(1) #? int() E.f(1) #? int() e.g(1) #? int() E.g(1) #? int() e.s(1) #? int() E.s(1) #? int() e.t(1) #? int() E.t(1) #? str() e.u(1) #? str() E.u(1) jedi-0.7.0/test/completion/functions.py0000664000175000017500000001422312204171717020351 0ustar daviddavid00000000000000def array(first_param): #? ['first_param'] first_param return list() #? [] array.first_param #? [] array.first_param. func = array #? [] func.first_param #? list() array() #? ['array'] arr def inputs(param): return param #? list() inputs(list) def variable_middle(): var = 3 return var #? int() variable_middle() def variable_rename(param): var = param return var #? int() variable_rename(1) def multi_line_func(a, # comment blabla b): return b #? str() multi_line_func(1,'') # nothing after comma def asdf(a): return a x = asdf(a=1, ) #? int() x # ----------------- # double execution # ----------------- def double_exe(param): return param #? str() variable_rename(double_exe)("") # -> shouldn't work (and throw no error) #? [] variable_rename(list())(). #? [] variable_rename(1)(). # ----------------- # recursions (should ignore) # ----------------- def recursion(a, b): if a: return b else: return recursion(a+".", b+1) #? int() float() recursion("a", 1.0) def other(a): return recursion2(a) def recursion2(a): if a: return other(a) else: return recursion2("") return a #? int() str() recursion2(1) # ----------------- # ordering # ----------------- def a(): #? int() b() return b() def b(): return 1 #? int() a() # ----------------- # keyword arguments # ----------------- def func(a=1, b=''): return a, b exe = func(b=list, a=tuple) #? tuple exe[0] #? list exe[1] # ----------------- # default arguments # ----------------- #? int() func()[0] #? str() func()[1] #? float() func(1.0)[0] #? str() func(1.0)[1] # ----------------- # closures # ----------------- def a(): l = 3 def func_b(): #? str() l = '' #? ['func_b'] func_b #? int() l # ----------------- # *args # ----------------- def args_func(*args): #? tuple() return args exe = args_func(1, "") #? int() exe[0] #? str() exe[1] # illegal args (TypeError) #? args_func(*1)[0] # iterator #? 
int() args_func(*iter([1]))[0] # different types e = args_func(*[1+"", {}]) #? int() str() e[0] #? dict() e[1] _list = [1,""] exe2 = args_func(_list)[0] #? str() exe2[1] exe3 = args_func([1,""])[0] #? str() exe3[1] def args_func(arg1, *args): return arg1, args exe = args_func(1, "", list) #? int() exe[0] #? tuple() exe[1] #? list() exe[1][1] # ----------------- # ** kwargs # ----------------- def kwargs_func(**kwargs): #? ['keys'] kwargs.keys #? dict() return kwargs exe = kwargs_func(a=3,b=4.0) #? dict() exe #? int() exe['a'] #? float() exe['b'] #? int() float() exe['c'] exe2 = kwargs_func(**{a:3, b:4.0}) #? int() exe2['a'] # ----------------- # *args / ** kwargs # ----------------- def func_without_call(*args, **kwargs): #? tuple() args #? dict() kwargs def fu(a=1, b="", *args, **kwargs): return a, b, args, kwargs exe = fu(list, 1, "", c=set, d="") #? list() exe[0] #? int() exe[1] #? tuple() exe[2] #? str() exe[2][0] #? dict() exe[3] #? set() exe[3]['c'] # ----------------- # nested *args # ----------------- def function_args(a, b, c): return b def nested_args(*args): return function_args(*args) def nested_args2(*args, **kwargs): return nested_args(*args) #? int() nested_args('', 1, 1.0, list) #? [] nested_args('') #? int() nested_args2('', 1, 1.0) #? [] nested_args2('') # ----------------- # nested **kwargs # ----------------- def nested_kw(**kwargs1): return function_args(**kwargs1) def nested_kw2(**kwargs2): return nested_kw(**kwargs2) #? int() nested_kw(b=1, c=1.0, list) #? int() nested_kw(b=1) #? int() nested_kw(d=1.0, b=1, list) #? int() nested_kw(b=1) #? int() nested_kw(a=3.0, b=1) #? int() nested_kw(b=1, a=r"") #? [] nested_kw('') #? [] nested_kw(a='') #? int() nested_kw2(b=1) #? int() nested_kw2(b=1, c=1.0) #? int() nested_kw2(c=1.0, b=1) #? [] nested_kw2('') #? [] nested_kw2(a='') #? [] nested_kw2('', b=1). # ----------------- # nested *args/**kwargs # ----------------- def nested_both(*args, **kwargs): return function_args(*args, **kwargs) def nested_both2(*args, **kwargs): return nested_both(*args, **kwargs) #? int() nested_both('', b=1, c=1.0, list) #? int() nested_both('', c=1.0, b=1, list) #? [] nested_both('') #? int() nested_both2('', b=1, c=1.0) #? int() nested_both2('', c=1.0, b=1) #? [] nested_both2('') # ----------------- # nested *args/**kwargs with a default arg # ----------------- def function_def(a, b, c): return a, b def nested_def(a, *args, **kwargs): return function_def(a, *args, **kwargs) def nested_def2(*args, **kwargs): return nested_def(*args, **kwargs) #? str() nested_def2('', 1, 1.0)[0] #? str() nested_def2('', b=1, c=1.0)[0] #? str() nested_def2('', c=1.0, b=1)[0] #? int() nested_def2('', 1, 1.0)[1] #? int() nested_def2('', b=1, c=1.0)[1] #? int() nested_def2('', c=1.0, b=1)[1] #? [] nested_def2('')[1] # ----------------- # function annotations (should be ignored at the moment) # ----------------- def annot(a:3, *args:3): return a, args[0] #? str() annot('', 1.0)[0] #? float() annot('', 1.0)[1] def annot_ret(a:3) -> 3: return a #? str() annot_ret('') # ----------------- # magic methods # ----------------- def a(): pass #? ['__closure__'] a.__closure__ # ----------------- # lambdas # ----------------- a = lambda: 3 #? int() a() x = [] a = lambda x: x #? int() a(0) #? float() (lambda x: x)(3.0) arg_l = lambda x, y: y, x #? float() arg_l[0]('', 1.0) #? list() arg_l[1] arg_l = lambda x, y: (y, x) args = 1,"" result = arg_l(*args) #? tuple() result #? str() result[0] #? 
int() result[1] def with_lambda(callable_lambda, *args, **kwargs): return callable_lambda(1, *args, **kwargs) #? int() with_lambda(arg_l, 1.0)[1] #? float() with_lambda(arg_l, 1.0)[0] #? float() with_lambda(arg_l, y=1.0)[0] #? int() with_lambda(lambda x: x) #? float() with_lambda(lambda x, y: y, y=1.0) arg_func = lambda *args, **kwargs: (args[0], kwargs['a']) #? int() arg_func(1, 2, a='', b=10)[0] #? list() arg_func(1, 2, a=[], b=10)[1] # magic method a = lambda: 3 #? ['__closure__'] a.__closure__ class C(): def __init__(self): self.a = lambda: 1 #? int() C().a() jedi-0.7.0/test/completion/std.py0000664000175000017500000000234212143361723017132 0ustar daviddavid00000000000000""" std library stuff """ # ----------------- # re # ----------------- import re c = re.compile(r'a') # re.compile should not return str -> issue #68 #? [] c.startswith #? int() c.match().start() #? int() re.match(r'a', 'a').start() for a in re.finditer('a', 'a'): #? int() a.start() #? str() re.sub('a', 'a') # ----------------- # ref # ----------------- import weakref #? int() weakref.proxy(1) #? weakref.ref weakref.ref(1) #? int() weakref.ref(1)() # ----------------- # functools # ----------------- import functools basetwo = functools.partial(int, base=2) #? int() basetwo() def a(a, b): return a, b a = functools.partial(a, 0) #? int() a('')[0] #? str() a('')[1] def my_decorator(f): @functools.wraps(f) def wrapper(*args, **kwds): return f(*args, **kwds) return wrapper @my_decorator def example(a): return a #? str() example('') # ----------------- # sqlite3 (#84) # ----------------- import sqlite3 #? sqlite3.Connection() con = sqlite3.connect() #? sqlite3.Cursor() c = con.cursor() #? sqlite3.Row() row = c.fetchall()[0] #? str() row.keys()[0] def huhu(db): """ :type db: sqlite3.Connection :param db: the db connection """ #? sqlite3.Connection() db jedi-0.7.0/test/completion/__init__.py0000664000175000017500000000011512143361723020073 0ustar daviddavid00000000000000""" needed for some modules to test against packages. """ some_variable = 1 jedi-0.7.0/test/completion/complex.py0000664000175000017500000000031112143361723020001 0ustar daviddavid00000000000000""" Mainly for stupid error reports of @gwrtheyrn. :-) """ import time class Foo(object): global time asdf = time def asdfy(): return Foo xorz = getattr(asdfy()(), 'asdf') #? time xorz jedi-0.7.0/test/completion/ordering.py0000664000175000017500000000357012143361723020155 0ustar daviddavid00000000000000# ----------------- # normal # ----------------- a = "" a = 1 #? int() a #? [] a.append a = list b = 1; b = "" #? str() b # temp should not be accessible before definition #? [] temp a = 1 temp = b; b = a a = temp #? int() b #? int() b #? str() a a = tuple if 1: a = list #? ['append'] a.append #? ['index'] a.index # ----------------- # tuples exchanges # ----------------- a, b = 1, "" #? int() a #? str() b b, a = a, b #? int() b #? str() a b, a = a, b #? int() a #? str() b # ----------------- # function # ----------------- def a(a=3): #? int() a #? [] a.func return a #? int() a(2) #? [] a(2).func a_param = 3 def func(a_param): # should not be int #? [] a_param. from os import path # should not return a function, because `a` is a function above def f(b, a): return a #? [] f(b=3) # ----------------- # class # ----------------- class A(object): a = "" a = 3 #? int() a a = list() def __init__(self): self.b = "" def before(self): self.b = 3 # TODO should this be so? #? int() str() list() self.b self.b = list self.a = 1 #? str() int() self.a #? ['after'] self.after self.c = 3 #? 
int() self.c def after(self): self.a = '' c = set() #? list() A.a a = A() #? ['after'] a.after #? [] a.upper #? [] a.append #? [] a.real #? str() int() a.a a = 3 class a(): def __init__(self, a): self.a = a #? float() a(1.0).a #? a().a # ----------------- # imports # ----------------- math = 3 import math #? ['cosh'] math.cosh #? [] math.real math = 3 #? int() math #? [] math.cos # do the same for star imports cosh = 3 from math import * # This doesn't work, but that's not a problem, star imports should be at the # start of EVERY script! ##? [] cosh.real cosh = 3 #? int() cosh jedi-0.7.0/test/completion/goto.py0000664000175000017500000000430412204171717017310 0ustar daviddavid00000000000000# goto_assignments command tests are different in syntax definition = 3 #! 0 ['a = definition'] a = definition #! [] b #! ['a = definition'] a b = a c = b #! ['c = b'] c cd = 1 #! 1 ['cd = c'] cd = c #! 0 ['cd = e'] cd = e #! ['module math'] import math #! ['import math'] math #! ['import math'] b = math #! ['b = math'] b class C(object): def b(self): #! ['b = math'] b #! ['def b'] self.b #! 14 ['def b'] self.b() #! 11 ['self'] self.b return 1 #! ['def b'] b #! ['b = math'] b #! ['def b'] C.b #! ['def b'] C().b #! 0 ['class C'] C().b #! 0 ['class C'] C().b D = C #! ['def b'] D.b #! ['def b'] D().b #! 0 ['D = C'] D().b #! 0 ['D = C'] D().b def c(): return '' #! ['def c'] c #! 0 ['def c'] c() # ----------------- # imports # ----------------- #! ['module import_tree'] import import_tree #! ["a = ''"] import_tree.a #! ['module mod1'] import import_tree.mod1 #! ['a = 1'] import_tree.mod1.a #! ['module pkg'] import import_tree.pkg #! ['a = list'] import_tree.pkg.a #! ['module mod1'] import import_tree.pkg.mod1 #! ['a = 1.0'] import_tree.pkg.mod1.a #! ["a = ''"] import_tree.a #! ['module mod1'] from import_tree.pkg import mod1 #! ['a = 1.0'] mod1.a #! ['module mod1'] from import_tree import mod1 #! ['a = 1'] mod1.a #! ['a = 1.0'] from import_tree.pkg.mod1 import a #! ['import os'] from .imports import os #! ['some_variable = 1'] from . import some_variable # ----------------- # anonymous classes # ----------------- def func(): class A(): def b(self): return 1 return A() #! 8 ['def b'] func().b() # ----------------- # on itself # ----------------- #! 7 ['class ClassDef'] class ClassDef(): """ abc """ pass # ----------------- # params # ----------------- param = ClassDef #! 8 ['param'] def ab1(param): pass #! 9 ['param'] def ab2(param): pass #! 11 ['param = ClassDef'] def ab3(a=param): pass ab1(ClassDef);ab2(ClassDef);ab3(ClassDef) # ----------------- # for loops # ----------------- for i in range(1): #! ['for i in range(1): i'] i for key, value in [(1,2)]: #! ['for key,value in [(1...'] key for i in []: #! ['for i in []: i'] i jedi-0.7.0/test/completion/types.py0000664000175000017500000000212312143361723017501 0ustar daviddavid00000000000000# ----------------- # non array # ----------------- #? ['imag'] int.imag #? [] int.is_integer #? ['is_integer'] float.is_int #? ['is_integer'] 1.0.is_integer #? ['upper'] "".upper #? ['upper'] r"".upper # strangely this didn't work, because the = is used for assignments #? ['upper'] "=".upper a = "=" #? ['upper'] a.upper # ----------------- # lists # ----------------- arr = [] #? ['append'] arr.app #? ['append'] list().app #? ['append'] [].append arr2 = [1,2,3] #? ['append'] arr2.app #? int() arr.count(1) # ----------------- # dicts # ----------------- dic = {} #? ['copy', 'clear'] dic.c dic2 = dict(a=1, b=2) #? ['pop', 'popitem'] dic2.p #? 
['popitem'] {}.popitem dic2 = {'asdf': 3} #? ['popitem'] dic2.popitem #? int() dic2['asdf'] # ----------------- # set # ----------------- set_t = {1,2} #? ['clear', 'copy'] set_t.c set_t2 = set() #? ['clear', 'copy'] set_t2.c # ----------------- # tuples # ----------------- tup = ('',2) #? ['count'] tup.c tup2 = tuple() #? ['index'] tup2.i #? ['index'] ().i tup3 = 1,"" #? ['index'] tup3.index tup4 = 1,"" #? ['index'] tup4.index jedi-0.7.0/test/completion/basic.py0000664000175000017500000000643212204171717017425 0ustar daviddavid00000000000000# ----------------- # cursor position # ----------------- #? 0 int int() #? 3 int int() #? 4 str int(str) # ----------------- # should not complete # ----------------- #? [] . #? [] str.. #? [] a(0):. # ----------------- # if/else/elif # ----------------- if 1: 1 elif(3): a = 3 else: a = '' #? int() str() a def func(): if 1: 1 elif(3): a = 3 else: a = '' #? int() str() return a #? int() str() func() # ----------------- # for loops # ----------------- for a in [1,2]: #? int() a for a1 in 1,"": #? int() str() a1 for a3, b3 in (1,""), (1,""), (1,""): #? int() a3 #? str() b3 for a4, (b4, c4) in (1,("", list)), (1,("", list)): #? int() a4 #? str() b4 #? list c4 a = [] for i in [1,'']: #? int() str() i a += [i] #? int() str() a[0] for i in list([1,'']): #? int() str() i #? int() str() for x in [1,'']: x a = [] b = [1.0,''] for i in b: a += [i] #? float() str() a[0] # ----------------- # range() # ----------------- for i in range(10): #? int() i # ----------------- # list comprehensions # ----------------- # basics: a = ['' for a in [1]] #? str() a[0] a = [a for a in [1]] #? int() a[0] a = [a for a in 1,2] #? int() a[0] a = [a for a,b in [(1,'')]] #? int() a[0] arr = [1,''] a = [a for a in arr] #? int() str() a[0] a = [a if 1.0 else '' for a in [1] if [1.0]] #? int() str() a[0] # ----------------- # nested list comprehensions # ----------------- b = [a for arr in [[1]] for a in arr] #? int() b[0] b = [a for arr in [[1]] if '' for a in arr if ''] #? int() b[0] b = [b for arr in [[[1.0]]] for a in arr for b in a] #? float() b[0] # jedi issue #26 #? list() a = [[int(v) for v in line.strip().split() if v] for line in ["123", "123", "123"] if line] #? list() a[0] #? int() a[0][0] # ----------------- # ternary operator # ----------------- a = 3 b = '' if a else set() #? str() set() b def ret(a): return ['' if a else set()] #? str() set() ret(1)[0] #? str() set() ret()[0] # ----------------- # with statements # ----------------- with open('') as f: #? ['closed'] f.closed with open('') as f1, open('') as f2: #? ['closed'] f1.closed #? ['closed'] f2.closed # ----------------- # global vars # ----------------- def global_define(): global global_var_in_func global_var_in_func = 3 #? int() global_var_in_func # ----------------- # within docstrs # ----------------- def a(): """ #? ['global_define'] global_define """ pass #? # str literals in comment """ upper # ----------------- # magic methods # ----------------- class A(object): pass class B(): pass #? ['__init__'] A.__init__ #? ['__init__'] B.__init__ #? ['__init__'] int().__init__ # ----------------- # comments # ----------------- class A(): def __init__(self): self.hello = {} # comment shouldn't be a string #? dict() A().hello # ----------------- # unicode # ----------------- a = 'smörbröd' #? str() a xyz = 'smörbröd.py' if 1: #? str() xyz # ----------------- # exceptions # ----------------- try: import math except ImportError as i_a: #? ['i_a'] i_a #? ImportError() i_a try: import math except ImportError, i_b: #? 
['i_b'] i_b #? ImportError() i_b jedi-0.7.0/test/completion/classes.py0000664000175000017500000001406012204171717017775 0ustar daviddavid00000000000000def find_class(): """ This scope is special, because its in front of TestClass """ #? ['ret'] TestClass.ret if 1: #? ['ret'] TestClass.ret class FindClass(): #? [] TestClass.ret if a: #? [] TestClass.ret def find_class(self): #? ['ret'] TestClass.ret if 1: #? ['ret'] TestClass.ret #? [] FindClass().find_class.self #? [] FindClass().find_class.self.find_class # set variables, which should not be included, because they don't belong to the # class second = 1 second = "" class TestClass(object): var_class = TestClass(1) def __init__(self2, first_param, second_param, third=1.0): self2.var_inst = first_param self2.second = second_param self2.first = first_param a = 3 def var_func(self): return 1 def get_first(self): # traversal self.second_new = self.second return self.var_inst def values(self): self.var_local = 3 #? ['var_class', 'var_func', 'var_inst', 'var_local'] self.var_ def ret(self, a1): # should not know any class functions! #? [] values #? ['return'] ret return a1 # should not work #? [] var_local #? [] var_inst #? [] var_func # instance inst = TestClass(1) #? ['var_class', 'var_func', 'var_inst', 'var_local'] inst.var #? ['var_class', 'var_func'] TestClass.var #? int() inst.var_local #? [] TestClass.var_local. #? int() TestClass().ret(1) #? int() inst.ret(1) myclass = TestClass(1, '', 3.0) #? int() myclass.get_first() #? [] myclass.get_first.real # too many params #? int() TestClass(1,1,1).var_inst # too few params #? int() TestClass(1).first #? [] TestClass(1).second. # complicated variable settings in class #? str() myclass.second #? str() myclass.second_new # multiple classes / ordering ints = TestClass(1, 1.0) strs = TestClass("", '') #? float() ints.second #? str() strs.second #? ['var_class'] TestClass.var_class.var_class.var_class.var_class # operations (+, *, etc) shouldn't be InstanceElements - #246 class A(): def __init__(self): self.addition = 1 + 2 #? int() A().addition # ----------------- # inheritance # ----------------- class Base(object): def method_base(self): return 1 class SuperClass(Base): class_super = 3 def __init__(self): self.var_super = '' def method_super(self): self.var2_super = list class Mixin(SuperClass): def method_mixin(self): return int class SubClass(SuperClass): class_sub = 3 def __init__(self): self.var_sub = '' def method_sub(self): self.var_sub = list return tuple instance = SubClass() #? ['method_base', 'method_sub', 'method_super'] instance.method_ #? ['var2_super', 'var_sub', 'var_super'] instance.var #? ['class_sub', 'class_super'] instance.class_ #? ['method_base', 'method_sub', 'method_super'] SubClass.method_ #? [] SubClass.var #? ['class_sub', 'class_super'] SubClass.class_ # ----------------- # __call__ # ----------------- class CallClass(): def __call__(self): return 1 #? int() CallClass()() # ----------------- # variable assignments # ----------------- class V: def __init__(self, a): self.a = a def ret(self): return self.a d = b b = ret if 1: c = b #? int() V(1).b() #? int() V(1).c() #? [] V(1).d() # ----------------- # ordering # ----------------- class A(): def b(self): #? int() a_func() #? str() self.a_func() return a_func() def a_func(self): return "" def a_func(): return 1 #? int() A().b() #? str() A().a_func() # ----------------- # nested classes # ----------------- class A(): class B(): pass def b(self): return 1.0 #? 
float() A().b() class A(): def b(self): class B(): def b(self): return [] return B().b() #? list() A().b() # ----------------- # recursions # ----------------- def Recursion(): def recurse(self): self.a = self.a self.b = self.b.recurse() #? Recursion().a #? Recursion().b # ----------------- # ducktyping # ----------------- def meth(self): return self.a, self.b class WithoutMethod(): a = 1 def __init__(self): self.b = 1.0 def blub(self): return self.b m = meth class B(): b = '' a = WithoutMethod().m() #? int() a[0] #? float() a[1] #? float() WithoutMethod.blub(WithoutMethod()) #? str() WithoutMethod.blub(B()) # ----------------- # __getattr__ / getattr() / __getattribute__ # ----------------- #? str().upper getattr(str(), 'upper') #? str.upper getattr(str, 'upper') # some strange getattr calls #? getattr(str, 1) #? getattr() #? getattr(str) #? getattr(getattr, 1) #? getattr(str, []) class Base(): def ret(self, b): return b class Wrapper(): def __init__(self, obj): self.obj = obj def __getattr__(self, name): return getattr(self.obj, name) class Wrapper2(): def __getattribute__(self, name): return getattr(Base(), name) #? int() Wrapper(Base()).ret(3) #? int() Wrapper2(Base()).ret(3) # ----------------- # private vars # ----------------- class PrivateVar(): def __init__(self): self.__var = 1 #? int() self.__var #? [] PrivateVar().__var #? PrivateVar().__var # ----------------- # super # ----------------- class Super(object): a = 3 class TestSuper(Super): #? super() def test(self): #? Super() super() #? ['a'] super().a if 1: #? Super() super() def a(): #? super() # ----------------- # if flow at class level # ----------------- class TestX(object): def normal_method(self): return 1 if True: def conditional_method(self): var = self.normal_method() #? int() var return 2 def other_method(self): var = self.conditional_method() #? int() var jedi-0.7.0/test/completion/keywords.py0000664000175000017500000000006712143361723020211 0ustar daviddavid00000000000000 #? ['raise'] raise #? ['except', 'Exception'] except jedi-0.7.0/test/completion/import_tree/0000775000175000017500000000000012204171764020320 5ustar daviddavid00000000000000jedi-0.7.0/test/completion/import_tree/random.py0000664000175000017500000000007612143361723022153 0ustar daviddavid00000000000000""" Here because random is also a builtin module. 
""" a = set jedi-0.7.0/test/completion/import_tree/pkg/0000775000175000017500000000000012204171764021101 5ustar daviddavid00000000000000jedi-0.7.0/test/completion/import_tree/pkg/__init__.py0000664000175000017500000000003512143361723023206 0ustar daviddavid00000000000000a = list from math import * jedi-0.7.0/test/completion/import_tree/pkg/mod1.py0000664000175000017500000000001012143361723022300 0ustar daviddavid00000000000000a = 1.0 jedi-0.7.0/test/completion/import_tree/__init__.py0000664000175000017500000000000712143361723022424 0ustar daviddavid00000000000000a = "" jedi-0.7.0/test/completion/import_tree/recurse_class2.py0000664000175000017500000000007312204171717023607 0ustar daviddavid00000000000000import recurse_class1 class C(recurse_class1.C): pass jedi-0.7.0/test/completion/import_tree/rename1.py0000664000175000017500000000005112143361723022214 0ustar daviddavid00000000000000""" used for renaming tests """ abc = 3 jedi-0.7.0/test/completion/import_tree/mod1.py0000664000175000017500000000005412143361723021527 0ustar daviddavid00000000000000a = 1 from import_tree.random import a as c jedi-0.7.0/test/completion/import_tree/recurse_class1.py0000664000175000017500000000012012204171717023577 0ustar daviddavid00000000000000import recurse_class2 class C(recurse_class2.C): def a(self): pass jedi-0.7.0/test/completion/import_tree/rename2.py0000664000175000017500000000007712143361723022225 0ustar daviddavid00000000000000""" used for renaming tests """ from rename1 import abc abc jedi-0.7.0/test/completion/import_tree/mod2.py0000664000175000017500000000003312204171717021525 0ustar daviddavid00000000000000from . import mod1 as fake jedi-0.7.0/test/completion/decorators.py0000664000175000017500000001135312204171717020507 0ustar daviddavid00000000000000# ----------------- # normal decorators # ----------------- def decorator(func): def wrapper(*args): return func(1, *args) return wrapper @decorator def decorated(a,b): return a,b exe = decorated(set, '') #? set exe[1] #? int() exe[0] # more complicated with args/kwargs def dec(func): def wrapper(*args, **kwargs): return func(*args, **kwargs) return wrapper @dec def fu(a, b, c, *args, **kwargs): return a, b, c, args, kwargs exe = fu(list, c=set, b=3, d='') #? list() exe[0] #? int() exe[1] #? set exe[2] #? [] exe[3][0] #? str() exe[4]['d'] exe = fu(list, set, 3, '', d='') #? str() exe[3][0] # ----------------- # multiple decorators # ----------------- def dec2(func2): def wrapper2(first_arg, *args2, **kwargs2): return func2(first_arg, *args2, **kwargs2) return wrapper2 @dec2 @dec def fu2(a, b, c, *args, **kwargs): return a, b, c, args, kwargs exe = fu2(list, c=set, b=3, d='str') #? list() exe[0] #? int() exe[1] #? set exe[2] #? [] exe[3][0] #? str() exe[4]['d'] # ----------------- # Decorator is a class # ----------------- class Decorator(object): def __init__(self, func): self.func = func def __call__(self, *args, **kwargs): return self.func(1, *args, **kwargs) @Decorator def nothing(a,b,c): return a,b,c #? int() nothing("")[0] #? str() nothing("")[1] @Decorator def nothing(a,b,c): return a,b,c class MethodDecoratorAsClass(): class_var = 3 @Decorator def func_without_self(arg, arg2): return arg, arg2 @Decorator def func_with_self(self, arg): return self.class_var #? int() MethodDecoratorAsClass().func_without_self('')[0] #? str() MethodDecoratorAsClass().func_without_self('')[1] #? 
MethodDecoratorAsClass().func_with_self(1) class SelfVars(): """Init decorator problem as an instance, #247""" @Decorator def __init__(self): """ init decorators should be ignored when looking up variables in the class. """ self.c = list @Decorator def shouldnt_expose_var(not_self): """ Even though in real Python this shouldn't expose the variable, in this case Jedi exposes the variable, because these kind of decorators are normally descriptors, which SHOULD be exposed (at least 90%). """ not_self.b = 1.0 def other_method(self): #? float() self.b #? list self.c # ----------------- # not found decorators (are just ignored) # ----------------- @not_found_decorator def just_a_func(): return 1 #? int() just_a_func() #? ['__closure__'] just_a_func.__closure__ class JustAClass: @not_found_decorator2 def a(self): return 1 #? ['__closure__'] JustAClass().a.__closure__ #? int() JustAClass().a() #? ['__closure__'] JustAClass.a.__closure__ #? int() JustAClass.a() # ----------------- # illegal decorators # ----------------- class DecoratorWithoutCall(): def __init__(self, func): self.func = func @DecoratorWithoutCall def f(): return 1 # cannot be resolved - should be ignored @DecoratorWithoutCall(None) def g(): return 1 #? f() #? int() g() # ----------------- # method decorators # ----------------- def dec(f): def wrapper(s): return f(s) return wrapper class MethodDecorators(): _class_var = 1 def __init__(self): self._method_var = '' @dec def constant(self): return 1.0 @dec def class_var(self): return self._class_var @dec def method_var(self): return self._method_var #? float() MethodDecorators().constant() #? int() MethodDecorators().class_var() #? str() MethodDecorators().method_var() class Base(): @not_existing def __init__(self): pass @not_existing def b(self): return '' @dec def c(self): return 1 class MethodDecoratorDoesntExist(Base): """#272 github: combination of method decorators and super()""" def a(self): #? super().__init__() #? str() super().b() #? int() super().c() #? float() self.d() @doesnt_exist def d(self): return 1.0 # ----------------- # others # ----------------- def memoize(function): def wrapper(*args): if 1: pass else: rv = function(*args) return rv return wrapper @memoize def follow_statement(stmt): return stmt # here we had problems with the else clause, because the parent was not right. #? int() follow_statement(1) # ----------------- # class decorators # ----------------- # class decorators should just be ignored @should_ignore class A(): def ret(self): return 1 #? int() A().ret() jedi-0.7.0/test/completion/named_param.py0000664000175000017500000000017012204171717020601 0ustar daviddavid00000000000000""" named params: >>> def a(abc): pass ... >>> a(abc=3) # <- this stuff """ def a(abc): pass #? 5 ['abc'] a(abc) jedi-0.7.0/test/completion/docstring.py0000664000175000017500000000307312204171717020336 0ustar daviddavid00000000000000""" Test docstrings in functions and classes, which are used to infer types """ # ----------------- # sphinx style # ----------------- def f(a, b, c, d): """ asdfasdf :param a: blablabla :type a: str :type b: (str, int) :type c: threading.Thread :type d: :class:`threading.Thread` :rtype: dict """ #? str() a #? str() b[0] #? int() b[1] #? ['join'] c.join #? ['join'] d.join #? dict() f() # wrong declarations def f(a, b): """ :param a: Forgot type declaration :type a: :param b: Just something :type b: `` :rtype: """ #? a #? b #? 
f() # ----------------- # epydoc style # ----------------- def e(a, b): """ asdfasdf @type a: str @param a: blablabla @type b: (str, int) @param b: blablah @rtype: list """ #? str() a #? str() b[0] #? int() b[1] #? list() e() # Returns with param type only def rparam(a,b): """ @type a: str """ return a #? str() rparam() # Composite types def composite(): """ @rtype: (str, int, dict) """ x, y, z = composite() #? str() x #? int() y #? dict() z # Both docstring and calculated return type def both(): """ @rtype: str """ return 23 #? str(), int() both() class Test(object): def __init__(self): self.teststr = "" """ # jedi issue #210 """ def test(self): #? ['teststr'] self.teststr # ----------------- # statement docstrings # ----------------- d = '' """ bsdf """ #? str() d.upper() jedi-0.7.0/test/completion/usages.py0000664000175000017500000001017212204171717017627 0ustar daviddavid00000000000000""" Renaming tests. This means searching for usages. I always leave a little bit of space to add room for additions, because the results always contain position information. """ #< 4 (0,4), (3,0), (5,0) def abc(): pass #< 0 (-3,4), (0,0), (2,0) abc.d.a.bsaasd.abc.d abc abc = #< (-3,0), (0,0) abc Abc = 3 #< 6 (0,6), (2,4), (5,8), (17,0) class Abc(): #< (-2,6), (0,4), (3,8), (15,0) Abc def Abc(self): Abc; self.c = 3 #< 17 (0,16), (2,8) def a(self, Abc): #< 10 (-2,16), (0,8) Abc #< 19 (0,18), (2,8) def self_test(self): #< 12 (-2,18), (0,8) self.b Abc.d.Abc #< 4 (0,4), (4,1) def blub(): #< (-4,4), (0,1) @blub def a(): pass #< 0 (0,0), (1,0) set_object_var = object() set_object_var.var = 1 response = 5 #< 0 (0,0), (1,0), (2,0), (4,0) response = HttpResponse(mimetype='application/pdf') response['Content-Disposition'] = 'attachment; filename=%s.pdf' % id response.write(pdf) #< (-4,0), (-3,0), (-2,0), (0,0) response # ----------------- # imports # ----------------- #< (0,7), (3,0) import module_not_exists #< (-3,7), (0,0) module_not_exists #< ('rename1', 1,0), (0,24), (3,0), (6,17), ('rename2', 4,5), (10,17), (13,17) from import_tree import rename1 #< (0,8), ('rename1',3,0), ('rename2',4,20), ('rename2',6,0), (3,32), (7,32), (4,0) rename1.abc #< (-3,8), ('rename1', 3,0), ('rename2', 4,20), ('rename2', 6,0), (0,32), (4,32), (1,0) from import_tree.rename1 import abc abc #< 20 ('rename1', 1,0), ('rename2', 4,5), (-10,24), (-7,0), (-4,17), (0,17), (3,17) from import_tree.rename1 import abc #< (0, 32), from import_tree.rename1 import not_existing # shouldn't work #< from not_existing import * # ----------------- # classes # ----------------- class TestMethods(object): #< 8 (0,8), (2,13) def a_method(self): #< 13 (-2,8), (0,13) self.a_method() #< 13 (2,8), (0,13), (3,13) self.b_method() def b_method(self): self.b_method class TestClassVar(object): #< 4 (0,4), (5,13), (7,21) class_v = 1 def a(self): class_v = 1 #< (-5,4), (0,13), (2,21) self.class_v #< (-7,4), (-2,13), (0,21) TestClassVar.class_v #< (0,8), (-7, 8) class_v class TestInstanceVar(): def a(self): #< 13 (4,13), (0,13) self._instance_var = 3 def b(self): #< (-4,13), (0,13) self._instance_var class NestedClass(): def __getattr__(self, name): return self # Shouldn't find a definition, because there's no name defined (used ``getattr``).
#< (0, 14), NestedClass().instance # ----------------- # inheritance # ----------------- class Super(object): #< 4 (0,4), (23,18), (25,13) base_class = 1 #< 4 (0,4), class_var = 1 #< 8 (0,8), def base_method(self): #< 13 (0,13), (20,13) self.base_var = 1 #< 13 (0,13), (24,13), (29,13) self.instance_var = 1 #< 8 (0,8), def just_a_method(self): pass #< 20 (0,16), (-18,6) class TestClass(Super): #< 4 (0,4), class_var = 1 def x_method(self): #< (0,18), (2,13), (-23,4) TestClass.base_class #< (-2,18), (0,13), (-25,4) self.base_class #< (-20,13), (0,13) self.base_var #< TestClass.base_var #< 13 (5,13), (0,13) self.instance_var = 3 #< 9 (0,8), def just_a_method(self): #< (-5,13), (0,13), (-29,13) self.instance_var # ----------------- # properties # ----------------- class TestProperty: @property #< 10 (0,8), (5,13) def prop(self): return 1 def a(self): #< 13 (-5,8), (0,13) self.prop @property #< 13 (0,8), (4,5) def rw_prop(self): return self._rw_prop #< 8 (-4,8), (0,5) @rw_prop.setter #< 8 (0,8), (5,13) def rw_prop(self, value): self._rw_prop = value def b(self): #< 13 (-5,8), (0,13) self.rw_prop # ----------------- # *args, **kwargs # ----------------- #< 11 (1,11), (0,8) def f(**kwargs): return kwargs # ----------------- # No result # ----------------- if isinstance(j, int): #< j jedi-0.7.0/test/completion/thirdparty/0000775000175000017500000000000012204171764020161 5ustar daviddavid00000000000000jedi-0.7.0/test/completion/thirdparty/django_.py0000664000175000017500000000032412143361723022131 0ustar daviddavid00000000000000#! ['class ObjectDoesNotExist'] from django.core.exceptions import ObjectDoesNotExist import django #? ['get_version'] django.get_version from django.conf import settings #? ['configured'] settings.configured jedi-0.7.0/test/completion/thirdparty/PyQt4_.py0000664000175000017500000000050012143361723021644 0ustar daviddavid00000000000000from PyQt4.QtCore import * from PyQt4.QtGui import * #? ['QActionGroup'] QActionGroup #? ['currentText'] QStyleOptionComboBox().currentText #? [] QStyleOptionComboBox().currentText. from PyQt4 import QtGui #? ['currentText'] QtGui.QStyleOptionComboBox().currentText #? [] QtGui.QStyleOptionComboBox().currentText. jedi-0.7.0/test/completion/thirdparty/jedi_.py0000664000175000017500000000270212204171717021604 0ustar daviddavid00000000000000 from jedi import functions, evaluate, parsing el = functions.completions()[0] #? ['description'] el.description #? str() el.description scopes, path, dot, like = \ api._prepare_goto(source, row, column, path, True) # has problems with that (sometimes) very deep nesting. #? set() el = scopes # get_names_for_scope is also recursion stuff #? tuple() el = list(evaluate.get_names_for_scope())[0] #? int() parsing.Module() el = list(evaluate.get_names_for_scope(1))[0][0] #? parsing.Module() el = list(evaluate.get_names_for_scope())[0][0] #? list() el = list(evaluate.get_names_for_scope(1))[0][1] #? list() el = list(evaluate.get_names_for_scope())[0][1] #? list() parsing.Scope((0,0)).get_set_vars() #? parsing.Import() parsing.Name() parsing.Scope((0,0)).get_set_vars()[0] # TODO access parent is not possible, because that is not set in the class ## parsing.Class() parsing.Scope((0,0)).get_set_vars()[0].parent #? parsing.Import() parsing.Name() el = list(evaluate.get_names_for_scope())[0][1][0] #? 
evaluate.Array() evaluate.Class() evaluate.Function() evaluate.Instance() list(evaluate.follow_call())[0] # With the right recursion settings, this should be possible (and maybe more): # Array Class Function Generator Instance Module # However, this was produced with the recursion settings 10/350/10000, and # lasted 18.5 seconds. So we just have to be content with the results. #? evaluate.Class() evaluate.Function() evaluate.get_scopes_for_name()[0] jedi-0.7.0/test/completion/thirdparty/psycopg2_.py0000664000175000017500000000020612143361723022434 0ustar daviddavid00000000000000import psycopg2 conn = psycopg2.connect('dbname=test') #? ['cursor'] conn.cursor cur = conn.cursor() #? ['fetchall'] cur.fetchall jedi-0.7.0/test/completion/thirdparty/pylab_.py0000664000175000017500000000104012143361723021772 0ustar daviddavid00000000000000import pylab # two gotos #! ['module numpy'] import numpy #! ['module random'] import numpy.random #? ['array2string'] numpy.array2string #? ['shape'] numpy.matrix().shape #? ['random_integers'] pylab.random_integers #? [] numpy.random_integers #? ['random_integers'] numpy.random.random_integers #? ['sample'] numpy.random.sample import numpy na = numpy.array([1,2]) #? ['shape'] na.shape # shouldn't raise an error #29, jedi-vim # doesn't return something, because matplotlib uses __import__ fig = pylab.figure() #? fig.add_subplot jedi-0.7.0/test/completion/definition.py0000664000175000017500000000171712204171717020475 0ustar daviddavid00000000000000""" Fallback to callee definition when definition not found. - https://github.com/davidhalter/jedi/issues/131 - https://github.com/davidhalter/jedi/pull/149 """ """Parenthesis closed at next line.""" #? isinstance isinstance( ) #? isinstance isinstance( ) #? isinstance isinstance(None, ) #? isinstance isinstance(None, ) """Parenthesis closed at same line.""" # Note: len('isinstance(') == 11 #? 11 isinstance isinstance() # Note: len('isinstance(None,') == 16 ##? 16 isinstance isinstance(None,) # Note: len('isinstance(None,') == 16 ##? 16 isinstance isinstance(None, ) # Note: len('isinstance(None, ') == 17 ##? 17 isinstance isinstance(None, ) # Note: len('isinstance( ') == 12 ##? 12 isinstance isinstance( ) """Unclosed parenthesis.""" #? isinstance isinstance( def x(): pass # acts like EOF ##? isinstance isinstance( def x(): pass # acts like EOF #? isinstance isinstance(None, def x(): pass # acts like EOF ##? isinstance isinstance(None, jedi-0.7.0/test/completion/isinstance.py0000664000175000017500000000131212143361723020474 0ustar daviddavid00000000000000if isinstance(i, str): #? str() i if isinstance(j, (str, int)): #? str() int() j while isinstance(k, (str, int)): #? str() int() k if not isinstance(k, (str, int)): #? k while not isinstance(k, (str, int)): #? k assert isinstance(ass, int): #? int() ass assert isinstance(ass, str): assert not isinstance(ass, int): if 2: #? str() ass # ----------------- # in functions # ----------------- import datetime def fooooo(obj): if isinstance(obj, datetime.datetime): #? datetime.datetime obj def fooooo2(obj): if isinstance(obj, datetime.datetime): return obj else: return 1 #? int() datetime.datetime fooooo2('') jedi-0.7.0/test/completion/invalid.py0000664000175000017500000000342312204171717017767 0ustar daviddavid00000000000000""" This file is less about the results and much more about the fact, that no exception should be thrown. Basically this file could change depending on the current implementation. But there should never be any errors. 
""" # wait until keywords are out of definitions (pydoc function). ##? 5 's'() #? ['upper'] str()).upper # ----------------- # funcs # ----------------- def asdf(a or b): # multiple param names return a #? int() asdf(2) from a import (b def blub(): return 0 def openbrace(): asdf = 3 asdf asdf( #? int() asdf return 1 #? int() openbrace() blub([ #? int() openbrace() def indentfault(): asd( indentback #? [] indentfault(). def openbrace2(): asd( def normalfunc(): return 1 #? int() normalfunc() # dots in param def f(seq1...=None): return seq1 #? int() f(1) @ def test_empty_decorator(): return 1 #? int() test_empty_decorator() # ----------------- # flows # ----------------- # first part not complete (raised errors) if a a else: #? ['AttributeError'] AttributeError try #? ['AttributeError'] except AttributeError pass finally: pass #? ['isinstance'] if isi try: except TypeError: #? str() "" # wrong ternary expression a = 1 if #? int() a for for_local in : for_local #? ['for_local'] for_local #? for_local # ----------------- # list comprehensions # ----------------- a2 = [for a2 in [0]] #? a2[0] a3 = [for xyz in] #? a3[0] a3 = [a4 for in 'b'] #? str() a3[0] a3 = [a4 for a in for x in y] #? a3[0] a = [for a in def break(): pass #? a[0] a = [a for a in [1,2] def break(): pass #? int() a[0] #? [] int()). # ----------------- # keywords # ----------------- #! [] as def empty_assert(): x = 3 assert #? int() x import datetime as jedi-0.7.0/test/completion/sys_path.py0000664000175000017500000000070012143361723020166 0ustar daviddavid00000000000000 import sys import os from os import dirname sys.path.insert(0, '../../jedi') sys.path.append(dirname(os.path.abspath('thirdparty' + os.path.sep + 'asdf'))) # modifications, that should fail: # because of sys module sys.path.append(sys.path[1] + '/thirdparty') # syntax err sys.path.append('a' +* '/thirdparty') #? ['evaluate', 'evaluate_representation'] import evaluate #? ['goto'] evaluate.goto #? ['jedi_'] import jedi_ #? ['el'] jedi_.el jedi-0.7.0/test/completion/generators.py0000664000175000017500000000267212143361723020517 0ustar daviddavid00000000000000# ----------------- # yield statement # ----------------- def gen(): yield 1 yield "" gen_exe = gen() #? int() str() next(gen_exe) #? int() str() list next(gen_exe, list) def gen_ret(value): yield value #? int() next(gen_ret(1)) #? [] next(gen_ret()) # ----------------- # generators should not be indexable # ----------------- def get(param): yield 1 yield "" #? [] get()[0] # ----------------- # __iter__ # ----------------- for a in get(): #? int() str() a class Get(): def __iter__(self): yield 1 yield "" b = [] for a in Get(): #? int() str() a b += [a] #? list() b #? int() str() b[0] g = iter(Get()) #? int() str() next(g) g = iter([1.0]) #? float() next(g) # ----------------- # __next__ # ----------------- class Counter: def __init__(self, low, high): self.current = low self.high = high def __iter__(self): return self def next(self): """ need to have both __next__ and next, because of py2/3 testing """ return self.__next__() def __next__(self): if self.current > self.high: raise StopIteration else: self.current += 1 return self.current - 1 for c in Counter(3, 8): #? int() print c # ----------------- # tuples # ----------------- def gen(): if a: yield 1, "" else: yield 2, 1.0 a, b = next(gen()) #? int() a #? str() float() b jedi-0.7.0/test/completion/dynamic.py0000664000175000017500000001241312204171717017764 0ustar daviddavid00000000000000""" This is used for dynamic object completion. 
Jedi tries to guess the types with a backtracking approach. """ def func(a): #? int() str() return a #? int() func(1) func int(1) + (int(2))+ func('') # Again the same function, but with another call. def func(a): #? float() return a func(1.0) # Again the same function, but with no call. def func(a): #? return a def func(a): #? float() return a str(func(1.0)) # ----------------- # *args, **args # ----------------- def arg(*args): #? tuple() args #? int() args[0] arg(1,"") # ----------------- # decorators # ----------------- def def_func(f): def wrapper(*args, **kwargs): return f(*args, **kwargs) return wrapper @def_func def func(c): #? str() return c #? str() func("str") @def_func def func(c=1): #? int() float() return c func(1.0) # Needs to be here, because in this case func is an import -> shouldn't lead to # exceptions. import sys as func func.sys # ----------------- # classes # ----------------- class A(): def __init__(self, a): #? str() a A("s") class A(): def __init__(self, a): #? int() a self.a = a def test(self, a): #? float() a self.c = self.test2() def test2(self): #? int() return self.a def test3(self): #? int() self.test2() #? int() self.c A(3).test(2.0) A(3).test2() # ----------------- # list.append # ----------------- arr = [] for a in [1,2]: arr.append(a); arr.append # should not cause an exception arr.append() # should not cause an exception #? int() arr[10] arr = [tuple()] for a in [1,2]: arr.append(a); #? int() tuple() arr[10] #? int() arr[10].index() arr = list([]) arr.append(1) #? int() arr[0] # ----------------- # list.insert # ----------------- arr = [""] arr.insert(0, 1.0) # on exception due to this, please! arr.insert(0) arr.insert() #? float() str() arr[10] for a in arr: #? float() str() a #? float() str() list(arr)[10] # ----------------- # set.add # ----------------- st = {1.0} for a in [1,2]: st.add(a) st.append('') # lists should not have an influence st.add # should not cause an exception st.add() # ----------------- # list.extend / set.update # ----------------- arr = [1.0] arr.extend([1,2,3]) arr.extend([]) arr.extend("") # should ignore #? float() int() arr[100] a = set(arr) a.update(list(["", 1])) #? float() int() str() list(a)[0] # ----------------- # set/list initialized as functions # ----------------- st = set() st.add(1) #? int() for s in st: s lst = list() lst.append(1) #? int() for i in lst: i # ----------------- # renames / type changes # ----------------- arr = [] arr2 = arr arr2.append('') #? str() arr2[0] st = {1.0} st.add(1) lst = list(st) lst.append('') #? float() int() str() lst[0] lst = [1] lst.append(1.0) s = set(lst) s.add("") lst = list(s) lst.append({}) #? dict() int() float() str() lst[0] # should work with tuple conversion, too. #? dict() int() float() str() tuple(lst)[0] # but not with an iterator #? iter(lst)[0] # ----------------- # complex including += # ----------------- class C(): pass class D(): pass class E(): pass lst = [1] lst.append(1.0) lst += [C] s = set(lst) s.add("") s += [D] lst = list(s) lst.append({}) lst += [E] ##? dict() int() float() str() C D E lst[0] # ----------------- # functions # ----------------- def arr_append(arr4, a): arr4.append(a) def add_to_arr(arr2, a): arr2.append(a) return arr2 def app(a): arr3.append(a) arr3 = [1.0] res = add_to_arr(arr3, 1) arr_append(arr3, 'str') app(set()) #? float() str() int() set() arr3[10] #? float() str() int() set() res[10] # ----------------- # returns, special because the module dicts are not correct here. # ----------------- def blub(): a = [] a.append(1.0) #? 
float() a[0] return a #? float() blub()[0] # list with default def blub(): a = list([1]) a.append(1.0) return a #? int() float() blub()[0] # empty list def blub(): a = list() a.append(1.0) return a #? float() blub()[0] # with if def blub(): if 1: a = [] a.append(1.0) return a #? float() blub()[0] # with else clause def blub(): if 1: 1 else: a = [] a.append(1) return a #? int() blub()[0] # ----------------- # returns, the same for classes # ----------------- class C(): def blub(self, b): if 1: a = [] a.append(b) return a def blub2(self): """ mapper function """ a = self.blub(1.0) #? float() a[0] return a def class_arr(self, el): self.a = [] self.a.append(el) #? int() self.a[0] return self.a #? int() C().blub(1)[0] #? float() C().blub2(1)[0] #? int() C().a[0] #? int() C().class_arr(1)[0] # ----------------- # array recursions # ----------------- a = set([1.0]) a.update(a) a.update([1]) #? float() int() list(a)[0] def first(a): b = [] b.append(a) b.extend(second(a)) return list(b) def second(a): b = [] b.extend(first(a)) return list(b) #? float() first(1.0)[0] def third(): b = [] b.extend extend() b.extend(first()) return list(b) #? third()[0] jedi-0.7.0/test/completion/arrays.py0000664000175000017500000000705112204171717017643 0ustar daviddavid00000000000000# ----------------- # basic array lookups # ----------------- #? int() [1,""][0] #? str() [1,""][1] #? int() str() [1,""][2] #? int() str() [1,""][20] #? int() str() [1,""][str(hello)] a = list() #? list() [a][0] #? list() [[a,a,a]][2][100] c = [[a,""]] #? str() c[0][1] b = [6,7] #? int() b[8-7] #? list() b[8:] #? list() b[int():] # ----------------- # iterable multiplication # ----------------- a = ['']*2 #? list() a a = 2*2 #? int() a a = "a"*3 #? str() a # ----------------- # tuple assignments # ----------------- a1, b1 = (1, "") #? int() a1 #? str() b1 (a2, b2) = (1, "") #? int() a2 #? str() b2 # list assignment [list1, list2] = (1, "") #? int() list1 #? str() list2 [list3, list4] = [1, ""] #? int() list3 #? str() list4 # ----------------- # subtuple assignment # ----------------- (a3, (b3, c3)) = (1, ("", list)) #? list c3 a4, (b4, c4) = (1, ("", list)) #? list c4 #? int() a4 #? str() b4 # ----------------- # multiple assignments # ----------------- a = b = 1 #? int() a #? int() b (a, b) = (c, (e, f)) = ('2', (3, 4)) #? str() a #? tuple() b #? str() c #? int() e #? int() f # ----------------- # unnecessary braces # ----------------- #? int() (1) #? int() ((1)) #? int() ((1)+1) u, v = 1, "" #? int() u ((u1, v1)) = 1, "" #? int() u1 #? int() (u1) (a), b = 1, '' #? int() a def a(): return '' #? str() (a)() #? str() (a)().replace() #? int() (tuple).index() #? int() (tuple)().index() class C(): def __init__(self): self.a = (str()).upper() #? str() C().a # ----------------- # imbalanced sides # ----------------- (f, g) = (1,) #? int() f #? [] g. (f, g, h) = (1,'') #? int() f #? str() g #? [] h. (f1, g1) = 1 #? [] f1. #? [] g1. (f, g) = (1,'',1.0) #? int() f #? str() g # ----------------- # dicts # ----------------- dic2 = {'asdf': 3, 'b': 'str'} #? int() dic2['asdf'] # string literal #? int() dic2[r'asdf'] #? int() dic2[r'asdf'] #? int() str() dic2['just_something'] def f(): """ github #83 """ r = {} r['status'] = (200, 'ok') return r #? dict() f() # completion within dicts #? 9 ['str'] {str: str} # iteration problem (detected with sith) d = dict({'a':''}) def y(a): return a #? y(**d) # problem with more complicated casts dic = {str(key): ''} #?
str() dic[''] # ----------------- # with variable as index # ----------------- a = (1, "") index = 1 #? str() a[index] # these should just output the whole array index = int #? int() str() a[index] index = int() #? int() str() a[index] # dicts index = 'asdf' dic2 = {'asdf': 3, 'b': 'str'} #? int() dic2[index] # ----------------- # __getitem__ # ----------------- class GetItem(): def __getitem__(self, index): return 1.0 #? float() GetItem()[0] class GetItem(): def __init__(self, el): self.el = el def __getitem__(self, index): return self.el #? str() GetItem("")[1] # ----------------- # conversions # ----------------- a = [1, ""] #? int() str() list(a)[1] #? int() str() list(a)[0] #? set(a)[0] #? int() str() list(set(a))[1] #? int() str() list(list(set(a)))[1] # does not yet work, because the recursion catching is not good enough (catches # too much) #? int() str() list(set(list(set(a))))[1] #? int() str() list(set(set(a)))[1] # frozenset #? int() str() list(frozenset(a))[1] #? int() str() list(set(frozenset(a)))[1] # iter #? int() str() list(iter(a))[1] #? int() str() list(iter(list(set(a))))[1] # tuple #? int() str() tuple(a)[1] #? int() str() tuple(list(set(a)))[1] #? int() tuple({1})[0] #? int() tuple((1,))[0] # implementation detail for lists, should not be visible #? [] list().__iterable jedi-0.7.0/test/completion/imports.py0000664000175000017500000001077212204171717020043 0ustar daviddavid00000000000000# ----------------- # own structure # ----------------- # do separate scopes def scope_basic(): from import_tree import mod1 #? int() mod1.a #? [] import_tree.a #? [] import_tree.mod1 import import_tree #? str() import_tree.a #? [] import_tree.mod1 def scope_pkg(): import import_tree.mod1 #? str() import_tree.a #? ['mod1'] import_tree.mod1 #? int() import_tree.mod1.a def scope_nested(): import import_tree.pkg.mod1 #? str() import_tree.a #? list import_tree.pkg.a #? ['sqrt'] import_tree.pkg.sqrt #? ['a', 'pkg'] import_tree. #? float() import_tree.pkg.mod1.a import import_tree.random #? set import_tree.random.a def scope_nested2(): """Multiple modules should be indexable, if imported""" import import_tree.mod1 import import_tree.pkg #? ['mod1'] import_tree.mod1 #? ['pkg'] import_tree.pkg #? [] import_tree.rename1 def from_names(): #? ['mod1'] from import_tree.pkg. #? ['path'] from os. def builtin_test(): #? ['math'] import math def scope_from_import_variable(): """ All of them shouldn't work, because "fake" imports don't work in Python without the use of ``sys.modules`` modifications (e.g. ``os.path``); see also github issue #213 for clarification. """ #? from import_tree.mod2.fake import a #? from import_tree.mod2.fake import c #? a #? c def scope_from_import_variable_with_parenthesis(): from import_tree.mod2.fake import ( a, c ) #? a #? c # ----------------- # std lib modules # ----------------- import tokenize #? ['tok_name'] tokenize.tok_name from pyclbr import * #? ['readmodule_ex'] readmodule_ex import os #? ['dirname'] os.path.dirname #? os.path.join from os.path import join from os.path import ( expanduser ) #? os.path.expanduser expanduser from itertools import (tee, islice) #? ['islice'] islice from functools import (partial, wraps) #? ['wraps'] wraps from keyword import kwlist, \ iskeyword #? ['kwlist'] kwlist #? [] from keyword import not_existing1, not_existing2 from tokenize import io tokenize.generate_tokens # ----------------- # builtins # ----------------- import sys #? ['prefix'] sys.prefix #? ['append'] sys.path.append from math import * #?
['cos', 'cosh'] cos def func_with_import(): import time return time #? ['sleep'] func_with_import().sleep # ----------------- # completions within imports # ----------------- #? ['sqlite3'] import sqlite3 #? ['classes'] import classes #? ['timedelta'] from datetime import timedel # should not be possible, because names can only be looked up 1 level deep. #? [] from datetime.timedelta import resolution #? [] from datetime.timedelta import #? ['Cursor'] from sqlite3 import Cursor # ----------------- # relative imports # ----------------- from .import_tree import mod1 #? int() mod1.a from ..import_tree import mod1 #? mod1.a from .......import_tree import mod1 #? mod1.a from .. import helpers #? int() helpers.sample_int from ..helpers import sample_int as f #? int() f from . import run #? [] run. from . import import_tree as imp_tree #? str() imp_tree.a from . import datetime as mod1 #? [] mod1. #? str() imp_tree.a #? ['some_variable'] from . import some_variable #? ['arrays'] from . import arrays #? [] from . import import_tree as ren # ----------------- # special positions -> edge cases # ----------------- import datetime #? 6 datetime from datetime.time import time #? [] import datetime. #? [] import datetime.date #? 18 ['import'] from import_tree. import pkg #? 17 ['mod1', 'mod2', 'random', 'pkg', 'rename1', 'rename2', 'recurse_class1', 'recurse_class2'] from import_tree. import pkg #? 18 ['pkg'] from import_tree.p import pkg #? 17 ['import_tree'] from .import_tree import #? 10 ['run'] from ..run import #? ['run'] from .. import run #? [] from not_a_module import # self import # this can cause recursions from imports import * #137 import json #? 23 json.dump from json import load, dump #? 17 json.load from json import load, dump # without the from clause: import json, datetime #? 7 json import json, datetime #? 13 datetime import json, datetime # ----------------- # packages # ----------------- from import_tree.mod1 import c #? set c from import_tree import recurse_class1 #? ['a'] recurse_class1.C.a # github #239 RecursionError #? ['a'] recurse_class1.C().a jedi-0.7.0/test/test_absolute_import.py0000664000175000017500000000240612204171717020437 0ustar daviddavid00000000000000""" Tests ``from __future__ import absolute_import`` (only important for Python 2.X) """ import jedi from jedi.parsing import Parser from . import helpers def test_explicit_absolute_imports(): """ Detect modules with ``from __future__ import absolute_import``. """ parser = Parser("from __future__ import absolute_import", "test.py") assert parser.module.has_explicit_absolute_import def test_no_explicit_absolute_imports(): """ Detect modules without ``from __future__ import absolute_import``. """ parser = Parser("1", "test.py") assert not parser.module.has_explicit_absolute_import def test_dont_break_imports_without_namespaces(): """ The code checking for ``from __future__ import absolute_import`` shouldn't assume that all imports have non-``None`` namespaces. 
""" src = "from __future__ import absolute_import\nimport xyzzy" parser = Parser(src, "test.py") assert parser.module.has_explicit_absolute_import @helpers.cwd_at("test/absolute_import") def test_can_complete_when_shadowing(): filename = "unittest.py" with open(filename) as f: lines = f.readlines() src = "".join(lines) script = jedi.Script(src, len(lines), len(lines[1]), filename) assert script.completions() jedi-0.7.0/test/test_jedi_system.py0000664000175000017500000000351212204171717017545 0ustar daviddavid00000000000000""" Test the Jedi "System" which means for example to test if imports are correctly used. """ import os import inspect import jedi def test_settings_module(): """ jedi.settings and jedi.cache.settings must be the same module. """ from jedi import cache from jedi import settings assert cache.settings is settings def test_no_duplicate_modules(): """ Make sure that import hack works as expected. Jedi does an import hack (see: jedi/__init__.py) to have submodules with circular dependencies. The modules in this circular dependency "loop" must be imported by ``import `` rather than normal ``from jedi import `` (or ``from . jedi ...``). This test make sure that this is satisfied. See also: - `#160 `_ - `#161 `_ """ import sys jedipath = os.path.dirname(os.path.abspath(jedi.__file__)) def is_submodule(m): try: filepath = m.__file__ except AttributeError: return False return os.path.abspath(filepath).startswith(jedipath) modules = list(filter(is_submodule, sys.modules.values())) top_modules = [m for m in modules if not m.__name__.startswith('jedi.')] for m in modules: if m is jedi: # py.test automatically improts `jedi.*` when --doctest-modules # is given. So this test cannot succeeds. continue for tm in top_modules: try: imported = getattr(m, tm.__name__) except AttributeError: continue if inspect.ismodule(imported): # module could have a function with the same name, e.g. # `keywords.keywords`. assert imported is tm jedi-0.7.0/test/test_integration_import.py0000664000175000017500000000540712204171717021150 0ustar daviddavid00000000000000""" Tests of various import related things that could not be tested with "Black Box Tests". """ import itertools from jedi import Script from .helpers import cwd_at def test_goto_definition_on_import(): assert Script("import sys_blabla", 1, 8).goto_definitions() == [] assert len(Script("import sys", 1, 8).goto_definitions()) == 1 @cwd_at('jedi') def test_complete_on_empty_import(): # should just list the files in the directory assert 10 < len(Script("from .", path='').completions()) < 30 assert 10 < len(Script("from . import", 1, 5, '').completions()) < 30 assert 10 < len(Script("from . 
import classes", 1, 5, '').completions()) < 30 assert len(Script("import").completions()) == 0 assert len(Script("import import", path='').completions()) > 0 # 111 assert Script("from datetime import").completions()[0].name == 'import' assert Script("from datetime import ").completions() def test_imports_on_global_namespace_without_path(): """If the path is None, there shouldn't be any import problem""" completions = Script("import operator").completions() assert [c.name for c in completions] == ['operator'] completions = Script("import operator", path= 'example.py').completions() assert [c.name for c in completions] == ['operator'] # the first one has a path the second doesn't completions = Script("import keyword", path='example.py').completions() assert [c.name for c in completions] == ['keyword'] completions = Script("import keyword").completions() assert [c.name for c in completions] == ['keyword'] def test_named_import(): """named import - jedi-vim issue #8""" s = "import time as dt" assert len(Script(s, 1, 15, '/').goto_definitions()) == 1 assert len(Script(s, 1, 10, '/').goto_definitions()) == 1 def test_goto_following_on_imports(): s = "import multiprocessing.dummy; multiprocessing.dummy" g = Script(s).goto_assignments() assert len(g) == 1 assert (g[0].line, g[0].column) != (0, 0) def test_after_from(): def check(source, result, column=None): completions = Script(source, column=column).completions() assert [c.name for c in completions] == result check('from os ', ['import']) check('\nfrom os ', ['import']) check('\nfrom os import whatever', ['import'], len('from os im')) check('from os\\\n', ['import']) check('from os \\\n', ['import']) def test_follow_definition(): """ github issue #45 """ c = Script("from datetime import timedelta; timedelta").completions() # type can also point to import, but there will be additional # attributes objs = itertools.chain.from_iterable(r.follow_definition() for r in c) types = [o.type for o in objs] assert 'import' not in types and 'class' in types jedi-0.7.0/test/test_unicode.py0000664000175000017500000000320012204171717016646 0ustar daviddavid00000000000000# -*- coding: utf-8 -*- """ All character set and unicode related tests. """ from jedi import Script from jedi._compatibility import utf8, unicode def test_unicode_script(): """ normally no unicode objects are being used. (<=2.7) """ s = unicode("import datetime; datetime.timedelta") completions = Script(s).completions() assert len(completions) assert type(completions[0].description) is unicode s = utf8("author='öä'; author") completions = Script(s).completions() x = completions[0].description assert type(x) is unicode s = utf8("#-*- coding: iso-8859-1 -*-\nauthor='öä'; author") s = s.encode('latin-1') completions = Script(s).completions() assert type(completions[0].description) is unicode def test_unicode_attribute(): """ github jedi-vim issue #94 """ s1 = utf8('#-*- coding: utf-8 -*-\nclass Person():\n' ' name = "e"\n\nPerson().name.') completions1 = Script(s1).completions() assert 'strip' in [c.name for c in completions1] s2 = utf8('#-*- coding: utf-8 -*-\nclass Person():\n' ' name = "é"\n\nPerson().name.') completions2 = Script(s2).completions() assert 'strip' in [c.name for c in completions2] def test_multibyte_script(): """ `jedi.Script` must accept multi-byte string source. 
""" try: code = unicode("import datetime; datetime.d") comment = utf8("# multi-byte comment あいうえおä") s = (unicode('%s\n%s') % (code, comment)).encode('utf-8') except NameError: pass # python 3 has no unicode method else: assert len(Script(s, 1, len(code)).completions()) jedi-0.7.0/test/test_speed.py0000664000175000017500000000262312204171717016330 0ustar daviddavid00000000000000""" Speed tests of Jedi. To prove that certain things don't take longer than they should. """ import time import functools from .helpers import TestCase import jedi class TestSpeed(TestCase): def _check_speed(time_per_run, number=4, run_warm=True): """ Speed checks should typically be very tolerant. Some machines are faster than others, but the tests should still pass. These tests are here to assure that certain effects that kill jedi performance are not reintroduced to Jedi.""" def decorated(func): @functools.wraps(func) def wrapper(self): if run_warm: func(self) first = time.time() for i in range(number): func(self) single_time = (time.time() - first) / number print('\nspeed', func, single_time) assert single_time < time_per_run return wrapper return decorated @_check_speed(0.2) def test_os_path_join(self): s = "from posixpath import join; join('', '')." assert len(jedi.Script(s).completions()) > 10 # is a str completion @_check_speed(0.1) def test_scipy_speed(self): s = 'import scipy.weave; scipy.weave.inline(' script = jedi.Script(s, 1, len(s), '') script.call_signatures() #print(jedi.imports.imports_processed) jedi-0.7.0/test/test_regression.py0000664000175000017500000001231212204171717017404 0ustar daviddavid00000000000000""" Unit tests to avoid errors of the past. These are also all tests that didn't found a good place in any other testing module. """ import os import textwrap from .helpers import TestCase, cwd_at import jedi from jedi import Script from jedi import api, parsing #jedi.set_debug_function() class TestRegression(TestCase): def test_goto_definition_cursor(self): s = ("class A():\n" " def _something(self):\n" " return\n" " def different_line(self,\n" " b):\n" " return\n" "A._something\n" "A.different_line" ) in_name = 2, 9 under_score = 2, 8 cls = 2, 7 should1 = 7, 10 diff_line = 4, 10 should2 = 8, 10 def get_def(pos): return [d.description for d in Script(s, *pos).goto_definitions()] in_name = get_def(in_name) under_score = get_def(under_score) should1 = get_def(should1) should2 = get_def(should2) diff_line = get_def(diff_line) assert should1 == in_name assert should1 == under_score assert should2 == diff_line self.assertRaises(jedi.NotFoundError, get_def, cls) def test_operator_doc(self): r = list(Script("a == b", 1, 3).goto_definitions()) assert len(r) == 1 assert len(r[0].doc) > 100 def test_goto_definition_at_zero(self): assert Script("a", 1, 1).goto_definitions() == [] s = Script("str", 1, 1).goto_definitions() assert len(s) == 1 assert list(s)[0].description == 'class str' assert Script("", 1, 0).goto_definitions() == [] def test_complete_at_zero(self): s = Script("str", 1, 3).completions() assert len(s) == 1 assert list(s)[0].name == 'str' s = Script("", 1, 0).completions() assert len(s) > 0 @cwd_at('jedi') def test_add_dynamic_mods(self): api.settings.additional_dynamic_modules = ['dynamic.py'] # Fictional module that defines a function. src1 = "def ret(a): return a" # Other fictional modules in another place in the fs. src2 = 'from .. 
import setup; setup.ret(1)' # .parser to load the module api.modules.Module(os.path.abspath('dynamic.py'), src2).parser result = Script(src1, path='../setup.py').goto_definitions() assert len(result) == 1 assert result[0].description == 'class int' def test_os_nowait(self): """ github issue #45 """ s = Script("import os; os.P_").completions() assert 'P_NOWAIT' in [i.name for i in s] def test_points_in_completion(self): """At some point, points were inserted into the completions, this caused problems, sometimes. """ c = Script("if IndentationErr").completions() assert c[0].name == 'IndentationError' self.assertEqual(c[0].complete, 'or') def test_no_statement_parent(self): source = textwrap.dedent(""" def f(): pass class C: pass variable = f or C""") defs = Script(source, column=3).goto_definitions() defs = sorted(defs, key=lambda d: d.line) self.assertEqual([d.description for d in defs], ['def f', 'class C']) def test_end_pos(self): # jedi issue #150 s = "x()\nx( )\nx( )\nx ( )" parser = parsing.Parser(s) for i, s in enumerate(parser.module.statements, 3): for c in s.get_commands(): self.assertEqual(c.execution.end_pos[1], i) def check_definition_by_marker(self, source, after_cursor, names): r""" Find definitions specified by `after_cursor` and check what found For example, for the following configuration, you can pass ``after_cursor = 'y)'``.:: function( x, y) \ `- You want cursor to be here """ source = textwrap.dedent(source) for (i, line) in enumerate(source.splitlines()): if after_cursor in line: break column = len(line) - len(after_cursor) defs = Script(source, i + 1, column).goto_definitions() self.assertEqual([d.name for d in defs], names) def test_backslash_continuation(self): """ Test that ModuleWithCursor.get_path_until_cursor handles continuation """ self.check_definition_by_marker(r""" x = 0 a = \ [1, 2, 3, 4, 5, 6, 7, 8, 9, x] # <-- here """, '] # <-- here', ['int']) # completion in whitespace s = 'asdfxyxxxxxxxx sds\\\n hello' assert Script(s, 2, 4).goto_assignments() == [] def test_backslash_continuation_and_bracket(self): self.check_definition_by_marker(r""" x = 0 a = \ [1, 2, 3, 4, 5, 6, 7, 8, 9, (x)] # <-- here """, '(x)] # <-- here', [None]) def test_generator(self): # Did have some problems with the usage of generator completions this # way. s = "def abc():\n" \ " yield 1\n" \ "abc()." assert Script(s).completions() jedi-0.7.0/test/refactor.py0000775000175000017500000000641312143361723016002 0ustar daviddavid00000000000000#!/usr/bin/env python """ Refactoring tests work a little bit similar to Black Box tests. But the idea is here to compare two versions of code. 
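For orientation, a case inside such a refactoring test file looks roughly like the sketch
below. The file name ``rename.py`` and the identifiers ``my_function``/``new_name`` are
only illustrative, not taken from the shipped test data; the file's base name selects the
refactoring function via ``getattr(refactoring, ...)`` in ``RefactoringCase.refactor()``,
so a case in ``rename.py`` would exercise ``refactoring.rename``::

    # --- simple rename
    #? 8 new_name
    my_function()
    def my_function():
        pass
    # +++
    new_name()
    def new_name():
        pass

``collect_file_tests()`` below splits the file on the ``# ---``/``# +++`` markers and
reads the ``#? <column> <new_name>`` line to build a ``RefactoringCase``; the position
refers to the line directly after the ``#?`` marker.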
""" from __future__ import with_statement import os import re from jedi._compatibility import reduce import jedi from jedi import refactoring class RefactoringCase(object): def __init__(self, name, source, line_nr, index, path, new_name, start_line_test, desired): self.name = name self.source = source self.line_nr = line_nr self.index = index self.path = path self.new_name = new_name self.start_line_test = start_line_test self.desired = desired def refactor(self): script = jedi.Script(self.source, self.line_nr, self.index, self.path) f_name = os.path.basename(self.path) refactor_func = getattr(refactoring, f_name.replace('.py', '')) args = (self.new_name,) if self.new_name else () return refactor_func(script, *args) def run(self): refactor_object = self.refactor() # try to get the right excerpt of the newfile f = refactor_object.new_files()[self.path] lines = f.splitlines()[self.start_line_test:] end = self.start_line_test + len(lines) pop_start = None for i, l in enumerate(lines): if l.startswith('# +++'): end = i break elif '#? ' in l: pop_start = i lines.pop(pop_start) self.result = '\n'.join(lines[:end - 1]).strip() return self.result def check(self): return self.run() == self.desired def __repr__(self): return '<%s: %s:%s>' % (self.__class__.__name__, self.name, self.line_nr - 1) def collect_file_tests(source, path, lines_to_execute): r = r'^# --- ?([^\n]*)\n((?:(?!\n# \+\+\+).)*)' \ r'\n# \+\+\+((?:(?!\n# ---).)*)' for match in re.finditer(r, source, re.DOTALL | re.MULTILINE): name = match.group(1).strip() first = match.group(2).strip() second = match.group(3).strip() start_line_test = source[:match.start()].count('\n') + 1 # get the line with the position of the operation p = re.match(r'((?:(?!#\?).)*)#\? (\d*) ?([^\n]*)', first, re.DOTALL) if p is None: print("Please add a test start.") continue until = p.group(1) index = int(p.group(2)) new_name = p.group(3) line_nr = start_line_test + until.count('\n') + 2 if lines_to_execute and line_nr - 1 not in lines_to_execute: continue yield RefactoringCase(name, source, line_nr, index, path, new_name, start_line_test, second) def collect_dir_tests(base_dir, test_files): for f_name in os.listdir(base_dir): files_to_execute = [a for a in test_files.items() if a[0] in f_name] lines_to_execute = reduce(lambda x, y: x + y[1], files_to_execute, []) if f_name.endswith(".py") and (not test_files or files_to_execute): path = os.path.join(base_dir, f_name) with open(path) as f: source = f.read() for case in collect_file_tests(source, path, lines_to_execute): yield case jedi-0.7.0/test/helpers.py0000664000175000017500000000200512204171717015625 0ustar daviddavid00000000000000""" A helper module for testing, improves compatibility for testing (as ``jedi._compatibility``) as well as introducing helper functions. """ import sys if sys.hexversion < 0x02070000: import unittest2 as unittest else: import unittest TestCase = unittest.TestCase import os from os.path import abspath, dirname import functools test_dir = dirname(abspath(__file__)) root_dir = dirname(test_dir) sample_int = 1 # This is used in completion/imports.py def cwd_at(path): """ Decorator to run function at `path`. :type path: str :arg path: relative path from repository root (e.g., ``'jedi'``). 
""" def decorator(func): @functools.wraps(func) def wrapper(*args, **kwds): try: oldcwd = os.getcwd() repo_root = os.path.dirname(test_dir) os.chdir(os.path.join(repo_root, path)) return func(*args, **kwds) finally: os.chdir(oldcwd) return wrapper return decorator jedi-0.7.0/test/test_call_signatures.py0000664000175000017500000000610012204171717020401 0ustar daviddavid00000000000000import textwrap from .helpers import TestCase from jedi import Script class TestCallSignatures(TestCase): def _run(self, source, expected_name, expected_index=0, line=None, column=None): signatures = Script(source, line, column).call_signatures() assert len(signatures) <= 1 if not signatures: assert expected_name is None else: assert signatures[0].call_name == expected_name assert signatures[0].index == expected_index def test_call_signatures(self): def run(source, name, index=0, column=None, line=1): self._run(source, name, index, line, column) # simple s1 = "abs(a, str(" run(s1, 'abs', 0, 4) run(s1, 'abs', 1, 6) run(s1, 'abs', 1, 7) run(s1, 'abs', 1, 8) run(s1, 'str', 0, 11) s2 = "abs(), " run(s2, 'abs', 0, 4) run(s2, None, column=5) run(s2, None) s3 = "abs()." run(s3, None, column=5) run(s3, None) # more complicated s4 = 'abs(zip(), , set,' run(s4, None, column=3) run(s4, 'abs', 0, 4) run(s4, 'zip', 0, 8) run(s4, 'abs', 0, 9) #run(s4, 'abs', 1, 10) s5 = "abs(1,\nif 2:\n def a():" run(s5, 'abs', 0, 4) run(s5, 'abs', 1, 6) s6 = "str().center(" run(s6, 'center', 0) run(s6, 'str', 0, 4) s7 = "str().upper().center(" s8 = "str(int[zip(" run(s7, 'center', 0) run(s8, 'zip', 0) run(s8, 'str', 0, 8) run("import time; abc = time; abc.sleep(", 'sleep', 0) # jedi-vim #9 run("with open(", 'open', 0) # jedi-vim #11 run("for sorted(", 'sorted', 0) run("for s in sorted(", 'sorted', 0) # jedi #57 s = "def func(alpha, beta): pass\n" \ "func(alpha='101'," run(s, 'func', 0, column=13, line=2) def test_function_definition_complex(self): s = """ def abc(a,b): pass def a(self): abc( if 1: pass """ self._run(s, 'abc', 0, line=6, column=24) s = """ import re def huhu(it): re.compile( return it * 2 """ self._run(s, 'compile', 0, line=4, column=31) # jedi-vim #70 s = """def foo(""" assert Script(s).call_signatures() == [] # jedi-vim #116 s = """import functools; test = getattr(functools, 'partial'); test(""" self._run(s, 'partial', 0) def test_call_signature_on_module(self): """github issue #240""" s = 'import datetime; datetime(' # just don't throw an exception (if numpy doesn't exist, just ignore it) assert Script(s).call_signatures() == [] def test_function_definition_empty_paren_pre_space(self): s = textwrap.dedent("""\ def f(a, b): pass f( )""") self._run(s, 'f', 0, line=3, column=3) jedi-0.7.0/test/test_api.py0000664000175000017500000000144012204171717015775 0ustar daviddavid00000000000000""" Test all things related to the ``jedi.api`` module. 
""" from jedi import common, api def test_preload_modules(): def check_loaded(*modules): # + 1 for builtin, +1 for None module (currently used) assert len(new) == len(modules) + 2 for i in modules + ('__builtin__',): assert [i in k for k in new.keys() if k is not None] from jedi import cache temp_cache, cache.parser_cache = cache.parser_cache, {} new = cache.parser_cache with common.ignored(KeyError): # performance of tests -> no reload new['__builtin__'] = temp_cache['__builtin__'] api.preload_module('datetime') check_loaded('datetime') api.preload_module('json', 'token') check_loaded('datetime', 'json', 'token') cache.parser_cache = temp_cache jedi-0.7.0/test/conftest.py0000664000175000017500000000620312204171717016014 0ustar daviddavid00000000000000import os import shutil import tempfile import pytest from . import helpers from . import run from . import refactor def pytest_addoption(parser): parser.addoption( "--integration-case-dir", default=os.path.join(helpers.test_dir, 'completion'), help="Directory in which integration test case files locate.") parser.addoption( "--refactor-case-dir", default=os.path.join(helpers.test_dir, 'refactor'), help="Directory in which refactoring test case files locate.") parser.addoption( "--test-files", "-T", default=[], action='append', help=( "Specify test files using FILE_NAME[:LINE[,LINE[,...]]]. " "For example: -T generators.py:10,13,19. " "Note that you can use -m to specify the test case by id.")) parser.addoption( "--thirdparty", action='store_true', help="Include integration tests that requires third party modules.") def parse_test_files_option(opt): """ Parse option passed to --test-files into a key-value pair. >>> parse_test_files_option('generators.py:10,13,19') ('generators.py', [10, 13, 19]) """ opt = str(opt) if ':' in opt: (f_name, rest) = opt.split(':', 1) return (f_name, list(map(int, rest.split(',')))) else: return (opt, []) def pytest_generate_tests(metafunc): """ :type metafunc: _pytest.python.Metafunc """ test_files = dict(map(parse_test_files_option, metafunc.config.option.test_files)) if 'case' in metafunc.fixturenames: base_dir = metafunc.config.option.integration_case_dir thirdparty = metafunc.config.option.thirdparty cases = list(run.collect_dir_tests(base_dir, test_files)) if thirdparty: cases.extend(run.collect_dir_tests( os.path.join(base_dir, 'thirdparty'), test_files, True)) metafunc.parametrize('case', cases) if 'refactor_case' in metafunc.fixturenames: base_dir = metafunc.config.option.refactor_case_dir metafunc.parametrize( 'refactor_case', refactor.collect_dir_tests(base_dir, test_files)) @pytest.fixture() def isolated_jedi_cache(monkeypatch, tmpdir): """ Set `jedi.settings.cache_directory` to a temporary directory during test. Same as `clean_jedi_cache`, but create the temporary directory for each test case (scope='function'). """ from jedi import settings monkeypatch.setattr(settings, 'cache_directory', str(tmpdir)) @pytest.fixture(scope='session') def clean_jedi_cache(request): """ Set `jedi.settings.cache_directory` to a temporary directory during test. Note that you can't use built-in `tmpdir` and `monkeypatch` fixture here because their scope is 'function', which is not used in 'session' scope fixture. This fixture is activated in ../pytest.ini. 
""" from jedi import settings old = settings.cache_directory tmp = tempfile.mkdtemp(prefix='jedi-test-') settings.cache_directory = tmp @request.addfinalizer def restore(): settings.cache_directory = old shutil.rmtree(tmp) jedi-0.7.0/test/test_interpreter.py0000664000175000017500000000304012204171717017565 0ustar daviddavid00000000000000""" Tests of ``jedi.api.Interpreter``. """ from .helpers import TestCase import jedi from jedi._compatibility import is_py33 class TestInterpreterAPI(TestCase): def check_interpreter_complete(self, source, namespace, completions, **kwds): script = jedi.Interpreter(source, [namespace], **kwds) cs = script.completions() actual = [c.name for c in cs] self.assertEqual(sorted(actual), sorted(completions)) def test_complete_raw_function(self): from os.path import join self.check_interpreter_complete('join().up', locals(), ['upper']) def test_complete_raw_function_different_name(self): from os.path import join as pjoin self.check_interpreter_complete('pjoin().up', locals(), ['upper']) def test_complete_raw_module(self): import os self.check_interpreter_complete('os.path.join().up', locals(), ['upper']) def test_complete_raw_instance(self): import datetime dt = datetime.datetime(2013, 1, 1) completions = ['time', 'timetz', 'timetuple'] if is_py33: completions += ['timestamp'] self.check_interpreter_complete('(dt - dt).ti', locals(), completions) jedi-0.7.0/test/test_full_name.py0000664000175000017500000000444712204171717017200 0ustar daviddavid00000000000000""" Tests for :attr:`.BaseDefinition.full_name`. There are three kinds of test: #. Test classes derived from :class:`MixinTestFullName`. Child class defines :attr:`.operation` to alter how the api definition instance is created. #. :class:`TestFullDefinedName` is to test combination of ``obj.full_name`` and ``jedi.defined_names``. #. Misc single-function tests. """ import textwrap import jedi from jedi import api_classes from .helpers import TestCase class MixinTestFullName(object): operation = None def check(self, source, desired): script = jedi.Script(textwrap.dedent(source)) definitions = getattr(script, type(self).operation)() self.assertEqual(definitions[0].full_name, desired) def test_os_path_join(self): self.check('import os; os.path.join', 'os.path.join') def test_builtin(self): self.check('type', 'type') def test_from_import(self): self.check('from os import path', 'os.path') class TestFullNameWithGotoDefinitions(MixinTestFullName, TestCase): operation = 'goto_definitions' def test_tuple_mapping(self): self.check(""" import re any_re = re.compile('.*') any_re""", 're.RegexObject') class TestFullNameWithCompletions(MixinTestFullName, TestCase): operation = 'completions' class TestFullDefinedName(TestCase): """ Test combination of ``obj.full_name`` and ``jedi.defined_names``. """ def check(self, source, desired): definitions = jedi.defined_names(textwrap.dedent(source)) full_names = [d.full_name for d in definitions] self.assertEqual(full_names, desired) def test_local_names(self): self.check(""" def f(): pass class C: pass """, ['f', 'C']) def test_imports(self): self.check(""" import os from os import path from os.path import join from os import path as opath """, ['os', 'os.path', 'os.path.join', 'os.path']) def test_keyword_full_name_should_be_none(): """issue #94""" # Using `from jedi.keywords import Keyword` here does NOT work # in Python 3. This is due to the import hack jedi using. 
Keyword = api_classes.keywords.Keyword d = api_classes.Definition(Keyword('(', (0, 0))) assert d.full_name is None jedi-0.7.0/test/test_defined_names.py0000664000175000017500000000420212204171717020004 0ustar daviddavid00000000000000""" Tests for `api.defined_names`. """ import textwrap from jedi import api from .helpers import TestCase class TestDefinedNames(TestCase): def assert_definition_names(self, definitions, names): self.assertEqual([d.name for d in definitions], names) def check_defined_names(self, source, names): definitions = api.defined_names(textwrap.dedent(source)) self.assert_definition_names(definitions, names) return definitions def test_get_definitions_flat(self): self.check_defined_names(""" import module class Class: pass def func(): pass data = None """, ['module', 'Class', 'func', 'data']) def test_dotted_assignment(self): self.check_defined_names(""" x = Class() x.y.z = None """, ['x']) def test_multiple_assignment(self): self.check_defined_names(""" x = y = None """, ['x', 'y']) def test_multiple_imports(self): self.check_defined_names(""" from module import a, b from another_module import * """, ['a', 'b']) def test_nested_definitions(self): definitions = self.check_defined_names(""" class Class: def f(): pass def g(): pass """, ['Class']) subdefinitions = definitions[0].defined_names() self.assert_definition_names(subdefinitions, ['f', 'g']) self.assertEqual([d.full_name for d in subdefinitions], ['Class.f', 'Class.g']) def test_nested_class(self): definitions = self.check_defined_names(""" class L1: class L2: class L3: def f(): pass def f(): pass def f(): pass def f(): pass """, ['L1', 'f']) subdefs = definitions[0].defined_names() subsubdefs = subdefs[0].defined_names() self.assert_definition_names(subdefs, ['L2', 'f']) self.assert_definition_names(subsubdefs, ['L3', 'f']) self.assert_definition_names(subsubdefs[0].defined_names(), ['f']) jedi-0.7.0/jedi/0000775000175000017500000000000012204171764013552 5ustar daviddavid00000000000000jedi-0.7.0/jedi/recursion.py0000664000175000017500000001206712204171717016141 0ustar daviddavid00000000000000""" Recursions are the recipe of |jedi| to conquer Python code. However, someone must stop recursions going mad. Some settings are here to make |jedi| stop at the right time. You can read more about them :ref:`here `. Next to :mod:`cache` this module also makes |jedi| not thread-safe. Why? ``ExecutionRecursionDecorator`` uses class variables to count the function calls. """ from jedi import parsing_representation as pr from jedi import debug from jedi import settings import evaluate_representation as er import builtin class RecursionDecorator(object): """ A decorator to detect recursions in statements. In a recursion a statement at the same place, in the same module may not be executed two times. """ def __init__(self, func): self.func = func self.reset() def __call__(self, stmt, *args, **kwargs): # print stmt, len(self.node_statements()) if self.push_stmt(stmt): return [] else: result = self.func(stmt, *args, **kwargs) self.pop_stmt() return result def push_stmt(self, stmt): self.current = RecursionNode(stmt, self.current) check = self._check_recursion() if check: # TODO remove False!!!! debug.warning('catched stmt recursion: %s against %s @%s' % (stmt, check.stmt, stmt.start_pos)) self.pop_stmt() return True return False def pop_stmt(self): if self.current is not None: # I don't know how current can be None, but sometimes it happens # with Python3. 
self.current = self.current.parent def _check_recursion(self): test = self.current while True: test = test.parent if self.current == test: return test if not test: return False def reset(self): self.top = None self.current = None def node_statements(self): result = [] n = self.current while n: result.insert(0, n.stmt) n = n.parent return result class RecursionNode(object): """ A node of the RecursionDecorator. """ def __init__(self, stmt, parent): self.script = stmt.get_parent_until() self.position = stmt.start_pos self.parent = parent self.stmt = stmt # Don't check param instances, they are not causing recursions # The same's true for the builtins, because the builtins are really # simple. self.is_ignored = isinstance(stmt, pr.Param) \ or (self.script == builtin.Builtin.scope) def __eq__(self, other): if not other: return None is_list_comp = lambda x: isinstance(x, pr.ForFlow) and x.is_list_comp return self.script == other.script \ and self.position == other.position \ and not is_list_comp(self.stmt.parent) \ and not is_list_comp(other.parent) \ and not self.is_ignored and not other.is_ignored class ExecutionRecursionDecorator(object): """ Catches recursions of executions. It is designed like a Singelton. Only one instance should exist. """ def __init__(self, func): self.func = func self.reset() def __call__(self, execution, evaluate_generator=False): debug.dbg('Execution recursions: %s' % execution, self.recursion_level, self.execution_count, len(self.execution_funcs)) if self.check_recursion(execution, evaluate_generator): result = [] else: result = self.func(execution, evaluate_generator) self.cleanup() return result @classmethod def cleanup(cls): cls.parent_execution_funcs.pop() cls.recursion_level -= 1 @classmethod def check_recursion(cls, execution, evaluate_generator): in_par_execution_funcs = execution.base in cls.parent_execution_funcs in_execution_funcs = execution.base in cls.execution_funcs cls.recursion_level += 1 cls.execution_count += 1 cls.execution_funcs.add(execution.base) cls.parent_execution_funcs.append(execution.base) if cls.execution_count > settings.max_executions: return True if isinstance(execution.base, (er.Generator, er.Array)): return False module = execution.get_parent_until() if evaluate_generator or module == builtin.Builtin.scope: return False if in_par_execution_funcs: if cls.recursion_level > settings.max_function_recursion_level: return True if in_execution_funcs and \ len(cls.execution_funcs) > settings.max_until_execution_unique: return True if cls.execution_count > settings.max_executions_without_builtins: return True return False @classmethod def reset(cls): cls.recursion_level = 0 cls.parent_execution_funcs = [] cls.execution_funcs = set() cls.execution_count = 0 jedi-0.7.0/jedi/api_classes.py0000664000175000017500000004531412204171717016417 0ustar daviddavid00000000000000""" The :mod:`api_classes` module contains the return classes of the API. These classes are the much bigger part of the whole API, because they contain the interesting information about completion and goto operations. """ from __future__ import with_statement import warnings import functools from jedi._compatibility import unicode, next from jedi import settings from jedi import common from jedi import parsing_representation as pr from jedi import cache import keywords import recursion import dynamic import evaluate import imports import evaluate_representation as er def _clear_caches(): """ Clear all caches of this and related modules. 
The only cache that will not be deleted is the module cache. """ cache.clear_caches() dynamic.search_param_cache.clear() recursion.ExecutionRecursionDecorator.reset() evaluate.follow_statement.reset() imports.imports_processed = 0 def _clear_caches_after_call(func): """ Clear caches just before returning a value. """ @functools.wraps(func) def wrapper(*args, **kwds): result = func(*args, **kwds) _clear_caches() return result return wrapper class BaseDefinition(object): _mapping = { 'posixpath': 'os.path', 'riscospath': 'os.path', 'ntpath': 'os.path', 'os2emxpath': 'os.path', 'macpath': 'os.path', 'genericpath': 'os.path', 'posix': 'os', '_io': 'io', '_functools': 'functools', '_sqlite3': 'sqlite3', '__builtin__': '', 'builtins': '', } _tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in { 'argparse._ActionsContainer': 'argparse.ArgumentParser', '_sre.SRE_Match': 're.MatchObject', '_sre.SRE_Pattern': 're.RegexObject', }.items()) def __init__(self, definition, start_pos): self._start_pos = start_pos self._definition = definition """ An instance of :class:`jedi.parsing_representation.Base` subclass. """ self.is_keyword = isinstance(definition, keywords.Keyword) # generate a path to the definition self._module = definition.get_parent_until() self.module_path = self._module.path @property def start_pos(self): """ .. deprecated:: 0.7.0 Use :attr:`.line` and :attr:`.column` instead. .. todo:: Remove! """ warnings.warn("Use line/column instead.", DeprecationWarning) return self._start_pos @property def type(self): """ The type of the definition. Here is an example of the value of this attribute. Let's consider the following source. As what is in ``variable`` is unambiguous to Jedi, :meth:`api.Script.goto_definitions` should return a list of definition for ``sys``, ``f``, ``C`` and ``x``. >>> from jedi import Script >>> source = ''' ... import keyword ... ... class C: ... pass ... ... class D: ... pass ... ... x = D() ... ... def f(): ... pass ... ... variable = keyword or f or C or x''' >>> script = Script(source, len(source.splitlines()), 3, 'example.py') >>> defs = script.goto_definitions() Before showing what is in ``defs``, let's sort it by :attr:`line` so that it is easy to relate the result to the source code. >>> defs = sorted(defs, key=lambda d: d.line) >>> defs # doctest: +NORMALIZE_WHITESPACE [, , , ] Finally, here is what you can get from :attr:`type`: >>> defs[0].type 'module' >>> defs[1].type 'class' >>> defs[2].type 'instance' >>> defs[3].type 'function' """ # generate the type stripped = self._definition if isinstance(self._definition, er.InstanceElement): stripped = self._definition.var if isinstance(stripped, pr.Name): stripped = stripped.parent return type(stripped).__name__.lower() @property def path(self): """The module path.""" path = [] def insert_nonnone(x): if x: path.insert(0, x) if not isinstance(self._definition, keywords.Keyword): par = self._definition while par is not None: if isinstance(par, pr.Import): insert_nonnone(par.namespace) insert_nonnone(par.from_ns) if par.relative_count == 0: break with common.ignored(AttributeError): path.insert(0, par.name) par = par.parent return path @property def module_name(self): """ The module name. 
>>> from jedi import Script >>> source = 'import datetime' >>> script = Script(source, 1, len(source), 'example.py') >>> d = script.goto_definitions()[0] >>> print(d.module_name) # doctest: +ELLIPSIS datetime """ return str(self._module.name) def in_builtin_module(self): """Whether this is a builtin module.""" return not (self.module_path is None or self.module_path.endswith('.py')) @property def line_nr(self): """ .. deprecated:: 0.5.0 Use :attr:`.line` instead. .. todo:: Remove! """ warnings.warn("Use line instead.", DeprecationWarning) return self.line @property def line(self): """The line where the definition occurs (starting with 1).""" if self.in_builtin_module(): return None return self._start_pos[0] @property def column(self): """The column where the definition occurs (starting with 0).""" if self.in_builtin_module(): return None return self._start_pos[1] @property def doc(self): r""" Return a document string for this completion object. Example: >>> from jedi import Script >>> source = '''\ ... def f(a, b=1): ... "Document for function f." ... ''' >>> script = Script(source, 1, len('def f'), 'example.py') >>> d = script.goto_definitions()[0] >>> print(d.doc) f(a, b = 1) Document for function f. Notice that useful extra information is added to the actual docstring. For function, it is call signature. If you need actual docstring, use :attr:`raw_doc` instead. >>> print(d.raw_doc) Document for function f. """ try: return self._definition.doc except AttributeError: return self.raw_doc @property def raw_doc(self): """ The raw docstring ``__doc__`` for any object. See :attr:`doc` for example. """ try: return unicode(self._definition.docstr) except AttributeError: return '' @property def description(self): """A textual description of the object.""" return unicode(self._definition) @property def full_name(self): """ Dot-separated path of this object. It is in the form of ``[.[...]][.]``. It is useful when you want to look up Python manual of the object at hand. Example: >>> from jedi import Script >>> source = ''' ... import os ... os.path.join''' >>> script = Script(source, 3, len('os.path.join'), 'example.py') >>> print(script.goto_definitions()[0].full_name) os.path.join Notice that it correctly returns ``'os.path.join'`` instead of (for example) ``'posixpath.join'``. """ path = [unicode(p) for p in self.path] # TODO add further checks, the mapping should only occur on stdlib. if not path: return None # for keywords the path is empty with common.ignored(KeyError): path[0] = self._mapping[path[0]] for key, repl in self._tuple_mapping.items(): if tuple(path[:len(key)]) == key: path = [repl] + path[len(key):] return '.'.join(path if path[0] else path[1:]) def __repr__(self): return "<%s %s>" % (type(self).__name__, self.description) class Completion(BaseDefinition): """ `Completion` objects are returned from :meth:`api.Script.completions`. They provide additional information about a completion. """ def __init__(self, name, needs_dot, like_name_length, base): super(Completion, self).__init__(name.parent, name.start_pos) self._name = name self._needs_dot = needs_dot self._like_name_length = like_name_length self._base = base # Completion objects with the same Completion name (which means # duplicate items in the completion) self._same_name_completions = [] self._followed_definitions = None def _complete(self, like_name): dot = '.' 
if self._needs_dot else '' append = '' if settings.add_bracket_after_function \ and self.type == 'Function': append = '(' if settings.add_dot_after_module: if isinstance(self._base, pr.Module): append += '.' if isinstance(self._base, pr.Param): append += '=' name = self._name.names[-1] if like_name: name = name[self._like_name_length:] return dot + name + append @property def complete(self): """ Return the rest of the word, e.g. completing ``isinstance``:: isinstan# <-- Cursor is here would return the string 'ce'. It also adds additional stuff, depending on your `settings.py`. """ return self._complete(True) @property def name(self): """ Similar to :meth:`Completion.complete`, but return the whole word, for example:: isinstan would return `isinstance`. """ return unicode(self._name.names[-1]) @property def name_with_symbols(self): """ Similar to :meth:`Completion.name`, but like :meth:`Completion.name` returns also the symbols, for example:: list() would return ``.append`` and others (which means it adds a dot). """ return self._complete(False) @property def word(self): """ .. deprecated:: 0.6.0 Use :attr:`.name` instead. .. todo:: Remove! """ warnings.warn("Use name instead.", DeprecationWarning) return self.name @property def description(self): """Provide a description of the completion object.""" parent = self._name.parent if parent is None: return '' t = self.type if t == 'statement' or t == 'import': desc = self._definition.get_code(False) else: desc = '.'.join(unicode(p) for p in self.path) line = '' if self.in_builtin_module else '@%s' % self.line return '%s: %s%s' % (t, desc, line) def follow_definition(self): """ Return the original definitions. I strongly recommend not using it for your completions, because it might slow down |jedi|. If you want to read only a few objects (<=20), it might be useful, especially to get the original docstrings. The basic problem of this function is that it follows all results. This means with 1000 completions (e.g. numpy), it's just PITA-slow. """ if self._followed_definitions is None: if self._definition.isinstance(pr.Statement): defs = evaluate.follow_statement(self._definition) elif self._definition.isinstance(pr.Import): defs = imports.strip_imports([self._definition]) else: return [self] self._followed_definitions = \ [BaseDefinition(d, d.start_pos) for d in defs] _clear_caches() return self._followed_definitions def __repr__(self): return '<%s: %s>' % (type(self).__name__, self._name) class Definition(BaseDefinition): """ *Definition* objects are returned from :meth:`api.Script.goto_assignments` or :meth:`api.Script.goto_definitions`. """ def __init__(self, definition): super(Definition, self).__init__(definition, definition.start_pos) @property def name(self): """ Name of variable/function/class/module. For example, for ``x = None`` it returns ``'x'``. 
:rtype: str or None """ d = self._definition if isinstance(d, er.InstanceElement): d = d.var if isinstance(d, pr.Name): return d.names[-1] if d.names else None elif isinstance(d, er.Array): return unicode(d.type) elif isinstance(d, (pr.Class, er.Class, er.Instance, er.Function, pr.Function)): return unicode(d.name) elif isinstance(d, pr.Module): return self.module_name elif isinstance(d, pr.Import): try: return d.get_defined_names()[0].names[-1] except (AttributeError, IndexError): return None elif isinstance(d, pr.Statement): try: return d.assignment_details[0][1].values[0][0].name.names[-1] except IndexError: return None return None @property def description(self): """ A description of the :class:`.Definition` object, which is heavily used in testing. e.g. for ``isinstance`` it returns ``def isinstance``. Example: >>> from jedi import Script >>> source = ''' ... def f(): ... pass ... ... class C: ... pass ... ... variable = f or C''' >>> script = Script(source, column=3) # line is maximum by default >>> defs = script.goto_definitions() >>> defs = sorted(defs, key=lambda d: d.line) >>> defs [, ] >>> str(defs[0].description) # strip literals in python2 'def f' >>> str(defs[1].description) 'class C' """ d = self._definition if isinstance(d, er.InstanceElement): d = d.var if isinstance(d, pr.Name): d = d.parent if isinstance(d, er.Array): d = 'class ' + d.type elif isinstance(d, (pr.Class, er.Class, er.Instance)): d = 'class ' + unicode(d.name) elif isinstance(d, (er.Function, pr.Function)): d = 'def ' + unicode(d.name) elif isinstance(d, pr.Module): # only show module name d = 'module %s' % self.module_name elif self.is_keyword: d = 'keyword %s' % d.name else: code = d.get_code().replace('\n', '') max_len = 20 d = (code[:max_len] + '...') if len(code) > max_len + 3 else code return d @property def desc_with_module(self): """ In addition to the definition, also return the module. .. warning:: Don't use this function yet, its behaviour may change. If you really need it, talk to me. .. todo:: Add full path. This function is should return a `module.class.function` path. """ if self.module_path.endswith('.py') \ and not isinstance(self._definition, pr.Module): position = '@%s' % (self.line) else: # is a builtin or module position = '' return "%s:%s%s" % (self.module_name, self.description, position) def defined_names(self): """ List sub-definitions (e.g., methods in class). :rtype: list of Definition """ d = self._definition if isinstance(d, er.InstanceElement): d = d.var if isinstance(d, pr.Name): d = d.parent return _defined_names(d) def _defined_names(scope): """ List sub-definitions (e.g., methods in class). :type scope: Scope :rtype: list of Definition """ pair = next(evaluate.get_names_of_scope( scope, star_search=False, include_builtin=False), None) names = pair[1] if pair else [] return [Definition(d) for d in sorted(names, key=lambda s: s.start_pos)] class Usage(BaseDefinition): """TODO: document this""" def __init__(self, name_part, scope): super(Usage, self).__init__(scope, name_part.start_pos) self.text = unicode(name_part) self.end_pos = name_part.end_pos @property def description(self): return "%s@%s,%s" % (self.text, self.line, self.column) def __eq__(self, other): return self._start_pos == other._start_pos \ and self.module_path == other.module_path def __hash__(self): return hash((self._start_pos, self.module_path)) class CallDef(object): """ `CallDef` objects is the return value of `Script.function_definition`. It knows what functions you are currently in. e.g. 
`isinstance(` would return the `isinstance` function. without `(` it would return nothing. """ def __init__(self, executable, index, call): self._executable = executable self.index = index self._call = call @property def params(self): if self._executable.isinstance(er.Function): if isinstance(self._executable, er.InstanceElement): return self._executable.params[1:] return self._executable.params else: try: sub = self._executable.get_subscope_by_name('__init__') return sub.params[1:] # ignore self except KeyError: return [] @property def bracket_start(self): """ The indent of the bracket that is responsible for the last function call. """ c = self._call while c.next is not None: c = c.next return c.name.end_pos @property def call_name(self): """ The name (e.g. 'isinstance') as a string. """ return unicode(self._executable.name) @property def module(self): return self._executable.get_parent_until() def __repr__(self): return '<%s: %s index %s>' % (type(self).__name__, self._executable, self.index) jedi-0.7.0/jedi/__init__.py0000664000175000017500000000322012204171717015656 0ustar daviddavid00000000000000""" Jedi is an autocompletion tool for Python that can be used in IDEs/editors. Jedi works. Jedi is fast. It understands all of the basic Python syntax elements including many builtin functions. Additionaly, Jedi suports two different goto functions and has support for renaming as well as Pydoc support and some other IDE features. Jedi uses a very simple API to connect with IDE's. There's a reference implementation as a `VIM-Plugin `_, which uses Jedi's autocompletion. I encourage you to use Jedi in your IDEs. It's really easy. If there are any problems (also with licensing), just contact me. To give you a simple example how you can use the Jedi library, here is an example for the autocompletion feature: >>> import jedi >>> source = ''' ... import datetime ... datetime.da''' >>> script = jedi.Script(source, 3, len('datetime.da'), 'example.py') >>> script >>> completions = script.completions() >>> completions #doctest: +ELLIPSIS [, , ...] >>> print(completions[0].complete) te >>> print(completions[0].name) date As you see Jedi is pretty simple and allows you to concentrate on writing a good text editor, while still having very good IDE features for Python. """ __version__ = 0, 7, 0 import sys # python imports are hell sometimes. Especially the combination of relative # imports and circular imports... Just avoid it: sys.path.insert(0, __path__[0]) from .api import Script, Interpreter, NotFoundError, set_debug_function from .api import preload_module, defined_names from . import settings sys.path.pop(0) jedi-0.7.0/jedi/__main__.py0000664000175000017500000000046412204171717015646 0ustar daviddavid00000000000000from sys import argv if len(argv) == 2 and argv[1] == 'repl': # don't want to use __main__ only for repl yet, maybe we want to use it for # something else. So just use the keyword ``repl`` for now. from os import path print(path.join(path.dirname(path.abspath(__file__)), 'replstartup.py')) jedi-0.7.0/jedi/mixin/0000775000175000017500000000000012204171764014676 5ustar daviddavid00000000000000jedi-0.7.0/jedi/mixin/datetime.pym0000664000175000017500000000011512143361723017214 0ustar daviddavid00000000000000class datetime(): @staticmethod def now(): return datetime() jedi-0.7.0/jedi/mixin/builtins.pym0000664000175000017500000001051212143361723017253 0ustar daviddavid00000000000000""" Pure Python implementation of some builtins. This code is not going to be executed anywhere. 
These implementations are not always correct, but should work as good as possible for the auto completion. """ def next(iterator, default=None): if hasattr("next"): return iterator.next() else: return iterator.__next__() return default def iter(collection, sentinel=None): if sentinel: yield collection() else: for c in collection: yield c def range(start, stop=None, step=1): return [0] class xrange(): # Attention: this function doesn't exist in Py3k (there it is range). def __iter__(self): yield 1 def count(self): return 1 def index(self): return 1 #-------------------------------------------------------- # descriptors #-------------------------------------------------------- class property(): def __init__(self, fget, fset=None, fdel=None, doc=None): self.fget = fget self.fset = fset self.fdel = fdel self.__doc__ = doc def __get__(self, obj, cls): return self.fget(obj) def __set__(self, obj, value): self.fset(obj, value) def __delete__(self, obj): self.fdel(obj) def setter(self, func): self.fset = func return self def getter(self, func): self.fget = func return self def deleter(self, func): self.fdel = func return self class staticmethod(): def __init__(self, func): self.__func = func def __get__(self, obj, cls): return self.__func class classmethod(): def __init__(self, func): self.__func = func def __get__(self, obj, cls): def _method(*args, **kwargs): return self.__func(cls, *args, **kwargs) return _method #-------------------------------------------------------- # array stuff #-------------------------------------------------------- class list(): def __init__(self, iterable=[]): self.__iterable = [] for i in iterable: self.__iterable += [i] def __iter__(self): for i in self.__iterable: yield i def __getitem__(self, y): return self.__iterable[y] def pop(self): return self.__iterable[-1] class tuple(): def __init__(self, iterable=[]): self.__iterable = [] for i in iterable: self.__iterable += [i] def __iter__(self): for i in self.__iterable: yield i def __getitem__(self, y): return self.__iterable[y] def index(self): return 1 def count(self): return 1 class set(): def __init__(self, iterable=[]): self.__iterable = iterable def __iter__(self): for i in self.__iterable: yield i def pop(self): return self.__iterable.pop() def copy(self): return self def difference(self, other): return self - other def intersection(self, other): return self & other def symmetric_difference(self, other): return self ^ other def union(self, other): return self | other class frozenset(): def __init__(self, iterable=[]): self.__iterable = iterable def __iter__(self): for i in self.__iterable: yield i def copy(self): return self class dict(): def __init__(self, **elements): self.__elements = elements def clear(self): # has a strange docstr pass def get(self, k, d=None): # TODO implement try: #return self.__elements[k] pass except KeyError: return d class reversed(): def __init__(self, sequence): self.__sequence = sequence def __iter__(self): for i in self.__sequence: yield i def __next__(self): return next(self.__iter__()) def next(self): return self.__next__() #-------------------------------------------------------- # basic types #-------------------------------------------------------- class int(): def __init__(self, x, base=None): pass class str(): def __init__(self, obj): pass class object(): def mro(): """ mro() -> list return a type's method resolution order """ return [object] jedi-0.7.0/jedi/mixin/_weakref.pym0000664000175000017500000000030512143361723017204 0ustar daviddavid00000000000000def 
proxy(object, callback=None): return object class ref(): def __init__(self, object, callback=None): self.__object = object def __call__(self): return self.__object jedi-0.7.0/jedi/mixin/_sre.pym0000664000175000017500000000557512143361723016367 0ustar daviddavid00000000000000def compile(): class SRE_Match(): endpos = 1 lastgroup = 0 lastindex = 1 pos = 0 string = 'a' regs = ((0, 1),) def __init__(self, pattern): self.re = pattern def start(self): return 0 def end(self): return 1 def span(self): return 0, 1 def expand(self): return '' def group(self, nr): return '' def groupdict(self): return {'a', 'a'} def groups(self): return ('a',) class SRE_Pattern(): flags = 0 groupindex = {} groups = 0 pattern = 'a' def findall(self, string, pos=None, endpos=None): """ findall(string[, pos[, endpos]]) --> list. Return a list of all non-overlapping matches of pattern in string. """ return ['a'] def finditer(self, string, pos=None, endpos=None): """ finditer(string[, pos[, endpos]]) --> iterator. Return an iterator over all non-overlapping matches for the RE pattern in string. For each match, the iterator returns a match object. """ yield SRE_Match(self) def match(self, string, pos=None, endpos=None): """ match(string[, pos[, endpos]]) --> match object or None. Matches zero or more characters at the beginning of the string pattern """ return SRE_Match(self) def scanner(self, string, pos=None, endpos=None): pass def search(self, string, pos=None, endpos=None): """ search(string[, pos[, endpos]]) --> match object or None. Scan through string looking for a match, and return a corresponding MatchObject instance. Return None if no position in the string matches. """ return SRE_Match(self) def split(self, string, maxsplit=0]): """ split(string[, maxsplit = 0]) --> list. Split string by the occurrences of pattern. """ return ['a'] def sub(self, repl, string, count=0): """ sub(repl, string[, count = 0]) --> newstring Return the string obtained by replacing the leftmost non-overlapping occurrences of pattern in string by the replacement repl. """ return '' def subn(self, repl, string, count=0): """ subn(repl, string[, count = 0]) --> (newstring, number of subs) Return the tuple (new_string, number_of_subs_made) found by replacing the leftmost non-overlapping occurrences of pattern with the replacement repl. 
""" return ('', 1) return SRE_Pattern() jedi-0.7.0/jedi/mixin/_sqlite3.pym0000664000175000017500000000075112143361723017151 0ustar daviddavid00000000000000def connect(database, timeout=None, isolation_level=None, detect_types=None, factory=None): return Connection() class Connection(): def cursor(self): return Cursor() class Cursor(): def cursor(self): return Cursor() def fetchone(self): return Row() def fetchmany(self, size=cursor.arraysize): return [self.fetchone()] def fetchall(self): return [self.fetchone()] class Row(): def keys(self): return [''] jedi-0.7.0/jedi/mixin/_functools.pym0000664000175000017500000000052412143361723017577 0ustar daviddavid00000000000000class partial(): def __init__(self, func, *args, **keywords): self.__func = func self.__args = args self.__keywords = keywords def __call__(self, *args, **kwargs): # I know this doesn't work in Python, but Jedi can this ;-) return self.__func(*self.__args, *args, **self.keywords, **kwargs) jedi-0.7.0/jedi/mixin/posix.pym0000664000175000017500000000007212143361723016564 0ustar daviddavid00000000000000def getcwd(): return '' def getcwdu(): return '' jedi-0.7.0/jedi/keywords.py0000664000175000017500000000425212204171717015774 0ustar daviddavid00000000000000from __future__ import with_statement import pydoc import keyword from jedi._compatibility import is_py3k from jedi import parsing_representation as pr from jedi import common import builtin try: from pydoc_data import topics as pydoc_topics except ImportError: # Python 2.6 import pydoc_topics if is_py3k: keys = keyword.kwlist else: keys = keyword.kwlist + ['None', 'False', 'True'] def keywords(string='', pos=(0, 0), all=False): if all: return set([Keyword(k, pos) for k in keys]) if string in keys: return set([Keyword(string, pos)]) return set() def keyword_names(*args, **kwargs): kwds = [] for k in keywords(*args, **kwargs): start = k.start_pos end = start[0], start[1] + len(k.name) kwds.append(pr.Name(k.parent, [(k.name, start)], start, end, k)) return kwds def get_operator(string, pos): return Keyword(string, pos) class Keyword(object): def __init__(self, name, pos): self.name = name self.start_pos = pos self.parent = builtin.Builtin.scope def get_parent_until(self): return self.parent @property def names(self): """ For a `parsing.Name` like comparision """ return [self.name] @property def docstr(self): return imitate_pydoc(self.name) def __repr__(self): return '<%s: %s>' % (type(self).__name__, self.name) def imitate_pydoc(string): """ It's not possible to get the pydoc's without starting the annoying pager stuff. """ # str needed because of possible unicode stuff in py2k (pydoc doesn't work # with unicode strings) string = str(string) h = pydoc.help with common.ignored(KeyError): # try to access symbols string = h.symbols[string] string, _, related = string.partition(' ') get_target = lambda s: h.topics.get(s, h.keywords.get(s)) while isinstance(string, str): string = get_target(string) try: # is a tuple now label, related = string except TypeError: return '' try: return pydoc_topics.topics[label] if pydoc_topics else '' except KeyError: return '' jedi-0.7.0/jedi/evaluate_representation.py0000664000175000017500000010645212204171717021062 0ustar daviddavid00000000000000""" Like described in the :mod:`parsing_representation` module, there's a need for an ast like module to represent the states of parsed modules. But now there are also structures in Python that need a little bit more than that. An ``Instance`` for example is only a ``Class`` before it is instantiated. 
This class represents these cases. So, why is there also a ``Class`` class here? Well, there are decorators and they change classes in Python 3. """ from __future__ import with_statement import copy import itertools from jedi._compatibility import use_metaclass, next, hasattr, unicode from jedi import parsing_representation as pr from jedi import cache from jedi import helpers from jedi import debug from jedi import common import recursion import docstrings import imports import evaluate import builtin import dynamic class Executable(pr.IsScope): """ An instance is also an executable - because __init__ is called :param var_args: The param input array, consist of `pr.Array` or list. """ def __init__(self, base, var_args=()): self.base = base self.var_args = var_args def get_parent_until(self, *args, **kwargs): return self.decorated.get_parent_until(*args, **kwargs) @property def parent(self): return self.decorated.parent @property def decorated(self): """ Instance doesn't care about decorators and Execution overrides this """ return self.base class Instance(use_metaclass(cache.CachedMetaClass, Executable)): """ This class is used to evaluate instances. """ def __init__(self, base, var_args=()): super(Instance, self).__init__(base, var_args) if str(base.name) in ['list', 'set'] \ and builtin.Builtin.scope == base.get_parent_until(): # compare the module path with the builtin name. self.var_args = dynamic.check_array_instances(self) else: # need to execute the __init__ function, because the dynamic param # searching needs it. with common.ignored(KeyError): self.execute_subscope_by_name('__init__', self.var_args) # Generated instances are classes that are just generated by self # (No var_args) used. self.is_generated = False @cache.memoize_default() def _get_method_execution(self, func): func = InstanceElement(self, func, True) return Execution(func, self.var_args) def _get_func_self_name(self, func): """ Returns the name of the first param in a class method (which is normally self. """ try: return str(func.params[0].used_vars[0]) except IndexError: return None @cache.memoize_default([]) def _get_self_attributes(self): def add_self_dot_name(name): """ Need to copy and rewrite the name, because names are now ``instance_usage.variable`` instead of ``self.variable``. """ n = copy.copy(name) n.names = n.names[1:] names.append(InstanceElement(self, n)) names = [] # This loop adds the names of the self object, copies them and removes # the self. for sub in self.base.subscopes: if isinstance(sub, pr.Class): continue # Get the self name, if there's one. self_name = self._get_func_self_name(sub) if not self_name: continue if sub.name.get_code() == '__init__': # ``__init__`` is special because the params need are injected # this way. Therefore an execution is necessary. if not sub.decorators: # __init__ decorators should generally just be ignored, # because to follow them and their self variables is too # complicated. sub = self._get_method_execution(sub) for n in sub.get_set_vars(): # Only names with the selfname are being added. 
# It is also important, that they have a len() of 2, # because otherwise, they are just something else if n.names[0] == self_name and len(n.names) == 2: add_self_dot_name(n) for s in self.base.get_super_classes(): names += Instance(s)._get_self_attributes() return names def get_subscope_by_name(self, name): sub = self.base.get_subscope_by_name(name) return InstanceElement(self, sub, True) def execute_subscope_by_name(self, name, args=()): method = self.get_subscope_by_name(name) return Execution(method, args).get_return_types() def get_descriptor_return(self, obj): """ Throws a KeyError if there's no method. """ # Arguments in __get__ descriptors are obj, class. # `method` is the new parent of the array, don't know if that's good. args = [obj, obj.base] if isinstance(obj, Instance) else [None, obj] return self.execute_subscope_by_name('__get__', args) @cache.memoize_default([]) def get_defined_names(self): """ Get the instance vars of a class. This includes the vars of all classes """ names = self._get_self_attributes() class_names = self.base.get_defined_names() for var in class_names: names.append(InstanceElement(self, var, True)) return names def scope_generator(self): """ An Instance has two scopes: The scope with self names and the class scope. Instance variables have priority over the class scope. """ yield self, self._get_self_attributes() names = [] class_names = self.base.get_defined_names() for var in class_names: names.append(InstanceElement(self, var, True)) yield self, names def get_index_types(self, index=None): args = [] if index is None else [index] try: return self.execute_subscope_by_name('__getitem__', args) except KeyError: debug.warning('No __getitem__, cannot access the array.') return [] def __getattr__(self, name): if name not in ['start_pos', 'end_pos', 'name', 'get_imports', 'doc', 'docstr', 'asserts']: raise AttributeError("Instance %s: Don't touch this (%s)!" % (self, name)) return getattr(self.base, name) def __repr__(self): return "" % \ (type(self).__name__, self.base, len(self.var_args or [])) class InstanceElement(use_metaclass(cache.CachedMetaClass, pr.Base)): """ InstanceElement is a wrapper for any object, that is used as an instance variable (e.g. self.variable or class methods). """ def __init__(self, instance, var, is_class_var=False): if isinstance(var, pr.Function): var = Function(var) elif isinstance(var, pr.Class): var = Class(var) self.instance = instance self.var = var self.is_class_var = is_class_var @property @cache.memoize_default() def parent(self): par = self.var.parent if isinstance(par, Class) and par == self.instance.base \ or isinstance(par, pr.Class) \ and par == self.instance.base.base: par = self.instance elif not isinstance(par, pr.Module): par = InstanceElement(self.instance, par, self.is_class_var) return par def get_parent_until(self, *args, **kwargs): return pr.Simple.get_parent_until(self, *args, **kwargs) def get_decorated_func(self): """ Needed because the InstanceElement should not be stripped """ func = self.var.get_decorated_func(self.instance) if func == self.var: return self return func def get_commands(self): # Copy and modify the array. 
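        # Wrap every non-string command in an InstanceElement, so that
        # following it later still points back to this instance.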
return [InstanceElement(self.instance, command, self.is_class_var) if not isinstance(command, unicode) else command for command in self.var.get_commands()] def __iter__(self): for el in self.var.__iter__(): yield InstanceElement(self.instance, el, self.is_class_var) def __getattr__(self, name): return getattr(self.var, name) def isinstance(self, *cls): return isinstance(self.var, cls) def __repr__(self): return "<%s of %s>" % (type(self).__name__, self.var) class Class(use_metaclass(cache.CachedMetaClass, pr.IsScope)): """ This class is not only important to extend `pr.Class`, it is also a important for descriptors (if the descriptor methods are evaluated or not). """ def __init__(self, base): self.base = base @cache.memoize_default(default=()) def get_super_classes(self): supers = [] # TODO care for mro stuff (multiple super classes). for s in self.base.supers: # Super classes are statements. for cls in evaluate.follow_statement(s): if not isinstance(cls, Class): debug.warning('Received non class, as a super class') continue # Just ignore other stuff (user input error). supers.append(cls) if not supers and self.base.parent != builtin.Builtin.scope: # add `object` to classes supers += evaluate.find_name(builtin.Builtin.scope, 'object') return supers @cache.memoize_default(default=()) def get_defined_names(self): def in_iterable(name, iterable): """ checks if the name is in the variable 'iterable'. """ for i in iterable: # Only the last name is important, because these names have a # maximal length of 2, with the first one being `self`. if i.names[-1] == name.names[-1]: return True return False result = self.base.get_defined_names() super_result = [] # TODO mro! for cls in self.get_super_classes(): # Get the inherited names. for i in cls.get_defined_names(): if not in_iterable(i, result): super_result.append(i) result += super_result return result def get_subscope_by_name(self, name): for sub in reversed(self.subscopes): if sub.name.get_code() == name: return sub raise KeyError("Couldn't find subscope.") @property def name(self): return self.base.name def __getattr__(self, name): if name not in ['start_pos', 'end_pos', 'parent', 'asserts', 'docstr', 'doc', 'get_imports', 'get_parent_until', 'get_code', 'subscopes']: raise AttributeError("Don't touch this: %s of %s !" % (name, self)) return getattr(self.base, name) def __repr__(self): return "" % (type(self).__name__, self.base) class Function(use_metaclass(cache.CachedMetaClass, pr.IsScope)): """ Needed because of decorators. Decorators are evaluated here. """ def __init__(self, func, is_decorated=False): """ This should not be called directly """ self.base_func = func self.is_decorated = is_decorated @cache.memoize_default() def _decorated_func(self, instance=None): """ Returns the function, that is to be executed in the end. This is also the places where the decorators are processed. """ f = self.base_func # Only enter it, if has not already been processed. if not self.is_decorated: for dec in reversed(self.base_func.decorators): debug.dbg('decorator:', dec, f) dec_results = set(evaluate.follow_statement(dec)) if not len(dec_results): debug.warning('decorator not found: %s on %s' % (dec, self.base_func)) return None decorator = dec_results.pop() if dec_results: debug.warning('multiple decorators found', self.base_func, dec_results) # Create param array. 
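                # The decorator is executed with the (possibly instance bound)
                # original function as its only argument.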
old_func = Function(f, is_decorated=True) if instance is not None and decorator.isinstance(Function): old_func = InstanceElement(instance, old_func) instance = None wrappers = Execution(decorator, (old_func,)).get_return_types() if not len(wrappers): debug.warning('no wrappers found', self.base_func) return None if len(wrappers) > 1: debug.warning('multiple wrappers found', self.base_func, wrappers) # This is here, that the wrapper gets executed. f = wrappers[0] debug.dbg('decorator end', f) if f != self.base_func and isinstance(f, pr.Function): f = Function(f) return f def get_decorated_func(self, instance=None): decorated_func = self._decorated_func(instance) if decorated_func == self.base_func: return self if decorated_func is None: # If the decorator func is not found, just ignore the decorator # function, because sometimes decorators are just really # complicated. return Function(self.base_func, True) return decorated_func def get_magic_method_names(self): return builtin.Builtin.magic_function_scope.get_defined_names() def get_magic_method_scope(self): return builtin.Builtin.magic_function_scope def __getattr__(self, name): return getattr(self.base_func, name) def __repr__(self): dec = '' if self._decorated_func() != self.base_func: dec = " is " + repr(self._decorated_func()) return "" % (type(self).__name__, self.base_func, dec) class Execution(Executable): """ This class is used to evaluate functions and their returns. This is the most complicated class, because it contains the logic to transfer parameters. It is even more complicated, because there may be multiple calls to functions and recursion has to be avoided. But this is responsibility of the decorators. """ def follow_var_arg(self, index): try: stmt = self.var_args[index] except IndexError: return [] else: if isinstance(stmt, pr.Statement): return evaluate.follow_statement(stmt) else: return [stmt] # just some arbitrary object @property @cache.memoize_default() def decorated(self): """Get the decorated version of the input""" base = self.base if self.base.isinstance(Function): base = base.get_decorated_func() return base @cache.memoize_default(default=()) @recursion.ExecutionRecursionDecorator def get_return_types(self, evaluate_generator=False): """ Get the return types of a function. 
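        Builtins such as ``getattr``, ``type`` and ``super`` are special cased
        below; executing a class yields an ``Instance`` and executing a
        generator function yields a ``Generator``.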
""" base = self.decorated stmts = [] if base.parent == builtin.Builtin.scope \ and not isinstance(base, (Generator, Array)): func_name = str(base.name) # some implementations of builtins: if func_name == 'getattr': # follow the first param objects = self.follow_var_arg(0) names = self.follow_var_arg(1) for obj in objects: if not isinstance(obj, (Instance, Class, pr.Module)): debug.warning('getattr called without instance') continue for arr_name in names: if not isinstance(arr_name, Instance): debug.warning('getattr called without str') continue if len(arr_name.var_args) != 1: debug.warning('jedi getattr is too simple') key = arr_name.var_args[0] stmts += evaluate.follow_path(iter([key]), obj, base) return stmts elif func_name == 'type': # otherwise it would be a metaclass if len(self.var_args) == 1: objects = self.follow_var_arg(0) return [o.base for o in objects if isinstance(o, Instance)] elif func_name == 'super': # TODO make this able to detect multiple inheritance supers accept = (pr.Function,) func = self.var_args.get_parent_until(accept) if func.isinstance(*accept): cls = func.get_parent_until(accept + (pr.Class,), include_current=False) if isinstance(cls, pr.Class): cls = Class(cls) su = cls.get_super_classes() if su: return [Instance(su[0])] return [] if base.isinstance(Class): # There maybe executions of executions. stmts = [Instance(base, self.var_args)] elif isinstance(base, Generator): return base.iter_content() else: try: base.returns # Test if it is a function except AttributeError: if hasattr(base, 'execute_subscope_by_name'): try: stmts = base.execute_subscope_by_name('__call__', self.var_args) except KeyError: debug.warning("no __call__ func available", base) else: debug.warning("no execution possible", base) else: stmts = self._get_function_returns(base, evaluate_generator) debug.dbg('exec result: %s in %s' % (stmts, self)) return imports.strip_imports(stmts) def _get_function_returns(self, func, evaluate_generator): """ A normal Function execution """ # Feed the listeners, with the params. for listener in func.listeners: listener.execute(self.get_params()) if func.is_generator and not evaluate_generator: return [Generator(func, self.var_args)] else: stmts = docstrings.find_return_types(func) for r in self.returns: if r is not None: stmts += evaluate.follow_statement(r) return stmts @cache.memoize_default(default=()) def get_params(self): """ This returns the params for an Execution/Instance and is injected as a 'hack' into the pr.Function class. This needs to be here, because Instance can have __init__ functions, which act the same way as normal functions. """ def gen_param_name_copy(param, keys=(), values=(), array_type=None): """ Create a param with the original scope (of varargs) as parent. 
""" if isinstance(self.var_args, pr.Array): parent = self.var_args.parent start_pos = self.var_args.start_pos else: parent = self.decorated start_pos = 0, 0 new_param = copy.copy(param) new_param.is_generated = True if parent is not None: new_param.parent = parent # create an Array (-> needed for *args/**kwargs tuples/dicts) arr = pr.Array(self._sub_module, start_pos, array_type, parent) arr.values = values key_stmts = [] for key in keys: stmt = pr.Statement(self._sub_module, [], [], [], start_pos, None) stmt._commands = [key] key_stmts.append(stmt) arr.keys = key_stmts arr.type = array_type new_param._commands = [arr] name = copy.copy(param.get_name()) name.parent = new_param return name result = [] start_offset = 0 if isinstance(self.decorated, InstanceElement): # Care for self -> just exclude it and add the instance start_offset = 1 self_name = copy.copy(self.decorated.params[0].get_name()) self_name.parent = self.decorated.instance result.append(self_name) param_dict = {} for param in self.decorated.params: param_dict[str(param.get_name())] = param # There may be calls, which don't fit all the params, this just ignores # it. var_arg_iterator = self.get_var_args_iterator() non_matching_keys = [] keys_used = set() keys_only = False for param in self.decorated.params[start_offset:]: # The value and key can both be null. There, the defaults apply. # args / kwargs will just be empty arrays / dicts, respectively. # Wrong value count is just ignored. If you try to test cases that # are not allowed in Python, Jedi will maybe not show any # completions. key, value = next(var_arg_iterator, (None, None)) while key: keys_only = True try: key_param = param_dict[str(key)] except KeyError: non_matching_keys.append((key, value)) else: keys_used.add(str(key)) result.append(gen_param_name_copy(key_param, values=[value])) key, value = next(var_arg_iterator, (None, None)) commands = param.get_commands() keys = [] values = [] array_type = None ignore_creation = False if commands[0] == '*': # *args param array_type = pr.Array.TUPLE if value: values.append(value) for key, value in var_arg_iterator: # Iterate until a key argument is found. if key: var_arg_iterator.push_back((key, value)) break values.append(value) elif commands[0] == '**': # **kwargs param array_type = pr.Array.DICT if non_matching_keys: keys, values = zip(*non_matching_keys) elif not keys_only: # normal param if value is not None: values = [value] else: if param.assignment_details: # No value: return the default values. ignore_creation = True result.append(param.get_name()) param.is_generated = True else: # If there is no assignment detail, that means there is # no assignment, just the result. Therefore nothing has # to be returned. values = [] # Just ignore all the params that are without a key, after one # keyword argument was set. if not ignore_creation and (not keys_only or commands[0] == '**'): keys_used.add(str(key)) result.append(gen_param_name_copy(param, keys=keys, values=values, array_type=array_type)) if keys_only: # sometimes param arguments are not completely written (which would # create an Exception, but we have to handle that). for k in set(param_dict) - keys_used: result.append(gen_param_name_copy(param_dict[k])) return result def get_var_args_iterator(self): """ Yields a key/value pair, the key is None, if its not a named arg. """ def iterate(): # `var_args` is typically an Array, and not a list. 
for stmt in self.var_args: if not isinstance(stmt, pr.Statement): if stmt is None: yield None, None continue old = stmt # generate a statement if it's not already one. module = builtin.Builtin.scope stmt = pr.Statement(module, [], [], [], (0, 0), None) stmt._commands = [old] # *args commands = stmt.get_commands() if not len(commands): continue if commands[0] == '*': arrays = evaluate.follow_call_list(commands[1:]) # *args must be some sort of an array, otherwise -> ignore for array in arrays: if isinstance(array, Array): for field_stmt in array: # yield from plz! yield None, field_stmt elif isinstance(array, Generator): for field_stmt in array.iter_content(): yield None, helpers.FakeStatement(field_stmt) # **kwargs elif commands[0] == '**': arrays = evaluate.follow_call_list(commands[1:]) for array in arrays: if isinstance(array, Array): for key_stmt, value_stmt in array.items(): # first index, is the key if syntactically correct call = key_stmt.get_commands()[0] if isinstance(call, pr.Name): yield call, value_stmt elif type(call) is pr.Call: yield call.name, value_stmt # Normal arguments (including key arguments). else: if stmt.assignment_details: key_arr, op = stmt.assignment_details[0] # named parameter if key_arr and isinstance(key_arr[0], pr.Call): yield key_arr[0].name, stmt else: yield None, stmt return iter(common.PushBackIterator(iterate())) def get_defined_names(self): """ Call the default method with the own instance (self implements all the necessary functions). Add also the params. """ return self.get_params() + pr.Scope.get_set_vars(self) get_set_vars = get_defined_names @common.rethrow_uncaught def copy_properties(self, prop): """ Literally copies a property of a Function. Copying is very expensive, because it is something like `copy.deepcopy`. However, these copied objects can be used for the executions, as if they were in the execution. """ # Copy all these lists into this local function. attr = getattr(self.decorated, prop) objects = [] for element in attr: if element is None: copied = element else: copied = helpers.fast_parent_copy(element) copied.parent = self._scope_copy(copied.parent) if isinstance(copied, pr.Function): copied = Function(copied) objects.append(copied) return objects def __getattr__(self, name): if name not in ['start_pos', 'end_pos', 'imports', '_sub_module']: raise AttributeError('Tried to access %s: %s. Why?' % (name, self)) return getattr(self.decorated, name) @cache.memoize_default() @common.rethrow_uncaught def _scope_copy(self, scope): """ Copies a scope (e.g. if) in an execution """ # TODO method uses different scopes than the subscopes property. # just check the start_pos, sometimes it's difficult with closures # to compare the scopes directly. 
if scope.start_pos == self.start_pos: return self else: copied = helpers.fast_parent_copy(scope) copied.parent = self._scope_copy(copied.parent) return copied @property @cache.memoize_default() def returns(self): return self.copy_properties('returns') @property @cache.memoize_default() def asserts(self): return self.copy_properties('asserts') @property @cache.memoize_default() def statements(self): return self.copy_properties('statements') @property @cache.memoize_default() def subscopes(self): return self.copy_properties('subscopes') def get_statement_for_position(self, pos): return pr.Scope.get_statement_for_position(self, pos) def __repr__(self): return "<%s of %s>" % \ (type(self).__name__, self.decorated) class Generator(use_metaclass(cache.CachedMetaClass, pr.Base)): """ Cares for `yield` statements. """ def __init__(self, func, var_args): super(Generator, self).__init__() self.func = func self.var_args = var_args def get_defined_names(self): """ Returns a list of names that define a generator, which can return the content of a generator. """ names = [] none_pos = (0, 0) executes_generator = ('__next__', 'send') for n in ('close', 'throw') + executes_generator: name = pr.Name(builtin.Builtin.scope, [(n, none_pos)], none_pos, none_pos) if n in executes_generator: name.parent = self else: name.parent = builtin.Builtin.scope names.append(name) debug.dbg('generator names', names) return names def iter_content(self): """ returns the content of __iter__ """ return Execution(self.func, self.var_args).get_return_types(True) def get_index_types(self, index=None): debug.warning('Tried to get array access on a generator', self) return [] def __getattr__(self, name): if name not in ['start_pos', 'end_pos', 'parent', 'get_imports', 'asserts', 'doc', 'docstr', 'get_parent_until', 'get_code', 'subscopes']: raise AttributeError("Accessing %s of %s is not allowed." % (self, name)) return getattr(self.func, name) def __repr__(self): return "<%s of %s>" % (type(self).__name__, self.func) class Array(use_metaclass(cache.CachedMetaClass, pr.Base)): """ Used as a mirror to pr.Array, if needed. It defines some getter methods which are important in this module. """ def __init__(self, array): self._array = array def get_index_types(self, index_arr=None): """ Get the types of a specific index or all, if not given """ if index_arr is not None: if index_arr and [x for x in index_arr if ':' in x.get_commands()]: # array slicing return [self] index_possibilities = self._follow_values(index_arr) if len(index_possibilities) == 1: # This is indexing only one element, with a fixed index number, # otherwise it just ignores the index (e.g. [1+1]). index = index_possibilities[0] if isinstance(index, Instance) \ and str(index.name) in ['int', 'str'] \ and len(index.var_args) == 1: # TODO this is just very hackish and a lot of use cases are # being ignored with common.ignored(KeyError, IndexError, UnboundLocalError, TypeError): return self.get_exact_index_types(index.var_args[0]) result = list(self._follow_values(self._array.values)) result += dynamic.check_array_additions(self) return set(result) def get_exact_index_types(self, mixed_index): """ Here the index is an int/str. Raises IndexError/KeyError """ index = mixed_index if self.type == pr.Array.DICT: index = None for i, key_statement in enumerate(self._array.keys): # Because we only want the key to be a string. 
key_commands = key_statement.get_commands() if len(key_commands) != 1: # cannot deal with complex strings continue key = key_commands[0] if isinstance(key, pr.Call) and key.type == pr.Call.STRING: str_key = key.name elif isinstance(key, pr.Name): str_key = str(key) if mixed_index == str_key: index = i break if index is None: raise KeyError('No key found in dictionary') # Can raise an IndexError values = [self._array.values[index]] return self._follow_values(values) def _follow_values(self, values): """ helper function for the index getters """ return list(itertools.chain.from_iterable(evaluate.follow_statement(v) for v in values)) def get_defined_names(self): """ This method generates all `ArrayMethod` for one pr.Array. It returns e.g. for a list: append, pop, ... """ # `array.type` is a string with the type, e.g. 'list'. scope = evaluate.find_name(builtin.Builtin.scope, self._array.type)[0] scope = Instance(scope) names = scope.get_defined_names() return [ArrayMethod(n) for n in names] @property def parent(self): return builtin.Builtin.scope def get_parent_until(self): return builtin.Builtin.scope def __getattr__(self, name): if name not in ['type', 'start_pos', 'get_only_subelement', 'parent', 'get_parent_until', 'items']: raise AttributeError('Strange access on %s: %s.' % (self, name)) return getattr(self._array, name) def __getitem__(self): return self._array.__getitem__() def __iter__(self): return self._array.__iter__() def __len__(self): return self._array.__len__() def __repr__(self): return "" % (type(self).__name__, self._array) class ArrayMethod(object): """ A name, e.g. `list.append`, it is used to access the original array methods. """ def __init__(self, name): super(ArrayMethod, self).__init__() self.name = name def __getattr__(self, name): # Set access privileges: if name not in ['parent', 'names', 'start_pos', 'end_pos', 'get_code']: raise AttributeError('Strange accesson %s: %s.' % (self, name)) return getattr(self.name, name) def get_parent_until(self): return builtin.Builtin.scope def __repr__(self): return "<%s of %s>" % (type(self).__name__, self.name) jedi-0.7.0/jedi/utils.py0000664000175000017500000000672412204171717015273 0ustar daviddavid00000000000000""" Utilities for end-users. """ from __future__ import absolute_import import __main__ from jedi import Interpreter def setup_readline(namespace_module=__main__): """ Install Jedi completer to :mod:`readline`. This function setups :mod:`readline` to use Jedi in Python interactive shell. If you want to use a custom ``PYTHONSTARTUP`` file (typically ``$HOME/.pythonrc.py``), you can add this piece of code:: try: from jedi.utils import setup_readline setup_readline() except ImportError: # Fallback to the stdlib readline completer if it is installed. # Taken from http://docs.python.org/2/library/rlcompleter.html print("Jedi is not installed, falling back to readline") try: import readline import rlcompleter readline.parse_and_bind("tab: complete") except ImportError: print("Readline is not installed either. No tab completion is enabled.") This will fallback to the readline completer if Jedi is not installed. The readline completer will only complete names in the global namespace, so for example, >>> ran # doctest: +SKIP will complete to ``range`` with both Jedi and readline, but >>> range(10).cou # doctest: +SKIP will show complete to ``range(10).count`` only with Jedi. You'll also need to add ``export PYTHONSTARTUP=$HOME/.pythonrc.py`` to your shell profile (usually ``.bash_profile`` or ``.profile`` if you use bash). 
""" class JediRL(): def complete(self, text, state): """ This complete stuff is pretty weird, a generator would make a lot more sense, but probably due to backwards compatibility this is still the way how it works. The only important part is stuff in the ``state == 0`` flow, everything else has been copied from the ``rlcompleter`` std. library module. """ if state == 0: import os, sys sys.path.insert(0, os.getcwd()) # Calling python doesn't have a path, so add to sys.path. try: interpreter = Interpreter(text, [namespace_module.__dict__]) path, dot, like = interpreter._get_completion_parts() before = text[:len(text) - len(like)] completions = interpreter.completions() finally: sys.path.pop(0) self.matches = [before + c.name_with_symbols for c in completions] try: return self.matches[state] except IndexError: return None try: import readline except ImportError: print("Module readline not available.") else: readline.set_completer(JediRL().complete) readline.parse_and_bind("tab: complete") # jedi itself does the case matching readline.parse_and_bind("set completion-ignore-case on") # because it's easier to hit the tab just once readline.parse_and_bind("set show-all-if-unmodified") readline.parse_and_bind("set show-all-if-ambiguous on") # don't repeat all the things written in the readline all the time readline.parse_and_bind("set completion-prefix-display-length 2") # No delimiters, Jedi handles that. readline.set_completer_delims('') jedi-0.7.0/jedi/api.py0000664000175000017500000006211412204171717014677 0ustar daviddavid00000000000000""" The API basically only provides one class. You can create a :class:`Script` and use its methods. Additionally you can add a debug function with :func:`set_debug_function` and catch :exc:`NotFoundError` which is being raised if your completion is not possible. """ from __future__ import with_statement import re import os import warnings from itertools import chain from jedi import parsing from jedi import parsing_representation as pr from jedi import debug from jedi import settings from jedi import helpers from jedi import common from jedi import cache from jedi import modules from jedi import interpret from jedi._compatibility import next, unicode, builtins import keywords import evaluate import api_classes import evaluate_representation as er import dynamic import imports import builtin class NotFoundError(Exception): """A custom error to avoid catching the wrong exceptions.""" class Script(object): """ A Script is the base for completions, goto or whatever you want to do with |jedi|. :param source: The source code of the current file, separated by newlines. :type source: str :param line: The line to perform actions on (starting with 1). :type line: int :param col: The column of the cursor (starting with 0). :type col: int :param path: The path of the file in the file system, or ``''`` if it hasn't been saved yet. :type path: str or None :param source_encoding: The encoding of ``source``, if it is not a ``unicode`` object (default ``'utf-8'``). 
:type source_encoding: str """ def __init__(self, source, line=None, column=None, path=None, source_encoding='utf-8', source_path=None): if source_path is not None: warnings.warn("Use path instead of source_path.", DeprecationWarning) path = source_path lines = source.splitlines() if source and source[-1] == '\n': lines.append('') self._line = max(len(lines), 1) if line is None else line self._column = len(lines[-1]) if column is None else column api_classes._clear_caches() debug.reset_time() self.source = modules.source_to_unicode(source, source_encoding) self.pos = self._line, self._column self._module = modules.ModuleWithCursor( path, source=self.source, position=self.pos) self._source_path = path self.path = None if path is None else os.path.abspath(path) debug.speed('init') @property def source_path(self): """ .. deprecated:: 0.7.0 Use :attr:`.path` instead. .. todo:: Remove! """ warnings.warn("Use path instead of source_path.", DeprecationWarning) return self.path def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, repr(self._source_path)) @property def _parser(self): """ lazy parser.""" return self._module.parser @api_classes._clear_caches_after_call def completions(self): """ Return :class:`api_classes.Completion` objects. Those objects contain information about the completions, more than just names. :return: Completion objects, sorted by name and __ comes last. :rtype: list of :class:`api_classes.Completion` """ def get_completions(user_stmt, bs): if isinstance(user_stmt, pr.Import): context = self._module.get_context() next(context) # skip the path if next(context) == 'from': # completion is just "import" if before stands from .. return ((k, bs) for k in keywords.keyword_names('import')) return self._simple_complete(path, like) debug.speed('completions start') path = self._module.get_path_until_cursor() if re.search('^\.|\.\.$', path): return [] path, dot, like = self._get_completion_parts() user_stmt = self._user_stmt(True) bs = builtin.Builtin.scope completions = get_completions(user_stmt, bs) if not dot: # named params have no dots for call_def in self.call_signatures(): if not call_def.module.is_builtin(): for p in call_def.params: completions.append((p.get_name(), p)) if not path and not isinstance(user_stmt, pr.Import): # add keywords completions += ((k, bs) for k in keywords.keyword_names( all=True)) needs_dot = not dot and path comps = [] comp_dct = {} for c, s in set(completions): n = c.names[-1] if settings.case_insensitive_completion \ and n.lower().startswith(like.lower()) \ or n.startswith(like): if not evaluate.filter_private_variable(s, user_stmt or self._parser.user_scope, n): new = api_classes.Completion(c, needs_dot, len(like), s) k = (new.name, new.complete) # key if k in comp_dct and settings.no_completion_duplicates: comp_dct[k]._same_name_completions.append(new) else: comp_dct[k] = new comps.append(new) debug.speed('completions end') return sorted(comps, key=lambda x: (x.name.startswith('__'), x.name.startswith('_'), x.name.lower())) def _simple_complete(self, path, like): try: scopes = list(self._prepare_goto(path, True)) except NotFoundError: scopes = [] scope_generator = evaluate.get_names_of_scope( self._parser.user_scope, self.pos) completions = [] for scope, name_list in scope_generator: for c in name_list: completions.append((c, scope)) else: completions = [] debug.dbg('possible scopes', scopes) for s in scopes: if s.isinstance(er.Function): names = s.get_magic_method_names() else: if isinstance(s, imports.ImportPath): under = like + 
self._module.get_path_after_cursor() if under == 'import': current_line = self._module.get_position_line() if not current_line.endswith('import import'): continue a = s.import_stmt.alias if a and a.start_pos <= self.pos <= a.end_pos: continue names = s.get_defined_names(on_import_stmt=True) else: names = s.get_defined_names() for c in names: completions.append((c, s)) return completions def _user_stmt(self, is_completion=False): user_stmt = self._parser.user_stmt debug.speed('parsed') if is_completion and not user_stmt: # for statements like `from x import ` (cursor not in statement) pos = next(self._module.get_context(yield_positions=True)) last_stmt = pos and self._parser.module.get_statement_for_position( pos, include_imports=True) if isinstance(last_stmt, pr.Import): user_stmt = last_stmt return user_stmt def _prepare_goto(self, goto_path, is_completion=False): """ Base for completions/goto. Basically it returns the resolved scopes under cursor. """ debug.dbg('start: %s in %s' % (goto_path, self._parser.user_scope)) user_stmt = self._user_stmt(is_completion) if not user_stmt and len(goto_path.split('\n')) > 1: # If the user_stmt is not defined and the goto_path is multi line, # something's strange. Most probably the backwards tokenizer # matched to much. return [] if isinstance(user_stmt, pr.Import): scopes = [self._get_on_import_stmt(user_stmt, is_completion)[0]] else: # just parse one statement, take it and evaluate it stmt = self._get_under_cursor_stmt(goto_path) scopes = evaluate.follow_statement(stmt) return scopes def _get_under_cursor_stmt(self, cursor_txt): offset = self.pos[0] - 1, self.pos[1] r = parsing.Parser(cursor_txt, no_docstr=True, offset=offset) try: stmt = r.module.statements[0] except IndexError: raise NotFoundError() stmt.parent = self._parser.user_scope return stmt def complete(self): """ .. deprecated:: 0.6.0 Use :attr:`.completions` instead. .. todo:: Remove! """ warnings.warn("Use completions instead.", DeprecationWarning) return self.completions() def goto(self): """ .. deprecated:: 0.6.0 Use :attr:`.goto_assignments` instead. .. todo:: Remove! """ warnings.warn("Use goto_assignments instead.", DeprecationWarning) return self.goto_assignments() def definition(self): """ .. deprecated:: 0.6.0 Use :attr:`.goto_definitions` instead. .. todo:: Remove! """ warnings.warn("Use goto_definitions instead.", DeprecationWarning) return self.goto_definitions() def get_definition(self): """ .. deprecated:: 0.5.0 Use :attr:`.goto_definitions` instead. .. todo:: Remove! """ warnings.warn("Use goto_definitions instead.", DeprecationWarning) return self.goto_definitions() def related_names(self): """ .. deprecated:: 0.6.0 Use :attr:`.usages` instead. .. todo:: Remove! """ warnings.warn("Use usages instead.", DeprecationWarning) return self.usages() def get_in_function_call(self): """ .. deprecated:: 0.6.0 Use :attr:`.call_signatures` instead. .. todo:: Remove! """ return self.function_definition() def function_definition(self): """ .. deprecated:: 0.6.0 Use :attr:`.call_signatures` instead. .. todo:: Remove! """ warnings.warn("Use line instead.", DeprecationWarning) sig = self.call_signatures() return sig[0] if sig else None @api_classes._clear_caches_after_call def goto_definitions(self): """ Return the definitions of a the path under the cursor. goto function! This follows complicated paths and returns the end, not the first definition. 
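        A rough usage sketch (the source, path and cursor position are
        only illustrative)::

            >>> import jedi
            >>> source = '''import json
            ... json.loads'''
            >>> script = jedi.Script(source, 2, 10, 'example.py')
            >>> definitions = script.goto_definitions()  # doctest: +SKIP
            >>> [(d.module_path, d.line) for d in definitions]  # doctest: +SKIP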
The big difference between :meth:`goto_assignments` and :meth:`goto_definitions` is that :meth:`goto_assignments` doesn't follow imports and statements. Multiple objects may be returned, because Python itself is a dynamic language, which means depending on an option you can have two different versions of a function. :rtype: list of :class:`api_classes.Definition` """ def resolve_import_paths(scopes): for s in scopes.copy(): if isinstance(s, imports.ImportPath): scopes.remove(s) scopes.update(resolve_import_paths(set(s.follow()))) return scopes goto_path = self._module.get_path_under_cursor() context = self._module.get_context() scopes = set() lower_priority_operators = ('()', '(', ',') """Operators that could hide callee.""" if next(context) in ('class', 'def'): scopes = set([self._module.parser.user_scope]) elif not goto_path: op = self._module.get_operator_under_cursor() if op and op not in lower_priority_operators: scopes = set([keywords.get_operator(op, self.pos)]) # Fetch definition of callee if not goto_path: (call, _) = self._func_call_and_param_index() if call is not None: while call.next is not None: call = call.next # reset cursor position: (row, col) = call.name.end_pos self.pos = (row, max(col - 1, 0)) self._module = modules.ModuleWithCursor( self._source_path, source=self.source, position=self.pos) # then try to find the path again goto_path = self._module.get_path_under_cursor() if not scopes: if goto_path: scopes = set(self._prepare_goto(goto_path)) elif op in lower_priority_operators: scopes = set([keywords.get_operator(op, self.pos)]) scopes = resolve_import_paths(scopes) # add keywords scopes |= keywords.keywords(string=goto_path, pos=self.pos) d = set([api_classes.Definition(s) for s in scopes if not isinstance(s, imports.ImportPath._GlobalNamespace)]) return self._sorted_defs(d) @api_classes._clear_caches_after_call def goto_assignments(self): """ Return the first definition found. Imports and statements aren't followed. Multiple objects may be returned, because Python itself is a dynamic language, which means depending on an option you can have two different versions of a function. :rtype: list of :class:`api_classes.Definition` """ results, _ = self._goto() d = [api_classes.Definition(d) for d in set(results) if not isinstance(d, imports.ImportPath._GlobalNamespace)] return self._sorted_defs(d) def _goto(self, add_import_name=False): """ Used for goto_assignments and usages. :param add_import_name: Add the the name (if import) to the result. """ def follow_inexistent_imports(defs): """ Imports can be generated, e.g. following `multiprocessing.dummy` generates an import dummy in the multiprocessing module. The Import doesn't exist -> follow. 
""" definitions = set(defs) for d in defs: if isinstance(d.parent, pr.Import) \ and d.start_pos == (0, 0): i = imports.ImportPath(d.parent).follow(is_goto=True) definitions.remove(d) definitions |= follow_inexistent_imports(i) return definitions goto_path = self._module.get_path_under_cursor() context = self._module.get_context() user_stmt = self._user_stmt() if next(context) in ('class', 'def'): user_scope = self._parser.user_scope definitions = set([user_scope.name]) search_name = unicode(user_scope.name) elif isinstance(user_stmt, pr.Import): s, name_part = self._get_on_import_stmt(user_stmt) try: definitions = [s.follow(is_goto=True)[0]] except IndexError: definitions = [] search_name = unicode(name_part) if add_import_name: import_name = user_stmt.get_defined_names() # imports have only one name if not user_stmt.star \ and name_part == import_name[0].names[-1]: definitions.append(import_name[0]) else: stmt = self._get_under_cursor_stmt(goto_path) defs, search_name = evaluate.goto(stmt) definitions = follow_inexistent_imports(defs) if isinstance(user_stmt, pr.Statement): c = user_stmt.get_commands() if c and not isinstance(c[0], (str, unicode)) and \ c[0].start_pos > self.pos: # The cursor must be after the start, otherwise the # statement is just an assignee. definitions = [user_stmt] return definitions, search_name @api_classes._clear_caches_after_call def usages(self, additional_module_paths=()): """ Return :class:`api_classes.Usage` objects, which contain all names that point to the definition of the name under the cursor. This is very useful for refactoring (renaming), or to show all usages of a variable. .. todo:: Implement additional_module_paths :rtype: list of :class:`api_classes.Usage` """ temp, settings.dynamic_flow_information = \ settings.dynamic_flow_information, False user_stmt = self._user_stmt() definitions, search_name = self._goto(add_import_name=True) if isinstance(user_stmt, pr.Statement): c = user_stmt.get_commands()[0] if not isinstance(c, unicode) and self.pos < c.start_pos: # the search_name might be before `=` definitions = [v for v in user_stmt.set_vars if unicode(v.names[-1]) == search_name] if not isinstance(user_stmt, pr.Import): # import case is looked at with add_import_name option definitions = dynamic.usages_add_import_modules(definitions, search_name) module = set([d.get_parent_until() for d in definitions]) module.add(self._parser.module) names = dynamic.usages(definitions, search_name, module) for d in set(definitions): if isinstance(d, pr.Module): names.append(api_classes.Usage(d, d)) elif isinstance(d, er.Instance): # Instances can be ignored, because they are being created by # ``__getattr__``. pass else: names.append(api_classes.Usage(d.names[-1], d)) settings.dynamic_flow_information = temp return self._sorted_defs(set(names)) @api_classes._clear_caches_after_call def call_signatures(self): """ Return the function object of the call you're currently in. E.g. if the cursor is here:: abs(# <-- cursor is here This would return the ``abs`` function. On the other hand:: abs()# <-- cursor is here This would return ``None``. 
:rtype: :class:`api_classes.CallDef` """ call, index = self._func_call_and_param_index() if call is None: return [] user_stmt = self._user_stmt() with common.scale_speed_settings(settings.scale_function_definition): _callable = lambda: evaluate.follow_call(call) origins = cache.cache_function_definition(_callable, user_stmt) debug.speed('func_call followed') return [api_classes.CallDef(o, index, call) for o in origins if o.isinstance(er.Function, er.Instance, er.Class)] def _func_call_and_param_index(self): debug.speed('func_call start') call, index = None, 0 if call is None: user_stmt = self._user_stmt() if user_stmt is not None and isinstance(user_stmt, pr.Statement): call, index, _ = helpers.search_function_definition( user_stmt, self.pos) debug.speed('func_call parsed') return call, index def _get_on_import_stmt(self, user_stmt, is_like_search=False): """ Resolve the user statement, if it is an import. Only resolve the parts until the user position. """ import_names = user_stmt.get_all_import_names() kill_count = -1 cur_name_part = None for i in import_names: if user_stmt.alias == i: continue for name_part in i.names: if name_part.end_pos >= self.pos: if not cur_name_part: cur_name_part = name_part kill_count += 1 context = self._module.get_context() just_from = next(context) == 'from' i = imports.ImportPath(user_stmt, is_like_search, kill_count=kill_count, direct_resolve=True, is_just_from=just_from) return i, cur_name_part def _get_completion_parts(self): """ Returns the parts for the completion :return: tuple - (path, dot, like) """ path = self._module.get_path_until_cursor() match = re.match(r'^(.*?)(\.|)(\w?[\w\d]*)$', path, flags=re.S) return match.groups() @staticmethod def _sorted_defs(d): # Note: `or ''` below is required because `module_path` could be # None and you can't compare None and str in Python 3. return sorted(d, key=lambda x: (x.module_path or '', x.line, x.column)) class Interpreter(Script): """ Jedi API for Python REPLs. In addition to completion of simple attribute access, Jedi supports code completion based on static code analysis. Jedi can complete attributes of object which is not initialized yet. >>> from os.path import join >>> namespace = locals() >>> script = Interpreter('join().up', [namespace]) >>> print(script.completions()[0].name) upper """ def __init__(self, source, namespaces=[], **kwds): """ Parse `source` and mixin interpreted Python objects from `namespaces`. :type source: str :arg source: Code to parse. :type namespaces: list of dict :arg namespaces: a list of namespace dictionaries such as the one returned by :func:`locals`. Other optional arguments are same as the ones for :class:`Script`. If `line` and `column` are None, they are assumed be at the end of `source`. """ super(Interpreter, self).__init__(source, **kwds) self.namespaces = namespaces # Here we add the namespaces to the current parser. 
importer = interpret.ObjectImporter(self._parser.user_scope) for ns in namespaces: importer.import_raw_namespace(ns) def _simple_complete(self, path, like): user_stmt = self._user_stmt(True) is_simple_path = not path or re.search('^[\w][\w\d.]*$', path) if isinstance(user_stmt, pr.Import) or not is_simple_path: return super(type(self), self)._simple_complete(path, like) else: class NamespaceModule: def __getattr__(_, name): for n in self.namespaces: try: return n[name] except KeyError: pass raise AttributeError() def __dir__(_): return list(set(chain.from_iterable(n.keys() for n in self.namespaces))) paths = path.split('.') if path else [] namespaces = (NamespaceModule(), builtins) for p in paths: old, namespaces = namespaces, [] for n in old: try: namespaces.append(getattr(n, p)) except AttributeError: pass completions = [] for n in namespaces: for name in dir(n): if name.lower().startswith(like.lower()): scope = self._parser.module n = pr.Name(self._parser.module, [(name, (0, 0))], (0, 0), (0, 0), scope) completions.append((n, scope)) return completions def defined_names(source, path=None, source_encoding='utf-8'): """ Get all definitions in `source` sorted by its position. This functions can be used for listing functions, classes and data defined in a file. This can be useful if you want to list them in "sidebar". Each element in the returned list also has `defined_names` method which can be used to get sub-definitions (e.g., methods in class). :rtype: list of api_classes.Definition """ parser = parsing.Parser( modules.source_to_unicode(source, source_encoding), module_path=path, ) return api_classes._defined_names(parser.module) def preload_module(*modules): """ Preloading modules tells Jedi to load a module now, instead of lazy parsing of modules. Usful for IDEs, to control which modules to load on startup. :param modules: different module names, list of string. """ for m in modules: s = "import %s as x; x." % m Script(s, 1, len(s), None).completions() def set_debug_function(func_cb=debug.print_to_stdout, warnings=True, notices=True, speed=True): """ Define a callback debug function to get all the debug messages. :param func_cb: The callback function for debug messages, with n params. """ debug.debug_function = func_cb debug.enable_warning = warnings debug.enable_notice = notices debug.enable_speed = speed jedi-0.7.0/jedi/modules.py0000664000175000017500000003450712204171717015603 0ustar daviddavid00000000000000""" Don't confuse these classes with :mod:`parsing_representation` modules, the modules here can access these representation with ``module.parser.module``. ``Module`` exists mainly for caching purposes. Basically :mod:`modules` offers the classes: - ``CachedModule``, a base class for Cachedmodule. - ``Module`` the class for all normal Python modules (not builtins, they are at home at :mod:`builtin`). - ``ModuleWithCursor``, holds the module information for :class:`api.Script`. Apart from those classes there's a ``sys.path`` fetching function, as well as `Virtual Env` and `Django` detection. """ from __future__ import with_statement import re import tokenizer as tokenize import sys import os from ast import literal_eval from jedi._compatibility import exec_function, unicode from jedi import cache from jedi import parsing_representation as pr from jedi import fast_parser from jedi import debug from jedi import common class CachedModule(object): """ The base type for all modules, which is not to be confused with `parsing_representation.Module`. Caching happens here. 
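    A subclass only has to provide ``_get_source()``; the ``parser``
    property then returns a cached parser or parses the source lazily.
    A minimal sketch (``StringModule`` is not part of the real API)::

        class StringModule(CachedModule):
            def __init__(self, name, source):
                super(StringModule, self).__init__(name=name)
                self._source = source

            def _get_source(self):
                return self._source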
""" def __init__(self, path=None, name=None): self.path = path and os.path.abspath(path) self.name = name self._parser = None @property def parser(self): """ get the parser lazy """ if self._parser is None: self._parser = cache.load_module(self.path, self.name) \ or self._load_module() return self._parser def _get_source(self): raise NotImplementedError() def _load_module(self): source = self._get_source() p = self.path or self.name p = fast_parser.FastParser(source, p) cache.save_module(self.path, self.name, p) return p class Module(CachedModule): """ Manages all files, that are parsed and caches them. :param path: The module path of the file. :param source: The source code of the file. """ def __init__(self, path, source=None): super(Module, self).__init__(path=path) if source is None: with open(path) as f: source = f.read() self.source = source_to_unicode(source) self._line_cache = None def _get_source(self): """ Just one time """ s = self.source del self.source # memory efficiency return s class ModuleWithCursor(Module): """ Manages all files, that are parsed and caches them. Important are the params source and path, one of them has to be there. :param source: The source code of the file. :param path: The module path of the file or None. :param position: The position, the user is currently in. Only important \ for the main file. """ def __init__(self, path, source, position): super(ModuleWithCursor, self).__init__(path, source) self.position = position self.source = source self._path_until_cursor = None # this two are only used, because there is no nonlocal in Python 2 self._line_temp = None self._relevant_temp = None @property def parser(self): """ get the parser lazy """ if not self._parser: with common.ignored(KeyError): parser = cache.parser_cache[self.path].parser cache.invalidate_star_import_cache(parser.module) # Call the parser already here, because it will be used anyways. # Also, the position is here important (which will not be used by # default), therefore fill the cache here. self._parser = fast_parser.FastParser(self.source, self.path, self.position) # don't pickle that module, because it's changing fast cache.save_module(self.path, self.name, self._parser, pickling=False) return self._parser def get_path_until_cursor(self): """ Get the path under the cursor. 
""" if self._path_until_cursor is None: # small caching self._path_until_cursor, self._start_cursor_pos = \ self._get_path_until_cursor(self.position) return self._path_until_cursor def _get_path_until_cursor(self, start_pos=None): def fetch_line(): if self._is_first: self._is_first = False self._line_length = self._column_temp line = self._first_line else: line = self.get_line(self._line_temp) self._line_length = len(line) line = line + '\n' # add lines with a backslash at the end while True: self._line_temp -= 1 last_line = self.get_line(self._line_temp) #print self._line_temp, repr(last_line) if last_line and last_line[-1] == '\\': line = last_line[:-1] + ' ' + line self._line_length = len(last_line) else: break return line[::-1] self._is_first = True self._line_temp, self._column_temp = start_cursor = start_pos self._first_line = self.get_line(self._line_temp)[:self._column_temp] open_brackets = ['(', '[', '{'] close_brackets = [')', ']', '}'] gen = tokenize.generate_tokens(fetch_line) string = '' level = 0 force_point = False last_type = None try: for token_type, tok, start, end, line in gen: # print 'tok', token_type, tok, force_point if last_type == token_type == tokenize.NAME: string += ' ' if level > 0: if tok in close_brackets: level += 1 if tok in open_brackets: level -= 1 elif tok == '.': force_point = False elif force_point: # it is reversed, therefore a number is getting recognized # as a floating point number if token_type == tokenize.NUMBER and tok[0] == '.': force_point = False else: break elif tok in close_brackets: level += 1 elif token_type in [tokenize.NAME, tokenize.STRING]: force_point = True elif token_type == tokenize.NUMBER: pass else: self._column_temp = self._line_length - end[1] break x = start_pos[0] - end[0] + 1 l = self.get_line(x) l = self._first_line if x == start_pos[0] else l start_cursor = x, len(l) - end[1] self._column_temp = self._line_length - end[1] string += tok last_type = token_type except tokenize.TokenError: debug.warning("Tokenize couldn't finish", sys.exc_info) # string can still contain spaces at the end return string[::-1].strip(), start_cursor def get_path_under_cursor(self): """ Return the path under the cursor. If there is a rest of the path left, it will be added to the stuff before it. 
""" return self.get_path_until_cursor() + self.get_path_after_cursor() def get_path_after_cursor(self): line = self.get_line(self.position[0]) return re.search("[\w\d]*", line[self.position[1]:]).group(0) def get_operator_under_cursor(self): line = self.get_line(self.position[0]) after = re.match("[^\w\s]+", line[self.position[1]:]) before = re.match("[^\w\s]+", line[:self.position[1]][::-1]) return (before.group(0) if before is not None else '') \ + (after.group(0) if after is not None else '') def get_context(self, yield_positions=False): pos = self._start_cursor_pos while True: # remove non important white space line = self.get_line(pos[0]) while True: if pos[1] == 0: line = self.get_line(pos[0] - 1) if line and line[-1] == '\\': pos = pos[0] - 1, len(line) - 1 continue else: break if line[pos[1] - 1].isspace(): pos = pos[0], pos[1] - 1 else: break try: result, pos = self._get_path_until_cursor(start_pos=pos) if yield_positions: yield pos else: yield result except StopIteration: if yield_positions: yield None else: yield '' def get_line(self, line_nr): if not self._line_cache: self._line_cache = self.source.splitlines() if self.source: if self.source[-1] == '\n': self._line_cache.append('') else: # ''.splitlines() == [] self._line_cache = [''] if line_nr == 0: # This is a fix for the zeroth line. We need a newline there, for # the backwards parser. return '' if line_nr < 0: raise StopIteration() try: return self._line_cache[line_nr - 1] except IndexError: raise StopIteration() def get_position_line(self): return self.get_line(self.position[0])[:self.position[1]] def get_sys_path(): def check_virtual_env(sys_path): """ Add virtualenv's site-packages to the `sys.path`.""" venv = os.getenv('VIRTUAL_ENV') if not venv: return venv = os.path.abspath(venv) p = os.path.join( venv, 'lib', 'python%d.%d' % sys.version_info[:2], 'site-packages') sys_path.insert(0, p) check_virtual_env(sys.path) return [p for p in sys.path if p != ""] @cache.memoize_default([]) def sys_path_with_modifications(module): def execute_code(code): c = "import os; from os.path import *; result=%s" variables = {'__file__': module.path} try: exec_function(c % code, variables) except Exception: debug.warning('sys path detected, but failed to evaluate') return None try: res = variables['result'] if isinstance(res, str): return os.path.abspath(res) else: return None except KeyError: return None def check_module(module): try: possible_stmts = module.used_names['path'] except KeyError: return get_sys_path() sys_path = list(get_sys_path()) # copy for p in possible_stmts: if not isinstance(p, pr.Statement): continue commands = p.get_commands() if len(commands) != 1: # sys.path command is just one thing. 
continue call = commands[0] n = call.name if not isinstance(n, pr.Name) or len(n.names) != 3: continue if n.names[:2] != ('sys', 'path'): continue array_cmd = n.names[2] if call.execution is None: continue exe = call.execution if not (array_cmd == 'insert' and len(exe) == 2 or array_cmd == 'append' and len(exe) == 1): continue if array_cmd == 'insert': exe_type, exe.type = exe.type, pr.Array.NOARRAY exe_pop = exe.values.pop(0) res = execute_code(exe.get_code()) if res is not None: sys_path.insert(0, res) debug.dbg('sys path inserted: %s' % res) exe.type = exe_type exe.values.insert(0, exe_pop) elif array_cmd == 'append': res = execute_code(exe.get_code()) if res is not None: sys_path.append(res) debug.dbg('sys path added: %s' % res) return sys_path if module.path is None: # Support for modules without a path is bad, therefore return the # normal path. return list(get_sys_path()) curdir = os.path.abspath(os.curdir) with common.ignored(OSError): os.chdir(os.path.dirname(module.path)) result = check_module(module) result += detect_django_path(module.path) # cleanup, back to old directory os.chdir(curdir) return result def detect_django_path(module_path): """ Detects the path of the very well known Django library (if used) """ result = [] while True: new = os.path.dirname(module_path) # If the module_path doesn't change anymore, we're finished -> / if new == module_path: break else: module_path = new with common.ignored(IOError): with open(module_path + os.path.sep + 'manage.py'): debug.dbg('Found django path: %s' % module_path) result.append(module_path) return result def source_to_unicode(source, encoding=None): def detect_encoding(): """ For the implementation of encoding definitions in Python, look at: http://www.python.org/dev/peps/pep-0263/ http://docs.python.org/2/reference/lexical_analysis.html#encoding-\ declarations """ byte_mark = literal_eval(r"b'\xef\xbb\xbf'") if source.startswith(byte_mark): # UTF-8 byte-order mark return 'utf-8' first_two_lines = re.match(r'(?:[^\n]*\n){0,2}', str(source)).group(0) possible_encoding = re.search(r"coding[=:]\s*([-\w.]+)", first_two_lines) if possible_encoding: return possible_encoding.group(1) else: # the default if nothing else has been set -> PEP 263 return encoding if encoding is not None else 'iso-8859-1' if isinstance(source, unicode): # only cast str/bytes return source # cast to unicode by default return unicode(source, detect_encoding(), 'replace') jedi-0.7.0/jedi/refactoring.py0000664000175000017500000001557012204171717016435 0ustar daviddavid00000000000000""" Introduce some basic refactoring functions to |jedi|. This module is still in a very early development stage and needs much testing and improvement. .. warning:: I won't do too much here, but if anyone wants to step in, please do. 
Refactoring is none of my priorities It uses the |jedi| `API `_ and supports currently the following functions (sometimes bug-prone): - rename - extract variable - inline variable """ from __future__ import with_statement import difflib from jedi import common from jedi import modules from jedi import helpers from jedi import parsing_representation as pr class Refactoring(object): def __init__(self, change_dct): """ :param change_dct: dict(old_path=(new_path, old_lines, new_lines)) """ self.change_dct = change_dct def old_files(self): dct = {} for old_path, (new_path, old_l, new_l) in self.change_dct.items(): dct[new_path] = '\n'.join(new_l) return dct def new_files(self): dct = {} for old_path, (new_path, old_l, new_l) in self.change_dct.items(): dct[new_path] = '\n'.join(new_l) return dct def diff(self): texts = [] for old_path, (new_path, old_l, new_l) in self.change_dct.items(): if old_path: udiff = difflib.unified_diff(old_l, new_l) else: udiff = difflib.unified_diff(old_l, new_l, old_path, new_path) texts.append('\n'.join(udiff)) return '\n'.join(texts) def rename(script, new_name): """ The `args` / `kwargs` params are the same as in `api.Script`. :param operation: The refactoring operation to execute. :type operation: str :type source: str :return: list of changed lines/changed files """ return Refactoring(_rename(script.usages(), new_name)) def _rename(names, replace_str): """ For both rename and inline. """ order = sorted(names, key=lambda x: (x.module_path, x.line, x.column), reverse=True) def process(path, old_lines, new_lines): if new_lines is not None: # goto next file, save last dct[path] = path, old_lines, new_lines dct = {} current_path = object() new_lines = old_lines = None for name in order: if name.in_builtin_module(): continue if current_path != name.module_path: current_path = name.module_path process(current_path, old_lines, new_lines) if current_path is not None: # None means take the source that is a normal param. with open(current_path) as f: source = f.read() new_lines = modules.source_to_unicode(source).splitlines() old_lines = new_lines[:] nr, indent = name.line, name.column line = new_lines[nr - 1] new_lines[nr - 1] = line[:indent] + replace_str + \ line[indent + len(name.text):] process(current_path, old_lines, new_lines) return dct def extract(script, new_name): """ The `args` / `kwargs` params are the same as in `api.Script`. :param operation: The refactoring operation to execute. 
:type operation: str :type source: str :return: list of changed lines/changed files """ new_lines = modules.source_to_unicode(script.source).splitlines() old_lines = new_lines[:] user_stmt = script._parser.user_stmt # TODO care for multiline extracts dct = {} if user_stmt: pos = script.pos line_index = pos[0] - 1 arr, index = helpers.array_for_pos(user_stmt, pos) if arr is not None: start_pos = arr[index].start_pos end_pos = arr[index].end_pos # take full line if the start line is different from end line e = end_pos[1] if end_pos[0] == start_pos[0] else None start_line = new_lines[start_pos[0] - 1] text = start_line[start_pos[1]:e] for l in range(start_pos[0], end_pos[0] - 1): text += '\n' + l if e is None: end_line = new_lines[end_pos[0] - 1] text += '\n' + end_line[:end_pos[1]] # remove code from new lines t = text.lstrip() del_start = start_pos[1] + len(text) - len(t) text = t.rstrip() del_end = len(t) - len(text) if e is None: new_lines[end_pos[0] - 1] = end_line[end_pos[1] - del_end:] e = len(start_line) else: e = e - del_end start_line = start_line[:del_start] + new_name + start_line[e:] new_lines[start_pos[0] - 1] = start_line new_lines[start_pos[0]:end_pos[0] - 1] = [] # add parentheses in multiline case open_brackets = ['(', '[', '{'] close_brackets = [')', ']', '}'] if '\n' in text and not (text[0] in open_brackets and text[-1] == close_brackets[open_brackets.index(text[0])]): text = '(%s)' % text # add new line before statement indent = user_stmt.start_pos[1] new = "%s%s = %s" % (' ' * indent, new_name, text) new_lines.insert(line_index, new) dct[script.path] = script.path, old_lines, new_lines return Refactoring(dct) def inline(script): """ :type script: api.Script """ new_lines = modules.source_to_unicode(script.source).splitlines() dct = {} definitions = script.goto_assignments() with common.ignored(AssertionError): assert len(definitions) == 1 stmt = definitions[0]._definition usages = script.usages() inlines = [r for r in usages if not stmt.start_pos <= (r.line, r.column) <= stmt.end_pos] inlines = sorted(inlines, key=lambda x: (x.module_path, x.line, x.column), reverse=True) commands = stmt.get_commands() # don't allow multiline refactorings for now. assert stmt.start_pos[0] == stmt.end_pos[0] index = stmt.start_pos[0] - 1 line = new_lines[index] replace_str = line[commands[0].start_pos[1]:stmt.end_pos[1] + 1] replace_str = replace_str.strip() # tuples need parentheses if commands and isinstance(commands[0], pr.Array): arr = commands[0] if replace_str[0] not in ['(', '[', '{'] and len(arr) > 1: replace_str = '(%s)' % replace_str # if it's the only assignment, remove the statement if len(stmt.set_vars) == 1: line = line[:stmt.start_pos[1]] + line[stmt.end_pos[1]:] dct = _rename(inlines, replace_str) # remove the empty line new_lines = dct[script.path][2] if line.strip(): new_lines[index] = line else: new_lines.pop(index) return Refactoring(dct) jedi-0.7.0/jedi/tokenizer.py0000664000175000017500000002477412204171717016152 0ustar daviddavid00000000000000""" This tokenizer has been copied from the ``tokenize.py`` standard library tokenizer. The reason was simple: The standanrd library tokenizer fails if the indentation is not right. The fast parser of jedi however requires "wrong" indentation. Basically this is a stripped down version of the standard library module, so you can read the documentation there. 
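A small usage sketch (the exact token stream depends on the Python
version, so treat the output as approximate)::

    lines = iter(['x = 1\n'])
    for token in generate_tokens(lambda: next(lines)):
        print(token)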
""" import string import re from token import * from codecs import lookup, BOM_UTF8 import collections cookie_re = re.compile("coding[:=]\s*([-\w.]+)") namechars = string.ascii_letters + '_' COMMENT = N_TOKENS tok_name[COMMENT] = 'COMMENT' NL = N_TOKENS + 1 tok_name[NL] = 'NL' ENCODING = N_TOKENS + 2 tok_name[ENCODING] = 'ENCODING' N_TOKENS += 3 class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')): def __repr__(self): annotated_type = '%d (%s)' % (self.type, tok_name[self.type]) return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' % self._replace(type=annotated_type)) def group(*choices): return '(' + '|'.join(choices) + ')' def any(*choices): return group(*choices) + '*' def maybe(*choices): return group(*choices) + '?' # Note: we use unicode matching for names ("\w") but ascii matching for # number literals. Whitespace = r'[ \f\t]*' Comment = r'#[^\r\n]*' Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment) Name = r'\w+' Hexnumber = r'0[xX][0-9a-fA-F]+' Binnumber = r'0[bB][01]+' Octnumber = r'0[oO][0-7]+' Decnumber = r'(?:0+|[1-9][0-9]*)' Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber) Exponent = r'[eE][-+]?[0-9]+' Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent) Expfloat = r'[0-9]+' + Exponent Floatnumber = group(Pointfloat, Expfloat) Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]') Number = group(Imagnumber, Floatnumber, Intnumber) # Tail end of ' string. Single = r"[^'\\]*(?:\\.[^'\\]*)*'" # Tail end of " string. Double = r'[^"\\]*(?:\\.[^"\\]*)*"' # Tail end of ''' string. Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" # Tail end of """ string. Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' Triple = group("[bB]?[rR]?'''", '[bB]?[rR]?"""') # Single-line ' or " string. String = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'", r'[bB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"') # Because of leftmost-then-longest match semantics, be sure to put the # longest operators first (e.g., if = came before ==, == would get # recognized as two instances of =). Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=", r"//=?", r"->", r"[+\-*/%&|^=<>]=?", r"~") Bracket = '[][(){}]' Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]') Funny = group(Operator, Bracket, Special) PlainToken = group(Number, Funny, String, Name) Token = Ignore + PlainToken # First (or only) line of ' or " string. 
ContStr = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" + group("'", r'\\\r?\n'), r'[bB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' + group('"', r'\\\r?\n')) PseudoExtras = group(r'\\\r?\n', Comment, Triple) PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) def _compile(expr): return re.compile(expr, re.UNICODE) tokenprog, pseudoprog, single3prog, double3prog = map( _compile, (Token, PseudoToken, Single3, Double3)) endprogs = {"'": _compile(Single), '"': _compile(Double), "'''": single3prog, '"""': double3prog, "r'''": single3prog, 'r"""': double3prog, "b'''": single3prog, 'b"""': double3prog, "br'''": single3prog, 'br"""': double3prog, "R'''": single3prog, 'R"""': double3prog, "B'''": single3prog, 'B"""': double3prog, "bR'''": single3prog, 'bR"""': double3prog, "Br'''": single3prog, 'Br"""': double3prog, "BR'''": single3prog, 'BR"""': double3prog, 'r': None, 'R': None, 'b': None, 'B': None} triple_quoted = {} for t in ("'''", '"""', "r'''", 'r"""', "R'''", 'R"""', "b'''", 'b"""', "B'''", 'B"""', "br'''", 'br"""', "Br'''", 'Br"""', "bR'''", 'bR"""', "BR'''", 'BR"""'): triple_quoted[t] = t single_quoted = {} for t in ("'", '"', "r'", 'r"', "R'", 'R"', "b'", 'b"', "B'", 'B"', "br'", 'br"', "Br'", 'Br"', "bR'", 'bR"', "BR'", 'BR"'): single_quoted[t] = t del _compile tabsize = 8 class TokenError(Exception): pass def generate_tokens(readline): lnum = parenlev = continued = 0 numchars = '0123456789' contstr, needcont = '', 0 contline = None indents = [0] while True: # loop over lines in stream try: line = readline() except StopIteration: line = b'' lnum += 1 pos, max = 0, len(line) if contstr: # continued string if not line: # multiline string has not been finished break endmatch = endprog.match(line) if endmatch: pos = end = endmatch.end(0) yield TokenInfo(STRING, contstr + line[:end], strstart, (lnum, end), contline + line) contstr, needcont = '', 0 contline = None elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': yield TokenInfo(ERRORTOKEN, contstr + line, strstart, (lnum, len(line)), contline) contstr = '' contline = None continue else: contstr = contstr + line contline = contline + line continue elif parenlev == 0 and not continued: # new statement if not line: break column = 0 while pos < max: # measure leading whitespace if line[pos] == ' ': column += 1 elif line[pos] == '\t': column = (column // tabsize + 1) * tabsize elif line[pos] == '\f': column = 0 else: break pos += 1 if pos == max: break if line[pos] in '#\r\n': # skip comments or blank lines if line[pos] == '#': comment_token = line[pos:].rstrip('\r\n') nl_pos = pos + len(comment_token) yield TokenInfo(COMMENT, comment_token, (lnum, pos), (lnum, pos + len(comment_token)), line) yield TokenInfo(NL, line[nl_pos:], (lnum, nl_pos), (lnum, len(line)), line) else: yield TokenInfo( (NL, COMMENT)[line[pos] == '#'], line[pos:], (lnum, pos), (lnum, len(line)), line) continue if column > indents[-1]: # count indents or dedents indents.append(column) yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line) while column < indents[-1]: indents = indents[:-1] yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line) else: # continued statement if not line: # basically a statement has not been finished here. 
break continued = 0 while pos < max: pseudomatch = pseudoprog.match(line, pos) if pseudomatch: # scan for tokens start, end = pseudomatch.span(1) spos, epos, pos = (lnum, start), (lnum, end), end token, initial = line[start:end], line[start] if (initial in numchars or # ordinary number (initial == '.' and token != '.' and token != '...')): yield TokenInfo(NUMBER, token, spos, epos, line) elif initial in '\r\n': yield TokenInfo(NL if parenlev > 0 else NEWLINE, token, spos, epos, line) elif initial == '#': assert not token.endswith("\n") yield TokenInfo(COMMENT, token, spos, epos, line) elif token in triple_quoted: endprog = endprogs[token] endmatch = endprog.match(line, pos) if endmatch: # all on one line pos = endmatch.end(0) token = line[start:pos] yield TokenInfo(STRING, token, spos, (lnum, pos), line) else: strstart = (lnum, start) # multiple lines contstr = line[start:] contline = line break elif initial in single_quoted or \ token[:2] in single_quoted or \ token[:3] in single_quoted: if token[-1] == '\n': # continued string strstart = (lnum, start) endprog = (endprogs[initial] or endprogs[token[1]] or endprogs[token[2]]) contstr, needcont = line[start:], 1 contline = line break else: # ordinary string yield TokenInfo(STRING, token, spos, epos, line) elif initial in namechars: # ordinary name yield TokenInfo(NAME, token, spos, epos, line) elif initial == '\\': # continued stmt continued = 1 else: if initial in '([{': parenlev += 1 elif initial in ')]}': parenlev -= 1 yield TokenInfo(OP, token, spos, epos, line) else: yield TokenInfo(ERRORTOKEN, line[pos], (lnum, pos), (lnum, pos + 1), line) pos += 1 for indent in indents[1:]: # pop remaining indent levels yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '') yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '') jedi-0.7.0/jedi/_compatibility.py0000664000175000017500000001134312204171717017134 0ustar daviddavid00000000000000""" To ensure compatibility from Python ``2.6`` - ``3.3``, a module has been created. Clearly there is huge need to use conforming syntax. But many changes (e.g. ``property``, ``hasattr`` in ``2.5``) can be rewritten in pure python. """ import sys import imp import os try: import importlib except: pass is_py3k = sys.hexversion >= 0x03000000 is_py33 = sys.hexversion >= 0x03030000 def find_module_py33(string, path=None): loader = importlib.machinery.PathFinder.find_module(string, path) if loader is None and path is None: # Fallback to find builtins loader = importlib.find_loader(string) if loader is None: raise ImportError("Couldn't find a loader for {0}".format(string)) try: is_package = loader.is_package(string) if is_package: module_path = os.path.dirname(loader.path) module_file = None else: module_path = loader.get_filename(string) module_file = open(module_path) except AttributeError: module_path = loader.load_module(string).__name__ module_file = None return module_file, module_path, is_package def find_module_pre_py33(string, path=None): module_file, module_path, description = imp.find_module(string, path) module_type = description[2] return module_file, module_path, module_type is imp.PKG_DIRECTORY find_module = find_module_py33 if is_py33 else find_module_pre_py33 find_module.__doc__ = """ Provides information about a module. This function isolates the differences in importing libraries introduced with python 3.3 on; it gets a module name and optionally a path. 
It will return a tuple containin an open file for the module (if not builtin), the filename or the name of the module if it is a builtin one and a boolean indicating if the module is contained in a package. """ # next was defined in python 2.6, in python 3 obj.next won't be possible # anymore try: next = next except NameError: _raiseStopIteration = object() def next(iterator, default=_raiseStopIteration): if not hasattr(iterator, 'next'): raise TypeError("not an iterator") try: return iterator.next() except StopIteration: if default is _raiseStopIteration: raise else: return default # unicode function try: unicode = unicode except NameError: unicode = str if is_py3k: utf8 = lambda s: s else: utf8 = lambda s: s.decode('utf-8') utf8.__doc__ = """ Decode a raw string into unicode object. Do nothing in Python 3. """ # exec function if is_py3k: def exec_function(source, global_map): exec(source, global_map) else: eval(compile("""def exec_function(source, global_map): exec source in global_map """, 'blub', 'exec')) # re-raise function if is_py3k: def reraise(exception, traceback): raise exception.with_traceback(traceback) else: eval(compile(""" def reraise(exception, traceback): raise exception, None, traceback """, 'blub', 'exec')) reraise.__doc__ = """ Re-raise `exception` with a `traceback` object. Usage:: reraise(Exception, sys.exc_info()[2]) """ # StringIO (Python 2.5 has no io module), so use io only for py3k try: from StringIO import StringIO except ImportError: from io import StringIO # hasattr function used because python if is_py3k: hasattr = hasattr else: def hasattr(obj, name): try: getattr(obj, name) return True except AttributeError: return False class Python3Method(object): def __init__(self, func): self.func = func def __get__(self, obj, objtype): if obj is None: return lambda *args, **kwargs: self.func(*args, **kwargs) else: return lambda *args, **kwargs: self.func(obj, *args, **kwargs) def use_metaclass(meta, *bases): """ Create a class with a metaclass. """ if not bases: bases = (object,) return meta("HackClass", bases, {}) try: from functools import reduce # Python 3 except ImportError: reduce = reduce try: encoding = sys.stdout.encoding if encoding is None: encoding = 'utf-8' except AttributeError: encoding = 'ascii' def u(string): """Cast to unicode DAMMIT! Written because Python2 repr always implicitly casts to a string, so we have to cast back to a unicode (and we now that we always deal with valid unicode, because we check that in the beginning). """ if is_py3k: return str(string) elif not isinstance(string, unicode): return unicode(str(string), 'UTF-8') return string try: import builtins # module name in python 3 except ImportError: import __builtin__ as builtins jedi-0.7.0/jedi/replstartup.py0000664000175000017500000000132312204171717016506 0ustar daviddavid00000000000000""" To use Jedi completion in Python interpreter, add the following in your shell setup (e.g., ``.bashrc``):: export PYTHONSTARTUP="$(python -m jedi repl)" Then you will be able to use Jedi completer in your Python interpreter:: $ python Python 2.7.2+ (default, Jul 20 2012, 22:15:08) [GCC 4.6.1] on linux2 Type "help", "copyright", "credits" or "license" for more information. >>> import os >>> os.path.join().split().in # doctest: +SKIP os.path.join().split().index os.path.join().split().insert """ import jedi.utils jedi.utils.setup_readline() del jedi # Note: try not to do many things here, as it will contaminate global # namespace of the interpreter. 
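# A note for manual use (a sketch, not part of the automated setup above):
# calling ``jedi.utils.setup_readline()`` from an already running interpreter
# should have the same effect as the PYTHONSTARTUP hook, since this file does
# nothing beyond that single call.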
jedi-0.7.0/jedi/evaluate.py0000664000175000017500000007771012204171717015744 0ustar daviddavid00000000000000""" Evaluation of Python code in |jedi| is based on three assumptions: * Code is recursive (to weaken this assumption, the :mod:`dynamic` module exists). * No magic is being used: - metaclasses - ``setattr()`` / ``__import__()`` - writing to ``globals()``, ``locals()``, ``object.__dict__`` * The programmer is not a total dick, e.g. like `this `_ :-) That said, there's mainly one entry point in this script: ``follow_statement``. This is where autocompletion starts. Everything you want to complete is either a ``Statement`` or some special name like ``class``, which is easy to complete. Therefore you need to understand what follows after ``follow_statement``. Let's make an example:: import datetime datetime.date.toda# <-- cursor here First of all, this module doesn't care about completion. It really just cares about ``datetime.date``. At the end of the procedure ``follow_statement`` will return the ``datetime`` class. To *visualize* this (simplified): - ``follow_statement`` - ```` - Unpacking of the statement into ``[[]]`` - ``follow_call_list``, calls ``follow_call`` with ```` - ``follow_call`` - searches the ``datetime`` name within the module. This is exactly where it starts to get complicated. Now recursions start to kick in. The statement has not been resolved fully, but now we need to resolve the datetime import. So it continues - follow import, which happens in the :mod:`imports` module. - now the same ``follow_call`` as above calls ``follow_paths`` to follow the second part of the statement ``date``. - After ``follow_paths`` returns with the desired ``datetime.date`` class, the result is being returned and the recursion finishes. Now what would happen if we wanted ``datetime.date.foo.bar``? Just two more calls to ``follow_paths`` (which calls itself with a recursion). What if the import would contain another Statement like this:: from foo import bar Date = bar.baz Well... You get it. Just another ``follow_statement`` recursion. It's really easy. Just that Python is not that easy sometimes. To understand tuple assignments and different class scopes, a lot more code had to be written. Yet we're still not talking about Descriptors and Nested List Comprehensions, just the simple stuff. So if you want to change something, write a test and then just change what you want. This module has been tested by about 600 tests. Don't be afraid to break something. The tests are good enough. I need to mention now that this recursive approach is really good because it only *evaluates* what needs to be *evaluated*. All the statements and modules that are not used are just being ignored. It's a little bit similar to the backtracking algorithm. .. todo:: nonlocal statement, needed or can be ignored? (py3k) """ from __future__ import with_statement import sys import itertools from jedi._compatibility import next, hasattr, is_py3k, unicode, reraise, u from jedi import common from jedi import cache from jedi import parsing_representation as pr from jedi import debug import evaluate_representation as er import recursion import docstrings import builtin import imports import dynamic def get_defined_names_for_position(scope, position=None, start_scope=None): """ Return filtered version of ``scope.get_defined_names()``. This function basically does what :meth:`scope.get_defined_names ` does. - If `position` is given, delete all names defined after `position`. 
- For special objects like instances, `position` is ignored and all names are returned. :type scope: :class:`parsing_representation.IsScope` :param scope: Scope in which names are searched. :param position: the position as a line/column tuple, default is infinity. """ names = scope.get_defined_names() # Instances have special rules, always return all the possible completions, # because class variables are always valid and the `self.` variables, too. if (not position or isinstance(scope, (er.Array, er.Instance)) or start_scope != scope and isinstance(start_scope, (pr.Function, er.Execution))): return names names_new = [] for n in names: if n.start_pos[0] is not None and n.start_pos < position: names_new.append(n) return names_new def get_names_of_scope(scope, position=None, star_search=True, include_builtin=True): """ Get all completions (names) possible for the current scope. The star search option is only here to provide an optimization. Otherwise the whole thing would probably start a little recursive madness. This function is used to include names from outer scopes. For example, when the current scope is function: >>> from jedi.parsing import Parser >>> parser = Parser(''' ... x = ['a', 'b', 'c'] ... def func(): ... y = None ... ''') >>> scope = parser.module.subscopes[0] >>> scope `get_names_of_scope` is a generator. First it yields names from most inner scope. >>> pairs = list(get_names_of_scope(scope)) >>> pairs[0] (, []) Then it yield the names from one level outer scope. For this example, this is the most outer scope. >>> pairs[1] (, [, ]) Finally, it yields names from builtin, if `include_builtin` is true (default). >>> pairs[2] #doctest: +ELLIPSIS (, [, ...]) :rtype: [(pr.Scope, [pr.Name])] :return: Return an generator that yields a pair of scope and names. """ in_func_scope = scope non_flow = scope.get_parent_until(pr.Flow, reverse=True) while scope: if isinstance(scope, pr.SubModule) and scope.parent: # we don't want submodules to report if we have modules. scope = scope.parent continue # `pr.Class` is used, because the parent is never `Class`. # Ignore the Flows, because the classes and functions care for that. # InstanceElement of Class is ignored, if it is not the start scope. if not (scope != non_flow and scope.isinstance(pr.Class) or scope.isinstance(pr.Flow) or scope.isinstance(er.Instance) and non_flow.isinstance(er.Function)): try: if isinstance(scope, er.Instance): for g in scope.scope_generator(): yield g else: yield scope, get_defined_names_for_position(scope, position, in_func_scope) except StopIteration: reraise(common.MultiLevelStopIteration, sys.exc_info()[2]) if scope.isinstance(pr.ForFlow) and scope.is_list_comp: # is a list comprehension yield scope, scope.get_set_vars(is_internal_call=True) scope = scope.parent # This is used, because subscopes (Flow scopes) would distort the # results. if scope and scope.isinstance(er.Function, pr.Function, er.Execution): in_func_scope = scope # Add star imports. if star_search: for s in imports.remove_star_imports(non_flow.get_parent_until()): for g in get_names_of_scope(s, star_search=False): yield g # Add builtins to the global scope. if include_builtin: builtin_scope = builtin.Builtin.scope yield builtin_scope, builtin_scope.get_defined_names() def find_name(scope, name_str, position=None, search_global=False, is_goto=False, resolve_decorator=True): """ This is the search function. The most important part to debug. `remove_statements` and `filter_statements` really are the core part of this completion. 
:param position: Position of the last statement -> tuple of line, column :return: List of Names. Their parents are the scopes, they are defined in. :rtype: list """ def remove_statements(result): """ This is the part where statements are being stripped. Due to lazy evaluation, statements like a = func; b = a; b() have to be evaluated. """ res_new = [] for r in result: add = [] if r.isinstance(pr.Statement): check_instance = None if isinstance(r, er.InstanceElement) and r.is_class_var: check_instance = r.instance r = r.var # Global variables handling. if r.is_global(): for token_name in r.token_list[1:]: if isinstance(token_name, pr.Name): add = find_name(r.parent, str(token_name)) else: # generated objects are used within executions, but these # objects are in functions, and we have to dynamically # execute first. if isinstance(r, pr.Param): func = r.parent # Instances are typically faked, if the instance is not # called from outside. Here we check it for __init__ # functions and return. if isinstance(func, er.InstanceElement) \ and func.instance.is_generated \ and hasattr(func, 'name') \ and str(func.name) == '__init__' \ and r.position_nr > 0: # 0 would be self r = func.var.params[r.position_nr] # add docstring knowledge doc_params = docstrings.follow_param(r) if doc_params: res_new += doc_params continue if not r.is_generated: res_new += dynamic.search_params(r) if not res_new: c = r.get_commands()[0] if c in ('*', '**'): t = 'tuple' if c == '*' else 'dict' res_new = [er.Instance( find_name(builtin.Builtin.scope, t)[0]) ] if not r.assignment_details: # this means that there are no default params, # so just ignore it. continue # Remove the statement docstr stuff for now, that has to be # implemented with the evaluator class. #if r.docstr: #res_new.append(r) scopes = follow_statement(r, seek_name=name_str) add += remove_statements(scopes) if check_instance is not None: # class renames add = [er.InstanceElement(check_instance, a, True) if isinstance(a, (er.Function, pr.Function)) else a for a in add] res_new += add else: if isinstance(r, pr.Class): r = er.Class(r) elif isinstance(r, pr.Function): r = er.Function(r) if r.isinstance(er.Function) and resolve_decorator: r = r.get_decorated_func() res_new.append(r) debug.dbg('sfn remove, new: %s, old: %s' % (res_new, result)) return res_new def filter_name(scope_generator): """ Filters all variables of a scope (which are defined in the `scope_generator`), until the name fits. """ def handle_for_loops(loop): # Take the first statement (for has always only # one, remember `in`). And follow it. if not loop.inputs: return [] result = get_iterator_types(follow_statement(loop.inputs[0])) if len(loop.set_vars) > 1: commands = loop.set_stmt.get_commands() # loops with loop.set_vars > 0 only have one command result = assign_tuples(commands[0], result, name_str) return result def process(name): """ Returns the parent of a name, which means the element which stands behind a name. """ result = [] no_break_scope = False par = name.parent exc = pr.Class, pr.Function until = lambda: par.parent.parent.get_parent_until(exc) is_array_assignment = False if par is None: pass elif par.isinstance(pr.Flow): if par.command == 'for': result += handle_for_loops(par) else: debug.warning('Flow: Why are you here? %s' % par.command) elif par.isinstance(pr.Param) \ and par.parent is not None \ and isinstance(until(), pr.Class) \ and par.position_nr == 0: # This is where self gets added - this happens at another # place, if the var_args are clear. 
But sometimes the class is # not known. Therefore add a new instance for self. Otherwise # take the existing. if isinstance(scope, er.InstanceElement): inst = scope.instance else: inst = er.Instance(er.Class(until())) inst.is_generated = True result.append(inst) elif par.isinstance(pr.Statement): def is_execution(calls): for c in calls: if isinstance(c, (unicode, str)): continue if c.isinstance(pr.Array): if is_execution(c): return True elif c.isinstance(pr.Call): # Compare start_pos, because names may be different # because of executions. if c.name.start_pos == name.start_pos \ and c.execution: return True return False is_exe = False for assignee, op in par.assignment_details: is_exe |= is_execution(assignee) if is_exe: # filter array[3] = ... # TODO check executions for dict contents is_array_assignment = True else: details = par.assignment_details if details and details[0][1] != '=': no_break_scope = True # TODO this makes self variables non-breakable. wanted? if isinstance(name, er.InstanceElement) \ and not name.is_class_var: no_break_scope = True result.append(par) else: # TODO multi-level import non-breakable if isinstance(par, pr.Import) and len(par.namespace) > 1: no_break_scope = True result.append(par) return result, no_break_scope, is_array_assignment flow_scope = scope result = [] # compare func uses the tuple of line/indent = line/column comparison_func = lambda name: (name.start_pos) for nscope, name_list in scope_generator: break_scopes = [] # here is the position stuff happening (sorting of variables) for name in sorted(name_list, key=comparison_func, reverse=True): p = name.parent.parent if name.parent else None if isinstance(p, er.InstanceElement) \ and isinstance(p.var, pr.Class): p = p.var if name_str == name.get_code() and p not in break_scopes: r, no_break_scope, is_array_assignment = process(name) if is_goto: if not is_array_assignment: # shouldn't goto arr[1] = result.append(name) else: result += r # for comparison we need the raw class s = nscope.base if isinstance(nscope, er.Class) else nscope # this means that a definition was found and is not e.g. # in if/else. if result and not no_break_scope: if not name.parent or p == s: break break_scopes.append(p) while flow_scope: # TODO check if result is in scope -> no evaluation necessary n = dynamic.check_flow_information(flow_scope, name_str, position) if n: result = n break if result: break if flow_scope == nscope: break flow_scope = flow_scope.parent flow_scope = nscope if result: break if not result and isinstance(nscope, er.Instance): # __getattr__ / __getattribute__ result += check_getattr(nscope, name_str) debug.dbg('sfn filter "%s" in (%s-%s): %s@%s' % (name_str, scope, nscope, u(result), position)) return result def descriptor_check(result): """Processes descriptors""" res_new = [] for r in result: if isinstance(scope, (er.Instance, er.Class)) \ and hasattr(r, 'get_descriptor_return'): # handle descriptors with common.ignored(KeyError): res_new += r.get_descriptor_return(scope) continue res_new.append(r) return res_new if search_global: scope_generator = get_names_of_scope(scope, position=position) else: if isinstance(scope, er.Instance): scope_generator = scope.scope_generator() else: if isinstance(scope, (er.Class, pr.Module)): # classes are only available directly via chaining? # strange stuff... 
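                # (Presumably the position filter used for other scopes is
                # skipped here because attribute access on a class or module
                # is not position dependent.)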
names = scope.get_defined_names() else: names = get_defined_names_for_position(scope, position) scope_generator = iter([(scope, names)]) if is_goto: return filter_name(scope_generator) return descriptor_check(remove_statements(filter_name(scope_generator))) def check_getattr(inst, name_str): """Checks for both __getattr__ and __getattribute__ methods""" result = [] # str is important to lose the NamePart! module = builtin.Builtin.scope name = pr.Call(module, str(name_str), pr.Call.STRING, (0, 0), (0, 0), inst) with common.ignored(KeyError): result = inst.execute_subscope_by_name('__getattr__', [name]) if not result: # this is a little bit special. `__getattribute__` is executed # before anything else. But: I know no use case, where this # could be practical and the jedi would return wrong types. If # you ever have something, let me know! with common.ignored(KeyError): result = inst.execute_subscope_by_name('__getattribute__', [name]) return result def get_iterator_types(inputs): """Returns the types of any iterator (arrays, yields, __iter__, etc).""" iterators = [] # Take the first statement (for has always only # one, remember `in`). And follow it. for it in inputs: if isinstance(it, (er.Generator, er.Array, dynamic.ArrayInstance)): iterators.append(it) else: if not hasattr(it, 'execute_subscope_by_name'): debug.warning('iterator/for loop input wrong', it) continue try: iterators += it.execute_subscope_by_name('__iter__') except KeyError: debug.warning('iterators: No __iter__ method found.') result = [] for gen in iterators: if isinstance(gen, er.Array): # Array is a little bit special, since this is an internal # array, but there's also the list builtin, which is # another thing. result += gen.get_index_types() elif isinstance(gen, er.Instance): # __iter__ returned an instance. name = '__next__' if is_py3k else 'next' try: result += gen.execute_subscope_by_name(name) except KeyError: debug.warning('Instance has no __next__ function', gen) else: # is a generator result += gen.iter_content() return result def assign_tuples(tup, results, seek_name): """ This is a normal assignment checker. In python functions and other things can return tuples: >>> a, b = 1, "" >>> a, (b, c) = 1, ("", 1.0) Here, if `seek_name` is "a", the number type will be returned. The first part (before `=`) is the param tuples, the second one result. :type tup: pr.Array """ def eval_results(index): types = [] for r in results: try: func = r.get_exact_index_types except AttributeError: debug.warning("invalid tuple lookup %s of result %s in %s" % (tup, results, seek_name)) else: with common.ignored(IndexError): types += func(index) return types result = [] for i, stmt in enumerate(tup): # Used in assignments. There is just one call and no other things, # therefore we can just assume, that the first part is important. command = stmt.get_commands()[0] if tup.type == pr.Array.NOARRAY: # unnessecary braces -> just remove. r = results else: r = eval_results(i) # LHS of tuples can be nested, so resolve it recursively result += find_assignments(command, r, seek_name) return result def find_assignments(lhs, results, seek_name): """ Check if `seek_name` is in the left hand side `lhs` of assignment. 
`lhs` can simply be a variable (`pr.Call`) or a tuple/list (`pr.Array`) representing the following cases:: a = 1 # lhs is pr.Call (a, b) = 2 # lhs is pr.Array :type lhs: pr.Call :type results: list :type seek_name: str """ if isinstance(lhs, pr.Array): return assign_tuples(lhs, results, seek_name) elif lhs.name.names[-1] == seek_name: return results else: return [] @recursion.RecursionDecorator @cache.memoize_default(default=()) def follow_statement(stmt, seek_name=None): """ The starting point of the completion. A statement always owns a call list, which are the calls, that a statement does. In case multiple names are defined in the statement, `seek_name` returns the result for this name. :param stmt: A `pr.Statement`. :param seek_name: A string. """ debug.dbg('follow_stmt %s (%s)' % (stmt, seek_name)) commands = stmt.get_commands() debug.dbg('calls: %s' % commands) result = follow_call_list(commands) # Assignment checking is only important if the statement defines multiple # variables. if len(stmt.get_set_vars()) > 1 and seek_name and stmt.assignment_details: new_result = [] for ass_commands, op in stmt.assignment_details: new_result += find_assignments(ass_commands[0], result, seek_name) result = new_result return set(result) @common.rethrow_uncaught def follow_call_list(call_list, follow_array=False): """ `call_list` can be either `pr.Array` or `list of list`. It is used to evaluate a two dimensional object, that has calls, arrays and operators in it. """ def evaluate_list_comprehension(lc, parent=None): input = lc.input nested_lc = lc.input.token_list[0] if isinstance(nested_lc, pr.ListComprehension): # is nested LC input = nested_lc.stmt module = input.get_parent_until() # create a for loop, which does the same as list comprehensions loop = pr.ForFlow(module, [input], lc.stmt.start_pos, lc.middle, True) loop.parent = parent or lc.get_parent_until(pr.IsScope) if isinstance(nested_lc, pr.ListComprehension): loop = evaluate_list_comprehension(nested_lc, loop) return loop result = [] calls_iterator = iter(call_list) for call in calls_iterator: if pr.Array.is_type(call, pr.Array.NOARRAY): r = list(itertools.chain.from_iterable(follow_statement(s) for s in call)) call_path = call.generate_call_path() next(call_path, None) # the first one has been used already result += follow_paths(call_path, r, call.parent, position=call.start_pos) elif isinstance(call, pr.ListComprehension): loop = evaluate_list_comprehension(call) # Caveat: parents are being changed, but this doesn't matter, # because nothing else uses it. call.stmt.parent = loop result += follow_statement(call.stmt) else: if isinstance(call, pr.Lambda): result.append(er.Function(call)) # With things like params, these can also be functions... elif isinstance(call, pr.Base) and call.isinstance(er.Function, er.Class, er.Instance, dynamic.ArrayInstance): result.append(call) # The string tokens are just operations (+, -, etc.) elif not isinstance(call, (str, unicode)): if str(call.name) == 'if': # Ternary operators. 
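                    # Sketch of what this branch handles: in a ternary like
                    # ``a if cond else b`` the condition is skipped below, so
                    # both ``a`` and ``b`` remain possible results.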
while True: try: call = next(calls_iterator) except StopIteration: break with common.ignored(AttributeError): if str(call.name) == 'else': break continue result += follow_call(call) elif call == '*': if [r for r in result if isinstance(r, er.Array) or isinstance(r, er.Instance) and str(r.name) == 'str']: # if it is an iterable, ignore * operations next(calls_iterator) return set(result) def follow_call(call): """Follow a call is following a function, variable, string, etc.""" path = call.generate_call_path() # find the statement of the Scope s = call while not s.parent.isinstance(pr.IsScope): s = s.parent return follow_call_path(path, s.parent, s.start_pos) def follow_call_path(path, scope, position): """Follows a path generated by `pr.Call.generate_call_path()`""" current = next(path) if isinstance(current, pr.Array): result = [er.Array(current)] else: if isinstance(current, pr.NamePart): # This is the first global lookup. scopes = find_name(scope, current, position=position, search_global=True) else: if current.type in (pr.Call.STRING, pr.Call.NUMBER): t = type(current.name).__name__ scopes = find_name(builtin.Builtin.scope, t) else: debug.warning('unknown type:', current.type, current) scopes = [] # Make instances of those number/string objects. scopes = [er.Instance(s, (current.name,)) for s in scopes] result = imports.strip_imports(scopes) return follow_paths(path, result, scope, position=position) def follow_paths(path, results, call_scope, position=None): """ In each result, `path` must be followed. Copies the path iterator. """ results_new = [] if results: if len(results) > 1: iter_paths = itertools.tee(path, len(results)) else: iter_paths = [path] for i, r in enumerate(results): fp = follow_path(iter_paths[i], r, call_scope, position=position) if fp is not None: results_new += fp else: # This means stop iteration. return results return results_new def follow_path(path, scope, call_scope, position=None): """ Uses a generator and tries to complete the path, e.g.:: foo.bar.baz `follow_path` is only responsible for completing `.bar.baz`, the rest is done in the `follow_call` function. """ # current is either an Array or a Scope. try: current = next(path) except StopIteration: return None debug.dbg('follow %s in scope %s' % (current, scope)) result = [] if isinstance(current, pr.Array): # This must be an execution, either () or []. if current.type == pr.Array.LIST: if hasattr(scope, 'get_index_types'): result = scope.get_index_types(current) elif current.type not in [pr.Array.DICT]: # Scope must be a class or func - make an instance or execution. debug.dbg('exe', scope) result = er.Execution(scope, current).get_return_types() else: # Curly braces are not allowed, because they make no sense. debug.warning('strange function call with {}', current, scope) else: # The function must not be decorated with something else. if scope.isinstance(er.Function): scope = scope.get_magic_method_scope() else: # This is the typical lookup while chaining things. 
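            # e.g. for ``foo.bar`` this resolves ``bar`` within the types of
            # ``foo``; names such as ``__bar`` are filtered out here when the
            # access does not come from the owning class itself (see
            # filter_private_variable below).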
if filter_private_variable(scope, call_scope, current): return [] result = imports.strip_imports(find_name(scope, current, position=position)) return follow_paths(path, set(result), call_scope, position=position) def filter_private_variable(scope, call_scope, var_name): """private variables begin with a double underline `__`""" if isinstance(var_name, (str, unicode)) and isinstance(scope, er.Instance)\ and var_name.startswith('__') and not var_name.endswith('__'): s = call_scope.get_parent_until((pr.Class, er.Instance)) if s != scope and s != scope.base.base: return True return False def goto(stmt, call_path=None): if call_path is None: commands = stmt.get_commands() if len(commands) == 0: return [], '' # Only the first command is important, the rest should basically not # happen except in broken code (e.g. docstrings that aren't code). call = commands[0] if isinstance(call, (str, unicode)): call_path = [call] else: call_path = list(call.generate_call_path()) scope = stmt.get_parent_until(pr.IsScope) pos = stmt.start_pos call_path, search = call_path[:-1], call_path[-1] pos = pos[0], pos[1] + 1 if call_path: scopes = follow_call_path(iter(call_path), scope, pos) search_global = False pos = None else: scopes = [scope] search_global = True follow_res = [] for s in scopes: follow_res += find_name(s, search, pos, search_global=search_global, is_goto=True) return follow_res, search jedi-0.7.0/jedi/parsing_representation.py0000664000175000017500000013676012204171717020724 0ustar daviddavid00000000000000""" If you know what an abstract syntax tree (ast) is, you'll see that this module is pretty much that. The classes represent syntax elements: ``Import``, ``Function``. A very central class is ``Scope``. It is not used directly by the parser, but inherited. It's used by ``Function``, ``Class``, ``Flow``, etc. A ``Scope`` may have ``subscopes``, ``imports`` and ``statements``. The entire parser is based on scopes, because they also stand for indentation. One special thing: ``Array`` values are statements. But if you think about it, this makes sense. ``[1, 2+33]`` for example would be an Array with two ``Statement`` inside. This is the easiest way to write a parser. The same behaviour applies to ``Param``, which is being used in a function definition. The easiest way to play with this module is to use :class:`parsing.Parser`. :attr:`parsing.Parser.module` holds an instance of :class:`SubModule`: >>> from jedi.parsing import Parser >>> parser = Parser('import os', 'example.py') >>> submodule = parser.module >>> submodule Any subclasses of :class:`Scope`, including :class:`SubModule` has attribute :attr:`imports `. This attribute has import statements in this scope. Check this out: >>> submodule.imports [] See also :attr:`Scope.subscopes` and :attr:`Scope.statements`. """ from __future__ import with_statement import os import re import tokenizer as tokenize from inspect import cleandoc from ast import literal_eval from jedi._compatibility import next, Python3Method, encoding, unicode, is_py3k from jedi import common from jedi import debug class Base(object): """ This is just here to have an isinstance check, which is also used on evaluate classes. But since they have sometimes a special type of delegation, it is important for those classes to override this method. I know that there is a chance to do such things with __instancecheck__, but since Python 2.5 doesn't support it, I decided to do it this way. 
""" __slots__ = () def isinstance(self, *cls): return isinstance(self, cls) class Simple(Base): """ The super class for Scope, Import, Name and Statement. Every object in the parser tree inherits from this class. """ __slots__ = ('parent', '_sub_module', '_start_pos', 'use_as_parent', '_end_pos') def __init__(self, module, start_pos, end_pos=(None, None)): """ Initialize :class:`Simple`. :type module: :class:`SubModule` :param module: The module in which this Python object locates. :type start_pos: 2-tuple of int :param start_pos: Position (line, column) of the Statement. :type end_pos: 2-tuple of int :param end_pos: Same as `start_pos`. """ self._sub_module = module self._start_pos = start_pos self._end_pos = end_pos self.parent = None # use this attribute if parent should be something else than self. self.use_as_parent = self @property def start_pos(self): return self._sub_module.line_offset + self._start_pos[0], \ self._start_pos[1] @start_pos.setter def start_pos(self, value): self._start_pos = value @property def end_pos(self): if None in self._end_pos: return self._end_pos return self._sub_module.line_offset + self._end_pos[0], \ self._end_pos[1] @end_pos.setter def end_pos(self, value): self._end_pos = value @Python3Method def get_parent_until(self, classes=(), reverse=False, include_current=True): """ Takes always the parent, until one class (not a Class) """ if type(classes) not in (tuple, list): classes = (classes,) scope = self if include_current else self.parent while scope.parent is not None: if classes and reverse != scope.isinstance(*classes): break scope = scope.parent return scope def __repr__(self): code = self.get_code().replace('\n', ' ') if not is_py3k: code = code.encode(encoding, 'replace') return "<%s: %s@%s,%s>" % \ (type(self).__name__, code, self.start_pos[0], self.start_pos[1]) class IsScope(Base): pass class Scope(Simple, IsScope): """ Super class for the parser tree, which represents the state of a python text file. A Scope manages and owns its subscopes, which are classes and functions, as well as variables and imports. It is used to access the structure of python files. :param start_pos: The position (line and column) of the scope. :type start_pos: tuple(int, int) """ def __init__(self, module, start_pos): super(Scope, self).__init__(module, start_pos) self.subscopes = [] self.imports = [] self.statements = [] self.docstr = '' self.asserts = [] # Needed here for fast_parser, because the fast_parser splits and # returns will be in "normal" modules. self.returns = [] self.is_generator = False def add_scope(self, sub, decorators): sub.parent = self.use_as_parent sub.decorators = decorators for d in decorators: # the parent is the same, because the decorator has not the scope # of the function d.parent = self.use_as_parent self.subscopes.append(sub) return sub def add_statement(self, stmt): """ Used to add a Statement or a Scope. A statement would be a normal command (Statement) or a Scope (Flow). """ stmt.parent = self.use_as_parent self.statements.append(stmt) return stmt def add_docstr(self, string): """ Clean up a docstring """ self.docstr = cleandoc(literal_eval(string)) def add_import(self, imp): self.imports.append(imp) imp.parent = self.use_as_parent def get_imports(self): """ Gets also the imports within flow statements """ i = [] + self.imports for s in self.statements: if isinstance(s, Scope): i += s.get_imports() return i def get_code(self, first_indent=False, indention=' '): """ :return: Returns the code of the current scope. 
:rtype: str """ string = "" if len(self.docstr) > 0: string += '"""' + self.docstr + '"""\n' objs = self.subscopes + self.imports + self.statements + self.returns for obj in sorted(objs, key=lambda x: x.start_pos): if isinstance(obj, Scope): string += obj.get_code(first_indent=True, indention=indention) else: if obj in self.returns and not isinstance(self, Lambda): string += 'yield ' if self.is_generator else 'return ' string += obj.get_code() if first_indent: string = common.indent_block(string, indention=indention) return string @Python3Method def get_set_vars(self): """ Get all the names, that are active and accessible in the current scope. See :meth:`get_defined_names` for examples. :return: list of Name :rtype: list """ n = [] for stmt in self.statements: try: n += stmt.get_set_vars(True) except TypeError: n += stmt.get_set_vars() # function and class names n += [s.name for s in self.subscopes] for i in self.imports: if not i.star: n += i.get_defined_names() return n def get_defined_names(self): """ Get all defined names in this scope. >>> from jedi.parsing import Parser >>> parser = Parser(''' ... a = x ... b = y ... b.c = z ... ''') >>> parser.module.get_defined_names() [, ] Note that unlike :meth:`get_set_vars`, assignment to object attribute does not change the result because it does not change the defined names in this scope. >>> parser.module.get_set_vars() [, , ] """ return [n for n in self.get_set_vars() if isinstance(n, Import) or len(n) == 1] def is_empty(self): """ :return: True if there are no subscopes, imports and statements. :rtype: bool """ return not (self.imports or self.subscopes or self.statements) @Python3Method def get_statement_for_position(self, pos, include_imports=False): checks = self.statements + self.asserts if include_imports: checks += self.imports if self.isinstance(Function): checks += self.params + self.decorators checks += [r for r in self.returns if r is not None] for s in checks: if isinstance(s, Flow): p = s.get_statement_for_position(pos, include_imports) while s.next and not p: s = s.next p = s.get_statement_for_position(pos, include_imports) if p: return p elif s.start_pos <= pos <= s.end_pos: return s for s in self.subscopes: if s.start_pos <= pos <= s.end_pos: p = s.get_statement_for_position(pos, include_imports) if p: return p def __repr__(self): try: name = self.path except AttributeError: try: name = self.name except AttributeError: name = self.command return "<%s: %s@%s-%s>" % (type(self).__name__, name, self.start_pos[0], self.end_pos[0]) class Module(IsScope): """ For isinstance checks. fast_parser.Module also inherits from this. """ class SubModule(Scope, Module): """ The top scope, which is always a module. Depending on the underlying parser this may be a full module or just a part of a module. """ def __init__(self, path, start_pos=(1, 0), top_module=None): """ Initialize :class:`SubModule`. :type path: str :arg path: File path to this module. .. todo:: Document `top_module`. """ super(SubModule, self).__init__(self, start_pos) self.path = path self.global_vars = [] self._name = None self.used_names = {} self.temp_used_names = [] # this may be changed depending on fast_parser self.line_offset = 0 self.use_as_parent = top_module or self def add_global(self, name): """ Global means in these context a function (subscope) which has a global statement. This is only relevant for the top scope. :param name: The name of the global. :type name: Name """ # set no parent here, because globals are not defined in this scope. 
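        # e.g. a function body containing ``global foo`` reports ``foo`` up to
        # the module here, so get_set_vars() below lists it as well.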
self.global_vars.append(name) def get_set_vars(self): n = super(SubModule, self).get_set_vars() n += self.global_vars return n @property def name(self): """ This is used for the goto functions. """ if self._name is not None: return self._name if self.path is None: string = '' # no path -> empty name else: sep = (re.escape(os.path.sep),) * 2 r = re.search(r'([^%s]*?)(%s__init__)?(\.py|\.so)?$' % sep, self.path) # remove PEP 3149 names string = re.sub('\.[a-z]+-\d{2}[mud]{0,3}$', '', r.group(1)) # positions are not real therefore choose (0, 0) names = [(string, (0, 0))] self._name = Name(self, names, (0, 0), (0, 0), self.use_as_parent) return self._name def is_builtin(self): return not (self.path is None or self.path.endswith('.py')) @property def has_explicit_absolute_import(self): """ Checks if imports in this module are explicitly absolute, i.e. there is a ``__future__`` import. """ for imp in self.imports: if imp.from_ns is None or imp.namespace is None: continue namespace, feature = imp.from_ns.names[0], imp.namespace.names[0] if namespace == "__future__" and feature == "absolute_import": return True return False class Class(Scope): """ Used to store the parsed contents of a python class. :param name: The Class name. :type name: str :param supers: The super classes of a Class. :type supers: list :param start_pos: The start position (line, column) of the class. :type start_pos: tuple(int, int) """ def __init__(self, module, name, supers, start_pos): super(Class, self).__init__(module, start_pos) self.name = name name.parent = self.use_as_parent self.supers = supers for s in self.supers: s.parent = self.use_as_parent self.decorators = [] def get_code(self, first_indent=False, indention=' '): string = "\n".join('@' + stmt.get_code() for stmt in self.decorators) string += 'class %s' % (self.name) if len(self.supers) > 0: sup = ', '.join(stmt.get_code(False) for stmt in self.supers) string += '(%s)' % sup string += ':\n' string += super(Class, self).get_code(True, indention) if self.is_empty(): string += "pass\n" return string @property def doc(self): """ Return a document string including call signature of __init__. """ for sub in self.subscopes: if sub.name.names[-1] == '__init__': return '%s\n\n%s' % ( sub.get_call_signature(funcname=self.name.names[-1]), self.docstr) return self.docstr class Function(Scope): """ Used to store the parsed contents of a python function. :param name: The Function name. :type name: str :param params: The parameters (Statement) of a Function. :type params: list :param start_pos: The start position (line, column) the Function. :type start_pos: tuple(int, int) """ def __init__(self, module, name, params, start_pos, annotation): super(Function, self).__init__(module, start_pos) self.name = name if name is not None: name.parent = self.use_as_parent self.params = params for p in params: p.parent = self.use_as_parent p.parent_function = self.use_as_parent self.decorators = [] self.listeners = set() # not used here, but in evaluation. 
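        # The optional return annotation, e.g. ``def f() -> int:``; its parent
        # is set to this function just like the params above.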
if annotation is not None: annotation.parent = self.use_as_parent self.annotation = annotation def get_code(self, first_indent=False, indention=' '): string = "\n".join('@' + stmt.get_code() for stmt in self.decorators) params = ', '.join([stmt.get_code(False) for stmt in self.params]) string += "def %s(%s):\n" % (self.name, params) string += super(Function, self).get_code(True, indention) if self.is_empty(): string += 'pass\n' return string def is_empty(self): return super(Function, self).is_empty() and not self.returns def get_set_vars(self): n = super(Function, self).get_set_vars() for p in self.params: try: n.append(p.get_name()) except IndexError: debug.warning("multiple names in param %s" % n) return n def get_call_signature(self, width=72, funcname=None): """ Generate call signature of this function. :param width: Fold lines if a line is longer than this value. :type width: int :arg funcname: Override function name when given. :type funcname: str :rtype: str """ l = (funcname or self.name.names[-1]) + '(' lines = [] for (i, p) in enumerate(self.params): code = p.get_code(False) if i != len(self.params) - 1: code += ', ' if len(l + code) > width: lines.append(l[:-1] if l[-1] == ' ' else l) l = code else: l += code if l: lines.append(l) lines[-1] += ')' return '\n'.join(lines) @property def doc(self): """ Return a document string including call signature. """ return '%s\n\n%s' % (self.get_call_signature(), self.docstr) class Lambda(Function): def __init__(self, module, params, start_pos, parent): super(Lambda, self).__init__(module, None, params, start_pos, None) self.parent = parent def get_code(self, first_indent=False, indention=' '): params = ','.join([stmt.get_code() for stmt in self.params]) string = "lambda %s: " % params return string + super(Function, self).get_code(indention=indention) def __repr__(self): return "<%s @%s (%s-%s)>" % (type(self).__name__, self.start_pos[0], self.start_pos[1], self.end_pos[1]) class Flow(Scope): """ Used to describe programming structure - flow statements, which indent code, but are not classes or functions: - for - while - if - try - with Therefore statements like else, except and finally are also here, they are now saved in the root flow elements, but in the next variable. :param command: The flow command, if, while, else, etc. :type command: str :param inputs: The initializations of a flow -> while 'statement'. :type inputs: list(Statement) :param start_pos: Position (line, column) of the Flow statement. :type start_pos: tuple(int, int) :param set_vars: Local variables used in the for loop (only there). :type set_vars: list """ def __init__(self, module, command, inputs, start_pos, set_vars=None): self.next = None self.command = command super(Flow, self).__init__(module, start_pos) self._parent = None # These have to be statements, because of with, which takes multiple. 
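        # e.g. ``with open(a) as f, open(b) as g:`` yields two input
        # statements, whereas ``if x:`` or ``while x:`` yields a single one.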
self.inputs = inputs for s in inputs: s.parent = self.use_as_parent if set_vars is None: self.set_vars = [] else: self.set_vars = set_vars for s in self.set_vars: s.parent.parent = self.use_as_parent s.parent = self.use_as_parent @property def parent(self): return self._parent @parent.setter def parent(self, value): self._parent = value try: self.next.parent = value except AttributeError: return def get_code(self, first_indent=False, indention=' '): stmts = [] for s in self.inputs: stmts.append(s.get_code(new_line=False)) stmt = ', '.join(stmts) string = "%s %s:\n" % (self.command, stmt) string += super(Flow, self).get_code(True, indention) if self.next: string += self.next.get_code() return string def get_set_vars(self, is_internal_call=False): """ Get the names for the flow. This includes also a call to the super class. :param is_internal_call: defines an option for internal files to crawl through this class. Normally it will just call its superiors, to generate the output. """ if is_internal_call: n = list(self.set_vars) for s in self.inputs: n += s.set_vars if self.next: n += self.next.get_set_vars(is_internal_call) n += super(Flow, self).get_set_vars() return n else: return self.get_parent_until((Class, Function)).get_set_vars() def get_imports(self): i = super(Flow, self).get_imports() if self.next: i += self.next.get_imports() return i def set_next(self, next): """Set the next element in the flow, those are else, except, etc.""" if self.next: return self.next.set_next(next) else: self.next = next self.next.parent = self.parent return next class ForFlow(Flow): """ Used for the for loop, because there are two statement parts. """ def __init__(self, module, inputs, start_pos, set_stmt, is_list_comp=False): super(ForFlow, self).__init__(module, 'for', inputs, start_pos, set_stmt.used_vars) self.set_stmt = set_stmt set_stmt.parent = self.use_as_parent self.is_list_comp = is_list_comp def get_code(self, first_indent=False, indention=" " * 4): vars = ",".join(x.get_code() for x in self.set_vars) stmts = [] for s in self.inputs: stmts.append(s.get_code(new_line=False)) stmt = ', '.join(stmts) s = "for %s in %s:\n" % (vars, stmt) return s + super(Flow, self).get_code(True, indention) class Import(Simple): """ Stores the imports of any Scopes. :param start_pos: Position (line, column) of the Import. :type start_pos: tuple(int, int) :param namespace: The import, can be empty if a star is given :type namespace: Name :param alias: The alias of a namespace(valid in the current namespace). :type alias: Name :param from_ns: Like the namespace, can be equally used. :type from_ns: Name :param star: If a star is used -> from time import *. :type star: bool :param defunct: An Import is valid or not. :type defunct: bool """ def __init__(self, module, start_pos, end_pos, namespace, alias=None, from_ns=None, star=False, relative_count=0, defunct=False): super(Import, self).__init__(module, start_pos, end_pos) self.namespace = namespace self.alias = alias self.from_ns = from_ns for n in [namespace, alias, from_ns]: if n: n.parent = self.use_as_parent self.star = star self.relative_count = relative_count self.defunct = defunct def get_code(self, new_line=True): # in case one of the names is None alias = self.alias or '' namespace = self.namespace or '' from_ns = self.from_ns or '' if self.alias: ns_str = "%s as %s" % (namespace, alias) else: ns_str = str(namespace) nl = '\n' if new_line else '' if self.from_ns or self.relative_count: if self.star: ns_str = '*' dots = '.' 
* self.relative_count return "from %s%s import %s%s" % (dots, from_ns, ns_str, nl) else: return "import %s%s" % (ns_str, nl) def get_defined_names(self): if self.defunct: return [] if self.star: return [self] if self.alias: return [self.alias] if len(self.namespace) > 1: o = self.namespace n = Name(self._sub_module, [(o.names[0], o.start_pos)], o.start_pos, o.end_pos, parent=o.parent) return [n] else: return [self.namespace] def get_set_vars(self): return self.get_defined_names() def get_all_import_names(self): n = [] if self.from_ns: n.append(self.from_ns) if self.namespace: n.append(self.namespace) if self.alias: n.append(self.alias) return n class Statement(Simple): """ This is the class for all the possible statements. Which means, this class stores pretty much all the Python code, except functions, classes, imports, and flow functions like if, for, etc. :type set_vars: list of :class:`Name` :param set_vars: The variables which are defined by the statement. :type used_vars: list of :class:`Name` :param used_vars: The variables which are used by the statement. :type token_list: list :param token_list: List of tokens or names. Each element is either an instance of :class:`Name` or a tuple of token type value (e.g., :data:`tokenize.NUMBER`), token string (e.g., ``'='``), and start position (e.g., ``(1, 0)``). :type start_pos: 2-tuple of int :param start_pos: Position (line, column) of the Statement. """ __slots__ = ('token_list', 'used_vars', 'set_vars', '_commands', '_assignment_details', 'docstr') def __init__(self, module, set_vars, used_vars, token_list, start_pos, end_pos, parent=None): super(Statement, self).__init__(module, start_pos, end_pos) self.used_vars = used_vars self.token_list = token_list for s in set_vars + used_vars: s.parent = self.use_as_parent self.set_vars = self._remove_executions_from_set_vars(set_vars) self.parent = parent self.docstr = '' # cache self._commands = None self._assignment_details = [] # this is important for other scripts def add_docstr(self, string): """ Clean up a docstring """ self.docstr = cleandoc(literal_eval(string)) def _remove_executions_from_set_vars(self, set_vars): """ Important mainly for assosiative arrays:: a = 3 b = {} b[a] = 3 `a` is in this case not a set_var, it is used to index the dict. """ if not set_vars: return set_vars result = set(set_vars) last = None in_execution = 0 for tok in self.token_list: if isinstance(tok, Name): if tok not in result: break if in_execution: result.remove(tok) elif isinstance(tok, tuple): tok = tok[1] if tok in ['(', '['] and isinstance(last, Name): in_execution += 1 elif tok in [')', ']'] and in_execution > 0: in_execution -= 1 last = tok return list(result) def get_code(self, new_line=True): def assemble(command_list, assignment=None): pieces = [c.get_code() if isinstance(c, Simple) else unicode(c) for c in command_list] if assignment is None: return ''.join(pieces) return '%s %s ' % (''.join(pieces), assignment) code = ''.join(assemble(*a) for a in self.assignment_details) code += assemble(self.get_commands()) if new_line: return code + '\n' else: return code def get_set_vars(self): """ Get the names for the statement. """ return list(self.set_vars) def is_global(self): # first keyword of the first token is global -> must be a global return str(self.token_list[0]) == "global" def get_command(self, index): commands = self.get_commands() try: return commands[index] except IndexError: return None @property def assignment_details(self): # parse statement which creates the assignment details. 
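        # Accessing the property triggers the lazy parse: get_commands() runs
        # _parse_statement(), which fills _assignment_details as a side effect.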
self.get_commands() return self._assignment_details def get_commands(self): if self._commands is None: self._commands = ['time neeeeed'] # avoid recursions result = self._parse_statement() self._commands = result return self._commands def _parse_statement(self): """ This is not done in the main parser, because it might be slow and most of the statements won't need this data anyway. This is something 'like' a lazy execution. This is not really nice written, sorry for that. If you plan to replace it and make it nicer, that would be cool :-) """ def is_assignment(tok): return isinstance(tok, (str, unicode)) and tok.endswith('=') \ and not tok in ['>=', '<=', '==', '!='] def parse_array(token_iterator, array_type, start_pos, add_el=None, added_breaks=()): arr = Array(self._sub_module, start_pos, array_type, self) if add_el is not None: arr.add_statement(add_el) maybe_dict = array_type == Array.SET break_tok = None is_array = None while True: stmt, break_tok = parse_stmt(token_iterator, maybe_dict, break_on_assignment=bool(add_el), added_breaks=added_breaks) if stmt is None: break else: if break_tok == ',': is_array = True is_key = maybe_dict and break_tok == ':' arr.add_statement(stmt, is_key) if break_tok in closing_brackets \ or break_tok in added_breaks \ or is_assignment(break_tok): break if arr.type == Array.TUPLE and len(arr) == 1 and not is_array: arr.type = Array.NOARRAY if not arr.values and maybe_dict: # this is a really special case - empty brackets {} are # always dictionaries and not sets. arr.type = Array.DICT c = token_iterator.current[1] arr.end_pos = c.end_pos if isinstance(c, Simple) \ else (c[2][0], c[2][1] + len(c[1])) return arr, break_tok def parse_stmt(token_iterator, maybe_dict=False, added_breaks=(), break_on_assignment=False, stmt_class=Statement): token_list = [] used_vars = [] level = 1 tok = None first = True end_pos = None, None for i, tok_temp in token_iterator: if isinstance(tok_temp, Base): # the token is a Name, which has already been parsed tok = tok_temp if first: start_pos = tok.start_pos first = False end_pos = tok.end_pos if isinstance(tok, ListComprehension): # it's not possible to set it earlier tok.parent = self if isinstance(tok, Name): used_vars.append(tok) else: token_type, tok, start_tok_pos = tok_temp last_end_pos = end_pos end_pos = start_tok_pos[0], start_tok_pos[1] + len(tok) if first: first = False start_pos = start_tok_pos if tok == 'lambda': lambd, tok = parse_lambda(token_iterator) if lambd is not None: token_list.append(lambd) elif tok == 'for': list_comp, tok = parse_list_comp(token_iterator, token_list, start_pos, last_end_pos) if list_comp is not None: token_list = [list_comp] if tok in closing_brackets: level -= 1 elif tok in brackets.keys(): level += 1 if level == 0 and tok in closing_brackets \ or tok in added_breaks \ or level == 1 and (tok == ',' or maybe_dict and tok == ':' or is_assignment(tok) and break_on_assignment): end_pos = end_pos[0], end_pos[1] - 1 break token_list.append(tok_temp) if not token_list: return None, tok statement = stmt_class(self._sub_module, [], [], token_list, start_pos, end_pos, self.parent) statement.used_vars = used_vars return statement, tok def parse_lambda(token_iterator): params = [] start_pos = self.start_pos while True: param, tok = parse_stmt(token_iterator, added_breaks=[':'], stmt_class=Param) if param is None: break params.append(param) if tok == ':': break if tok != ':': return None, tok # since lambda is a Function scope, it needs Scope parents parent = self.get_parent_until(IsScope) lambd 
= Lambda(self._sub_module, params, start_pos, parent) ret, tok = parse_stmt(token_iterator) if ret is not None: ret.parent = lambd lambd.returns.append(ret) lambd.end_pos = self.end_pos return lambd, tok def parse_list_comp(token_iterator, token_list, start_pos, end_pos): def parse_stmt_or_arr(token_iterator, added_breaks=()): stmt, tok = parse_stmt(token_iterator, added_breaks=added_breaks) if not stmt: return None, tok if tok == ',': arr, tok = parse_array(token_iterator, Array.TUPLE, stmt.start_pos, stmt, added_breaks=added_breaks) used_vars = [] for stmt in arr: used_vars += stmt.used_vars start_pos = arr.start_pos[0], arr.start_pos[1] - 1 stmt = Statement(self._sub_module, [], used_vars, [], start_pos, arr.end_pos) arr.parent = stmt stmt.token_list = stmt._commands = [arr] else: for v in stmt.used_vars: v.parent = stmt return stmt, tok st = Statement(self._sub_module, [], [], token_list, start_pos, end_pos) middle, tok = parse_stmt_or_arr(token_iterator, added_breaks=['in']) if tok != 'in' or middle is None: debug.warning('list comprehension middle @%s' % str(start_pos)) return None, tok in_clause, tok = parse_stmt_or_arr(token_iterator) if in_clause is None: debug.warning('list comprehension in @%s' % str(start_pos)) return None, tok return ListComprehension(st, middle, in_clause, self), tok # initializations result = [] is_chain = False brackets = {'(': Array.TUPLE, '[': Array.LIST, '{': Array.SET} closing_brackets = ')', '}', ']' token_iterator = common.PushBackIterator(enumerate(self.token_list)) for i, tok_temp in token_iterator: if isinstance(tok_temp, Base): # the token is a Name, which has already been parsed tok = tok_temp token_type = None start_pos = tok.start_pos end_pos = tok.end_pos else: token_type, tok, start_pos = tok_temp end_pos = start_pos[0], start_pos[1] + len(tok) if is_assignment(tok): # This means, there is an assignment here. 
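# Hedged, standalone sketch (not part of jedi itself) of the `is_assignment`
# heuristic used by the statement parser above: an operator token counts as an
# assignment if it ends with '=' but is not one of the comparison operators.
# The sample tokens below are illustrative only.
def _looks_like_assignment(tok):
    return isinstance(tok, str) and tok.endswith('=') \
        and tok not in ('>=', '<=', '==', '!=')

assert _looks_like_assignment('=')
assert _looks_like_assignment('+=')
assert not _looks_like_assignment('==')
assert not _looks_like_assignment('<=')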
# Add assignments, which can be more than one self._assignment_details.append((result, tok)) result = [] is_chain = False continue elif tok == 'as': # just ignore as, because it sets values next(token_iterator, None) continue if tok == 'lambda': lambd, tok = parse_lambda(token_iterator) if lambd is not None: result.append(lambd) else: continue is_literal = token_type in [tokenize.STRING, tokenize.NUMBER] if isinstance(tok, Name) or is_literal: c_type = Call.NAME if is_literal: tok = literal_eval(tok) if token_type == tokenize.STRING: c_type = Call.STRING elif token_type == tokenize.NUMBER: c_type = Call.NUMBER call = Call(self._sub_module, tok, c_type, start_pos, end_pos, self) if is_chain: result[-1].set_next(call) else: result.append(call) is_chain = False elif tok in brackets.keys(): arr, is_ass = parse_array(token_iterator, brackets[tok], start_pos) if result and isinstance(result[-1], Call): result[-1].set_execution(arr) else: arr.parent = self result.append(arr) elif tok == '.': if result and isinstance(result[-1], Call): is_chain = True elif tok == ',': # implies a tuple # commands is now an array not a statement anymore t = result[0] start_pos = t[2] if isinstance(t, tuple) else t.start_pos # get the correct index i, tok = next(token_iterator, (len(self.token_list), None)) if tok is not None: token_iterator.push_back((i, tok)) t = self.token_list[i - 1] try: e = t.end_pos except AttributeError: e = (t[2][0], t[2][1] + len(t[1])) \ if isinstance(t, tuple) else t.start_pos stmt = Statement(self._sub_module, [], [], result, start_pos, e, self.parent) stmt._commands = result arr, break_tok = parse_array(token_iterator, Array.TUPLE, stmt.start_pos, stmt) result = [arr] if is_assignment(break_tok): self._assignment_details.append((result, break_tok)) result = [] is_chain = False else: if tok != '\n' and token_type != tokenize.COMMENT: result.append(tok) return result class Param(Statement): """ The class which shows definitions of params of classes and functions. But this is not to define function calls. """ __slots__ = ('position_nr', 'is_generated', 'annotation_stmt', 'parent_function') def __init__(self, module, set_vars, used_vars, token_list, start_pos, end_pos, parent=None): super(Param, self).__init__(module, set_vars, used_vars, token_list, start_pos, end_pos, parent) # this is defined by the parser later on, not at the initialization # it is the position in the call (first argument, second...) self.position_nr = None self.is_generated = False self.annotation_stmt = None self.parent_function = None def add_annotation(self, annotation_stmt): annotation_stmt.parent = self.use_as_parent self.annotation_stmt = annotation_stmt def get_name(self): """ get the name of the param """ n = self.set_vars or self.used_vars if len(n) > 1: debug.warning("Multiple param names (%s)." % n) return n[0] class Call(Simple): """ `Call` contains a call, e.g. `foo.bar` and owns the executions of those calls, which are `Array`s. """ NAME = 1 NUMBER = 2 STRING = 3 def __init__(self, module, name, type, start_pos, end_pos, parent=None): super(Call, self).__init__(module, start_pos, end_pos) self.name = name # parent is not the oposite of next. The parent of c: a = [b.c] would # be an array. 
self.parent = parent self.type = type self.next = None self.execution = None def set_next(self, call): """ Adds another part of the statement""" call.parent = self if self.next is not None: self.next.set_next(call) else: self.next = call def set_execution(self, call): """ An execution is nothing else than brackets, with params in them, which shows access on the internals of this name. """ call.parent = self if self.next is not None: self.next.set_execution(call) elif self.execution is not None: self.execution.set_execution(call) else: self.execution = call def generate_call_path(self): """ Helps to get the order in which statements are executed. """ try: for name_part in self.name.names: yield name_part except AttributeError: yield self if self.execution is not None: for y in self.execution.generate_call_path(): yield y if self.next is not None: for y in self.next.generate_call_path(): yield y def get_code(self): if self.type == Call.NAME: s = self.name.get_code() else: s = '' if self.name is None else repr(self.name) if self.execution is not None: s += self.execution.get_code() if self.next is not None: s += '.' + self.next.get_code() return s def __repr__(self): return "<%s: %s>" % \ (type(self).__name__, self.name) class Array(Call): """ Describes the different python types for an array, but also empty statements. In the Python syntax definitions this type is named 'atom'. http://docs.python.org/py3k/reference/grammar.html Array saves sub-arrays as well as normal operators and calls to methods. :param array_type: The type of an array, which can be one of the constants below. :type array_type: int """ NOARRAY = None # just brackets, like `1 * (3 + 2)` TUPLE = 'tuple' LIST = 'list' DICT = 'dict' SET = 'set' def __init__(self, module, start_pos, arr_type=NOARRAY, parent=None): super(Array, self).__init__(module, None, arr_type, start_pos, (None, None), parent) self.end_pos = None, None self.values = [] self.keys = [] def add_statement(self, statement, is_key=False): """Just add a new statement""" statement.parent = self if is_key: self.type = self.DICT self.keys.append(statement) else: self.values.append(statement) @staticmethod def is_type(instance, *types): """ This is not only used for calls on the actual object, but for ducktyping, to invoke this function with anything as `self`. """ try: if instance.type in types: return True except AttributeError: pass return False def __len__(self): return len(self.values) def __getitem__(self, key): if self.type == self.DICT: raise TypeError('no dicts allowed') return self.values[key] def __iter__(self): if self.type == self.DICT: raise TypeError('no dicts allowed') return iter(self.values) def items(self): if self.type != self.DICT: raise TypeError('only dicts allowed') return zip(self.keys, self.values) def get_code(self): map = { self.NOARRAY: '(%s)', self.TUPLE: '(%s)', self.LIST: '[%s]', self.DICT: '{%s}', self.SET: '{%s}' } inner = [] for i, stmt in enumerate(self.values): s = '' with common.ignored(IndexError): key = self.keys[i] s += key.get_code(new_line=False) + ': ' s += stmt.get_code(new_line=False) inner.append(s) add = ',' if self.type == self.TUPLE and len(self) == 1 else '' s = map[self.type] % (', '.join(inner) + add) return s + super(Array, self).get_code() def __repr__(self): if self.type == self.NOARRAY: typ = 'noarray' else: typ = self.type return "<%s: %s%s>" % (type(self).__name__, typ, self.values) class NamePart(str): """ A string. Sometimes it is important to know if the string belongs to a name or not. 
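# Minimal standalone sketch (illustrative, not jedi's API) of the bracket
# mapping used by Array.get_code above: each array type renders through a
# "%s" template, and a one-element tuple keeps its trailing comma so that
# `(1,)` does not turn into the plain expression `(1)`.
_templates = {'tuple': '(%s)', 'list': '[%s]', 'dict': '{%s}', 'set': '{%s}'}

def _render(arr_type, items):
    add = ',' if arr_type == 'tuple' and len(items) == 1 else ''
    return _templates[arr_type] % (', '.join(items) + add)

assert _render('tuple', ['1']) == '(1,)'
assert _render('list', ['1', '2']) == '[1, 2]'
assert _render('dict', ['"a": 1']) == '{"a": 1}'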
""" # Unfortunately there's no way to use slots for str (non-zero __itemsize__) # -> http://utcc.utoronto.ca/~cks/space/blog/python/IntSlotsPython3k #__slots__ = ('_start_pos', 'parent') def __new__(cls, s, parent, start_pos): self = super(NamePart, cls).__new__(cls, s) self._start_pos = start_pos self.parent = parent return self @property def start_pos(self): offset = self.parent._sub_module.line_offset return offset + self._start_pos[0], self._start_pos[1] @property def end_pos(self): return self.start_pos[0], self.start_pos[1] + len(self) def __getnewargs__(self): return str(self), self.parent, self._start_pos class Name(Simple): """ Used to define names in python. Which means the whole namespace/class/function stuff. So a name like "module.class.function" would result in an array of [module, class, function] """ __slots__ = ('names',) def __init__(self, module, names, start_pos, end_pos, parent=None): super(Name, self).__init__(module, start_pos, end_pos) self.names = tuple(n if isinstance(n, NamePart) else NamePart(n[0], self, n[1]) for n in names) if parent is not None: self.parent = parent def get_code(self): """ Returns the names in a full string format """ return ".".join(self.names) @property def docstr(self): """Return attribute docstring (PEP 257) if exists.""" return self.parent.docstr def __str__(self): return self.get_code() def __len__(self): return len(self.names) class ListComprehension(Base): """ Helper class for list comprehensions """ def __init__(self, stmt, middle, input, parent): self.stmt = stmt self.middle = middle self.input = input for s in [stmt, middle, input]: s.parent = self self.parent = parent def get_parent_until(self, *args, **kwargs): return Simple.get_parent_until(self, *args, **kwargs) @property def start_pos(self): return self.stmt.start_pos @property def end_pos(self): return self.stmt.end_pos def __repr__(self): return "<%s: %s>" % \ (type(self).__name__, self.get_code()) def get_code(self): statements = self.stmt, self.middle, self.input code = [s.get_code().replace('\n', '') for s in statements] return "%s for %s in %s" % tuple(code) jedi-0.7.0/jedi/interpret.py0000664000175000017500000001262012204171717016137 0ustar daviddavid00000000000000""" Module to handle interpreted Python objects. """ import itertools import tokenize from jedi import parsing_representation as pr class ObjectImporter(object): """ Import objects in "raw" namespace such as :func:`locals`. """ def __init__(self, scope): self.scope = scope count = itertools.count() self._genname = lambda: '*jedi-%s*' % next(count) """ Generate unique variable names to avoid name collision. To avoid name collision to already defined names, generated names are invalid as Python identifier. """ def import_raw_namespace(self, raw_namespace): """ Import interpreted Python objects in a namespace. Three kinds of objects are treated here. 1. Functions and classes. The objects imported like this:: from os.path import join 2. Modules. The objects imported like this:: import os 3. Instances. 
The objects created like this:: from datetime import datetime dt = datetime(2013, 1, 1) :type raw_namespace: dict :arg raw_namespace: e.g., the dict given by `locals` """ scope = self.scope for (variable, obj) in raw_namespace.items(): objname = getattr(obj, '__name__', None) # Import functions and classes module = getattr(obj, '__module__', None) if module and objname: fakeimport = self.make_fakeimport(module, objname, variable) scope.add_import(fakeimport) continue # Import modules if getattr(obj, '__file__', None) and objname: fakeimport = self.make_fakeimport(objname) scope.add_import(fakeimport) continue # Import instances objclass = getattr(obj, '__class__', None) module = getattr(objclass, '__module__', None) if objclass and module: alias = self._genname() fakeimport = self.make_fakeimport(module, objclass.__name__, alias) fakestmt = self.make_fakestatement(variable, alias, call=True) scope.add_import(fakeimport) scope.add_statement(fakestmt) continue def make_fakeimport(self, module, variable=None, alias=None): """ Make a fake import object. The following statements are created depending on what parameters are given: - only `module`: ``import `` - `module` and `variable`: ``from import `` - all: ``from import as `` :type module: str :arg module: ```` part in ``from import ...`` :type variable: str :arg variable: ```` part in ``from ... import `` :type alias: str :arg alias: ```` part in ``... import ... as ``. :rtype: :class:`parsing_representation.Import` """ submodule = self.scope._sub_module if variable: varname = pr.Name( module=submodule, names=[(variable, (-1, 0))], start_pos=(-1, 0), end_pos=(None, None)) else: varname = None modname = pr.Name( module=submodule, names=[(module, (-1, 0))], start_pos=(-1, 0), end_pos=(None, None)) if alias: aliasname = pr.Name( module=submodule, names=[(alias, (-1, 0))], start_pos=(-1, 0), end_pos=(None, None)) else: aliasname = None if varname: fakeimport = pr.Import( module=submodule, namespace=varname, from_ns=modname, alias=aliasname, start_pos=(-1, 0), end_pos=(None, None)) else: fakeimport = pr.Import( module=submodule, namespace=modname, alias=aliasname, start_pos=(-1, 0), end_pos=(None, None)) return fakeimport def make_fakestatement(self, lhs, rhs, call=False): """ Make a fake statement object that represents ``lhs = rhs``. :type call: bool :arg call: When `call` is true, make a fake statement that represents ``lhs = rhs()``. :rtype: :class:`parsing_representation.Statement` """ submodule = self.scope._sub_module lhsname = pr.Name( module=submodule, names=[(lhs, (0, 0))], start_pos=(0, 0), end_pos=(None, None)) rhsname = pr.Name( module=submodule, names=[(rhs, (0, 0))], start_pos=(0, 0), end_pos=(None, None)) token_list = [lhsname, (tokenize.OP, '=', (0, 0)), rhsname] if call: token_list.extend([ (tokenize.OP, '(', (0, 0)), (tokenize.OP, ')', (0, 0)), ]) return pr.Statement( module=submodule, set_vars=[lhsname], used_vars=[rhsname], token_list=token_list, start_pos=(0, 0), end_pos=(None, None)) jedi-0.7.0/jedi/fast_parser.py0000664000175000017500000003573212204171717016445 0ustar daviddavid00000000000000""" Basically a parser that is faster, because it tries to parse only parts and if anything changes, it only reparses the changed parts. But because it's not finished (and still not working as I want), I won't document it any further. 
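# Hedged sketch of the two tricks jedi/interpret.py's ObjectImporter (above)
# relies on: generated alias names contain characters that are invalid in
# Python identifiers, so they can never clash with user names, and an instance
# found in the namespace is faked as `variable = Alias()`.  The alias shown in
# the trailing comment is a hypothetical stand-in for such a generated name.
import itertools

_count = itertools.count()
_genname = lambda: '*jedi-%s*' % next(_count)

print(_genname())   # *jedi-0*
print(_genname())   # *jedi-1*
# For `dt = datetime(2013, 1, 1)` found in locals(), the importer then adds
# the equivalent of:
#     from datetime import datetime as *jedi-2*
#     dt = *jedi-2*()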
""" import re from jedi._compatibility import use_metaclass from jedi import settings from jedi import parsing from jedi import parsing_representation as pr from jedi import cache from jedi import common SCOPE_CONTENTS = ['asserts', 'subscopes', 'imports', 'statements', 'returns'] class Module(pr.Simple, pr.Module): def __init__(self, parsers): super(Module, self).__init__(self, (1, 0)) self.parsers = parsers self.reset_caches() self.start_pos = 1, 0 self.end_pos = None, None def reset_caches(self): """ This module does a whole lot of caching, because it uses different parsers. """ self._used_names = None for p in self.parsers: p.user_scope = None p.user_stmt = None def __getattr__(self, name): if name.startswith('__'): raise AttributeError('Not available!') else: return getattr(self.parsers[0].module, name) @property def used_names(self): if self._used_names is None: dct = {} for p in self.parsers: for k, statement_set in p.module.used_names.items(): if k in dct: dct[k] |= statement_set else: dct[k] = set(statement_set) self._used_names = dct return self._used_names def __repr__(self): return "<%s: %s@%s-%s>" % (type(self).__name__, self.name, self.start_pos[0], self.end_pos[0]) class CachedFastParser(type): """ This is a metaclass for caching `FastParser`. """ def __call__(self, source, module_path=None, user_position=None): if not settings.fast_parser: return parsing.Parser(source, module_path, user_position) pi = cache.parser_cache.get(module_path, None) if pi is None or isinstance(pi.parser, parsing.Parser): p = super(CachedFastParser, self).__call__(source, module_path, user_position) else: p = pi.parser # pi is a `cache.ParserCacheItem` p.update(source, user_position) return p class ParserNode(object): def __init__(self, parser, code, parent=None): self.parent = parent self.code = code self.hash = hash(code) self.children = [] # must be created before new things are added to it. self.save_contents(parser) def save_contents(self, parser): self.parser = parser try: # with fast_parser we have either 1 subscope or only statements. self.content_scope = parser.module.subscopes[0] except IndexError: self.content_scope = parser.module scope = self.content_scope self._contents = {} for c in SCOPE_CONTENTS: self._contents[c] = list(getattr(scope, c)) self._is_generator = scope.is_generator self.old_children = self.children self.children = [] def reset_contents(self): scope = self.content_scope for key, c in self._contents.items(): setattr(scope, key, list(c)) scope.is_generator = self._is_generator self.parser.user_scope = self.parser.module if self.parent is None: # Global vars of the first one can be deleted, in the global scope # they make no sense. 
self.parser.module.global_vars = [] for c in self.children: c.reset_contents() def parent_until_indent(self, indent=None): if indent is None or self.indent >= indent and self.parent: self.old_children = [] if self.parent is not None: return self.parent.parent_until_indent(indent) return self @property def indent(self): if not self.parent: return 0 module = self.parser.module try: el = module.subscopes[0] except IndexError: try: el = module.statements[0] except IndexError: try: el = module.imports[0] except IndexError: try: el = [r for r in module.returns if r is not None][0] except IndexError: return self.parent.indent + 1 return el.start_pos[1] def _set_items(self, parser, set_parent=False): # insert parser objects into current structure scope = self.content_scope for c in SCOPE_CONTENTS: content = getattr(scope, c) items = getattr(parser.module, c) if set_parent: for i in items: if i is None: continue # happens with empty returns i.parent = scope.use_as_parent if isinstance(i, (pr.Function, pr.Class)): for d in i.decorators: d.parent = scope.use_as_parent content += items # global_vars cur = self while cur.parent is not None: cur = cur.parent cur.parser.module.global_vars += parser.module.global_vars scope.is_generator |= parser.module.is_generator def add_node(self, node, set_parent=False): """Adding a node means adding a node that was already added earlier""" self.children.append(node) self._set_items(node.parser, set_parent=set_parent) node.old_children = node.children node.children = [] return node def add_parser(self, parser, code): return self.add_node(ParserNode(parser, code, self), True) class FastParser(use_metaclass(CachedFastParser)): def __init__(self, code, module_path=None, user_position=None): # set values like `pr.Module`. self.module_path = module_path self.user_position = user_position self._user_scope = None self.current_node = None self.parsers = [] self.module = Module(self.parsers) self.reset_caches() try: self._parse(code) except: # FastParser is cached, be careful with exceptions self.parsers[:] = [] raise @property def user_scope(self): if self._user_scope is None: for p in self.parsers: if p.user_scope: if isinstance(p.user_scope, pr.SubModule): continue self._user_scope = p.user_scope if isinstance(self._user_scope, pr.SubModule) \ or self._user_scope is None: self._user_scope = self.module return self._user_scope @property def user_stmt(self): if self._user_stmt is None: for p in self.parsers: if p.user_stmt: self._user_stmt = p.user_stmt break return self._user_stmt def update(self, code, user_position=None): self.user_position = user_position self.reset_caches() try: self._parse(code) except: # FastParser is cached, be careful with exceptions self.parsers[:] = [] raise def _scan_user_scope(self, sub_module): """ Scan with self.user_position. """ for scope in sub_module.statements + sub_module.subscopes: if isinstance(scope, pr.Scope): if scope.start_pos <= self.user_position <= scope.end_pos: return self._scan_user_scope(scope) or scope return None def _split_parts(self, code): """ Split the code into different parts. This makes it possible to parse each part seperately and therefore cache parts of the file and not everything. 
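# Hedged, heavily simplified sketch of the splitting idea (the real
# _split_parts defined here also tracks indentation, flow keywords and
# decorators): cut the source at top-level `def`/`class`/`@` lines so each
# chunk can be hashed and cached on its own.
import re

def split_parts(code):
    parts, current = [], []
    for line in code.splitlines():
        if re.match(r'^(def|class|@)', line) and current:
            parts.append('\n'.join(current))
            current = []
        current.append(line)
    if current:
        parts.append('\n'.join(current))
    return parts

src = "import os\n\ndef f():\n    return 1\n\nclass C:\n    pass\n"
print(split_parts(src))
# ['import os\n', 'def f():\n    return 1\n', 'class C:\n    pass']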
""" def add_part(): txt = '\n'.join(current_lines) if txt: if add_to_last and parts: parts[-1] += '\n' + txt else: parts.append(txt) current_lines[:] = [] r_keyword = '^[ \t]*(def|class|@|%s)' % '|'.join(common.FLOWS) lines = code.splitlines() current_lines = [] parts = [] is_decorator = False current_indent = 0 old_indent = 0 new_indent = False in_flow = False add_to_last = False # All things within flows are simply being ignored. for i, l in enumerate(lines): # check for dedents m = re.match('^([\t ]*)(.?)', l) indent = len(m.group(1)) if m.group(2) in ['', '#']: current_lines.append(l) # just ignore comments and blank lines continue if indent < current_indent: # -> dedent current_indent = indent new_indent = False if not in_flow or indent < old_indent: add_part() add_to_last = False in_flow = False elif new_indent: current_indent = indent new_indent = False # Check lines for functions/classes and split the code there. if not in_flow: m = re.match(r_keyword, l) if m: in_flow = m.group(1) in common.FLOWS if not is_decorator and not in_flow: add_part() add_to_last = False is_decorator = '@' == m.group(1) if not is_decorator: old_indent = current_indent current_indent += 1 # it must be higher new_indent = True elif is_decorator: is_decorator = False add_to_last = True current_lines.append(l) add_part() return parts def _parse(self, code): """ :type code: str """ def empty_parser(): new, temp = self._get_parser('', '', 0, [], False) return new parts = self._split_parts(code) self.parsers[:] = [] line_offset = 0 start = 0 p = None is_first = True for code_part in parts: lines = code_part.count('\n') + 1 if is_first or line_offset >= p.end_pos[0]: indent = len(re.match(r'[ \t]*', code_part).group(0)) if is_first and self.current_node is not None: nodes = [self.current_node] else: nodes = [] if self.current_node is not None: self.current_node = \ self.current_node.parent_until_indent(indent) nodes += self.current_node.old_children # check if code_part has already been parsed # print '#'*45,line_offset, p and p.end_pos, '\n', code_part p, node = self._get_parser(code_part, code[start:], line_offset, nodes, not is_first) if is_first and p.module.subscopes: # special case, we cannot use a function subscope as a # base scope, subscopes would save all the other contents new = empty_parser() if self.current_node is None: self.current_node = ParserNode(new, '') else: self.current_node.save_contents(new) self.parsers.append(new) is_first = False if is_first: if self.current_node is None: self.current_node = ParserNode(p, code_part) else: self.current_node.save_contents(p) else: if node is None: self.current_node = \ self.current_node.add_parser(p, code_part) else: self.current_node = self.current_node.add_node(node) if self.current_node.parent and (isinstance(p.user_scope, pr.SubModule) or p.user_scope is None) \ and self.user_position \ and p.start_pos <= self.user_position < p.end_pos: p.user_scope = self.current_node.parent.content_scope self.parsers.append(p) is_first = False else: # print '#'*45, line_offset, p.end_pos, 'theheck\n', code_part pass line_offset += lines start += len(code_part) + 1 # +1 for newline if self.parsers: self.current_node = self.current_node.parent_until_indent() else: self.parsers.append(empty_parser()) self.module.end_pos = self.parsers[-1].end_pos # print(self.parsers[0].module.get_code()) del code def _get_parser(self, code, parser_code, line_offset, nodes, no_docstr): h = hash(code) hashes = [n.hash for n in nodes] node = None try: index = hashes.index(h) if 
nodes[index].code != code: raise ValueError() except ValueError: p = parsing.Parser(parser_code, self.module_path, self.user_position, offset=(line_offset, 0), is_fast_parser=True, top_module=self.module, no_docstr=no_docstr) p.module.parent = self.module else: if nodes[index] != self.current_node: offset = int(nodes[0] == self.current_node) self.current_node.old_children.pop(index - offset) node = nodes.pop(index) p = node.parser m = p.module m.line_offset += line_offset + 1 - m.start_pos[0] if self.user_position is not None and \ m.start_pos[0] <= self.user_position[0] <= m.end_pos[0]: # It's important to take care of the whole user # positioning stuff, if no reparsing is being done. p.user_stmt = m.get_statement_for_position( self.user_position, include_imports=True) if p.user_stmt: p.user_scope = p.user_stmt.parent else: p.user_scope = self._scan_user_scope(m) or m return p, node def reset_caches(self): self._user_scope = None self._user_stmt = None self.module.reset_caches() if self.current_node is not None: self.current_node.reset_contents() jedi-0.7.0/jedi/helpers.py0000664000175000017500000001113712204171717015567 0ustar daviddavid00000000000000from __future__ import with_statement import copy from jedi import common from jedi import parsing_representation as pr def fast_parent_copy(obj): """ Much, much faster than copy.deepcopy, but just for certain elements. """ new_elements = {} def recursion(obj): new_obj = copy.copy(obj) new_elements[obj] = new_obj try: items = list(new_obj.__dict__.items()) except AttributeError: # __dict__ not available, because of __slots__ items = [] before = () for cls in new_obj.__class__.__mro__: with common.ignored(AttributeError): if before == cls.__slots__: continue before = cls.__slots__ items += [(n, getattr(new_obj, n)) for n in before] for key, value in items: # replace parent (first try _parent and then parent) if key in ['parent', '_parent'] and value is not None: if key == 'parent' and '_parent' in items: # parent can be a property continue with common.ignored(KeyError): setattr(new_obj, key, new_elements[value]) elif key in ['parent_function', 'use_as_parent', '_sub_module']: continue elif isinstance(value, list): setattr(new_obj, key, list_rec(value)) elif isinstance(value, (pr.Simple, pr.Call)): setattr(new_obj, key, recursion(value)) return new_obj def list_rec(list_obj): copied_list = list_obj[:] # lists, tuples, strings, unicode for i, el in enumerate(copied_list): if isinstance(el, (pr.Simple, pr.Call)): copied_list[i] = recursion(el) elif isinstance(el, list): copied_list[i] = list_rec(el) return copied_list return recursion(obj) def check_arr_index(arr, pos): positions = arr.arr_el_pos for index, comma_pos in enumerate(positions): if pos < comma_pos: return index return len(positions) def array_for_pos(stmt, pos, array_types=None): """Searches for the array and position of a tuple""" def search_array(arr, pos): if arr.type == 'dict': for stmt in arr.values + arr.keys: new_arr, index = array_for_pos(stmt, pos, array_types) if new_arr is not None: return new_arr, index else: for i, stmt in enumerate(arr): new_arr, index = array_for_pos(stmt, pos, array_types) if new_arr is not None: return new_arr, index if arr.start_pos < pos <= stmt.end_pos: if not array_types or arr.type in array_types: return arr, i if len(arr) == 0 and arr.start_pos < pos < arr.end_pos: if not array_types or arr.type in array_types: return arr, 0 return None, 0 def search_call(call, pos): arr, index = None, 0 if call.next is not None: if isinstance(call.next, 
pr.Array): arr, index = search_array(call.next, pos) else: arr, index = search_call(call.next, pos) if not arr and call.execution is not None: arr, index = search_array(call.execution, pos) return arr, index if stmt.start_pos >= pos >= stmt.end_pos: return None, 0 for command in stmt.get_commands(): arr = None if isinstance(command, pr.Array): arr, index = search_array(command, pos) elif isinstance(command, pr.Call): arr, index = search_call(command, pos) if arr is not None: return arr, index return None, 0 def search_function_definition(stmt, pos): """ Returns the function Call that matches the position before. """ # some parts will of the statement will be removed stmt = fast_parent_copy(stmt) arr, index = array_for_pos(stmt, pos, [pr.Array.TUPLE, pr.Array.NOARRAY]) if arr is not None and isinstance(arr.parent, pr.Call): call = arr.parent while isinstance(call.parent, pr.Call): call = call.parent arr.parent.execution = None return call if call.type == pr.Call.NAME else None, index, False return None, 0, False class FakeStatement(pr.Statement): class SubModule(): line_offset = 0 def __init__(self, content): cls = type(self) p = 0, 0 super(cls, self).__init__(cls.SubModule, [], [], [content], p, p) jedi-0.7.0/jedi/docstrings.py0000664000175000017500000000727512204171717016314 0ustar daviddavid00000000000000""" Docstrings are another source of information for functions and classes. :mod:`dynamic` tries to find all executions of functions, while the docstring parsing is much easier. There are two different types of docstrings that |jedi| understands: - `Sphinx `_ - `Epydoc `_ For example, the sphinx annotation ``:type foo: str`` clearly states that the type of ``foo`` is ``str``. As an addition to parameter searching, this module also provides return annotations. """ import re from jedi import cache from jedi import parsing import evaluate import evaluate_representation as er DOCSTRING_PARAM_PATTERNS = [ r'\s*:type\s+%s:\s*([^\n]+)', # Sphinx r'\s*@type\s+%s:\s*([^\n]+)', # Epydoc ] DOCSTRING_RETURN_PATTERNS = [ re.compile(r'\s*:rtype:\s*([^\n]+)', re.M), # Sphinx re.compile(r'\s*@rtype:\s*([^\n]+)', re.M), # Epydoc ] REST_ROLE_PATTERN = re.compile(r':[^`]+:`([^`]+)`') @cache.memoize_default() def follow_param(param): func = param.parent_function # print func, param, param.parent_function param_str = _search_param_in_docstr(func.docstr, str(param.get_name())) user_position = (1, 0) if param_str is not None: # Try to import module part in dotted name. # (e.g., 'threading' in 'threading.Thread'). if '.' in param_str: param_str = 'import %s\n%s' % ( param_str.rsplit('.', 1)[0], param_str) user_position = (2, 0) p = parsing.Parser(param_str, None, user_position, no_docstr=True) if p.user_stmt is None: return [] return evaluate.follow_statement(p.user_stmt) return [] def _search_param_in_docstr(docstr, param_str): """ Search `docstr` for a type of `param_str`. >>> _search_param_in_docstr(':type param: int', 'param') 'int' >>> _search_param_in_docstr('@type param: int', 'param') 'int' >>> _search_param_in_docstr( ... ':type param: :class:`threading.Thread`', 'param') 'threading.Thread' >>> _search_param_in_docstr('no document', 'param') is None True """ # look at #40 to see definitions of those params patterns = [re.compile(p % re.escape(param_str)) for p in DOCSTRING_PARAM_PATTERNS] for pattern in patterns: match = pattern.search(docstr) if match: return _strip_rest_role(match.group(1)) return None def _strip_rest_role(type_str): """ Strip off the part looks like a ReST role in `type_str`. 
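# Hedged illustration of how follow_param above turns a docstring type
# annotation into something the evaluator can follow: a dotted type such as
# 'threading.Thread' is rewritten into a two-line mini module that first
# imports the package part and then names the type, so the regular import and
# statement machinery resolves it.
param_str = 'threading.Thread'
if '.' in param_str:
    source = 'import %s\n%s' % (param_str.rsplit('.', 1)[0], param_str)
else:
    source = param_str
print(source)
# import threading
# threading.Thread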
>>> _strip_rest_role(':class:`ClassName`') # strip off :class: 'ClassName' >>> _strip_rest_role(':py:obj:`module.Object`') # works with domain 'module.Object' >>> _strip_rest_role('ClassName') # do nothing when not ReST role 'ClassName' See also: http://sphinx-doc.org/domains.html#cross-referencing-python-objects """ match = REST_ROLE_PATTERN.match(type_str) if match: return match.group(1) else: return type_str def find_return_types(func): def search_return_in_docstr(code): for p in DOCSTRING_RETURN_PATTERNS: match = p.search(code) if match: return match.group(1) if isinstance(func, er.InstanceElement): func = func.var if isinstance(func, er.Function): func = func.base_func type_str = search_return_in_docstr(func.docstr) if not type_str: return [] p = parsing.Parser(type_str, None, (1, 0), no_docstr=True) if p.user_stmt is None: return [] p.user_stmt.parent = func return list(evaluate.follow_statement(p.user_stmt)) jedi-0.7.0/jedi/common.py0000664000175000017500000001575212204171717015424 0ustar daviddavid00000000000000""" A universal module with functions / classes without dependencies. """ import sys import contextlib import functools import tokenizer as tokenize from jedi._compatibility import next, reraise from jedi import settings FLOWS = ['if', 'else', 'elif', 'while', 'with', 'try', 'except', 'finally'] class MultiLevelStopIteration(Exception): """ StopIteration's get catched pretty easy by for loops, let errors propagate. """ pass class UncaughtAttributeError(Exception): """ Important, because `__getattr__` and `hasattr` catch AttributeErrors implicitly. This is really evil (mainly because of `__getattr__`). `hasattr` in Python 2 is even more evil, because it catches ALL exceptions. Therefore this class originally had to be derived from `BaseException` instead of `Exception`. But because I removed relevant `hasattr` from the code base, we can now switch back to `Exception`. :param base: return values of sys.exc_info(). """ def rethrow_uncaught(func): """ Re-throw uncaught `AttributeError`. Usage: Put ``@rethrow_uncaught`` in front of the function which does **not** suppose to raise `AttributeError`. AttributeError is easily get caught by `hasattr` and another ``except AttributeError`` clause. This becomes problem when you use a lot of "dynamic" attributes (e.g., using ``@property``) because you can't distinguish if the property does not exist for real or some code inside of the "dynamic" attribute through that error. In a well written code, such error should not exist but getting there is very difficult. This decorator is to help us getting there by changing `AttributeError` to `UncaughtAttributeError` to avoid unexpected catch. This helps us noticing bugs earlier and facilitates debugging. .. note:: Treating StopIteration here is easy. Add that feature when needed. 
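# Small self-contained demonstration (illustrative only) of the problem the
# UncaughtAttributeError / rethrow_uncaught machinery above guards against:
# an AttributeError raised *inside* a property is swallowed by hasattr, so the
# attribute merely looks missing while the real bug stays hidden.
class _Broken(object):
    @property
    def name(self):
        return self.typo_attribute      # AttributeError: no such attribute

print(hasattr(_Broken(), 'name'))       # False -- the inner error is eaten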
""" @functools.wraps(func) def wrapper(*args, **kwds): try: return func(*args, **kwds) except AttributeError: exc_info = sys.exc_info() reraise(UncaughtAttributeError(exc_info[1]), exc_info[2]) return wrapper class PushBackIterator(object): def __init__(self, iterator): self.pushes = [] self.iterator = iterator self.current = None def push_back(self, value): self.pushes.append(value) def __iter__(self): return self def next(self): """ Python 2 Compatibility """ return self.__next__() def __next__(self): if self.pushes: self.current = self.pushes.pop() else: self.current = next(self.iterator) return self.current class NoErrorTokenizer(object): def __init__(self, readline, offset=(0, 0), is_fast_parser=False): self.readline = readline self.gen = tokenize.generate_tokens(readline) self.offset = offset self.closed = False self.is_first = True self.push_backs = [] # fast parser options self.is_fast_parser = is_fast_parser self.current = self.previous = [None, None, (0, 0), (0, 0), ''] self.in_flow = False self.new_indent = False self.parser_indent = self.old_parser_indent = 0 self.is_decorator = False self.first_stmt = True def push_last_back(self): self.push_backs.append(self.current) def next(self): """ Python 2 Compatibility """ return self.__next__() def __next__(self): if self.closed: raise MultiLevelStopIteration() if self.push_backs: return self.push_backs.pop(0) self.last_previous = self.previous self.previous = self.current self.current = next(self.gen) c = list(self.current) if c[0] == tokenize.ENDMARKER: self.current = self.previous self.previous = self.last_previous raise MultiLevelStopIteration() # this is exactly the same check as in fast_parser, but this time with # tokenize and therefore precise. breaks = ['def', 'class', '@'] if self.is_first: c[2] = self.offset[0] + c[2][0], self.offset[1] + c[2][1] c[3] = self.offset[0] + c[3][0], self.offset[1] + c[3][1] self.is_first = False else: c[2] = self.offset[0] + c[2][0], c[2][1] c[3] = self.offset[0] + c[3][0], c[3][1] self.current = c def close(): if not self.first_stmt: self.closed = True raise MultiLevelStopIteration() # ignore indents/comments if self.is_fast_parser \ and self.previous[0] in (tokenize.INDENT, tokenize.NL, None, tokenize.NEWLINE, tokenize.DEDENT) \ and c[0] not in (tokenize.COMMENT, tokenize.INDENT, tokenize.NL, tokenize.NEWLINE, tokenize.DEDENT): # print c, tokenize.tok_name[c[0]] tok = c[1] indent = c[2][1] if indent < self.parser_indent: # -> dedent self.parser_indent = indent self.new_indent = False if not self.in_flow or indent < self.old_parser_indent: close() self.in_flow = False elif self.new_indent: self.parser_indent = indent self.new_indent = False if not self.in_flow: if tok in FLOWS or tok in breaks: self.in_flow = tok in FLOWS if not self.is_decorator and not self.in_flow: close() self.is_decorator = '@' == tok if not self.is_decorator: self.old_parser_indent = self.parser_indent self.parser_indent += 1 # new scope: must be higher self.new_indent = True if tok != '@': if self.first_stmt and not self.new_indent: self.parser_indent = indent self.first_stmt = False return c @contextlib.contextmanager def scale_speed_settings(factor): a = settings.max_executions b = settings.max_until_execution_unique settings.max_executions *= factor settings.max_until_execution_unique *= factor yield settings.max_executions = a settings.max_until_execution_unique = b def indent_block(text, indention=' '): """ This function indents a text block with a default of four spaces """ temp = '' while text and text[-1] == 
'\n': temp += text[-1] text = text[:-1] lines = text.split('\n') return '\n'.join(map(lambda s: indention + s, lines)) + temp @contextlib.contextmanager def ignored(*exceptions): """Context manager that ignores all of the specified exceptions. This will be in the standard library starting with Python 3.4.""" try: yield except exceptions: pass jedi-0.7.0/jedi/builtin.py0000664000175000017500000003506512204171717015601 0ustar daviddavid00000000000000""" A big part of the Python standard libraries are unfortunately not only written in Python. The process works like this: - ``BuiltinModule`` imports the builtin module (e.g. ``sys``) - then ``BuiltinModule`` generates code with the docstrings of builtin functions. - The :mod:`parsing` parser processes the generated code. This is possible, because many builtin functions supply docstrings, for example the method ``list.index`` has the following attribute ``__doc__``: L.index(value, [start, [stop]]) -> integer -- return first index of value. Raises ValueError if the value is not present. `PEP 257 `_ teaches how docstrings should look like for C functions. Additionally there's a ``Builtin`` instance in this module, to make it possible to access functions like ``list`` and ``int`` directly, the same way |jedi| access other functions. """ from __future__ import with_statement from jedi._compatibility import exec_function, is_py3k import re import sys import os if is_py3k: import io import types import inspect from jedi import common from jedi import debug from jedi import parsing from jedi import modules import evaluate class BuiltinModule(modules.CachedModule): """ This module is a parser for all builtin modules, which are programmed in C/C++. It should also work on third party modules. It can be instantiated with either a path or a name of the module. The path is important for third party modules. :param name: The name of the module. :param path: The path of the module. :param sys_path: The sys.path, which is can be customizable. """ map_types = { 'floating point number': '0.0', 'string': '""', 'str': '""', 'character': '"a"', 'integer': '0', 'int': '0', 'dictionary': '{}', 'list': '[]', 'file object': 'file("")', # TODO things like dbg: ('not working', 'tuple of integers') } if is_py3k: map_types['file object'] = 'import io; return io.TextIOWrapper()' def __init__(self, path=None, name=None, sys_path=None): if sys_path is None: sys_path = modules.get_sys_path() if not name: name = os.path.basename(path) name = name.rpartition('.')[0] # cut file type (normally .so) super(BuiltinModule, self).__init__(path=path, name=name) self.sys_path = list(sys_path) self._module = None @property def module(self): def load_module(name, path): if path: self.sys_path.insert(0, path) temp, sys.path = sys.path, self.sys_path content = {} try: exec_function('import %s as module' % name, content) self._module = content['module'] except AttributeError: # use sys.modules, because you cannot access some modules # directly. 
-> #59 self._module = sys.modules[name] sys.path = temp if path: self.sys_path.pop(0) # module might already be defined if not self._module: path = self.path name = self.name if self.path: dot_path = [] p = self.path # search for the builtin with the correct path while p and p not in sys.path: p, sep, mod = p.rpartition(os.path.sep) dot_path.append(mod.partition('.')[0]) if p: name = ".".join(reversed(dot_path)) path = p else: path = os.path.dirname(self.path) load_module(name, path) return self._module def _get_source(self): """ Override this abstract method """ return _generate_code(self.module, self._load_mixins()) def _load_mixins(self): """ Load functions that are mixed in to the standard library. E.g. builtins are written in C (binaries), but my autocompletion only understands Python code. By mixing in Python code, the autocompletion should work much better for builtins. """ regex = r'^(def|class)\s+([\w\d]+)' def process_code(code, depth=0): funcs = {} matches = list(re.finditer(regex, code, re.MULTILINE)) positions = [m.start() for m in matches] for i, pos in enumerate(positions): try: code_block = code[pos:positions[i + 1]] except IndexError: code_block = code[pos:len(code)] structure_name = matches[i].group(1) name = matches[i].group(2) if structure_name == 'def': funcs[name] = code_block elif structure_name == 'class': if depth > 0: raise NotImplementedError() # remove class line c = re.sub(r'^[^\n]+', '', code_block) # remove whitespace c = re.compile(r'^[ ]{4}', re.MULTILINE).sub('', c) funcs[name] = process_code(c) else: raise NotImplementedError() return funcs try: name = self.name # sometimes there are stupid endings like `_sqlite3.cpython-32mu` name = re.sub(r'\..*', '', name) if name == '__builtin__' and not is_py3k: name = 'builtins' path = os.path.dirname(os.path.abspath(__file__)) with open(os.path.sep.join([path, 'mixin', name]) + '.pym') as f: s = f.read() except IOError: return {} else: mixin_dct = process_code(s) if is_py3k and self.name == Builtin.name: # in the case of Py3k xrange is now range mixin_dct['range'] = mixin_dct['xrange'] return mixin_dct def _generate_code(scope, mixin_funcs={}, depth=0): """ Generate a string, which uses python syntax as an input to the Parser. """ def get_doc(obj, indent=False): doc = inspect.getdoc(obj) if doc: doc = ('r"""\n%s\n"""\n' % doc) if indent: doc = common.indent_block(doc) return doc return '' def is_in_base_classes(cls, name, comparison): """ Base classes may contain the exact same object """ if name in mixin_funcs: return False try: mro = cls.mro() except TypeError: # this happens, if cls == type return False for base in mro[1:]: try: attr = getattr(base, name) except AttributeError: continue if attr == comparison: return True return False def get_scope_objects(names): """ Looks for the names defined with dir() in an objects and divides them into different object types. """ classes = {} funcs = {} stmts = {} members = {} for n in names: try: # this has a builtin_function_or_method exe = getattr(scope, n) except AttributeError: # happens e.g. 
in properties of # PyQt4.QtGui.QStyleOptionComboBox.currentText # -> just set it to None members[n] = None else: if inspect.isclass(scope): if is_in_base_classes(scope, n, exe): continue if inspect.isbuiltin(exe) or inspect.ismethod(exe) \ or inspect.ismethoddescriptor(exe): funcs[n] = exe elif inspect.isclass(exe) or inspect.ismodule(exe): classes[n] = exe elif inspect.ismemberdescriptor(exe): members[n] = exe else: stmts[n] = exe return classes, funcs, stmts, members code = '' if inspect.ismodule(scope): # generate comment where the code's from. try: path = scope.__file__ except AttributeError: path = '?' code += '# Generated module %s from %s\n' % (scope.__name__, path) code += get_doc(scope) names = set(dir(scope)) - set(['__file__', '__name__', '__doc__', '__path__', '__package__']) \ | set(['mro']) classes, funcs, stmts, members = get_scope_objects(names) # classes for name, cl in classes.items(): bases = (c.__name__ for c in cl.__bases__) if inspect.isclass(cl) \ else [] code += 'class %s(%s):\n' % (name, ','.join(bases)) if depth == 0: try: mixin = mixin_funcs[name] except KeyError: mixin = {} cl_code = _generate_code(cl, mixin, depth + 1) code += common.indent_block(cl_code) code += '\n' # functions for name, func in funcs.items(): params, ret = _parse_function_doc(func) if depth > 0: params = 'self, ' + params doc_str = get_doc(func, indent=True) try: mixin = mixin_funcs[name] except KeyError: # normal code generation code += 'def %s(%s):\n' % (name, params) code += doc_str code += common.indent_block('%s\n\n' % ret) else: # generation of code with mixins # the parser only supports basic functions with a newline after # the double dots # find doc_str place try: pos = re.search(r'\):\s*\n', mixin).end() except TypeError: # pypy uses a different reversed builtin if name == 'reversed': mixin = 'def reversed(sequence):\n' \ ' for i in self.__sequence: yield i' pos = 24 else: debug.warning('mixin trouble in pypy: %s', name) raise if pos is None: raise Exception("Builtin function not parsed correctly") code += mixin[:pos] + doc_str + mixin[pos:] # class members (functions) properties? for name, func in members.items(): # recursion problem in properties TODO remove if name in ['fget', 'fset', 'fdel']: continue ret = 'pass' code += '@property\ndef %s(self):\n' % (name) code += common.indent_block(get_doc(func) + '%s\n\n' % ret) # variables for name, value in stmts.items(): if is_py3k: file_type = io.TextIOWrapper else: file_type = types.FileType if isinstance(value, file_type): value = 'open()' elif name == 'None': value = '' elif type(value).__name__ in ['int', 'bool', 'float', 'dict', 'list', 'tuple']: value = repr(value) else: # get the type, if the type is not simple. mod = type(value).__module__ value = type(value).__name__ + '()' if mod != '__builtin__': value = '%s.%s' % (mod, value) code += '%s = %s\n' % (name, value) return code def _parse_function_doc(func): """ Takes a function and returns the params and return value as a tuple. This is nothing more than a docstring parser. 
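# Worked example of the docstring-signature rewriting performed by
# _parse_function_doc, reusing the same regex trick: optional-argument
# brackets from a C-level docstring such as
#     L.index(value, [start, [stop]]) -> integer -- return first index ...
# are turned into keyword defaults.  The `-> integer` part is afterwards
# looked up in BuiltinModule.map_types ('integer' -> '0'), so the generated
# stub ends in `return 0`.
import re

def change_options(m):
    args = m.group(1).split(',')
    for i, a in enumerate(args):
        if a and '=' not in a:
            args[i] += '=None'
    return ','.join(args)

param_str = 'value, [start, [stop]]'
while True:
    param_str, changes = re.subn(r' ?\[([^\[\]]+)\]', change_options,
                                 param_str)
    if changes == 0:
        break
print(param_str)   # value,start=None,stop=None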
""" # TODO: things like utime(path, (atime, mtime)) and a(b [, b]) -> None doc = inspect.getdoc(func) # get full string, parse round parentheses: def func(a, (b,c)) try: count = 0 debug.dbg(func, func.__name__, doc) start = doc.index('(') for i, s in enumerate(doc[start:]): if s == '(': count += 1 elif s == ')': count -= 1 if count == 0: end = start + i break param_str = doc[start + 1:end] # remove square brackets, that show an optional param ( = None) def change_options(m): args = m.group(1).split(',') for i, a in enumerate(args): if a and '=' not in a: args[i] += '=None' return ','.join(args) while True: param_str, changes = re.subn(r' ?\[([^\[\]]+)\]', change_options, param_str) if changes == 0: break except (ValueError, AttributeError): debug.dbg('no brackets found - no param') end = 0 param_str = '' param_str = param_str.replace('-', '_') # see: isinstance.__doc__ if doc is not None: r = re.search('-[>-]* ', doc[end:end + 7]) if doc is None or r is None: ret = 'pass' else: index = end + r.end() # get result type, which can contain newlines pattern = re.compile(r'(,\n|[^\n-])+') ret_str = pattern.match(doc, index).group(0).strip() # New object -> object() ret_str = re.sub(r'[nN]ew (.*)', r'\1()', ret_str) ret = BuiltinModule.map_types.get(ret_str, ret_str) if ret == ret_str and ret not in ['None', 'object', 'tuple', 'set']: debug.dbg('not working', ret_str) if ret != 'pass': ret = ('return ' if 'return' not in ret else '') + ret return param_str, ret class Builtin(object): """ The builtin scope / module """ # Python 3 compatibility if is_py3k: name = 'builtins' else: name = '__builtin__' _builtin = None @property def builtin(self): if self._builtin is None: self._builtin = BuiltinModule(name=self.name) return self._builtin @property def scope(self): return self.builtin.parser.module @property def magic_function_scope(self): try: return self._magic_function_scope except AttributeError: # depth = 1 because this is not a module class Container(object): FunctionType = types.FunctionType source = _generate_code(Container, depth=0) parser = parsing.Parser(source, None) module = parser.module module.parent = self.scope typ = evaluate.follow_path(iter(['FunctionType']), module, module) s = self._magic_function_scope = typ.pop() return s Builtin = Builtin() jedi-0.7.0/jedi/settings.py0000664000175000017500000001354212204171717015767 0ustar daviddavid00000000000000""" This module contains variables with global |jedi| setting. To change the behavior of |jedi|, change the variables defined in :mod:`jedi.settings`. Plugins should expose an interface so that the user can adjust the configuration. Example usage:: from jedi import settings settings.case_insensitive_completion = True Completion output ~~~~~~~~~~~~~~~~~ .. autodata:: case_insensitive_completion .. autodata:: add_dot_after_module .. autodata:: add_bracket_after_function .. autodata:: no_completion_duplicates Filesystem cache ~~~~~~~~~~~~~~~~ .. autodata:: cache_directory .. autodata:: use_filesystem_cache Parser ~~~~~~ .. autodata:: fast_parser Dynamic stuff ~~~~~~~~~~~~~ .. autodata:: dynamic_arrays_instances .. autodata:: dynamic_array_additions .. autodata:: dynamic_params .. autodata:: dynamic_params_for_other_modules .. autodata:: additional_dynamic_modules .. _settings-recursion: Recursions ~~~~~~~~~~ Recursion settings are important if you don't want extremly recursive python code to go absolutely crazy. First of there is a global limit :data:`max_executions`. 
This limit is important, to set a maximum amount of time, the completion may use. The default values are based on experiments while completing the |jedi| library itself (inception!). But I don't think there's any other Python library that uses recursion in a similarly extreme way. These settings make the completion definitely worse in some cases. But a completion should also be fast. .. autodata:: max_until_execution_unique .. autodata:: max_function_recursion_level .. autodata:: max_executions_without_builtins .. autodata:: max_executions .. autodata:: scale_function_definition Caching ~~~~~~~ .. autodata:: star_import_cache_validity .. autodata:: function_definition_validity """ import os import platform # ---------------- # completion output settings # ---------------- case_insensitive_completion = True """ The completion is by default case insensitive. """ add_dot_after_module = False """ Adds a dot after a module, because a module that is not accessed this way is definitely not the normal case. However, in VIM this doesn't work, that's why it isn't used at the moment. """ add_bracket_after_function = False """ Adds an opening bracket after a function, because that's normal behaviour. Removed it again, because in VIM that is not very practical. """ no_completion_duplicates = True """ If set, completions with the same name don't appear in the output anymore, but are in the `same_name_completions` attribute. """ # ---------------- # Filesystem cache # ---------------- use_filesystem_cache = True """ Use filesystem cache to save once parsed files with pickle. """ if platform.system().lower() == 'windows': _cache_directory = os.path.join(os.getenv('APPDATA') or '~', 'Jedi', 'Jedi') elif platform.system().lower() == 'darwin': _cache_directory = os.path.join('~', 'Library', 'Caches', 'Jedi') else: _cache_directory = os.path.join(os.getenv('XDG_CACHE_HOME') or '~/.cache', 'jedi') cache_directory = os.path.expanduser(_cache_directory) """ The path where all the caches can be found. On Linux, this defaults to ``~/.cache/jedi/``, on OS X to ``~/Library/Caches/Jedi/`` and on Windows to ``%APPDATA%\\Jedi\\Jedi\\``. On Linux, if environment variable ``$XDG_CACHE_HOME`` is set, ``$XDG_CACHE_HOME/jedi`` is used instead of the default one. """ # ---------------- # parser # ---------------- fast_parser = True """ Use the fast parser. This means that reparsing is only being done if something has been changed e.g. to a function. If this happens, only the function is being reparsed. """ # ---------------- # dynamic stuff # ---------------- dynamic_arrays_instances = True """ Check for `append`, etc. on array instances like list() """ dynamic_array_additions = True """ check for `append`, etc. on arrays: [], {}, () """ dynamic_params = True """ A dynamic param completion, finds the callees of the function, which define the params of a function. """ dynamic_params_for_other_modules = True """ Do the same for other modules. """ additional_dynamic_modules = [] """ Additional modules in which |jedi| checks if statements are to be found. This is practical for IDEs, that want to administrate their modules themselves. """ dynamic_flow_information = True """ Check for `isinstance` and other information to infer a type. """ # ---------------- # recursions # ---------------- max_until_execution_unique = 50 """ This limit is probably the most important one, because if this limit is exceeded, functions can only be one time executed. 
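# Standalone sketch (assumed simplification) of how these limits interact
# with the scale_function_definition factor defined just below:
# jedi.common.scale_speed_settings temporarily multiplies the two limits and
# restores them afterwards, so the frequently triggered function_definition
# lookup runs with a much cheaper budget.
import contextlib

max_executions = 250
max_until_execution_unique = 50

@contextlib.contextmanager
def scale_speed_settings(factor):
    global max_executions, max_until_execution_unique
    a, b = max_executions, max_until_execution_unique
    max_executions *= factor
    max_until_execution_unique *= factor
    yield
    max_executions, max_until_execution_unique = a, b

with scale_speed_settings(0.1):
    print(max_executions)   # 25.0 -- scaled while the block runs
print(max_executions)       # 250 -- restored afterwards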
So new functions will be executed, complex recursions with the same functions again and again, are ignored. """ max_function_recursion_level = 5 """ `max_function_recursion_level` is more about whether the recursions are stopped in deepth or in width. The ratio beetween this and `max_until_execution_unique` is important here. It stops a recursion (after the number of function calls in the recursion), if it was already used earlier. """ max_executions_without_builtins = 200 """ .. todo:: Document this. """ max_executions = 250 """ A maximum amount of time, the completion may use. """ scale_function_definition = 0.1 """ Because function_definition is normally used on every single key hit, it has to be faster than a normal completion. This is the factor that is used to scale `max_executions` and `max_until_execution_unique`: """ # ---------------- # caching validity (time) # ---------------- star_import_cache_validity = 60.0 """ In huge packages like numpy, checking all star imports on every completion might be slow, therefore we do a star import caching, that lasts a certain time span (in seconds). """ function_definition_validity = 3.0 """ Finding function calls might be slow (0.1-0.5s). This is not acceptible for normal writing. Therefore cache it for a short time. """ jedi-0.7.0/jedi/parsing.py0000664000175000017500000006711412204171717015576 0ustar daviddavid00000000000000""" The ``Parser`` tries to convert the available Python code in an easy to read format, something like an abstract syntax tree. The classes who represent this tree, are sitting in the :mod:`parsing_representation` module. The Python module ``tokenize`` is a very important part in the ``Parser``, because it splits the code into different words (tokens). Sometimes it looks a bit messy. Sorry for that! You might ask now: "Why didn't you use the ``ast`` module for this? Well, ``ast`` does a very good job understanding proper Python code, but fails to work as soon as there's a single line of broken code. There's one important optimization that needs to be known: Statements are not being parsed completely. ``Statement`` is just a representation of the tokens within the statement. This lowers memory usage and cpu time and reduces the complexity of the ``Parser`` (there's another parser sitting inside ``Statement``, which produces ``Array`` and ``Call``). """ from __future__ import with_statement import tokenizer as tokenize import keyword from jedi._compatibility import next, StringIO from jedi import debug from jedi import common from jedi import parsing_representation as pr class ParserError(Exception): pass class Parser(object): """ This class is used to parse a Python file, it then divides them into a class structure of different scopes. :param source: The codebase for the parser. :type source: str :param module_path: The path of the module in the file system, may be None. :type module_path: str :param user_position: The line/column, the user is currently on. :type user_position: tuple(int, int) :param no_docstr: If True, a string at the beginning is not a docstr. :param is_fast_parser: -> for fast_parser :param top_module: Use this module as a parent instead of `self.module`. 
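# Hedged usage sketch for the Parser documented above (jedi 0.7 era, Python 2
# compatible).  The attribute names used below -- module, subscopes, end_pos --
# all appear elsewhere in this source tree, but the snippet is illustrative
# rather than a test.
from jedi import parsing

source = "def f(x):\n    return x + 1\n"
p = parsing.Parser(source)
print(p.module.subscopes)    # one pr.Function scope for `f`
print(p.module.end_pos)      # end position of the parsed module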
""" def __init__(self, source, module_path=None, user_position=None, no_docstr=False, offset=(0, 0), is_fast_parser=None, top_module=None): self.user_position = user_position self.user_scope = None self.user_stmt = None self.no_docstr = no_docstr self.start_pos = self.end_pos = 1 + offset[0], offset[1] # initialize global Scope self.module = pr.SubModule(module_path, self.start_pos, top_module) self._scope = self.module self._current = (None, None) source = source + '\n' # end with \n, because the parser needs it buf = StringIO(source) self._gen = common.NoErrorTokenizer(buf.readline, offset, is_fast_parser) self.top_module = top_module or self.module try: self._parse() except (common.MultiLevelStopIteration, StopIteration): # StopIteration needs to be added as well, because python 2 has a # strange way of handling StopIterations. # sometimes StopIteration isn't catched. Just ignore it. pass # clean up unused decorators for d in self._decorators: # set a parent for unused decorators, avoid NullPointerException # because of `self.module.used_names`. d.parent = self.module if self._current[0] in (tokenize.NL, tokenize.NEWLINE): # we added a newline before, so we need to "remove" it again. self.end_pos = self._gen.previous[2] elif self._current[0] == tokenize.INDENT: self.end_pos = self._gen.last_previous[2] self.start_pos = self.module.start_pos self.module.end_pos = self.end_pos del self._gen def __repr__(self): return "<%s: %s>" % (type(self).__name__, self.module) def _check_user_stmt(self, simple): # this is not user checking, just update the used_names for tok_name in self.module.temp_used_names: try: self.module.used_names[tok_name].add(simple) except KeyError: self.module.used_names[tok_name] = set([simple]) self.module.temp_used_names = [] if not self.user_position: return # the position is right if simple.start_pos <= self.user_position <= simple.end_pos: if self.user_stmt is not None: # if there is already a user position (another import, because # imports are splitted) the names are checked. for n in simple.get_set_vars(): if n.start_pos < self.user_position <= n.end_pos: self.user_stmt = simple else: self.user_stmt = simple def _parse_dot_name(self, pre_used_token=None): """ The dot name parser parses a name, variable or function and returns their names. :return: Tuple of Name, token_type, nexttoken. :rtype: tuple(Name, int, str) """ def append(el): names.append(el) self.module.temp_used_names.append(el[0]) names = [] if pre_used_token is None: token_type, tok = self.next() if token_type != tokenize.NAME and tok != '*': return [], token_type, tok else: token_type, tok = pre_used_token if token_type != tokenize.NAME and tok != '*': # token maybe a name or star return None, token_type, tok append((tok, self.start_pos)) first_pos = self.start_pos while True: end_pos = self.end_pos token_type, tok = self.next() if tok != '.': break token_type, tok = self.next() if token_type != tokenize.NAME: break append((tok, self.start_pos)) n = pr.Name(self.module, names, first_pos, end_pos) if names else None return n, token_type, tok def _parse_import_list(self): """ The parser for the imports. Unlike the class and function parse function, this returns no Import class, but rather an import list, which is then added later on. The reason, why this is not done in the same class lies in the nature of imports. There are two ways to write them: - from ... import ... - import ... To distinguish, this has to be processed after the parser. :return: List of imports. 
:rtype: list """ imports = [] brackets = False continue_kw = [",", ";", "\n", ')'] \ + list(set(keyword.kwlist) - set(['as'])) while True: defunct = False token_type, tok = self.next() if tok == '(': # python allows only one `(` in the statement. brackets = True token_type, tok = self.next() if brackets and tok == '\n': self.next() i, token_type, tok = self._parse_dot_name(self._current) if not i: defunct = True name2 = None if tok == 'as': name2, token_type, tok = self._parse_dot_name() imports.append((i, name2, defunct)) while tok not in continue_kw: token_type, tok = self.next() if not (tok == "," or brackets and tok == '\n'): break return imports def _parse_parentheses(self): """ Functions and Classes have params (which means for classes super-classes). They are parsed here and returned as Statements. :return: List of Statements :rtype: list """ names = [] tok = None pos = 0 breaks = [',', ':'] while tok not in [')', ':']: param, tok = self._parse_statement(added_breaks=breaks, stmt_class=pr.Param) if param and tok == ':': # parse annotations annotation, tok = self._parse_statement(added_breaks=breaks) if annotation: param.add_annotation(annotation) # params without vars are usually syntax errors. if param and (param.set_vars or param.used_vars): param.position_nr = pos names.append(param) pos += 1 return names def _parse_function(self): """ The parser for a text functions. Process the tokens, which follow a function definition. :return: Return a Scope representation of the tokens. :rtype: Function """ first_pos = self.start_pos token_type, fname = self.next() if token_type != tokenize.NAME: return None fname = pr.Name(self.module, [(fname, self.start_pos)], self.start_pos, self.end_pos) token_type, open = self.next() if open != '(': return None params = self._parse_parentheses() token_type, colon = self.next() annotation = None if colon in ['-', '->']: # parse annotations if colon == '-': # The Python 2 tokenizer doesn't understand this token_type, colon = self.next() if colon != '>': return None annotation, colon = self._parse_statement(added_breaks=[':']) if colon != ':': return None # because of 2 line func param definitions scope = pr.Function(self.module, fname, params, first_pos, annotation) if self.user_scope and scope != self.user_scope \ and self.user_position > first_pos: self.user_scope = scope return scope def _parse_class(self): """ The parser for a text class. Process the tokens, which follow a class definition. :return: Return a Scope representation of the tokens. :rtype: Class """ first_pos = self.start_pos token_type, cname = self.next() if token_type != tokenize.NAME: debug.warning("class: syntax err, token is not a name@%s (%s: %s)" % (self.start_pos[0], tokenize.tok_name[token_type], cname)) return None cname = pr.Name(self.module, [(cname, self.start_pos)], self.start_pos, self.end_pos) super = [] token_type, _next = self.next() if _next == '(': super = self._parse_parentheses() token_type, _next = self.next() if _next != ':': debug.warning("class syntax: %s@%s" % (cname, self.start_pos[0])) return None # because of 2 line class initializations scope = pr.Class(self.module, cname, super, first_pos) if self.user_scope and scope != self.user_scope \ and self.user_position > first_pos: self.user_scope = scope return scope def _parse_statement(self, pre_used_token=None, added_breaks=None, stmt_class=pr.Statement): """ Parses statements like:: a = test(b) a += 3 - 2 or b and so on. One line at a time. :param pre_used_token: The pre parsed token. 
:type pre_used_token: set :return: Statement + last parsed token. :rtype: (Statement, str) """ set_vars = [] used_vars = [] level = 0 # The level of parentheses if pre_used_token: token_type, tok = pre_used_token else: token_type, tok = self.next() while token_type == tokenize.COMMENT: # remove newline and comment self.next() token_type, tok = self.next() first_pos = self.start_pos opening_brackets = ['{', '(', '['] closing_brackets = ['}', ')', ']'] # the difference between "break" and "always break" is that the latter # will even break in parentheses. This is true for typical flow # commands like def and class and the imports, which will never be used # in a statement. breaks = set(['\n', ':', ')']) always_break = [';', 'import', 'from', 'class', 'def', 'try', 'except', 'finally', 'while', 'return', 'yield'] not_first_break = ['del', 'raise'] if added_breaks: breaks |= set(added_breaks) tok_list = [] while not (tok in always_break or tok in not_first_break and not tok_list or tok in breaks and level <= 0): try: # print 'parse_stmt', tok, tokenize.tok_name[token_type] tok_list.append(self._current + (self.start_pos,)) if tok == 'as': token_type, tok = self.next() if token_type == tokenize.NAME: n, token_type, tok = self._parse_dot_name(self._current) if n: set_vars.append(n) tok_list.append(n) continue elif tok in ['lambda', 'for', 'in']: # don't parse these keywords, parse later in stmt. if tok == 'lambda': breaks.discard(':') elif token_type == tokenize.NAME: n, token_type, tok = self._parse_dot_name(self._current) # removed last entry, because we add Name tok_list.pop() if n: tok_list.append(n) used_vars.append(n) continue elif tok.endswith('=') and tok not in ['>=', '<=', '==', '!=']: # there has been an assignement -> change vars if level == 0: set_vars += used_vars used_vars = [] elif tok in opening_brackets: level += 1 elif tok in closing_brackets: level -= 1 token_type, tok = self.next() except (StopIteration, common.MultiLevelStopIteration): # comes from tokenizer break if not tok_list: return None, tok # print 'new_stat', set_vars, used_vars if self.freshscope and not self.no_docstr and len(tok_list) == 1 \ and self.last_token[0] == tokenize.STRING: self._scope.add_docstr(self.last_token[1]) return None, tok else: stmt = stmt_class(self.module, set_vars, used_vars, tok_list, first_pos, self.end_pos) stmt.parent = self.top_module self._check_user_stmt(stmt) # Attribute docstring (PEP 224) support (sphinx uses it, e.g.) with common.ignored(IndexError, AttributeError): # If string literal is being parsed first_tok = stmt.token_list[0] if (not stmt.set_vars and not stmt.used_vars and len(stmt.token_list) == 1 and first_tok[0] == tokenize.STRING): # ... then set it as a docstring self._scope.statements[-1].add_docstr(first_tok[1]) if tok in always_break + not_first_break: self._gen.push_last_back() return stmt, tok def next(self): return self.__next__() def __iter__(self): return self def __next__(self): """ Generate the next tokenize pattern. 
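
        Each call advances the underlying tokenizer, updates the position
        bookkeeping and returns the current ``(token_type, token_string)``
        pair, which is how the rest of the parser consumes it::

            token_type, tok = self.next()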
""" try: typ, tok, start_pos, end_pos, self.parserline = next(self._gen) # dedents shouldn't change positions if typ != tokenize.DEDENT: self.start_pos = start_pos if typ not in (tokenize.INDENT, tokenize.NEWLINE, tokenize.NL): self.start_pos, self.end_pos = start_pos, end_pos except (StopIteration, common.MultiLevelStopIteration): # on finish, set end_pos correctly s = self._scope while s is not None: if isinstance(s, pr.Module) \ and not isinstance(s, pr.SubModule): self.module.end_pos = self.end_pos break s.end_pos = self.end_pos s = s.parent raise if self.user_position and (self.start_pos[0] == self.user_position[0] or self.user_scope is None and self.start_pos[0] >= self.user_position[0]): debug.dbg('user scope found [%s] = %s' % (self.parserline.replace('\n', ''), repr(self._scope))) self.user_scope = self._scope self.last_token = self._current self._current = (typ, tok) return self._current def _parse(self): """ The main part of the program. It analyzes the given code-text and returns a tree-like scope. For a more detailed description, see the class description. :param text: The code which should be parsed. :param type: str :raises: IndentationError """ extended_flow = ['else', 'elif', 'except', 'finally'] statement_toks = ['{', '[', '(', '`'] self._decorators = [] self.freshscope = True self.iterator = iter(self) # This iterator stuff is not intentional. It grew historically. for token_type, tok in self.iterator: self.module.temp_used_names = [] # debug.dbg('main: tok=[%s] type=[%s] indent=[%s]'\ # % (tok, tokenize.tok_name[token_type], start_position[0])) while token_type == tokenize.DEDENT and self._scope != self.module: token_type, tok = self.next() if self.start_pos[1] <= self._scope.start_pos[1]: self._scope.end_pos = self.start_pos self._scope = self._scope.parent if isinstance(self._scope, pr.Module) \ and not isinstance(self._scope, pr.SubModule): self._scope = self.module # check again for unindented stuff. this is true for syntax # errors. only check for names, because thats relevant here. If # some docstrings are not indented, I don't care. 
while self.start_pos[1] <= self._scope.start_pos[1] \ and (token_type == tokenize.NAME or tok in ['(', '['])\ and self._scope != self.module: self._scope.end_pos = self.start_pos self._scope = self._scope.parent if isinstance(self._scope, pr.Module) \ and not isinstance(self._scope, pr.SubModule): self._scope = self.module use_as_parent_scope = self.top_module if isinstance(self._scope, pr.SubModule) else self._scope first_pos = self.start_pos if tok == 'def': func = self._parse_function() if func is None: debug.warning("function: syntax error@%s" % self.start_pos[0]) continue self.freshscope = True self._scope = self._scope.add_scope(func, self._decorators) self._decorators = [] elif tok == 'class': cls = self._parse_class() if cls is None: debug.warning("class: syntax error@%s" % self.start_pos[0]) continue self.freshscope = True self._scope = self._scope.add_scope(cls, self._decorators) self._decorators = [] # import stuff elif tok == 'import': imports = self._parse_import_list() for count, (m, alias, defunct) in enumerate(imports): e = (alias or m or self).end_pos end_pos = self.end_pos if count + 1 == len(imports) else e i = pr.Import(self.module, first_pos, end_pos, m, alias, defunct=defunct) self._check_user_stmt(i) self._scope.add_import(i) if not imports: i = pr.Import(self.module, first_pos, self.end_pos, None, defunct=True) self._check_user_stmt(i) self.freshscope = False elif tok == 'from': defunct = False # take care for relative imports relative_count = 0 while True: token_type, tok = self.next() if tok != '.': break relative_count += 1 # the from import mod, token_type, tok = self._parse_dot_name(self._current) if str(mod) == 'import' and relative_count: self._gen.push_last_back() tok = 'import' mod = None if not mod and not relative_count or tok != "import": debug.warning("from: syntax error@%s" % self.start_pos[0]) defunct = True if tok != 'import': self._gen.push_last_back() names = self._parse_import_list() for count, (name, alias, defunct2) in enumerate(names): star = name is not None and name.names[0] == '*' if star: name = None e = (alias or name or self).end_pos end_pos = self.end_pos if count + 1 == len(names) else e i = pr.Import(self.module, first_pos, end_pos, name, alias, mod, star, relative_count, defunct=defunct or defunct2) self._check_user_stmt(i) self._scope.add_import(i) self.freshscope = False # loops elif tok == 'for': set_stmt, tok = self._parse_statement(added_breaks=['in']) if tok == 'in': statement, tok = self._parse_statement() if tok == ':': s = [] if statement is None else [statement] f = pr.ForFlow(self.module, s, first_pos, set_stmt) self._scope = self._scope.add_statement(f) else: debug.warning('syntax err, for flow started @%s', self.start_pos[0]) if statement is not None: statement.parent = use_as_parent_scope if set_stmt is not None: set_stmt.parent = use_as_parent_scope else: debug.warning('syntax err, for flow incomplete @%s', self.start_pos[0]) if set_stmt is not None: set_stmt.parent = use_as_parent_scope elif tok in ['if', 'while', 'try', 'with'] + extended_flow: added_breaks = [] command = tok if command in ['except', 'with']: added_breaks.append(',') # multiple inputs because of with inputs = [] first = True while first or command == 'with' \ and tok not in [':', '\n']: statement, tok = \ self._parse_statement(added_breaks=added_breaks) if command == 'except' and tok in added_breaks: # the except statement defines a var # this is only true for python 2 n, token_type, tok = self._parse_dot_name() if n: n.parent = statement 
statement.set_vars.append(n) if statement: inputs.append(statement) first = False if tok == ':': f = pr.Flow(self.module, command, inputs, first_pos) if command in extended_flow: # the last statement has to be another part of # the flow statement, because a dedent releases the # main scope, so just take the last statement. try: s = self._scope.statements[-1].set_next(f) except (AttributeError, IndexError): # If set_next doesn't exist, just add it. s = self._scope.add_statement(f) else: s = self._scope.add_statement(f) self._scope = s else: for i in inputs: i.parent = use_as_parent_scope debug.warning('syntax err, flow started @%s', self.start_pos[0]) # returns elif tok in ['return', 'yield']: s = self.start_pos self.freshscope = False # add returns to the scope func = self._scope.get_parent_until(pr.Function) if tok == 'yield': func.is_generator = True stmt, tok = self._parse_statement() if stmt is not None: stmt.parent = use_as_parent_scope try: func.returns.append(stmt) # start_pos is the one of the return statement stmt.start_pos = s except AttributeError: debug.warning('return in non-function') # globals elif tok == 'global': stmt, tok = self._parse_statement(self._current) if stmt: self._scope.add_statement(stmt) for name in stmt.used_vars: # add the global to the top, because there it is # important. self.module.add_global(name) # decorator elif tok == '@': stmt, tok = self._parse_statement() if stmt is not None: self._decorators.append(stmt) elif tok == 'pass': continue elif tok == 'assert': stmt, tok = self._parse_statement() if stmt is not None: stmt.parent = use_as_parent_scope self._scope.asserts.append(stmt) # default elif token_type in [tokenize.NAME, tokenize.STRING, tokenize.NUMBER] \ or tok in statement_toks: # this is the main part - a name can be a function or a # normal var, which can follow anything. but this is done # by the statement parser. stmt, tok = self._parse_statement(self._current) if stmt: self._scope.add_statement(stmt) self.freshscope = False else: if token_type not in [tokenize.COMMENT, tokenize.INDENT, tokenize.NEWLINE, tokenize.NL]: debug.warning('token not classified', tok, token_type, self.start_pos[0]) continue self.no_docstr = False jedi-0.7.0/jedi/dynamic.py0000664000175000017500000005132012204171717015547 0ustar daviddavid00000000000000""" To understand Python on a deeper level, |jedi| needs to understand some of the dynamic features of Python, however this probably the most complicated part: - Array modifications (e.g. ``list.append``) - Parameter completion in functions - Flow checks (e.g. ``if isinstance(a, str)`` -> a is a str) Array modifications ******************* If the content of an array (``set``/``list``) is wanted somewhere, the current module will be checked for appearances of ``arr.append``, ``arr.insert``, etc. If the ``arr`` name points to an actual array, the content will be added This can be really cpu intensive, as you can imagine. Because |jedi| has to follow **every** ``append``. However this works pretty good, because in *slow* cases, the recursion detector and other settings will stop this process. It is important to note that: 1. Array modfications work only in the current module 2. Only Array additions are being checked, ``list.pop``, etc. is being ignored. Parameter completion ******************** One of the really important features of |jedi| is to have an option to understand code like this:: def foo(bar): bar. 
# completion here foo(1) There's no doubt wheter bar is an ``int`` or not, but if there's also a call like ``foo('str')``, what would happen? Well, we'll just show both. Because that's what a human would expect. It works as follows: - A param is being encountered - search for function calls named ``foo`` - execute these calls and check the injected params. This work with a ``ParamListener``. Flow checks *********** Flow checks are not really mature. There's only a check for ``isinstance``. It would check whether a flow has the form of ``if isinstance(a, type_or_tuple)``. Unfortunately every other thing is being ignored (e.g. a == '' would be easy to check for -> a is a string). There's big potential in these checks. """ from __future__ import with_statement import os from jedi import cache from jedi import parsing_representation as pr from jedi import modules from jedi import settings from jedi import common from jedi import debug from jedi import fast_parser import api_classes import evaluate import imports import evaluate_representation as er # This is something like the sys.path, but only for searching params. It means # that this is the order in which Jedi searches params. search_param_modules = ['.'] search_param_cache = {} def get_directory_modules_for_name(mods, name): """ Search a name in the directories of modules. """ def check_python_file(path): try: return cache.parser_cache[path].parser.module except KeyError: try: return check_fs(path) except IOError: return None def check_fs(path): with open(path) as f: source = modules.source_to_unicode(f.read()) if name in source: return modules.Module(path, source).parser.module # skip non python modules mods = set(m for m in mods if m.path is None or m.path.endswith('.py')) mod_paths = set() for m in mods: mod_paths.add(m.path) yield m if settings.dynamic_params_for_other_modules: paths = set(settings.additional_dynamic_modules) for p in mod_paths: if p is not None: d = os.path.dirname(p) for entry in os.listdir(d): if entry not in mod_paths: if entry.endswith('.py'): paths.add(d + os.path.sep + entry) for p in sorted(paths): # make testing easier, sort it - same results on every interpreter c = check_python_file(p) if c is not None and c not in mods: yield c def search_param_memoize(func): """ Is only good for search params memoize, respectively the closure, because it just caches the input, not the func, like normal memoize does. """ def wrapper(*args, **kwargs): key = (args, frozenset(kwargs.items())) if key in search_param_cache: return search_param_cache[key] else: rv = func(*args, **kwargs) search_param_cache[key] = rv return rv return wrapper class ParamListener(object): """ This listener is used to get the params for a function. """ def __init__(self): self.param_possibilities = [] def execute(self, params): self.param_possibilities.append(params) @cache.memoize_default([]) def search_params(param): """ This is a dynamic search for params. If you try to complete a type: >>> def func(foo): >>> # here is the completion >>> foo >>> func(1) >>> func("") It is not known what the type is, because it cannot be guessed with recursive madness. Therefore one has to analyse the statements that are calling the function, as well as analyzing the incoming params. """ if not settings.dynamic_params: return [] def get_params_for_module(module): """ Returns the values of a param, or an empty array. 
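
        ("Values" here means the evaluated definitions of whatever the
        callers in ``module`` passed for this parameter; they are collected
        via ``evaluate.follow_statement``.)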
""" @search_param_memoize def get_posibilities(module, func_name): try: possible_stmts = module.used_names[func_name] except KeyError: return [] for stmt in possible_stmts: if isinstance(stmt, pr.Import): continue calls = _scan_statement(stmt, func_name) for c in calls: # no execution means that params cannot be set call_path = list(c.generate_call_path()) pos = c.start_pos scope = stmt.parent # this whole stuff is just to not execute certain parts # (speed improvement), basically we could just call # ``follow_call_path`` on the call_path and it would # also work. def listRightIndex(lst, value): return len(lst) - lst[-1::-1].index(value) -1 # Need to take right index, because there could be a # func usage before. i = listRightIndex(call_path, func_name) first, last = call_path[:i], call_path[i+1:] if not last and not call_path.index(func_name) != i: continue scopes = [scope] if first: scopes = evaluate.follow_call_path(iter(first), scope, pos) pos = None for scope in scopes: s = evaluate.find_name(scope, func_name, position=pos, search_global=not first, resolve_decorator=False) c = [getattr(escope, 'base_func', None) or escope.base for escope in s if escope.isinstance(er.Function, er.Class) ] if compare in c: # only if we have the correct function we execute # it, otherwise just ignore it. evaluate.follow_paths(iter(last), s, scope) return listener.param_possibilities result = [] for params in get_posibilities(module, func_name): for p in params: if str(p) == param_name: result += evaluate.follow_statement(p.parent) return result func = param.get_parent_until(pr.Function) current_module = param.get_parent_until() func_name = str(func.name) compare = func if func_name == '__init__' and isinstance(func.parent, pr.Class): func_name = str(func.parent.name) compare = func.parent # get the param name if param.assignment_details: # first assignment details, others would be a syntax error commands, op = param.assignment_details[0] else: commands = param.get_commands() offset = 1 if commands[0] in ['*', '**'] else 0 param_name = str(commands[offset].name) # add the listener listener = ParamListener() func.listeners.add(listener) result = [] # This is like backtracking: Get the first possible result. for mod in get_directory_modules_for_name([current_module], func_name): result = get_params_for_module(mod) if result: break # cleanup: remove the listener; important: should not stick. func.listeners.remove(listener) return result def check_array_additions(array): """ Just a mapper function for the internal _check_array_additions """ if not pr.Array.is_type(array._array, pr.Array.LIST, pr.Array.SET): # TODO also check for dict updates return [] is_list = array._array.type == 'list' current_module = array._array.get_parent_until() res = _check_array_additions(array, current_module, is_list) return res def _scan_statement(stmt, search_name, assignment_details=False): """ Returns the function Call that match search_name in an Array. 
""" def scan_array(arr, search_name): result = [] if arr.type == pr.Array.DICT: for key_stmt, value_stmt in arr.items(): result += _scan_statement(key_stmt, search_name) result += _scan_statement(value_stmt, search_name) else: for stmt in arr: result += _scan_statement(stmt, search_name) return result check = list(stmt.get_commands()) if assignment_details: for commands, op in stmt.assignment_details: check += commands result = [] for c in check: if isinstance(c, pr.Array): result += scan_array(c, search_name) elif isinstance(c, pr.Call): s_new = c while s_new is not None: n = s_new.name if isinstance(n, pr.Name) and search_name in n.names: result.append(c) if s_new.execution is not None: result += scan_array(s_new.execution, search_name) s_new = s_new.next return result @cache.memoize_default([]) def _check_array_additions(compare_array, module, is_list): """ Checks if a `pr.Array` has "add" statements: >>> a = [""] >>> a.append(1) """ if not settings.dynamic_array_additions or module.is_builtin(): return [] def check_calls(calls, add_name): """ Calls are processed here. The part before the call is searched and compared with the original Array. """ result = [] for c in calls: call_path = list(c.generate_call_path()) separate_index = call_path.index(add_name) if add_name == call_path[-1] or separate_index == 0: # this means that there is no execution -> [].append # or the keyword is at the start -> append() continue backtrack_path = iter(call_path[:separate_index]) position = c.start_pos scope = c.get_parent_until(pr.IsScope) found = evaluate.follow_call_path(backtrack_path, scope, position) if not compare_array in found: continue params = call_path[separate_index + 1] if not params.values: continue # no params: just ignore it if add_name in ['append', 'add']: for param in params: result += evaluate.follow_statement(param) elif add_name in ['insert']: try: second_param = params[1] except IndexError: continue else: result += evaluate.follow_statement(second_param) elif add_name in ['extend', 'update']: for param in params: iterators = evaluate.follow_statement(param) result += evaluate.get_iterator_types(iterators) return result def get_execution_parent(element, *stop_classes): """ Used to get an Instance/Execution parent """ if isinstance(element, er.Array): stmt = element._array.parent else: # is an Instance with an ArrayInstance inside stmt = element.var_args[0].var_args.parent if isinstance(stmt, er.InstanceElement): stop_classes = list(stop_classes) + [er.Function] return stmt.get_parent_until(stop_classes) temp_param_add = settings.dynamic_params_for_other_modules settings.dynamic_params_for_other_modules = False search_names = ['append', 'extend', 'insert'] if is_list else \ ['add', 'update'] comp_arr_parent = get_execution_parent(compare_array, er.Execution) possible_stmts = [] res = [] for n in search_names: try: possible_stmts += module.used_names[n] except KeyError: continue for stmt in possible_stmts: # Check if the original scope is an execution. If it is, one # can search for the same statement, that is in the module # dict. Executions are somewhat special in jedi, since they # literally copy the contents of a function. if isinstance(comp_arr_parent, er.Execution): stmt = comp_arr_parent. \ get_statement_for_position(stmt.start_pos) if stmt is None: continue # InstanceElements are special, because they don't get copied, # but have this wrapper around them. 
if isinstance(comp_arr_parent, er.InstanceElement): stmt = er.InstanceElement(comp_arr_parent.instance, stmt) if evaluate.follow_statement.push_stmt(stmt): # check recursion continue res += check_calls(_scan_statement(stmt, n), n) evaluate.follow_statement.pop_stmt() # reset settings settings.dynamic_params_for_other_modules = temp_param_add return res def check_array_instances(instance): """Used for set() and list() instances.""" if not settings.dynamic_arrays_instances: return instance.var_args ai = ArrayInstance(instance) return [ai] class ArrayInstance(pr.Base): """ Used for the usage of set() and list(). This is definitely a hack, but a good one :-) It makes it possible to use set/list conversions. """ def __init__(self, instance): self.instance = instance self.var_args = instance.var_args def iter_content(self): """ The index is here just ignored, because of all the appends, etc. lists/sets are too complicated too handle that. """ items = [] for stmt in self.var_args: for typ in evaluate.follow_statement(stmt): if isinstance(typ, er.Instance) and len(typ.var_args): array = typ.var_args[0] if isinstance(array, ArrayInstance): # prevent recursions # TODO compare Modules if self.var_args.start_pos != array.var_args.start_pos: items += array.iter_content() else: debug.warning( 'ArrayInstance recursion', self.var_args) continue items += evaluate.get_iterator_types([typ]) # TODO check if exclusion of tuple is a problem here. if isinstance(self.var_args, tuple) or self.var_args.parent is None: return [] # generated var_args should not be checked for arrays module = self.var_args.get_parent_until() is_list = str(self.instance.name) == 'list' items += _check_array_additions(self.instance, module, is_list) return items def usages(definitions, search_name, mods): def compare_array(definitions): """ `definitions` are being compared by module/start_pos, because sometimes the id's of the objects change (e.g. executions). """ result = [] for d in definitions: module = d.get_parent_until() result.append((module, d.start_pos)) return result def check_call(call): result = [] follow = [] # There might be multiple search_name's in one call_path call_path = list(call.generate_call_path()) for i, name in enumerate(call_path): # name is `pr.NamePart`. 
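            # A NamePart behaves like its plain string content in this
            # comparison, so every occurrence of search_name in the call
            # path is collected.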
if name == search_name: follow.append(call_path[:i + 1]) for f in follow: follow_res, search = evaluate.goto(call.parent, f) follow_res = usages_add_import_modules(follow_res, search) compare_follow_res = compare_array(follow_res) # compare to see if they match if any(r in compare_definitions for r in compare_follow_res): scope = call.parent result.append(api_classes.Usage(search, scope)) return result if not definitions: return set() compare_definitions = compare_array(definitions) mods |= set([d.get_parent_until() for d in definitions]) names = [] for m in get_directory_modules_for_name(mods, search_name): try: stmts = m.used_names[search_name] except KeyError: continue for stmt in stmts: if isinstance(stmt, pr.Import): count = 0 imps = [] for i in stmt.get_all_import_names(): for name_part in i.names: count += 1 if name_part == search_name: imps.append((count, name_part)) for used_count, name_part in imps: i = imports.ImportPath(stmt, kill_count=count - used_count, direct_resolve=True) f = i.follow(is_goto=True) if set(f) & set(definitions): names.append(api_classes.Usage(name_part, stmt)) else: for call in _scan_statement(stmt, search_name, assignment_details=True): names += check_call(call) return names def usages_add_import_modules(definitions, search_name): """ Adds the modules of the imports """ new = set() for d in definitions: if isinstance(d.parent, pr.Import): s = imports.ImportPath(d.parent, direct_resolve=True) with common.ignored(IndexError): new.add(s.follow(is_goto=True)[0]) return set(definitions) | new def check_flow_information(flow, search_name, pos): """ Try to find out the type of a variable just with the information that is given by the flows: e.g. It is also responsible for assert checks.:: if isinstance(k, str): k. # <- completion here ensures that `k` is a string. """ if not settings.dynamic_flow_information: return None result = [] if isinstance(flow, (pr.Scope, fast_parser.Module)) and not result: for ass in reversed(flow.asserts): if pos is None or ass.start_pos > pos: continue result = _check_isinstance_type(ass, search_name) if result: break if isinstance(flow, pr.Flow) and not result: if flow.command in ['if', 'while'] and len(flow.inputs) == 1: result = _check_isinstance_type(flow.inputs[0], search_name) return result def _check_isinstance_type(stmt, search_name): try: commands = stmt.get_commands() # this might be removed if we analyze and, etc assert len(commands) == 1 call = commands[0] assert type(call) is pr.Call and str(call.name) == 'isinstance' assert bool(call.execution) # isinstance check isinst = call.execution.values assert len(isinst) == 2 # has two params obj, classes = [statement.get_commands() for statement in isinst] assert len(obj) == 1 assert len(classes) == 1 assert isinstance(obj[0], pr.Call) # names fit? assert str(obj[0].name) == search_name assert isinstance(classes[0], pr.Call) # can be type or tuple except AssertionError: return [] result = [] for c in evaluate.follow_call(classes[0]): if isinstance(c, er.Array): result += c.get_index_types() else: result.append(c) for i, c in enumerate(result): result[i] = er.Instance(c) return result jedi-0.7.0/jedi/imports.py0000664000175000017500000003671712204171717015635 0ustar daviddavid00000000000000""" :mod:`imports` is here to resolve import statements and return the modules/classes/functions/whatever, which they stand for. However there's not any actual importing done. This module is about finding modules in the filesystem. 
This can be quite tricky sometimes, because Python imports are not always that simple. This module uses imp for python up to 3.2 and importlib for python 3.3 on; the correct implementation is delegated to _compatibility. This module also supports import autocompletion, which means to complete statements like ``from datetim`` (curser at the end would return ``datetime``). """ from __future__ import with_statement import os import pkgutil import sys import itertools from jedi._compatibility import find_module from jedi import modules from jedi import common from jedi import debug from jedi import parsing_representation as pr from jedi import cache import builtin import evaluate # for debugging purposes only imports_processed = 0 class ModuleNotFound(Exception): pass class ImportPath(pr.Base): """ An ImportPath is the path of a `pr.Import` object. """ class _GlobalNamespace(object): def __init__(self): self.start_pos = 0, 0 self.line_offset = 0 def get_defined_names(self): return [] def get_imports(self): return [] def get_parent_until(self): return None GlobalNamespace = _GlobalNamespace() def __init__(self, import_stmt, is_like_search=False, kill_count=0, direct_resolve=False, is_just_from=False): self.import_stmt = import_stmt self.is_like_search = is_like_search self.direct_resolve = direct_resolve self.is_just_from = is_just_from self.is_partial_import = bool(max(0, kill_count)) path = import_stmt.get_parent_until().path self.file_path = os.path.dirname(path) if path is not None else None # rest is import_path resolution self.import_path = [] if import_stmt.from_ns: self.import_path += import_stmt.from_ns.names if import_stmt.namespace: if self._is_nested_import() and not direct_resolve: self.import_path.append(import_stmt.namespace.names[0]) else: self.import_path += import_stmt.namespace.names for i in range(kill_count + int(is_like_search)): self.import_path.pop() def __repr__(self): return '<%s: %s>' % (type(self).__name__, self.import_stmt) def _is_nested_import(self): """ This checks for the special case of nested imports, without aliases and from statement:: import foo.bar """ return not self.import_stmt.alias and not self.import_stmt.from_ns \ and len(self.import_stmt.namespace.names) > 1 \ and not self.direct_resolve def _get_nested_import(self, parent): """ See documentation of `self._is_nested_import`. Generates an Import statement, that can be used to fake nested imports. """ i = self.import_stmt # This is not an existing Import statement. Therefore, set position to # 0 (0 is not a valid line number). 
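        # Sketch: for ``import foo.bar`` the namespace names are
        # ['foo', 'bar']; the generated fake import covers only ['bar'] and
        # gets ``parent`` (the already resolved module of ``foo``) as its
        # parent.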
zero = (0, 0) names = i.namespace.names[1:] n = pr.Name(i._sub_module, names, zero, zero, self.import_stmt) new = pr.Import(i._sub_module, zero, zero, n) new.parent = parent debug.dbg('Generated a nested import: %s' % new) return new def get_defined_names(self, on_import_stmt=False): names = [] for scope in self.follow(): if scope is ImportPath.GlobalNamespace: if self._is_relative_import() == 0: names += self._get_module_names() if self.file_path is not None: path = os.path.abspath(self.file_path) for i in range(self.import_stmt.relative_count - 1): path = os.path.dirname(path) names += self._get_module_names([path]) if self._is_relative_import(): rel_path = self._get_relative_path() + '/__init__.py' with common.ignored(IOError): m = modules.Module(rel_path) names += m.parser.module.get_defined_names() else: if on_import_stmt and isinstance(scope, pr.Module) \ and scope.path.endswith('__init__.py'): pkg_path = os.path.dirname(scope.path) paths = self._namespace_packages(pkg_path, self.import_path) names += self._get_module_names([pkg_path] + paths) if self.is_just_from: # In the case of an import like `from x.` we don't need to # add all the variables. if ['os'] == self.import_path and not self._is_relative_import(): # os.path is a hardcoded exception, because it's a # ``sys.modules`` modification. p = (0, 0) names.append(pr.Name(self.GlobalNamespace, [('path', p)], p, p, self.import_stmt)) continue for s, scope_names in evaluate.get_names_of_scope(scope, include_builtin=False): for n in scope_names: if self.import_stmt.from_ns is None \ or self.is_partial_import: # from_ns must be defined to access module # values plus a partial import means that there # is something after the import, which # automatically implies that there must not be # any non-module scope. continue names.append(n) return names def _get_module_names(self, search_path=None): """ Get the names of all modules in the search_path. This means file names and not names defined in the files. """ def generate_name(name): return pr.Name(self.GlobalNamespace, [(name, inf_pos)], inf_pos, inf_pos, self.import_stmt) names = [] inf_pos = float('inf'), float('inf') # add builtin module names if search_path is None: names += [generate_name(name) for name in sys.builtin_module_names] if search_path is None: search_path = self._sys_path_with_modifications() for module_loader, name, is_pkg in pkgutil.iter_modules(search_path): names.append(generate_name(name)) return names def _sys_path_with_modifications(self): # If you edit e.g. gunicorn, there will be imports like this: # `from gunicorn import something`. But gunicorn is not in the # sys.path. Therefore look if gunicorn is a parent directory, #56. in_path = [] if self.import_path: parts = self.file_path.split(os.path.sep) for i, p in enumerate(parts): if p == self.import_path[0]: new = os.path.sep.join(parts[:i]) in_path.append(new) module = self.import_stmt.get_parent_until() return in_path + modules.sys_path_with_modifications(module) def follow(self, is_goto=False): """ Returns the imported modules. 
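
        The result is a list of scopes.  If the module cannot be found (or a
        recursion is detected) an empty list is returned; if there is no
        import path left to follow, ``[ImportPath.GlobalNamespace]`` is
        returned instead.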
""" if evaluate.follow_statement.push_stmt(self.import_stmt): # check recursion return [] if self.import_path: try: scope, rest = self._follow_file_system() except ModuleNotFound: debug.warning('Module not found: ' + str(self.import_stmt)) evaluate.follow_statement.pop_stmt() return [] scopes = [scope] scopes += remove_star_imports(scope) # follow the rest of the import (not FS -> classes, functions) if len(rest) > 1 or rest and self.is_like_search: scopes = [] if ['os', 'path'] == self.import_path[:2] \ and not self._is_relative_import(): # This is a huge exception, we follow a nested import # ``os.path``, because it's a very important one in Python # that is being achieved by messing with ``sys.modules`` in # ``os``. scopes = evaluate.follow_path(iter(rest), scope, scope) elif rest: if is_goto: scopes = itertools.chain.from_iterable( evaluate.find_name(s, rest[0], is_goto=True) for s in scopes) else: scopes = itertools.chain.from_iterable( evaluate.follow_path(iter(rest), s, s) for s in scopes) scopes = list(scopes) if self._is_nested_import(): scopes.append(self._get_nested_import(scope)) else: scopes = [ImportPath.GlobalNamespace] debug.dbg('after import', scopes) evaluate.follow_statement.pop_stmt() return scopes def _is_relative_import(self): return bool(self.import_stmt.relative_count) def _get_relative_path(self): path = self.file_path for i in range(self.import_stmt.relative_count - 1): path = os.path.dirname(path) return path def _namespace_packages(self, found_path, import_path): """ Returns a list of paths of possible ``pkgutil``/``pkg_resources`` namespaces. If the package is no "namespace package", an empty list is returned. """ def follow_path(directories, paths): try: directory = next(directories) except StopIteration: return paths else: deeper_paths = [] for p in paths: new = os.path.join(p, directory) if os.path.isdir(new) and new != found_path: deeper_paths.append(new) return follow_path(directories, deeper_paths) with open(os.path.join(found_path, '__init__.py')) as f: content = f.read() # these are strings that need to be used for namespace packages, # the first one is ``pkgutil``, the second ``pkg_resources``. options = 'declare_namespace(__name__)', 'extend_path(__path__' if options[0] in content or options[1] in content: # It is a namespace, now try to find the rest of the modules. return follow_path(iter(import_path), sys.path) return [] def _follow_file_system(self): """ Find a module with a path (of the module, like usb.backend.libusb10). """ def follow_str(ns_path, string): debug.dbg('follow_module', ns_path, string) path = None if ns_path: path = ns_path elif self._is_relative_import(): path = self._get_relative_path() global imports_processed imports_processed += 1 if path is not None: importing = find_module(string, [path]) else: debug.dbg('search_module', string, self.file_path) # Override the sys.path. It works only good that way. # Injecting the path directly into `find_module` did not work. sys.path, temp = sys_path_mod, sys.path try: importing = find_module(string) finally: sys.path = temp return importing if self.file_path: sys_path_mod = list(self._sys_path_with_modifications()) module = self.import_stmt.get_parent_until() if not module.has_explicit_absolute_import: # If the module explicitly asks for absolute imports, # there's probably a bogus local one. 
sys_path_mod.insert(0, self.file_path) else: sys_path_mod = list(modules.get_sys_path()) def module_not_found(): raise ModuleNotFound('The module you searched has not been found') current_namespace = (None, None, None) # now execute those paths rest = [] for i, s in enumerate(self.import_path): try: current_namespace = follow_str(current_namespace[1], s) except ImportError: _continue = False if self._is_relative_import() and len(self.import_path) == 1: # follow `from . import some_variable` rel_path = self._get_relative_path() with common.ignored(ImportError): current_namespace = follow_str(rel_path, '__init__') elif current_namespace[2]: # is a package for n in self._namespace_packages(current_namespace[1], self.import_path[:i]): try: current_namespace = follow_str(n, s) if current_namespace[1]: _continue = True break except ImportError: pass if not _continue: if current_namespace[1]: rest = self.import_path[i:] break else: module_not_found() if current_namespace == (None, None, False): module_not_found() sys_path_mod.pop(0) # TODO why is this here? path = current_namespace[1] is_package_directory = current_namespace[2] f = None if is_package_directory or current_namespace[0]: # is a directory module if is_package_directory: path += '/__init__.py' with open(path) as f: source = f.read() else: source = current_namespace[0].read() current_namespace[0].close() if path.endswith('.py'): f = modules.Module(path, source) else: f = builtin.BuiltinModule(path=path) else: f = builtin.BuiltinModule(name=path) return f.parser.module, rest def strip_imports(scopes): """ Here we strip the imports - they don't get resolved necessarily. Really used anymore? Merge with remove_star_imports? """ result = [] for s in scopes: if isinstance(s, pr.Import): result += ImportPath(s).follow() else: result.append(s) return result @cache.cache_star_import def remove_star_imports(scope, ignored_modules=()): """ Check a module for star imports: >>> from module import * and follow these modules. """ modules = strip_imports(i for i in scope.get_imports() if i.star) new = [] for m in modules: if m not in ignored_modules: new += remove_star_imports(m, modules) modules += new # Filter duplicate modules. return set(modules) jedi-0.7.0/jedi/debug.py0000664000175000017500000000312012204171717015204 0ustar daviddavid00000000000000from _compatibility import u, encoding, is_py3k import inspect import time try: # Use colorama for nicer console output. from colorama import Fore, init init() except ImportError: class Fore(object): RED = '' GREEN = '' YELLOW = '' RESET = '' NOTICE = object() WARNING = object() SPEED = object() enable_speed = False enable_warning = False enable_notice = False # callback, interface: level, str debug_function = None ignored_modules = ['parsing', 'builtin', 'jedi.builtin', 'jedi.parsing'] def reset_time(): global start_time start_time = time.time() def dbg(*args): """ Looks at the stack, to see if a debug message should be printed. 
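
    Debug output is off by default; to actually see it, a client can do
    something like::

        from jedi import debug
        debug.debug_function = debug.print_to_stdout
        debug.enable_notice = True
        debug.enable_warning = True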
""" if debug_function and enable_notice: frm = inspect.stack()[1] mod = inspect.getmodule(frm[0]) if not (mod.__name__ in ignored_modules): debug_function(NOTICE, 'dbg: ' + ', '.join(u(a) for a in args)) def warning(*args): if debug_function and enable_warning: debug_function(WARNING, 'warning: ' + ', '.join(u(a) for a in args)) def speed(name): if debug_function and enable_speed: now = time.time() debug_function(SPEED, 'speed: ' + '%s %s' % (name, now - start_time)) def print_to_stdout(level, str_out): """ The default debug function """ if level == NOTICE: col = Fore.GREEN elif level == WARNING: col = Fore.RED else: col = Fore.YELLOW if not is_py3k: str_out = str_out.encode(encoding, 'replace') print(col + str_out + Fore.RESET) # debug_function = print_to_stdout jedi-0.7.0/jedi/cache.py0000664000175000017500000002424012204171717015167 0ustar daviddavid00000000000000""" This caching is very important for speed and memory optimizations. There's nothing really spectacular, just some decorators. The following cache types are available: - module caching (`load_module` and `save_module`), which uses pickle and is really important to assure low load times of modules like ``numpy``. - the popular ``memoize_default`` works like a typical memoize and returns the default otherwise. - ``CachedMetaClass`` uses ``memoize_default`` to do the same with classes. - ``time_cache`` can be used to cache something for just a limited time span, which can be useful if there's user interaction and the user cannot react faster than a certain time. This module is one of the reasons why |jedi| is not thread-safe. As you can see there are global variables, which are holding the cache information. Some of these variables are being cleaned after every API usage. """ from __future__ import with_statement import time import os import sys import json import hashlib try: import cPickle as pickle except: import pickle import shutil from jedi import settings from jedi import common from jedi import debug # memoize caches will be deleted after every action memoize_caches = [] time_caches = [] star_import_cache = {} # for fast_parser, should not be deleted parser_cache = {} class ParserCacheItem(object): def __init__(self, parser, change_time=None): self.parser = parser if change_time is None: change_time = time.time() self.change_time = change_time def clear_caches(delete_all=False): """ Jedi caches many things, that should be completed after each completion finishes. :param delete_all: Deletes also the cache that is normally not deleted, like parser cache, which is important for faster parsing. """ global memoize_caches, time_caches # memorize_caches must never be deleted, because the dicts will get lost in # the wrappers. for m in memoize_caches: m.clear() if delete_all: time_caches = [] star_import_cache.clear() parser_cache.clear() else: # normally just kill the expired entries, not all for tc in time_caches: # check time_cache for expired entries for key, (t, value) in list(tc.items()): if t < time.time(): # delete expired entries del tc[key] def memoize_default(default=None, cache=memoize_caches): """ This is a typical memoization decorator, BUT there is one difference: To prevent recursion it sets defaults. Preventing recursion is in this case the much bigger use than speed. I don't think, that there is a big speed difference, but there are many cases where recursion could happen (think about a = b; b = a). 
""" def func(function): memo = {} cache.append(memo) def wrapper(*args, **kwargs): key = (args, frozenset(kwargs.items())) if key in memo: return memo[key] else: memo[key] = default rv = function(*args, **kwargs) memo[key] = rv return rv return wrapper return func class CachedMetaClass(type): """ This is basically almost the same than the decorator above, it just caches class initializations. I haven't found any other way, so I do it with meta classes. """ @memoize_default() def __call__(self, *args, **kwargs): return super(CachedMetaClass, self).__call__(*args, **kwargs) def time_cache(time_add_setting): """ This decorator works as follows: Call it with a setting and after that use the function with a callable that returns the key. But: This function is only called if the key is not available. After a certain amount of time (`time_add_setting`) the cache is invalid. """ def _temp(key_func): dct = {} time_caches.append(dct) def wrapper(optional_callable, *args, **kwargs): key = key_func(*args, **kwargs) value = None if key in dct: expiry, value = dct[key] if expiry > time.time(): return value value = optional_callable() time_add = getattr(settings, time_add_setting) if key is not None: dct[key] = time.time() + time_add, value return value return wrapper return _temp @time_cache("function_definition_validity") def cache_function_definition(stmt): module_path = stmt.get_parent_until().path return None if module_path is None else (module_path, stmt.start_pos) def cache_star_import(func): def wrapper(scope, *args, **kwargs): with common.ignored(KeyError): mods = star_import_cache[scope] if mods[0] + settings.star_import_cache_validity > time.time(): return mods[1] # cache is too old and therefore invalid or not available invalidate_star_import_cache(scope) mods = func(scope, *args, **kwargs) star_import_cache[scope] = time.time(), mods return mods return wrapper def invalidate_star_import_cache(module, only_main=False): """ Important if some new modules are being reparsed """ with common.ignored(KeyError): t, mods = star_import_cache[module] del star_import_cache[module] for m in mods: invalidate_star_import_cache(m, only_main=True) if not only_main: # We need a list here because otherwise the list is being changed # during the iteration in py3k: iteritems -> items. for key, (t, mods) in list(star_import_cache.items()): if module in mods: invalidate_star_import_cache(key) def load_module(path, name): """ Returns the module or None, if it fails. """ if path is None and name is None: return None tim = os.path.getmtime(path) if path else None n = name if path is None else path try: parser_cache_item = parser_cache[n] if not path or tim <= parser_cache_item.change_time: return parser_cache_item.parser else: # In case there is already a module cached and this module # has to be reparsed, we also need to invalidate the import # caches. invalidate_star_import_cache(parser_cache_item.parser.module) except KeyError: if settings.use_filesystem_cache: return ModulePickling.load_module(n, tim) def save_module(path, name, parser, pickling=True): try: p_time = None if not path else os.path.getmtime(path) except OSError: p_time = None pickling = False n = name if path is None else path item = ParserCacheItem(parser, p_time) parser_cache[n] = item if settings.use_filesystem_cache and pickling: ModulePickling.save_module(n, item) class _ModulePickling(object): version = 3 """ Version number (integer) for file system cache. 
Increment this number when there are any incompatible changes in parser representation classes. For example, the following changes are regarded as incompatible. - Class name is changed. - Class is moved to another module. - Defined slot of the class is changed. """ def __init__(self): self.__index = None self.py_tag = 'cpython-%s%s' % sys.version_info[:2] """ Short name for distinguish Python implementations and versions. It's like `sys.implementation.cache_tag` but for Python < 3.3 we generate something similar. See: http://docs.python.org/3/library/sys.html#sys.implementation .. todo:: Detect interpreter (e.g., PyPy). """ def load_module(self, path, original_changed_time): try: pickle_changed_time = self._index[path] except KeyError: return None if original_changed_time is not None \ and pickle_changed_time < original_changed_time: # the pickle file is outdated return None with open(self._get_hashed_path(path), 'rb') as f: parser_cache_item = pickle.load(f) debug.dbg('pickle loaded', path) parser_cache[path] = parser_cache_item return parser_cache_item.parser def save_module(self, path, parser_cache_item): self.__index = None try: files = self._index except KeyError: files = {} self._index = files with open(self._get_hashed_path(path), 'wb') as f: pickle.dump(parser_cache_item, f, pickle.HIGHEST_PROTOCOL) files[path] = parser_cache_item.change_time self._flush_index() @property def _index(self): if self.__index is None: try: with open(self._get_path('index.json')) as f: data = json.load(f) except (IOError, ValueError): self.__index = {} else: # 0 means version is not defined (= always delete cache): if data.get('version', 0) != self.version: self.delete_cache() self.__index = {} else: self.__index = data['index'] return self.__index def _remove_old_modules(self): # TODO use change = False if change: self._flush_index(self) self._index # reload index def _flush_index(self): data = {'version': self.version, 'index': self._index} with open(self._get_path('index.json'), 'w') as f: json.dump(data, f) self.__index = None def delete_cache(self): shutil.rmtree(self._cache_directory()) def _get_hashed_path(self, path): return self._get_path('%s.pkl' % hashlib.md5(path.encode("utf-8")).hexdigest()) def _get_path(self, file): dir = self._cache_directory() if not os.path.exists(dir): os.makedirs(dir) return os.path.join(dir, file) def _cache_directory(self): return os.path.join(settings.cache_directory, self.py_tag) # is a singleton ModulePickling = _ModulePickling() jedi-0.7.0/setup.cfg0000664000175000017500000000007312204171764014460 0ustar daviddavid00000000000000[egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 jedi-0.7.0/.coveragerc0000664000175000017500000000064512143361722014762 0ustar daviddavid00000000000000[run] omit = jedi/_compatibility.py [report] # Regexes for lines to exclude from consideration exclude_lines = # Don't complain about missing debug-only code: def __repr__ if self\.debug # Don't complain if tests don't hit defensive assertion code: raise AssertionError raise NotImplementedError # Don't complain if non-runnable code isn't run: if 0: if __name__ == .__main__.: jedi-0.7.0/conftest.py0000664000175000017500000000205612143361722015036 0ustar daviddavid00000000000000import tempfile import shutil import jedi collect_ignore = ["setup.py"] # The following hooks (pytest_configure, pytest_unconfigure) are used # to modify `jedi.settings.cache_directory` because `clean_jedi_cache` # has no effect during doctests. 
# Without these hooks, doctests would use the user's cache
# (e.g., ~/.cache/jedi/).  We should remove this workaround once the
# problem is fixed in py.test.
#
# See:
# - https://github.com/davidhalter/jedi/pull/168
# - https://bitbucket.org/hpk42/pytest/issue/275/

jedi_cache_directory_orig = None
jedi_cache_directory_temp = None


def pytest_configure(config):
    global jedi_cache_directory_orig, jedi_cache_directory_temp
    jedi_cache_directory_orig = jedi.settings.cache_directory
    jedi_cache_directory_temp = tempfile.mkdtemp(prefix='jedi-test-')
    jedi.settings.cache_directory = jedi_cache_directory_temp


def pytest_unconfigure(config):
    global jedi_cache_directory_orig, jedi_cache_directory_temp
    jedi.settings.cache_directory = jedi_cache_directory_orig
    shutil.rmtree(jedi_cache_directory_temp)
jedi-0.7.0/setup.py0000775000175000017500000000322712204171717014356 0ustar daviddavid00000000000000
#!/usr/bin/env python

from __future__ import with_statement

try:
    from setuptools import setup
except ImportError:
    # Distribute is not actually required to install
    from distutils.core import setup

__AUTHOR__ = 'David Halter'
__AUTHOR_EMAIL__ = 'davidhalter88@gmail.com'

readme = open('README.rst').read() + '\n\n' + open('CHANGELOG.rst').read()

import jedi

VERSION = '.'.join(str(x) for x in jedi.__version__)

setup(name='jedi',
      version=VERSION,
      description='An autocompletion tool for Python that can be used for text editors.',
      author=__AUTHOR__,
      author_email=__AUTHOR_EMAIL__,
      maintainer=__AUTHOR__,
      maintainer_email=__AUTHOR_EMAIL__,
      url='https://github.com/davidhalter/jedi',
      license='MIT',
      keywords='python completion refactoring vim',
      long_description=readme,
      packages=['jedi'],
      package_data={'jedi': ['mixin/*.pym']},
      platforms=['any'],
      classifiers=[
          'Development Status :: 4 - Beta',
          'Environment :: Plugins',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Operating System :: OS Independent',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.2',
          'Programming Language :: Python :: 3.3',
          'Topic :: Software Development :: Libraries :: Python Modules',
          'Topic :: Text Editors :: Integrated Development Environments (IDE)',
          'Topic :: Utilities',
      ],
      )
jedi-0.7.0/README.rst0000664000175000017500000001031012204171716014320 0ustar daviddavid00000000000000
###################################################
Jedi - an awesome autocompletion library for Python
###################################################

.. image:: https://secure.travis-ci.org/davidhalter/jedi.png?branch=master
    :target: http://travis-ci.org/davidhalter/jedi
    :alt: Travis-CI build status

.. image:: https://coveralls.io/repos/davidhalter/jedi/badge.png?branch=master
    :target: https://coveralls.io/r/davidhalter/jedi
    :alt: Coverage Status

.. image:: https://pypip.in/d/jedi/badge.png
    :target: https://crate.io/packages/jedi/

Jedi is an autocompletion tool for Python that can be used in IDEs/editors.
Jedi works. Jedi is fast. It understands all of the basic Python syntax
elements, including many builtin functions.

Additionally, Jedi supports two different goto functions, renaming, Pydoc
and some other IDE features.

Jedi uses a very simple API to connect with IDEs. There's a reference
implementation as a `VIM-Plugin `_, which uses Jedi's autocompletion.
I encourage you to use Jedi in your IDEs. It's really easy. If there are
any problems (also with licensing), just contact me.
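As a rough illustration (a minimal sketch, not from the original README; the
source string and file name below are invented), a completion request through
that API looks roughly like this::

    import jedi

    source = "import json\njson.lo"
    script = jedi.Script(source, 2, len("json.lo"), 'example.py')
    for c in script.completions():
        print(c.name)  # e.g. 'load', 'loads'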
Jedi can be used with the following editors:

- Vim (jedi-vim_, YouCompleteMe_)
- Emacs (Jedi.el_)
- Sublime Text (SublimeJEDI_ [ST2 + ST3], anaconda_ [only ST3])

And it powers the following projects:

- wdb_

Here are some pictures:

.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_complete.png

Completion for almost anything (Ctrl+Space).

.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_function.png

Display of function/class bodies, docstrings.

.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_pydoc.png

Pydoc support (with highlighting, Shift+k).

There is also support for goto and renaming.

Get the latest version from `github `_ (master branch should always be
kind of stable/working).

Docs are available at `https://jedi.readthedocs.org/ `_. Pull requests with
documentation enhancements and/or fixes are awesome and most welcome. Jedi
uses `semantic versioning `_.

Installation
============

    pip install jedi

Note: This just installs the Jedi library, not the editor plugins. For
information about how to make it work with your editor, refer to the
corresponding documentation.

You don't want to use ``pip``? Please refer to the `manual `_.

Feature Support and Caveats
===========================

Jedi really understands your Python code. For a comprehensive list of what
Jedi can do, see: `Features `_. A list of caveats can be found on the same
page.

You can run Jedi on CPython 2.6, 2.7, 3.2 or 3.3, but it should also
understand/parse code older than those versions.

Tips on how to use Jedi efficiently can be found `here `_.

API for IDEs
============

It's very easy to create an editor plugin that uses Jedi. See `Plugin API `_
for more information.

Development
===========

There's a pretty good and extensive `development documentation `_.

Testing
=======

The test suite depends on ``tox`` and ``pytest``::

    pip install tox pytest

To run the tests for all supported Python versions::

    tox

If you want to test only a specific Python version (e.g. Python 2.7), it's as
easy as ::

    tox -e py27

Tests are also run automatically on `Travis CI `_.

For more detailed information visit the `testing documentation `_.

.. _jedi-vim: https://github.com/davidhalter/jedi-vim
.. _youcompleteme: http://valloric.github.io/YouCompleteMe/
.. _Jedi.el: https://github.com/tkf/emacs-jedi
.. _sublimejedi: https://github.com/srusskih/SublimeJEDI
.. _anaconda: https://github.com/DamnWidget/anaconda
.. _wdb: https://github.com/Kozea/wdb
jedi-0.7.0/CHANGELOG.rst0000664000175000017500000000072412204171716014660 0ustar daviddavid00000000000000
.. :changelog:

Changelog
---------

0.7.0 (2013-08-09)
++++++++++++++++++

* switched from LGPL to MIT license
* added an Interpreter class to the API to make autocompletion in REPL possible.
* added autocompletion support for namespace packages * add sith.py, a new random testing method 0.6.0 (2013-05-14) ++++++++++++++++++ * much faster parser with builtin part caching * a test suite, thanks @tkf 0.5 versions (2012) +++++++++++++++++++ * Initial development jedi-0.7.0/LICENSE.txt0000664000175000017500000000212712204171716014461 0ustar daviddavid00000000000000The MIT License (MIT) Copyright (c) <2013> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. jedi-0.7.0/jedi.egg-info/0000775000175000017500000000000012204171764015244 5ustar daviddavid00000000000000jedi-0.7.0/jedi.egg-info/PKG-INFO0000664000175000017500000001562612204171763016352 0ustar daviddavid00000000000000Metadata-Version: 1.1 Name: jedi Version: 0.7.0 Summary: An autocompletion tool for Python that can be used for text editors. Home-page: https://github.com/davidhalter/jedi Author: David Halter Author-email: davidhalter88@gmail.com License: MIT Description: ################################################### Jedi - an awesome autocompletion library for Python ################################################### .. image:: https://secure.travis-ci.org/davidhalter/jedi.png?branch=master :target: http://travis-ci.org/davidhalter/jedi :alt: Travis-CI build status .. image:: https://coveralls.io/repos/davidhalter/jedi/badge.png?branch=master :target: https://coveralls.io/r/davidhalter/jedi :alt: Coverage Status .. image:: https://pypip.in/d/jedi/badge.png :target: https://crate.io/packages/jedi/ Jedi is an autocompletion tool for Python that can be used in IDEs/editors. Jedi works. Jedi is fast. It understands all of the basic Python syntax elements including many builtin functions. Additionaly, Jedi suports two different goto functions and has support for renaming as well as Pydoc support and some other IDE features. Jedi uses a very simple API to connect with IDE's. There's a reference implementation as a `VIM-Plugin `_, which uses Jedi's autocompletion. I encourage you to use Jedi in your IDEs. It's really easy. If there are any problems (also with licensing), just contact me. Jedi can be used with the following editors: - Vim (jedi-vim_, YouCompleteMe_) - Emacs (Jedi.el_) - Sublime Text (SublimeJEDI_ [ST2 + ST3], anaconda_ [only ST3]) And it powers the following projects: - wdb_ Here are some pictures: .. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_complete.png Completion for almost anything (Ctrl+Space). .. 
image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_function.png Display of function/class bodies, docstrings. .. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_pydoc.png Pydoc support (with highlighting, Shift+k). There is also support for goto and renaming. Get the latest version from `github `_ (master branch should always be kind of stable/working). Docs are available at `https://jedi.readthedocs.org/ `_. Pull requests with documentation enhancements and/or fixes are awesome and most welcome. Jedi uses `semantic versioning `_. Installation ============ pip install jedi Note: This just installs the Jedi library, not the editor plugins. For information about how to make it work with your editor, refer to the corresponding documentation. You don't want to use ``pip``? Please refer to the `manual `_. Feature Support and Caveats =========================== Jedi really understands your Python code. For a comprehensive list what Jedi can do, see: `Features `_. A list of caveats can be found on the same page. You can run Jedi on cPython 2.6, 2.7, 3.2 or 3.3, but it should also understand/parse code older than those versions. Tips on how to use Jedi efficiently can be found `here `_. API for IDEs ============ It's very easy to create an editor plugin that uses Jedi. See `Plugin API `_ for more information. Development =========== There's a pretty good and extensive `development documentation `_. Testing ======= The test suite depends on ``tox`` and ``pytest``:: pip install tox pytest To run the tests for all supported Python versions:: tox If you want to test only a specific Python version (e.g. Python 2.7), it's as easy as :: tox -e py27 Tests are also run automatically on `Travis CI `_. For more detailed information visit the `testing documentation `_ .. _jedi-vim: https://github.com/davidhalter/jedi-vim .. _youcompleteme: http://valloric.github.io/YouCompleteMe/ .. _Jedi.el: https://github.com/tkf/emacs-jedi .. _sublimejedi: https://github.com/srusskih/SublimeJEDI .. _anaconda: https://github.com/DamnWidget/anaconda .. _wdb: https://github.com/Kozea/wdb .. :changelog: Changelog --------- 0.7.0 (2013-08-09) ++++++++++++++++++ * switched from LGPL to MIT license * added an Interpreter class to the API to make autocompletion in REPL possible. 
* added autocompletion support for namespace packages * add sith.py, a new random testing method 0.6.0 (2013-05-14) ++++++++++++++++++ * much faster parser with builtin part caching * a test suite, thanks @tkf 0.5 versions (2012) +++++++++++++++++++ * Initial development Keywords: python completion refactoring vim Platform: any Classifier: Development Status :: 4 - Beta Classifier: Environment :: Plugins Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: MIT License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.2 Classifier: Programming Language :: Python :: 3.3 Classifier: Topic :: Software Development :: Libraries :: Python Modules Classifier: Topic :: Text Editors :: Integrated Development Environments (IDE) Classifier: Topic :: Utilities jedi-0.7.0/jedi.egg-info/dependency_links.txt0000664000175000017500000000000112204171763021311 0ustar daviddavid00000000000000 jedi-0.7.0/jedi.egg-info/top_level.txt0000664000175000017500000000000512204171763017770 0ustar daviddavid00000000000000jedi jedi-0.7.0/jedi.egg-info/SOURCES.txt0000664000175000017500000000606412204171764017136 0ustar daviddavid00000000000000.coveragerc AUTHORS.txt CHANGELOG.rst LICENSE.txt MANIFEST.in README.rst conftest.py pytest.ini setup.py sith.py tox.ini jedi/__init__.py jedi/__main__.py jedi/_compatibility.py jedi/api.py jedi/api_classes.py jedi/builtin.py jedi/cache.py jedi/common.py jedi/debug.py jedi/docstrings.py jedi/dynamic.py jedi/evaluate.py jedi/evaluate_representation.py jedi/fast_parser.py jedi/helpers.py jedi/imports.py jedi/interpret.py jedi/keywords.py jedi/modules.py jedi/parsing.py jedi/parsing_representation.py jedi/recursion.py jedi/refactoring.py jedi/replstartup.py jedi/settings.py jedi/tokenizer.py jedi/utils.py jedi.egg-info/PKG-INFO jedi.egg-info/SOURCES.txt jedi.egg-info/dependency_links.txt jedi.egg-info/top_level.txt jedi/mixin/_functools.pym jedi/mixin/_sqlite3.pym jedi/mixin/_sre.pym jedi/mixin/_weakref.pym jedi/mixin/builtins.pym jedi/mixin/datetime.pym jedi/mixin/posix.pym test/__init__.py test/conftest.py test/helpers.py test/refactor.py test/run.py test/test_absolute_import.py test/test_api.py test/test_api_classes.py test/test_cache.py test/test_call_signatures.py test/test_defined_names.py test/test_docstring.py test/test_fast_parser.py test/test_full_name.py test/test_integration.py test/test_integration_import.py test/test_integration_keyword.py test/test_interpreter.py test/test_jedi_system.py test/test_namespace_package.py test/test_parsing.py test/test_regression.py test/test_speed.py test/test_unicode.py test/test_utils.py test/absolute_import/local_module.py test/absolute_import/unittest.py test/completion/__init__.py test/completion/arrays.py test/completion/basic.py test/completion/classes.py test/completion/complex.py test/completion/decorators.py test/completion/definition.py test/completion/descriptors.py test/completion/docstring.py test/completion/dynamic.py test/completion/functions.py test/completion/generators.py test/completion/goto.py test/completion/imports.py test/completion/invalid.py test/completion/isinstance.py test/completion/keywords.py test/completion/named_param.py test/completion/ordering.py test/completion/std.py test/completion/sys_path.py test/completion/types.py 
test/completion/usages.py test/completion/import_tree/__init__.py test/completion/import_tree/mod1.py test/completion/import_tree/mod2.py test/completion/import_tree/random.py test/completion/import_tree/recurse_class1.py test/completion/import_tree/recurse_class2.py test/completion/import_tree/rename1.py test/completion/import_tree/rename2.py test/completion/import_tree/pkg/__init__.py test/completion/import_tree/pkg/mod1.py test/completion/thirdparty/PyQt4_.py test/completion/thirdparty/django_.py test/completion/thirdparty/jedi_.py test/completion/thirdparty/psycopg2_.py test/completion/thirdparty/pylab_.py test/namespace_package/ns1/pkg/__init__.py test/namespace_package/ns1/pkg/ns1_file.py test/namespace_package/ns1/pkg/ns1_folder/__init__.py test/namespace_package/ns2/pkg/ns2_file.py test/namespace_package/ns2/pkg/ns2_folder/__init__.py test/namespace_package/ns2/pkg/ns2_folder/nested/__init__.py test/refactor/extract.py test/refactor/inline.py test/refactor/rename.py