jedi-0.9.0/
jedi-0.9.0/pytest.ini
[pytest] addopts = --doctest-modules # Ignore broken files in blackbox test directories norecursedirs = .* docs completion refactor absolute_import namespace_package scripts extensions speed static_analysis not_in_sys_path buildout_project egg-link init_extension_module # Activate `clean_jedi_cache` fixture for all tests. This should be # fine as long as we are using `clean_jedi_cache` as a session scoped # fixture. usefixtures = clean_jedi_cache
jedi-0.9.0/sith.py
#!/usr/bin/env python """ Sith attacks (and helps debugging) Jedi. Randomly searches Python files and runs Jedi on them. Exceptions and the arguments used are recorded to ``./record.json`` (specified by --record):: ./sith.py random /path/to/sourcecode Redo recorded exception:: ./sith.py redo Show recorded exception:: ./sith.py show Run a specific operation:: ./sith.py run <operation> <path> <line> <column> Where operation is one of completions, goto_assignments, goto_definitions, usages, or call_signatures. Note: Line numbers start at 1; columns start at 0 (this is consistent with many text editors, including Emacs). Usage: sith.py [--pdb|--ipdb|--pudb] [-d] [-n=<nr>] [-f] [--record=<file>] random [-s] [<path>] sith.py [--pdb|--ipdb|--pudb] [-d] [-f] [--record=<file>] redo sith.py [--pdb|--ipdb|--pudb] [-d] [-f] run <operation> <path> <line> <column> sith.py show [--record=<file>] sith.py -h | --help Options: -h --help Show this screen. --record=<file> Exceptions are recorded in here [default: record.json]. -f, --fs-cache By default, file system cache is off for reproducibility. -n, --maxtries=<nr> Maximum number of random tries [default: 100] -d, --debug Jedi prints debugging output when an error is raised. -s Shows the path/line numbers of every completion before it starts. --pdb Launch pdb when an error is raised. --ipdb Launch ipdb when an error is raised. --pudb Launch pudb when an error is raised. 
""" from __future__ import print_function, division, unicode_literals from docopt import docopt import json import os import random import sys import traceback import jedi class SourceFinder(object): _files = None @staticmethod def fetch(file_path): if not os.path.isdir(file_path): yield file_path return for root, dirnames, filenames in os.walk(file_path): for name in filenames: if name.endswith('.py'): yield os.path.join(root, name) @classmethod def files(cls, file_path): if cls._files is None: cls._files = list(cls.fetch(file_path)) return cls._files class TestCase(object): def __init__(self, operation, path, line, column, traceback=None): if operation not in self.operations: raise ValueError("%s is not a valid operation" % operation) # Set other attributes self.operation = operation self.path = path self.line = line self.column = column self.traceback = traceback @classmethod def from_cache(cls, record): with open(record) as f: args = json.load(f) return cls(*args) operations = [ 'completions', 'goto_assignments', 'goto_definitions', 'usages', 'call_signatures'] @classmethod def generate(cls, file_path): operation = random.choice(cls.operations) path = random.choice(SourceFinder.files(file_path)) with open(path) as f: source = f.read() lines = source.splitlines() if not lines: lines = [''] line = random.randint(1, len(lines)) column = random.randint(0, len(lines[line - 1])) return cls(operation, path, line, column) def run(self, debugger, record=None, print_result=False): try: with open(self.path) as f: self.script = jedi.Script(f.read(), self.line, self.column, self.path) self.objects = getattr(self.script, self.operation)() if print_result: print("{path}: Line {line} column {column}".format(**self.__dict__)) self.show_location(self.line, self.column) self.show_operation() except jedi.NotFoundError: pass except Exception: self.traceback = traceback.format_exc() if record is not None: call_args = (self.operation, self.path, self.line, self.column, self.traceback) with open(record, 'w') as f: json.dump(call_args, f) self.show_errors() if debugger: einfo = sys.exc_info() pdb = __import__(debugger) if debugger == 'pudb': pdb.post_mortem(einfo[2], einfo[0], einfo[1]) else: pdb.post_mortem(einfo[2]) exit(1) def show_location(self, lineno, column, show=3): # Three lines ought to be enough lower = lineno - show if lineno - show > 0 else 0 prefix = ' |' for i, line in enumerate(self.script.source.split('\n')[lower:lineno]): print(prefix, lower + i + 1, line) print(prefix, ' ', ' ' * (column + len(str(lineno))), '^') def show_operation(self): print("%s:\n" % self.operation.capitalize()) if self.operation == 'completions': self.show_completions() else: self.show_definitions() def show_completions(self): for completion in self.objects: print(completion.name) def show_definitions(self): for completion in self.objects: print(completion.desc_with_module) if completion.module_path is None: continue if os.path.abspath(completion.module_path) == os.path.abspath(self.path): self.show_location(completion.line, completion.column) def show_errors(self): print(self.traceback) print(("Error with running Script(...).{operation}() with\n" "\tpath: {path}\n" "\tline: {line}\n" "\tcolumn: {column}").format(**self.__dict__)) def main(arguments): debugger = 'pdb' if arguments['--pdb'] else \ 'ipdb' if arguments['--ipdb'] else \ 'pudb' if arguments['--pudb'] else None record = arguments['--record'] jedi.settings.use_filesystem_cache = arguments['--fs-cache'] if arguments['--debug']: jedi.set_debug_function() if 
arguments['redo'] or arguments['show']: t = TestCase.from_cache(record) if arguments['show']: t.show_errors() else: t.run(debugger) elif arguments['run']: TestCase( arguments['<operation>'], arguments['<path>'], int(arguments['<line>']), int(arguments['<column>']) ).run(debugger, print_result=True) else: for _ in range(int(arguments['--maxtries'])): t = TestCase.generate(arguments['<path>'] or '.') if arguments['-s']: print('%s %s %s %s ' % (t.operation, t.path, t.line, t.column)) sys.stdout.flush() else: print('.', end='') t.run(debugger, record) sys.stdout.flush() print() if __name__ == '__main__': arguments = docopt(__doc__) main(arguments)
jedi-0.9.0/AUTHORS.txt
Main Authors ============ David Halter (@davidhalter) Takafumi Arakaki (@tkf) Code Contributors ================= Danilo Bargen (@dbrgn) Laurens Van Houtven (@lvh) <_@lvh.cc> Aldo Stracquadanio (@Astrac) Jean-Louis Fuchs (@ganwell) tek (@tek) Yasha Borevich (@jjay) Aaron Griffin andviro (@andviro) Mike Gilbert (@floppym) Aaron Meurer (@asmeurer) Lubos Trilety Akinori Hattori (@hattya) srusskih (@srusskih) Steven Silvester (@blink1073) Colin Duquesnoy (@ColinDuquesnoy) Jorgen Schaefer (@jorgenschaefer) Fredrik Bergroth (@fbergroth) Mathias Fußenegger (@mfussenegger) Syohei Yoshida (@syohex) ppalucky (@ppalucky) immerrr (@immerrr) immerrr@gmail.com Albertas Agejevas (@alga) Savor d'Isavano (@KenetJervet) Phillip Berndt (@phillipberndt) Ian Lee (@IanLee1521) Farkhad Khatamov (@hatamov) Note: (@user) means a github user name.
jedi-0.9.0/PKG-INFO
Metadata-Version: 1.1 Name: jedi Version: 0.9.0 Summary: An autocompletion tool for Python that can be used for text editors. Home-page: https://github.com/davidhalter/jedi Author: David Halter Author-email: davidhalter88@gmail.com License: MIT Description: ################################################################### Jedi - an awesome autocompletion/static analysis library for Python ################################################################### .. image:: https://secure.travis-ci.org/davidhalter/jedi.png?branch=master :target: http://travis-ci.org/davidhalter/jedi :alt: Travis-CI build status .. image:: https://coveralls.io/repos/davidhalter/jedi/badge.png?branch=master :target: https://coveralls.io/r/davidhalter/jedi :alt: Coverage Status *If you have specific questions, please add an issue or ask on* `stackoverflow `_ *with the label* ``python-jedi``. Jedi is a static analysis tool for Python that can be used in IDEs/editors. Its historic focus is autocompletion, but it does static analysis now as well. Jedi is fast and very well tested. It understands Python on a deeper level than all other static analysis frameworks for Python. Jedi has support for two different goto functions. It's possible to search for related names and to list all names in a Python file and infer them. Jedi understands docstrings and you can use Jedi autocompletion in your REPL as well. Jedi uses a very simple API to connect with IDEs. There's a reference implementation as a `VIM-Plugin `_, which uses Jedi's autocompletion. We encourage you to use Jedi in your IDEs. It's really easy. 
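Here is a minimal sketch of what that API looks like (the source string, cursor position and file name below are arbitrary examples)::

    import jedi

    source = "import json\njson.lo"
    # Line 2, column 7 -- the cursor sits right after "json.lo".
    # 'example.py' is only a placeholder path.
    script = jedi.Script(source, 2, 7, 'example.py')

    # Every Completion object carries a .name and a .complete attribute.
    for completion in script.completions():
        print(completion.name)

The same ``Script`` object also offers ``goto_assignments()``, ``goto_definitions()``, ``usages()`` and ``call_signatures()``, which are described further down.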
Jedi can currently be used with the following editors: - Vim (jedi-vim_, YouCompleteMe_) - Emacs (Jedi.el_, elpy_, anaconda-mode_, ycmd_) - Sublime Text (SublimeJEDI_ [ST2 + ST3], anaconda_ [only ST3]) - SynWrite_ - TextMate_ (Not sure if it's actually working) - Kate_ version 4.13+ supports it natively, you have to enable it, though. [`proof `_] And it powers the following projects: - wdb_ - Web Debugger Here are some pictures taken from jedi-vim_: .. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_complete.png Completion for almost anything (Ctrl+Space). .. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_function.png Display of function/class bodies, docstrings. .. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_pydoc.png Pydoc support (Shift+k). There is also support for goto and renaming. Get the latest version from `github `_ (master branch should always be kind of stable/working). Docs are available at `https://jedi.readthedocs.org/en/latest/ `_. Pull requests with documentation enhancements and/or fixes are awesome and most welcome. Jedi uses `semantic versioning `_. Installation ============ pip install jedi Note: This just installs the Jedi library, not the editor plugins. For information about how to make it work with your editor, refer to the corresponding documentation. You don't want to use ``pip``? Please refer to the `manual `_. Feature Support and Caveats =========================== Jedi really understands your Python code. For a comprehensive list what Jedi understands, see: `Features `_. A list of caveats can be found on the same page. You can run Jedi on cPython 2.6, 2.7, 3.2, 3.3 or 3.4, but it should also understand/parse code older than those versions. Tips on how to use Jedi efficiently can be found `here `_. API --- You can find the documentation for the `API here `_. Autocompletion / Goto / Pydoc ----------------------------- Please check the API for a good explanation. There are the following commands: - ``jedi.Script.goto_assignments`` - ``jedi.Script.completions`` - ``jedi.Script.usages`` The returned objects are very powerful and really all you might need. Autocompletion in your REPL (IPython, etc.) ------------------------------------------- It's possible to have Jedi autocompletion in REPL modes - `example video `_. This means that IPython and others are `supported `_. Static Analysis / Linter ------------------------ To do all forms of static analysis, please try to use ``jedi.names``. It will return a list of names that you can use to infer types and so on. Linting is another thing that is going to be part of Jedi. For now you can try an alpha version ``python -m jedi linter``. The API might change though and it's still buggy. It's Jedi's goal to be smarter than classic linter and understand ``AttributeError`` and other code issues. Refactoring ----------- Jedi would in theory support refactoring, but we have never publicized it, because it's not production ready. If you're interested in helping out here, let me know. With the latest parser changes, it should be very easy to actually make it work. Development =========== There's a pretty good and extensive `development documentation `_. Testing ======= The test suite depends on ``tox`` and ``pytest``:: pip install tox pytest To run the tests for all supported Python versions:: tox If you want to test only a specific Python version (e.g. 
Python 2.7), it's as easy as :: tox -e py27 Tests are also run automatically on `Travis CI `_. For more detailed information visit the `testing documentation `_ .. _jedi-vim: https://github.com/davidhalter/jedi-vim .. _youcompleteme: http://valloric.github.io/YouCompleteMe/ .. _Jedi.el: https://github.com/tkf/emacs-jedi .. _elpy: https://github.com/jorgenschaefer/elpy .. _anaconda-mode: https://github.com/proofit404/anaconda-mode .. _ycmd: https://github.com/abingham/emacs-ycmd .. _sublimejedi: https://github.com/srusskih/SublimeJEDI .. _anaconda: https://github.com/DamnWidget/anaconda .. _SynWrite: http://uvviewsoft.com/synjedi/ .. _wdb: https://github.com/Kozea/wdb .. _TextMate: https://github.com/lawrenceakka/python-jedi.tmbundle .. _Kate: http://kate-editor.org .. :changelog: Changelog --------- 0.9.0 (2015-04-10) ++++++++++++++++++ - Integrated the parser of 2to3. This will make refactoring possible. It will also be possible to check for error messages (like compiling an AST would give) in the future. - With the new parser, the evaluation also completely changed. It's now simpler and more readable. - Completely rewritten REPL completion. - Added ``jedi.names``, a command to do static analysis. Thanks to that sourcegraph guys for sponsoring this! - Alpha version of the linter. 0.8.1 (2014-07-23) +++++++++++++++++++ - Bugfix release, the last release forgot to include files that improve autocompletion for builtin libraries. Fixed. 0.8.0 (2014-05-05) +++++++++++++++++++ - Memory Consumption for compiled modules (e.g. builtins, sys) has been reduced drastically. Loading times are down as well (it takes basically as long as an import). - REPL completion is starting to become usable. - Various small API changes. Generally this release focuses on stability and refactoring of internal APIs. - Introducing operator precedence, which makes calculating correct Array indices and ``__getattr__`` strings possible. 0.7.0 (2013-08-09) ++++++++++++++++++ - Switched from LGPL to MIT license. - Added an Interpreter class to the API to make autocompletion in REPL possible. - Added autocompletion support for namespace packages. - Add sith.py, a new random testing method. 0.6.0 (2013-05-14) ++++++++++++++++++ - Much faster parser with builtin part caching. - A test suite, thanks @tkf. 0.5 versions (2012) +++++++++++++++++++ - Initial development. 
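As a quick follow-up to the Static Analysis / Linter section above, here is a minimal sketch of the ``jedi.names`` call (the module source is an arbitrary example)::

    import jedi

    source = "import collections\n\ndef foo(bar):\n    return bar + 1\n"

    # jedi.names() returns Definition objects for the names defined at
    # the top level of the given source (here: 'collections' and 'foo').
    for definition in jedi.names(source):
        print(definition.name, definition.type, definition.line)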
Keywords: python completion refactoring vim Platform: any Classifier: Development Status :: 4 - Beta Classifier: Environment :: Plugins Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: MIT License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.2 Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: 3.4 Classifier: Topic :: Software Development :: Libraries :: Python Modules Classifier: Topic :: Text Editors :: Integrated Development Environments (IDE) Classifier: Topic :: Utilities
jedi-0.9.0/MANIFEST.in
include README.rst include CHANGELOG.rst include LICENSE.txt include AUTHORS.txt include .coveragerc include sith.py include conftest.py include pytest.ini include tox.ini include jedi/evaluate/compiled/fake/*.pym include jedi/parser/grammar*.txt recursive-include test * recursive-include docs * recursive-exclude * *.pyc
jedi-0.9.0/tox.ini
[tox] envlist = py26, py27, py32, py33, py34 [testenv] deps = pytest>=2.3.5 pytest-cache # docopt for sith doctests docopt # colorama for colored debug output colorama setenv = # https://github.com/tomchristie/django-rest-framework/issues/1957 # tox corrupts __pycache__, solution from here: PYTHONDONTWRITEBYTECODE=1 commands = py.test [] [testenv:py26] deps = unittest2 {[testenv]deps} [testenv:cov] deps = coverage {[testenv]deps} commands = coverage run --source jedi -m py.test coverage report [testenv:sith] commands = {envpython} -c "import os; a='{envtmpdir}'; os.path.exists(a) or os.makedirs(a)" {envpython} sith.py --record {envtmpdir}/record.json random {posargs:jedi}
jedi-0.9.0/docs/
jedi-0.9.0/docs/_screenshots/
jedi-0.9.0/docs/_screenshots/screenshot_pydoc.png
[binary PNG data omitted -- jedi-vim screenshot: pydoc support (Shift+k)]
jedi-0.9.0/docs/_screenshots/screenshot_complete.png
[binary PNG data omitted -- jedi-vim screenshot: completion for almost anything (Ctrl+Space)]
jedi-0.9.0/docs/_screenshots/screenshot_function.png
[binary PNG data omitted -- jedi-vim screenshot: display of function/class bodies and docstrings]
Q DV+40d }ڍnÍasq hsM<09zv!4Y~ +Rb:}F5/cүJ@V[#oo,s1~-AF6@jXeyġڮJQDaƓ`Ru @e=ˇ&~:aX'4\xQB` LsxJTǑ37gh<6/ㄿmҠ+;'[ptjۄarO]_-f@ NiR9dӢYq%7v&A(w~:`M]]+~ׁ%̚<.T!80Os`>s W)jUn)#ut|?)~!7)d Xvo(/Nl5Z{L] 6|>%(iOC9 ,R!POw䦧 ?gz7~xcQ)g@\s( Q*Y0AE[,XwXcE}uV0|Z.h F;~8:e03̌](BQ!K_[)RRQHD?Bl%.m0a̘19?F0|-s?s}>|G b6& }atTd,m,o)/n/8'\)ks3wأ?߽k^\I1%GsӊزOoKl'8zʇz=ǿZбH%ٓg1%/e},fX$?tɿ$\.*O@/tJfI3T㩁!BH3~9,enT r/st[hj F?Ts{ǁx+'f'I scEz6J໏WrPE-8NGߐ~__T-g13z${xa' M7Aw@"HG9IDATu-QA*Bc&HmsN -=&KB[mS"E[HUo=\Ej3yUC˻ ٳ籵D'ǡʥi B6$dNBjؙ.O7!To%}vb3z &tnag͸_9VÂ,ƳDZx1}*@ KQnluZS˻_BÙuc'8oOdfvgPP#ȎBT 3LJh3t&xR*W$+8Sy'1TOaƴ pM=%λY% 9K`o^8 *JH~ ,ReϬI` 4dߊ(Dg&rfSXʊ{He;M&;¼ JU 7g:X;ft2HE-0o\f̓-Aq/g xn$N(mصT7}AD, c̙a(=^-}3Wҵbɛoc2V҅/jH Gi@@0?GT?Bup6)>LeC `7BMf}T(oWmI2եsG5Le\[FN22?n:<ܥ'>ųR&S7`Ϥ$7zI.aF \~:x;/*zKxGg `c唳jg[ _k NߑnUxdsEDwV3;L|) =pvYBj' NUW܏ D>T g:iH[ 5|6jHg[ D:t2DK/;wKz Lj>@f\]dedvOP !{aO'>Վ/ ٻXL7YBCL^ϗ$K-e/8ЬIۉ/}=oڕ~vVy>^EgƤhHk 9l6PR'׀ZAF ujl)N:ϡ|hh}C=jjP?w_[ӐfǞ tn0t${FKj"\]j0` Iu;/ > B=17 I*Hee@!,Ir ٷ3}|{${ïvDG^f`҃\=2c|Kk]^c۸;0> u)gFBzK-$!m/؉x֝VWDqin1N*/d g    Ŵj=h% p- AADAA@2PA-Ň=hh>?)n<@؜Gjc6B\q:aTݴ1͆9BHn] E)ebG5À׽/.26bT l*К) v?+$QiQAЫ>L|7æF/?t1~xQ/hv6bG?7OŻ8~$ۧ>:$M#Xv%F&@38Ss |<W{]7Y[Snʰ1#rOe,U@ںw6z;4pN?Ԕ z=i}e{>.jzIЖ?j$xARxMV+ r>k)sdԨGvaHO{040V!{[ / u&* L n=}YfLƄl>١2G{ G0\/TW:  :(~|m[0wmũ*INf B-&޼vq@ =՞( z0P=ds9 gzOϳeW3r錺>˧%Ȫqp$ldD\x=܅ W1p4"|} mmh*C|iVgbQ }gL=xu_7f0$ zuÖLŨ,;ri:vbuOSq/ՖN3QSʢʸ{`/6^<؞ >ZGnI~+s$1geL7ۣMۿiۆTr@#.<@@NT][~${1%ԯ9Ruب uwcKSk`},*S|JIBdۈB~ sJA>-U'Y֐u_!ρQV:{NǴ7bKv pp&f߱Lr8s`G25'[LdXYJAޑEa|8 J%ӃH!K^ɪ;arh@R]J$)? I~d6 9oͯAy߮B:ati1$B1Z?Sȁ8 bAe4qqv&KzImFәHNe*L_ ?MCf_3A, `Z0z:3=?|2@Fz]IdKQW:p,!|77D]E$#Z֗.Y$ޣPl/ V@WAXe y%_&8*$6$azhKH\dq$ @oWWW B\( 0fO%8z,Ox#"1Om"٘ X+)}^&?zg|`!kҮ3/z$+#}v<׾@bg; \zTԯ`@VdE>/y*&a-e~l"*b3+EO:[Z͋jqKyU$qy{Q]lF t5zheՒS$*6oG&c C-j0݋!nlgOʏ'0×{R$n\-P˹~2bp^AA\.;s$I:o#n{<$t }w(zqډWYzmܩ,:f`OXnͻBq}Lx^:NZIGENJrY3OdMxoɟ. a@hp?CR%6gVvQ! sep| U6#{r sV8V_F.L?f{S>}dtTZwhpCN8cсٌ*D'oy9mAZ 9r1Jye^8Gpa65>Ђ~hN'zJ|V)ra݅%Tߑy@#H 9}'Ԣh'kt'&W~Yb'AJg A-oCzJŽ#1ojԴy_ zeb9? g)j>DrBV.uABH-94 $Xَ9/tf7UAPAA&*#3@s_*AY,=cp %]&mzA:s-AAChAAdsxoWILVvLe@?΄-+ ByAWOCؽ4 iVlٽEU,8a>o'fj}~|<˃]eDoʦiSk\֮b(֯Z_C,BlNLV}&o=hAxm\OhOpdf^9i֮X?\~[0+WEe"t+X{|&2eܕl^ook; %s--kl'fzM@s|W% &oAPAntg:X$]"e#%Π=#ѳl$07Vo~Ycx9_1:ztvh%q Z!YȩLWH!KH=;^yaO~D@Dξydbo{qrn Kѣ&jr* {_ηfzwQť\~X&ds. 'm;`)wENs9p_\wˌ֋5.e|h:wһ4)4ڿ!ge:5_9ӃiNؕ' , K;Ɵ摞xk:'Su0DG J&3sH}D[ݕk9iSQ GumbyyICAN ;$Ax;)])HkۆlMllV]/ $#6S|RbBoJVQAdO6Lӿo 4פe5+{  aЭcM27zJP3#Ҕ&eLVzQ?{ο~j6)—'7l-saPfBLw' _k!m`>14mwh.n4TՅjHί._fMؼM_=E'TMA({^# 6MLx*V8WjN"[Ce_ 2CDOc?g2= ^O#˞Ʀ0ȥ4G6DI !AIu 2ѹ~ULC N\-^c|l1 A~pfށJ{5F +6/""֤KcRLöD2c]^u;~J}c5kA3 3'?@13 WvNKWR`F#OI03:hvo8^gWP`G FADPf>1¢y@;r򋛒.<ʇ>q ɉ3qoOG#yTxƭY?M/$& #SD5͹ώ(>7Z\*rc+)>//*Xc O[Ȩ)1Aa Ư%ٵ1#b.Ǎu@d>Qup wg߈?fæO^xozvm> +S['jŎ#  &&zJe1cܷ| 9=1ۯ\E MF>Ӭ>t':*aM" - " w՚N  pۄA`h ]늯{A%ӂ  wjIENDB`jedi-0.9.0/docs/conf.py0000664000175000017500000002176112331540214015066 0ustar daviddavid00000000000000# -*- coding: utf-8 -*- # # Jedi documentation build configuration file, created by # sphinx-quickstart on Wed Dec 26 00:11:34 2012. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import datetime # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. 
If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('..')) sys.path.append(os.path.abspath('_themes')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.todo', 'sphinx.ext.intersphinx', 'sphinx.ext.inheritance_diagram'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Jedi' copyright = u'2012 - {today.year}, Jedi contributors'.format(today=datetime.date.today()) import jedi from jedi.utils import version_info # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '.'.join(str(x) for x in version_info()[:2]) # The full version, including alpha/beta/rc tags. release = jedi.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'flask' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['_themes'] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. html_sidebars = { '**': [ 'sidebarlogo.html', 'localtoc.html', #'relations.html', 'ghbuttons.html', #'sourcelink.html', #'searchbox.html' ] } # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Jedidoc' #html_style = 'default.css' # Force usage of default template on RTD # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'Jedi.tex', u'Jedi Documentation', u'Jedi contributors', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'jedi', u'Jedi Documentation', [u'Jedi contributors'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. 
List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'Jedi', u'Jedi Documentation', u'Jedi contributors', 'Jedi', 'Awesome Python autocompletion library.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # -- Options for todo module --------------------------------------------------- todo_include_todos = False # -- Options for autodoc module ------------------------------------------------ autoclass_content = 'both' autodoc_member_order = 'bysource' autodoc_default_flags = [] #autodoc_default_flags = ['members', 'undoc-members'] # -- Options for intersphinx module -------------------------------------------- intersphinx_mapping = { 'http://docs.python.org/': None, } def skip_deprecated(app, what, name, obj, skip, options): """ All attributes containing a deprecated note shouldn't be documented anymore. This makes it even clearer that they are not supported anymore. """ doc = obj.__doc__ return skip or doc and '.. deprecated::' in doc def setup(app): app.connect('autodoc-skip-member', skip_deprecated) jedi-0.9.0/docs/_templates/0000775000175000017500000000000012517736563015741 5ustar daviddavid00000000000000jedi-0.9.0/docs/_templates/sidebarlogo.html0000664000175000017500000000021012143361722021074 0ustar daviddavid00000000000000 jedi-0.9.0/docs/_templates/ghbuttons.html0000664000175000017500000000034612143361722020631 0ustar daviddavid00000000000000

Github



jedi-0.9.0/docs/_static/
jedi-0.9.0/docs/_static/logo-src.txt
The source of the logo is a photoshop file hosted here: https://dl.dropboxusercontent.com/u/170011615/Jedi12_Logo.psd.xz
jedi-0.9.0/docs/_static/logo.png
[binary PNG data omitted -- the Jedi logo]
jedi-0.9.0/docs/global.rst0000664000175000017500000000004512143361722015557 0ustar daviddavid00000000000000:orphan: .. |jedi| replace:: *Jedi*
jedi-0.9.0/docs/docs/0000775000175000017500000000000012517736563014534 5ustar daviddavid00000000000000jedi-0.9.0/docs/docs/plugin-api.rst0000664000175000017500000000406712331540214017316 0ustar daviddavid00000000000000.. include:: ../global.rst The Plugin API ============== .. currentmodule:: jedi Note: This documentation is for plugin developers who want to improve autocompletion in their editors/IDEs. If you want to use |jedi|, you first need to ``import jedi``. You then have direct access to the :class:`.Script`. You can then call the functions documented here. These functions return :ref:`API classes `. Deprecations ------------ The deprecation process is as follows: 1. A deprecation is announced in the next major/minor release. 2. We wait at least a year and at least two minor releases until we remove the deprecated functionality. API documentation ----------------- API Interface ~~~~~~~~~~~~~ .. automodule:: jedi.api :members: :undoc-members: Examples -------- Completions: .. sourcecode:: python >>> import jedi >>> source = '''import json; json.l''' >>> script = jedi.Script(source, 1, 19, '') >>> script >>> completions = script.complete() >>> completions [, ] >>> completions[1] >>> completions[1].complete 'oads' >>> completions[1].word 'loads' Definitions / Goto: .. sourcecode:: python >>> import jedi >>> source = '''def my_func(): ... print 'called' ... ... alias = my_func ... my_list = [1, None, alias] ... inception = my_list[2] ... ... inception()''' >>> script = jedi.Script(source, 8, 1, '') >>> >>> script.goto_assignments() [] >>> >>> script.goto_definitions() [] Related names: .. sourcecode:: python >>> import jedi >>> source = '''x = 3 ... if 1 == 2: ... x = 4 ... else: ... del x''' >>> script = jedi.Script(source, 5, 8, '') >>> rns = script.related_names() >>> rns [, ] >>> rns[0].start_pos (3, 4) >>> rns[0].is_keyword False >>> rns[0].text 'x'
jedi-0.9.0/docs/docs/usage.rst0000664000175000017500000000401512363566726016373 0ustar daviddavid00000000000000.. include:: ../global.rst End User Usage ============== If you are not an IDE developer, the odds are that you just want to use |jedi| as a browser plugin or in the shell. Yes, that's :ref:`also possible `! |jedi| is relatively young and can be used in a variety of plugins and software. If your editor/IDE is not among them, recommend |jedi| to your IDE developers. .. _editor-plugins: Editor Plugins -------------- Vim: - jedi-vim_ - YouCompleteMe_ Emacs: - Jedi.el_ - elpy_ - anaconda-mode_ Sublime Text 2/3: - SublimeJEDI_ (ST2 & ST3) - anaconda_ (only ST3) SynWrite: - SynJedi_ TextMate: - Textmate_ (Not sure if it's actually working) Kate: - Kate_ version 4.13+ `supports it natively `__, you have to enable it, though. .. _other-software: Other Software Using Jedi ------------------------- - wdb_ - Web Debugger .. _repl-completion: Tab completion in the Python Shell ---------------------------------- There are two different ways to use Jedi autocompletion in your Python interpreter: one with a custom ``$HOME/.pythonrc.py`` file and one that uses ``PYTHONSTARTUP``. Using ``PYTHONSTARTUP`` ~~~~~~~~~~~~~~~~~~~~~~~ ..
automodule:: jedi.replstartup Using a custom ``$HOME/.pythonrc.py`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autofunction:: jedi.utils.setup_readline .. _jedi-vim: https://github.com/davidhalter/jedi-vim .. _youcompleteme: http://valloric.github.io/YouCompleteMe/ .. _Jedi.el: https://github.com/tkf/emacs-jedi .. _elpy: https://github.com/jorgenschaefer/elpy .. _anaconda-mode: https://github.com/proofit404/anaconda-mode .. _sublimejedi: https://github.com/srusskih/SublimeJEDI .. _anaconda: https://github.com/DamnWidget/anaconda .. _SynJedi: http://uvviewsoft.com/synjedi/ .. _wdb: https://github.com/Kozea/wdb .. _TextMate: https://github.com/lawrenceakka/python-jedi.tmbundle .. _kate: http://kate-editor.org/ jedi-0.9.0/docs/docs/installation.rst0000664000175000017500000000507212517736533017770 0ustar daviddavid00000000000000.. include:: ../global.rst Installation and Configuration ============================== You can either include |jedi| as a submodule in your text editor plugin (like jedi-vim_ does by default), or you can install it systemwide. .. note:: This just installs the |jedi| library, not the :ref:`editor plugins `. For information about how to make it work with your editor, refer to the corresponding documentation. The preferred way ----------------- On any system you can install |jedi| directly from the Python package index using pip:: sudo pip install jedi If you want to install the current development version (master branch):: sudo pip install -e git://github.com/davidhalter/jedi.git#egg=jedi System-wide installation via a package manager ---------------------------------------------- Arch Linux ~~~~~~~~~~ You can install |jedi| directly from official Arch Linux packages: - `python-jedi `__ (Python 3) - `python2-jedi `__ (Python 2) The specified Python version just refers to the *runtime environment* for |jedi|. Use the Python 2 version if you're running vim (or whatever editor you use) under Python 2. Otherwise, use the Python 3 version. But whatever version you choose, both are able to complete both Python 2 and 3 *code*. (There is also a packaged version of the vim plugin available: `vim-jedi at Arch Linux`__.) Debian ~~~~~~ Debian packages are available in the `unstable repository `__. Others ~~~~~~ We are in the discussion of adding |jedi| to the Fedora repositories. Manual installation from a downloaded package --------------------------------------------- If you prefer not to use an automated package installer, you can `download `__ a current copy of |jedi| and install it manually. To install it, navigate to the directory containing `setup.py` on your console and type:: sudo python setup.py install Inclusion as a submodule ------------------------ If you use an editor plugin like jedi-vim_, you can simply include |jedi| as a git submodule of the plugin directory. Vim plugin managers like Vundle_ or Pathogen_ make it very easy to keep submodules up to date. .. _jedi-vim: https://github.com/davidhalter/jedi-vim .. _vundle: https://github.com/gmarik/vundle .. _pathogen: https://github.com/tpope/vim-pathogen jedi-0.9.0/docs/docs/features.rst0000664000175000017500000001610012517736533017077 0ustar daviddavid00000000000000.. include:: ../global.rst Features and Caveats ==================== Jedi obviously supports autocompletion. It's also possible to get it working in (:ref:`your REPL (IPython, etc.) `). Static analysis is also possible by using the command ``jedi.names``. 
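As a rough, illustrative sketch of such a call (the exact attributes available on the returned definition objects can differ between versions)::

    import jedi

    source = '''
    import json

    def load_config(path):
        with open(path) as f:
            return json.load(f)
    '''

    # ``jedi.names`` returns definition objects for the names in the source.
    for definition in jedi.names(source):
        print(definition.name, definition.type, definition.line)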
The Jedi Linter is currently in an alpha version and can be tested by calling ``python -m jedi linter``. Jedi would in theory support refactoring, but we have never publicized it, because it's not production ready. If you're interested in helping out here, let me know. With the latest parser changes, it should be very easy to actually make it work. General Features ---------------- - python 2.6+ and 3.2+ support - ignores syntax errors and wrong indentation - can deal with complex module / function / class structures - virtualenv support - can infer function arguments from sphinx, epydoc and basic numpydoc docstrings (:ref:`type hinting `) Supported Python Features ------------------------- |jedi| supports many of the widely used Python features: - builtins - multiple returns or yields - tuple assignments / array indexing / dictionary indexing - with-statement / exception handling - ``*args`` / ``**kwargs`` - decorators / lambdas / closures - generators / iterators - some descriptors: property / staticmethod / classmethod - some magic methods: ``__call__``, ``__iter__``, ``__next__``, ``__get__``, ``__getitem__``, ``__init__`` - ``list.append()``, ``set.add()``, ``list.extend()``, etc. - (nested) list comprehensions / ternary expressions - relative imports - ``getattr()`` / ``__getattr__`` / ``__getattribute__`` - function annotations (py3k feature, are ignored right now, but being parsed. I don't know what to do with them.) - class decorators (py3k feature, are being ignored too, until I find a use case, that doesn't work with |jedi|) - simple/usual ``sys.path`` modifications - ``isinstance`` checks for if/while/assert - namespace packages (includes ``pkgutil`` and ``pkg_resources`` namespaces) - Django / Flask / Buildout support Unsupported Features -------------------- Not yet implemented: - manipulations of instances outside the instance variables without using methods Will probably never be implemented: - metaclasses (how could an auto-completion ever support this) - ``setattr()``, ``__import__()`` - writing to some dicts: ``globals()``, ``locals()``, ``object.__dict__`` - evaluating ``if`` / ``while`` / ``del`` Caveats ------- **Malformed Syntax** Syntax errors and other strange stuff may lead to undefined behaviour of the completion. |jedi| is **NOT** a Python compiler, that tries to correct you. It is a tool that wants to help you. But **YOU** have to know Python, not |jedi|. **Legacy Python 2 Features** This framework should work for both Python 2/3. However, some things were just not as *pythonic* in Python 2 as things should be. To keep things simple, some older Python 2 features have been left out: - Classes: Always Python 3 like, therefore all classes inherit from ``object``. - Generators: No ``next()`` method. The ``__next__()`` method is used instead. **Slow Performance** Importing ``numpy`` can be quite slow sometimes, as well as loading the builtins the first time. If you want to speed things up, you could write import hooks in |jedi|, which preload stuff. However, once loaded, this is not a problem anymore. The same is true for huge modules like ``PySide``, ``wx``, etc. **Security** Security is an important issue for |jedi|. Therefore no Python code is executed. As long as you write pure python, everything is evaluated statically. But: If you use builtin modules (``c_builtin``) there is no other option than to execute those modules. However: Execute isn't that critical (as e.g. 
in pythoncomplete, which used to execute *every* import!), because it means one import and no more. So basically the only dangerous thing is using the import itself. If your ``c_builtin`` uses some strange initializations, it might be dangerous. But if it does, you're screwed anyway, because eventually you're going to execute your code, which executes the import. Recipes ------- Here are some tips on how to use |jedi| efficiently. .. _type-hinting: Type Hinting ~~~~~~~~~~~~ If |jedi| cannot detect the type of a function argument correctly (due to the dynamic nature of Python), you can help it by hinting the type using one of the following docstring syntax styles: **Sphinx style** http://sphinx-doc.org/domains.html#info-field-lists :: def myfunction(node, foo): """Do something with a ``node``. :type node: ProgramNode :param str foo: foo parameter description """ node.| # complete here **Epydoc** http://epydoc.sourceforge.net/manual-fields.html :: def myfunction(node): """Do something with a ``node``. @type node: ProgramNode """ node.| # complete here **Numpydoc** https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt In order to support the numpydoc format, you need to install the `numpydoc `__ package. :: def foo(var1, var2, long_var_name='hi'): r"""A one-line summary that does not use variable names or the function name. ... Parameters ---------- var1 : array_like Array_like means all those objects -- lists, nested lists, etc. -- that can be converted to an array. We can also refer to variables like `var1`. var2 : int The type above can either refer to an actual Python type (e.g. ``int``), or describe the type of the variable in more detail, e.g. ``(N,) ndarray`` or ``array_like``. long_variable_name : {'hi', 'ho'}, optional Choices in brackets, default first when optional. ... """ var2.| # complete here A little history ---------------- The Star Wars Jedi are awesome. My Jedi software tries to imitate a little bit of the precognition the Jedi have. There's even an awesome `scene `_ of Monty Python Jedis :-). But actually the name hasn't so much to do with Star Wars. It's part of my second name. After I explained to Guido van Rossum how some parts of my auto-completion work, he said (we drank a beer or two): *"Oh, that worries me..."* When it's finished, I hope he'll like it :-) I actually started Jedi because there were no good solutions available for VIM. Most auto-completions just didn't work well. The only good solution was PyCharm. But I like my good old VIM. Rope was never really intended to be an auto-completion library (and also I really hate project folders for my Python scripts). It's more of a refactoring suite. So I decided to do my own version of a completion, which would execute non-dangerous code. But I soon realized that this wouldn't work. So I built an extremely recursive thing which understands many of Python's key features. By the way, I really tried to program it as understandably as possible. But I think understanding it might need quite some time, because of its recursive nature.
jedi-0.9.0/docs/docs/testing.rst0000664000175000017500000000154112331540214016720 0ustar daviddavid00000000000000.. include:: ../global.rst Jedi Testing ============ The test suite depends on ``tox`` and ``pytest``:: pip install tox pytest To run the tests for all supported Python versions:: tox If you want to test only a specific Python version (e.g. Python 2.7), it's as easy as:: tox -e py27 Tests are also run automatically on `Travis CI `_.
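If you just want to iterate on a single test module during development, you can also invoke ``pytest`` directly; a rough example (the module path and the ``-k`` filter are only illustrations)::

    py.test test/test_api/test_call_signatures.py -k flows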
You want to add a test for |jedi|? Great! We love that. Normally you should write your tests as :ref:`Blackbox Tests `. Most tests would fit right in there. For specific API testing we're using simple unit tests, with a focus on a simple and readable testing structure. .. _blackbox: Blackbox Tests (run.py) ~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: test.run Refactoring Tests (refactor.py) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: test.refactor
jedi-0.9.0/docs/docs/plugin-api-classes.rst0000664000175000017500000000023712331540214020744 0ustar daviddavid00000000000000.. include:: ../global.rst .. _plugin-api-classes: API Return Classes ------------------ .. automodule:: jedi.api.classes :members: :undoc-members:
jedi-0.9.0/docs/docs/settings.rst0000664000175000017500000000011512331540214017103 0ustar daviddavid00000000000000.. include:: ../global.rst Settings ======== .. automodule:: jedi.settings
jedi-0.9.0/docs/docs/development.rst0000664000175000017500000001303512331540214017570 0ustar daviddavid00000000000000.. include:: ../global.rst Jedi Development ================ .. currentmodule:: jedi .. note:: This documentation is for Jedi developers who want to improve Jedi itself, but have no idea how Jedi works. If you want to use Jedi for your IDE, look at the `plugin api `_. Introduction ------------ This page tries to address the fundamental demand for documentation of the |jedi| internals. Understanding a dynamic language is a complex task, especially because type inference in Python can be very recursive. Therefore |jedi| could not avoid a certain amount of complexity. I know that **simple is better than complex**, but unfortunately it sometimes requires complex solutions to understand complex systems. Since most of the Jedi internals have been written by me (David Halter), this introduction will be written mostly by me, because no one else understands how Jedi works to the same level. That is also exactly the reason for this part of the documentation: to enable multiple people to edit the Jedi core. In five chapters I'm trying to describe the internals of |jedi|: - :ref:`The Jedi Core ` - :ref:`Core Extensions ` - :ref:`Imports & Modules ` - :ref:`Caching & Recursions ` - :ref:`Helper modules ` .. note:: Testing is not documented here, you'll find that `right here `_. .. _core: The Jedi Core ------------- The core of Jedi consists of three parts: - :ref:`Parser ` - :ref:`Python code evaluation ` - :ref:`API ` Most people are probably interested in :ref:`code evaluation `, because that's where all the magic happens. I need to introduce the :ref:`parser ` first, because :mod:`jedi.evaluate` uses it extensively. .. _parser: Parser (parser/__init__.py) ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: jedi.parser Parser Representation (parser/representation.py) ++++++++++++++++++++++++++++++++++++++++++++++++ .. automodule:: jedi.parser.representation Class inheritance diagram: .. inheritance-diagram:: SubModule Class Function Lambda Flow ForFlow Import Statement Param Call Array Name ListComprehension :parts: 1 .. _evaluate: Evaluation of python code (evaluate/__init__.py) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: jedi.evaluate Evaluation Representation (evaluate/representation.py) ++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. automodule:: jedi.evaluate.representation .. inheritance-diagram:: Executable Instance InstanceElement Class Function FunctionExecution :parts: 1 .. _name_resolution: Name resolution (evaluate/finder.py) ++++++++++++++++++++++++++++++++++++ .. automodule:: jedi.evaluate.finder
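As a purely illustrative example of what name resolution ultimately powers, the public API can be asked where a name points to (the exact representation of the returned objects differs between versions)::

    import jedi

    source = '''def foo():
        pass

    bar = foo
    '''
    # Line 4, column 6 is on ``foo`` in ``bar = foo``.
    script = jedi.Script(source, 4, 6)
    print(script.goto_definitions())
    print(script.goto_assignments())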
.. _dev-api: API (api.py and api_classes.py) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The API has been designed to be as easy to use as possible. The API documentation can be found `here `_. The API itself contains little code that needs to be mentioned here. Generally I'm trying to be conservative with the API. I'd rather not add new API features if they are not necessary, because it's much harder to deprecate stuff than to add it later. .. _core-extensions: Core Extensions --------------- Core Extensions is a summary of the following topics: - :ref:`Iterables & Dynamic Arrays ` - :ref:`Dynamic Parameters ` - :ref:`Fast Parser ` - :ref:`Docstrings ` - :ref:`Refactoring ` These topics are very important to understand what Jedi additionally does, but they could be removed from Jedi and Jedi would still work, just more slowly and without some features. .. _iterables: Iterables & Dynamic Arrays (evaluate/iterable.py) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To understand Python on a deeper level, |jedi| needs to understand some of the dynamic features of Python; however, this is probably the most complicated part: .. automodule:: jedi.evaluate.iterable .. _dynamic: Parameter completion (evaluate/dynamic.py) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: jedi.evaluate.dynamic .. _fast_parser: Fast Parser (parser/fast.py) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: jedi.parser.fast .. _docstrings: Docstrings (evaluate/docstrings.py) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: jedi.evaluate.docstrings .. _refactoring: Refactoring (evaluate/refactoring.py) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: jedi.refactoring .. _imports-modules: Imports & Modules ------------------- - :ref:`Modules ` - :ref:`Builtin Modules ` - :ref:`Imports ` .. _builtin: Compiled Modules (evaluate/compiled.py) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: jedi.evaluate.compiled .. _imports: Imports (evaluate/imports.py) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: jedi.evaluate.imports .. _caching-recursions: Caching & Recursions -------------------- - :ref:`Caching ` - :ref:`Recursions ` .. _cache: Caching (cache.py) ~~~~~~~~~~~~~~~~~~ .. automodule:: jedi.cache .. _recursion: Recursions (recursion.py) ~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: jedi.evaluate.recursion .. _dev-helpers: Helper Modules --------------- Most other modules are not really central to how Jedi works. They all contain relevant code, but if you understand the modules above, you pretty much understand Jedi. Python 2/3 compatibility (_compatibility.py) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: jedi._compatibility
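To give a flavour of what such a helper module contains, here is a minimal, illustrative sketch of a 2/3 shim. The names ``u``, ``unicode`` and ``is_py33`` are the kind of helpers the test suite imports from it; the bodies below are simplified assumptions, not the real implementation::

    import sys

    is_py3 = sys.version_info[0] >= 3
    is_py33 = sys.version_info[:2] >= (3, 3)

    if is_py3:
        unicode = str  # Python 3 has no separate ``unicode`` type.

    def u(string):
        # Return text; byte strings are decoded, text passes through unchanged.
        if isinstance(string, bytes):
            return string.decode('utf-8')
        return string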
_resources: Resources --------- - `Source Code on Github `_ - `Travis Testing `_ - `Python Package Index `_ jedi-0.9.0/docs/Makefile0000664000175000017500000001266412143361722015237 0ustar daviddavid00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Jedi.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Jedi.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/Jedi" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Jedi" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. 
The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." jedi-0.9.0/docs/_themes/0000775000175000017500000000000012517736563015230 5ustar daviddavid00000000000000jedi-0.9.0/docs/_themes/flask/0000775000175000017500000000000012517736563016330 5ustar daviddavid00000000000000jedi-0.9.0/docs/_themes/flask/static/0000775000175000017500000000000012517736563017617 5ustar daviddavid00000000000000jedi-0.9.0/docs/_themes/flask/static/flasky.css_t0000664000175000017500000001441312143361722022132 0ustar daviddavid00000000000000/* * flasky.css_t * ~~~~~~~~~~~~ * * :copyright: Copyright 2010 by Armin Ronacher. * :license: Flask Design License, see LICENSE for details. 
*/ {% set page_width = '940px' %} {% set sidebar_width = '220px' %} @import url("basic.css"); /* -- page layout ----------------------------------------------------------- */ body { font-family: 'Georgia', serif; font-size: 17px; background-color: white; color: #000; margin: 0; padding: 0; } div.document { width: {{ page_width }}; margin: 30px auto 0 auto; } div.documentwrapper { float: left; width: 100%; } div.bodywrapper { margin: 0 0 0 {{ sidebar_width }}; } div.sphinxsidebar { width: {{ sidebar_width }}; } hr { border: 1px solid #B1B4B6; } div.body { background-color: #ffffff; color: #3E4349; padding: 0 30px 0 30px; } img.floatingflask { padding: 0 0 10px 10px; float: right; } div.footer { width: {{ page_width }}; margin: 20px auto 30px auto; font-size: 14px; color: #888; text-align: right; } div.footer a { color: #888; } div.related { display: none; } div.sphinxsidebar a { color: #444; text-decoration: none; border-bottom: 1px dotted #999; } div.sphinxsidebar a:hover { border-bottom: 1px solid #999; } div.sphinxsidebar { font-size: 14px; line-height: 1.5; } div.sphinxsidebarwrapper { padding: 18px 10px; } div.sphinxsidebarwrapper p.logo { padding: 0 0 20px 0; margin: 0; text-align: center; } div.sphinxsidebar h3, div.sphinxsidebar h4 { font-family: 'Garamond', 'Georgia', serif; color: #444; font-size: 24px; font-weight: normal; margin: 0 0 5px 0; padding: 0; } div.sphinxsidebar h4 { font-size: 20px; } div.sphinxsidebar h3 a { color: #444; } div.sphinxsidebar p.logo a, div.sphinxsidebar h3 a, div.sphinxsidebar p.logo a:hover, div.sphinxsidebar h3 a:hover { border: none; } div.sphinxsidebar p { color: #555; margin: 10px 0; } div.sphinxsidebar ul { margin: 10px 0; padding: 0; color: #000; } div.sphinxsidebar input { border: 1px solid #ccc; font-family: 'Georgia', serif; font-size: 1em; } /* -- body styles ----------------------------------------------------------- */ a { color: #004B6B; text-decoration: underline; } a:hover { color: #6D4100; text-decoration: underline; } div.body h1, div.body h2, div.body h3, div.body h4, div.body h5, div.body h6 { font-family: 'Garamond', 'Georgia', serif; font-weight: normal; margin: 30px 0px 10px 0px; padding: 0; } {% if theme_index_logo %} div.indexwrapper h1 { text-indent: -999999px; background: url({{ theme_index_logo }}) no-repeat center center; height: {{ theme_index_logo_height }}; } {% endif %} div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; } div.body h2 { font-size: 180%; } div.body h3 { font-size: 150%; } div.body h4 { font-size: 130%; } div.body h5 { font-size: 100%; } div.body h6 { font-size: 100%; } a.headerlink { color: #ddd; padding: 0 4px; text-decoration: none; } a.headerlink:hover { color: #444; } div.body p, div.body dd, div.body li { line-height: 1.4em; } div.admonition { background: #fafafa; margin: 20px -30px; padding: 10px 30px; border-top: 1px solid #ccc; border-bottom: 1px solid #ccc; } div.admonition tt.xref, div.admonition a tt { border-bottom: 1px solid #fafafa; } dd div.admonition { margin-left: -60px; padding-left: 60px; } div.admonition p.admonition-title { font-family: 'Garamond', 'Georgia', serif; font-weight: normal; font-size: 24px; margin: 0 0 10px 0; padding: 0; line-height: 1; } div.admonition p.last { margin-bottom: 0; } div.highlight { background-color: white; } dt:target, .highlight { background: #FAF3E8; } div.note { background-color: #eee; border: 1px solid #ccc; } div.seealso { background-color: #ffc; border: 1px solid #ff6; } div.topic { background-color: #eee; } p.admonition-title { display: 
inline; } p.admonition-title:after { content: ":"; } pre, tt { font-family: 'Consolas', 'Menlo', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; font-size: 0.9em; } img.screenshot { } tt.descname, tt.descclassname { font-size: 0.95em; } tt.descname { padding-right: 0.08em; } img.screenshot { -moz-box-shadow: 2px 2px 4px #eee; -webkit-box-shadow: 2px 2px 4px #eee; box-shadow: 2px 2px 4px #eee; } table.docutils { border: 1px solid #888; -moz-box-shadow: 2px 2px 4px #eee; -webkit-box-shadow: 2px 2px 4px #eee; box-shadow: 2px 2px 4px #eee; } table.docutils td, table.docutils th { border: 1px solid #888; padding: 0.25em 0.7em; } table.field-list, table.footnote { border: none; -moz-box-shadow: none; -webkit-box-shadow: none; box-shadow: none; } table.footnote { margin: 15px 0; width: 100%; border: 1px solid #eee; background: #fdfdfd; font-size: 0.9em; } table.footnote + table.footnote { margin-top: -15px; border-top: none; } table.field-list th { padding: 0 0.8em 0 0; } table.field-list td { padding: 0; } table.footnote td.label { width: 0px; padding: 0.3em 0 0.3em 0.5em; } table.footnote td { padding: 0.3em 0.5em; } dl { margin: 0; padding: 0; } dl dd { margin-left: 30px; } blockquote { margin: 0 0 0 30px; padding: 0; } ul, ol { margin: 10px 0 10px 30px; padding: 0; } pre { background: #eee; padding: 7px 30px; margin: 15px -30px; line-height: 1.3em; } dl pre, blockquote pre, li pre { margin-left: -60px; padding-left: 60px; } dl dl pre { margin-left: -90px; padding-left: 90px; } tt { background-color: #ecf0f3; color: #222; /* padding: 1px 2px; */ } tt.xref, a tt { background-color: #FBFBFB; border-bottom: 1px solid white; } a.reference { text-decoration: none; border-bottom: 1px dotted #004B6B; } a.reference:hover { border-bottom: 1px solid #6D4100; } a.footnote-reference { text-decoration: none; font-size: 0.7em; vertical-align: top; border-bottom: 1px dotted #004B6B; } a.footnote-reference:hover { border-bottom: 1px solid #6D4100; } a:hover tt { background: #EEE; } jedi-0.9.0/docs/_themes/flask/static/small_flask.css0000664000175000017500000000172012143361722022603 0ustar daviddavid00000000000000/* * small_flask.css_t * ~~~~~~~~~~~~~~~~~ * * :copyright: Copyright 2010 by Armin Ronacher. * :license: Flask Design License, see LICENSE for details. */ body { margin: 0; padding: 20px 30px; } div.documentwrapper { float: none; background: white; } div.sphinxsidebar { display: block; float: none; width: 102.5%; margin: 50px -30px -20px -30px; padding: 10px 20px; background: #333; color: white; } div.sphinxsidebar h3, div.sphinxsidebar h4, div.sphinxsidebar p, div.sphinxsidebar h3 a { color: white; } div.sphinxsidebar a { color: #aaa; } div.sphinxsidebar p.logo { display: none; } div.document { width: 100%; margin: 0; } div.related { display: block; margin: 0; padding: 10px 0 20px 0; } div.related ul, div.related ul li { margin: 0; padding: 0; } div.footer { display: none; } div.bodywrapper { margin: 0; } div.body { min-height: 0; padding: 0; } jedi-0.9.0/docs/_themes/flask/layout.html0000664000175000017500000000171312143361722020517 0ustar daviddavid00000000000000{%- extends "basic/layout.html" %} {%- block extrahead %} {{ super() }} {% if theme_touch_icon %} {% endif %} Fork me on GitHub {% endblock %} {%- block relbar2 %}{% endblock %} {% block header %} {{ super() }} {% if pagename == 'index' %}
{% endif %} {% endblock %} {%- block footer %} {% if pagename == 'index' %}
{% endif %} {%- endblock %} jedi-0.9.0/docs/_themes/flask/LICENSE0000664000175000017500000000337512143361722017327 0ustar daviddavid00000000000000Copyright (c) 2010 by Armin Ronacher. Some rights reserved. Redistribution and use in source and binary forms of the theme, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. We kindly ask you to only use these themes in an unmodified manner just for Flask and Flask-related products, not for unrelated projects. If you like the visual style and want to use it for your own projects, please consider making some larger changes to the themes (such as changing font faces, sizes, colors or margins). THIS THEME IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS THEME, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. jedi-0.9.0/docs/_themes/flask/theme.conf0000664000175000017500000000024212143361722020261 0ustar daviddavid00000000000000[theme] inherit = basic stylesheet = flasky.css pygments_style = flask_theme_support.FlaskyStyle [options] index_logo = index_logo_height = 120px touch_icon = jedi-0.9.0/docs/_themes/flask/relations.html0000664000175000017500000000111612143361722021177 0ustar daviddavid00000000000000

Related Topics

jedi-0.9.0/docs/_themes/flask_theme_support.py0000664000175000017500000001502012143361722021640 0ustar daviddavid00000000000000""" Copyright (c) 2010 by Armin Ronacher. Some rights reserved. Redistribution and use in source and binary forms of the theme, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. We kindly ask you to only use these themes in an unmodified manner just for Flask and Flask-related products, not for unrelated projects. If you like the visual style and want to use it for your own projects, please consider making some larger changes to the themes (such as changing font faces, sizes, colors or margins). THIS THEME IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS THEME, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ # flasky extensions. flasky pygments style based on tango style from pygments.style import Style from pygments.token import Keyword, Name, Comment, String, Error, \ Number, Operator, Generic, Whitespace, Punctuation, Other, Literal class FlaskyStyle(Style): background_color = "#f8f8f8" default_style = "" styles = { # No corresponding class for the following: #Text: "", # class: '' Whitespace: "underline #f8f8f8", # class: 'w' Error: "#a40000 border:#ef2929", # class: 'err' Other: "#000000", # class 'x' Comment: "italic #8f5902", # class: 'c' Comment.Preproc: "noitalic", # class: 'cp' Keyword: "bold #004461", # class: 'k' Keyword.Constant: "bold #004461", # class: 'kc' Keyword.Declaration: "bold #004461", # class: 'kd' Keyword.Namespace: "bold #004461", # class: 'kn' Keyword.Pseudo: "bold #004461", # class: 'kp' Keyword.Reserved: "bold #004461", # class: 'kr' Keyword.Type: "bold #004461", # class: 'kt' Operator: "#582800", # class: 'o' Operator.Word: "bold #004461", # class: 'ow' - like keywords Punctuation: "bold #000000", # class: 'p' # because special names such as Name.Class, Name.Function, etc. # are not recognized as such later in the parsing, we choose them # to look the same as ordinary variables. 
Name: "#000000", # class: 'n' Name.Attribute: "#c4a000", # class: 'na' - to be revised Name.Builtin: "#004461", # class: 'nb' Name.Builtin.Pseudo: "#3465a4", # class: 'bp' Name.Class: "#000000", # class: 'nc' - to be revised Name.Constant: "#000000", # class: 'no' - to be revised Name.Decorator: "#888", # class: 'nd' - to be revised Name.Entity: "#ce5c00", # class: 'ni' Name.Exception: "bold #cc0000", # class: 'ne' Name.Function: "#000000", # class: 'nf' Name.Property: "#000000", # class: 'py' Name.Label: "#f57900", # class: 'nl' Name.Namespace: "#000000", # class: 'nn' - to be revised Name.Other: "#000000", # class: 'nx' Name.Tag: "bold #004461", # class: 'nt' - like a keyword Name.Variable: "#000000", # class: 'nv' - to be revised Name.Variable.Class: "#000000", # class: 'vc' - to be revised Name.Variable.Global: "#000000", # class: 'vg' - to be revised Name.Variable.Instance: "#000000", # class: 'vi' - to be revised Number: "#990000", # class: 'm' Literal: "#000000", # class: 'l' Literal.Date: "#000000", # class: 'ld' String: "#4e9a06", # class: 's' String.Backtick: "#4e9a06", # class: 'sb' String.Char: "#4e9a06", # class: 'sc' String.Doc: "italic #8f5902", # class: 'sd' - like a comment String.Double: "#4e9a06", # class: 's2' String.Escape: "#4e9a06", # class: 'se' String.Heredoc: "#4e9a06", # class: 'sh' String.Interpol: "#4e9a06", # class: 'si' String.Other: "#4e9a06", # class: 'sx' String.Regex: "#4e9a06", # class: 'sr' String.Single: "#4e9a06", # class: 's1' String.Symbol: "#4e9a06", # class: 'ss' Generic: "#000000", # class: 'g' Generic.Deleted: "#a40000", # class: 'gd' Generic.Emph: "italic #000000", # class: 'ge' Generic.Error: "#ef2929", # class: 'gr' Generic.Heading: "bold #000080", # class: 'gh' Generic.Inserted: "#00A000", # class: 'gi' Generic.Output: "#888", # class: 'go' Generic.Prompt: "#745334", # class: 'gp' Generic.Strong: "bold #000000", # class: 'gs' Generic.Subheading: "bold #800080", # class: 'gu' Generic.Traceback: "bold #a40000", # class: 'gt' } jedi-0.9.0/test/0000775000175000017500000000000012517736563013633 5ustar daviddavid00000000000000jedi-0.9.0/test/refactor/0000775000175000017500000000000012517736563015440 5ustar daviddavid00000000000000jedi-0.9.0/test/refactor/extract.py0000664000175000017500000000127112143361723017450 0ustar daviddavid00000000000000# --- simple def test(): #? 35 a return test(100, (30 + b, c) + 1) # +++ def test(): a = (30 + b, c) + 1 return test(100, a) # --- simple #2 def test(): #? 25 a return test(100, (30 + b, c) + 1) # +++ def test(): a = 30 + b return test(100, (a, c) + 1) # --- multiline def test(): #? 30 x return test(1, (30 + b, c) + 1) # +++ def test(): x = ((30 + b, c) + 1) return test(1, x ) # --- multiline #2 def test(): #? 25 x return test(1, (30 + b, c) + 1) # +++ def test(): x = 30 + b return test(1, (x, c) + 1) jedi-0.9.0/test/refactor/rename.py0000664000175000017500000000043712143361723017250 0ustar daviddavid00000000000000""" Test coverage for renaming is mostly being done by testing `Script.usages`. """ # --- simple def test1(): #? 7 blabla test1() AssertionError return test1, test1.not_existing # +++ def blabla(): blabla() AssertionError return blabla, blabla.not_existing jedi-0.9.0/test/refactor/inline.py0000664000175000017500000000036612143361723017260 0ustar daviddavid00000000000000# --- simple def test(): #? 4 a = (30 + b, c) + 1 return test(100, a) # +++ def test(): return test(100, (30 + b, c) + 1) # --- simple if 1: #? 
4 a = 1, 2 return test(100, a) # +++ if 1: return test(100, (1, 2)) jedi-0.9.0/test/test_api/0000775000175000017500000000000012517736563015443 5ustar daviddavid00000000000000jedi-0.9.0/test/test_api/__init__.py0000664000175000017500000000000012331540214017516 0ustar daviddavid00000000000000jedi-0.9.0/test/test_api/test_classes.py0000664000175000017500000002410512517736533020510 0ustar daviddavid00000000000000""" Test all things related to the ``jedi.api_classes`` module. """ from textwrap import dedent from inspect import cleandoc import pytest from jedi import Script, defined_names, __doc__ as jedi_doc, names from ..helpers import cwd_at from ..helpers import TestCase def test_is_keyword(): #results = Script('import ', 1, 1, None).goto_definitions() #assert len(results) == 1 and results[0].is_keyword is True results = Script('str', 1, 1, None).goto_definitions() assert len(results) == 1 and results[0].is_keyword is False def make_definitions(): """ Return a list of definitions for parametrized tests. :rtype: [jedi.api_classes.BaseDefinition] """ source = dedent(""" import sys class C: pass x = C() def f(): pass def g(): yield h = lambda: None """) definitions = [] definitions += defined_names(source) source += dedent(""" variable = sys or C or x or f or g or g() or h""") lines = source.splitlines() script = Script(source, len(lines), len('variable'), None) definitions += script.goto_definitions() script2 = Script(source, 4, len('class C'), None) definitions += script2.usages() source_param = "def f(a): return a" script_param = Script(source_param, 1, len(source_param), None) definitions += script_param.goto_assignments() return definitions @pytest.mark.parametrize('definition', make_definitions()) def test_basedefinition_type(definition): assert definition.type in ('module', 'class', 'instance', 'function', 'generator', 'statement', 'import', 'param') def test_basedefinition_type_import(): def get_types(source, **kwargs): return set([t.type for t in Script(source, **kwargs).completions()]) # import one level assert get_types('import t') == set(['module']) assert get_types('import ') == set(['module']) assert get_types('import datetime; datetime') == set(['module']) # from assert get_types('from datetime import timedelta') == set(['class']) assert get_types('from datetime import timedelta; timedelta') == set(['class']) assert get_types('from json import tool') == set(['module']) assert get_types('from json import tool; tool') == set(['module']) # import two levels assert get_types('import json.tool; json') == set(['module']) assert get_types('import json.tool; json.tool') == set(['module']) assert get_types('import json.tool; json.tool.main') == set(['function']) assert get_types('import json.tool') == set(['module']) assert get_types('import json.tool', column=9) == set(['module']) def test_function_call_signature_in_doc(): defs = Script(""" def f(x, y=1, z='a'): pass f""").goto_definitions() doc = defs[0].doc assert "f(x, y=1, z='a')" in str(doc) def test_class_call_signature(): defs = Script(""" class Foo: def __init__(self, x, y=1, z='a'): pass Foo""").goto_definitions() doc = defs[0].doc assert "Foo(self, x, y=1, z='a')" in str(doc) def test_position_none_if_builtin(): gotos = Script('import sys; sys.path').goto_assignments() assert gotos[0].line is None assert gotos[0].column is None @cwd_at('.') def test_completion_docstring(): """ Jedi should follow imports in certain conditions """ def docstr(src, result): c = Script(src).completions()[0] assert c.docstring(raw=True, fast=False) 
== cleandoc(result) c = Script('import jedi\njed').completions()[0] assert c.docstring(fast=False) == cleandoc(jedi_doc) docstr('import jedi\njedi.Scr', cleandoc(Script.__doc__)) docstr('abcd=3;abcd', '') docstr('"hello"\nabcd=3\nabcd', 'hello') # It works with a ; as well. docstr('"hello"\nabcd=3;abcd', 'hello') # Shouldn't work with a tuple. docstr('"hello",0\nabcd=3\nabcd', '') def test_completion_params(): c = Script('import string; string.capwords').completions()[0] assert [p.name for p in c.params] == ['s', 'sep'] def test_signature_params(): def check(defs): params = defs[0].params assert len(params) == 1 assert params[0].name == 'bar' s = dedent(''' def foo(bar): pass foo''') check(Script(s).goto_definitions()) check(Script(s).goto_assignments()) check(Script(s + '\nbar=foo\nbar').goto_assignments()) def test_param_endings(): """ Params should be represented without the comma and whitespace they have around them. """ sig = Script('def x(a, b=5, c=""): pass\n x(').call_signatures()[0] assert [p.description for p in sig.params] == ['a', 'b=5', 'c=""'] class TestIsDefinition(TestCase): def _def(self, source, index=-1): return names(dedent(source), references=True, all_scopes=True)[index] def _bool_is_definitions(self, source): ns = names(dedent(source), references=True, all_scopes=True) # Assure that names are definitely sorted. ns = sorted(ns, key=lambda name: (name.line, name.column)) return [name.is_definition() for name in ns] def test_name(self): d = self._def('name') assert d.name == 'name' assert not d.is_definition() def test_stmt(self): src = 'a = f(x)' d = self._def(src, 0) assert d.name == 'a' assert d.is_definition() d = self._def(src, 1) assert d.name == 'f' assert not d.is_definition() d = self._def(src) assert d.name == 'x' assert not d.is_definition() def test_import(self): assert self._bool_is_definitions('import x as a') == [False, True] assert self._bool_is_definitions('from x import y') == [False, True] assert self._bool_is_definitions('from x.z import y') == [False, False, True] class TestParent(TestCase): def _parent(self, source, line=None, column=None): defs = Script(dedent(source), line, column).goto_assignments() assert len(defs) == 1 return defs[0].parent() def test_parent(self): parent = self._parent('foo=1\nfoo') assert parent.type == 'module' parent = self._parent(''' def spam(): if 1: y=1 y''') assert parent.name == 'spam' assert parent.parent().type == 'module' def test_on_function(self): parent = self._parent('''\ def spam(): pass''', 1, len('def spam')) assert parent.name == '' assert parent.type == 'module' def test_parent_on_completion(self): parent = Script(dedent('''\ class Foo(): def bar(): pass Foo().bar''')).completions()[0].parent() assert parent.name == 'Foo' assert parent.type == 'instance' parent = Script('str.join').completions()[0].parent() assert parent.name == 'str' assert parent.type == 'class' def test_type(): """ Github issue #397, type should never raise an error. """ for c in Script('import os; os.path.').completions(): assert c.type class TestGotoAssignments(TestCase): """ This tests the BaseDefinition.goto_assignments function, not the jedi function. They are not really different in functionality, but really different as an implementation. """ def test_repetition(self): defs = names('a = 1; a', references=True, definitions=False) # Repeat on the same variable. Shouldn't change once we're on a # definition. 
for _ in range(3): assert len(defs) == 1 ass = defs[0].goto_assignments() assert ass[0].description == 'a = 1' def test_named_params(self): src = """\ def foo(a=1, bar=2): pass foo(bar=1) """ bar = names(dedent(src), references=True)[-1] param = bar.goto_assignments()[0] assert param.start_pos == (1, 13) assert param.type == 'param' def test_class_call(self): src = 'from threading import Thread; Thread(group=1)' n = names(src, references=True)[-1] assert n.name == 'group' param_def = n.goto_assignments()[0] assert param_def.name == 'group' assert param_def.type == 'param' def test_parentheses(self): n = names('("").upper', references=True)[-1] assert n.goto_assignments()[0].name == 'upper' def test_import(self): nms = names('from json import load', references=True) assert nms[0].name == 'json' assert nms[0].type == 'import' n = nms[0].goto_assignments()[0] assert n.name == 'json' assert n.type == 'module' assert nms[1].name == 'load' assert nms[1].type == 'import' n = nms[1].goto_assignments()[0] assert n.name == 'load' assert n.type == 'function' nms = names('import os; os.path', references=True) assert nms[0].name == 'os' assert nms[0].type == 'import' n = nms[0].goto_assignments()[0] assert n.name == 'os' assert n.type == 'module' n = nms[2].goto_assignments()[0] assert n.name == 'path' assert n.type == 'import' nms = names('import os.path', references=True) n = nms[0].goto_assignments()[0] assert n.name == 'os' assert n.type == 'module' n = nms[1].goto_assignments()[0] # This is very special, normally the name doesn't chance, but since # os.path is a sys.modules hack, it does. assert n.name in ('ntpath', 'posixpath') assert n.type == 'module' def test_import_alias(self): nms = names('import json as foo', references=True) assert nms[0].name == 'json' assert nms[0].type == 'import' n = nms[0].goto_assignments()[0] assert n.name == 'json' assert n.type == 'module' assert nms[1].name == 'foo' assert nms[1].type == 'import' ass = nms[1].goto_assignments() assert len(ass) == 1 assert ass[0].name == 'json' assert ass[0].type == 'module' jedi-0.9.0/test/test_api/test_unicode.py0000664000175000017500000000414312331540214020460 0ustar daviddavid00000000000000# -*- coding: utf-8 -*- """ All character set and unicode related tests. """ from jedi import Script from jedi._compatibility import u, unicode def test_unicode_script(): """ normally no unicode objects are being used. (<=2.7) """ s = unicode("import datetime; datetime.timedelta") completions = Script(s).completions() assert len(completions) assert type(completions[0].description) is unicode s = u("author='öä'; author") completions = Script(s).completions() x = completions[0].description assert type(x) is unicode s = u("#-*- coding: iso-8859-1 -*-\nauthor='öä'; author") s = s.encode('latin-1') completions = Script(s).completions() assert type(completions[0].description) is unicode def test_unicode_attribute(): """ github jedi-vim issue #94 """ s1 = u('#-*- coding: utf-8 -*-\nclass Person():\n' ' name = "e"\n\nPerson().name.') completions1 = Script(s1).completions() assert 'strip' in [c.name for c in completions1] s2 = u('#-*- coding: utf-8 -*-\nclass Person():\n' ' name = "é"\n\nPerson().name.') completions2 = Script(s2).completions() assert 'strip' in [c.name for c in completions2] def test_multibyte_script(): """ `jedi.Script` must accept multi-byte string source. 
""" try: code = u("import datetime; datetime.d") comment = u("# multi-byte comment あいうえおä") s = (u('%s\n%s') % (code, comment)).encode('utf-8') except NameError: pass # python 3 has no unicode method else: assert len(Script(s, 1, len(code)).completions()) def test_goto_definition_at_zero(): """At zero usually sometimes raises unicode issues.""" assert Script("a", 1, 1).goto_definitions() == [] s = Script("str", 1, 1).goto_definitions() assert len(s) == 1 assert list(s)[0].description == 'class str' assert Script("", 1, 0).goto_definitions() == [] def test_complete_at_zero(): s = Script("str", 1, 3).completions() assert len(s) == 1 assert list(s)[0].name == 'str' s = Script("", 1, 0).completions() assert len(s) > 0 jedi-0.9.0/test/test_api/test_api_classes_follow_definition.py0000664000175000017500000000376012517736533025137 0ustar daviddavid00000000000000from itertools import chain import jedi from ..helpers import cwd_at def test_import_empty(): """ github #340, return the full word. """ completion = jedi.Script("import ").completions()[0] definition = completion.follow_definition()[0] assert definition def check_follow_definition_types(source): # nested import completions = jedi.Script(source, path='some_path.py').completions() defs = chain.from_iterable(c.follow_definition() for c in completions) return [d.type for d in defs] def test_follow_import_incomplete(): """ Completion on incomplete imports should always take the full completion to do any evaluation. """ datetime = check_follow_definition_types("import itertool") assert datetime == ['module'] # empty `from * import` parts itert = jedi.Script("from itertools import ").completions() definitions = [d for d in itert if d.name == 'chain'] assert len(definitions) == 1 assert [d.type for d in definitions[0].follow_definition()] == ['class'] # incomplete `from * import` part datetime = check_follow_definition_types("from datetime import datetim") assert set(datetime) == set(['class', 'instance']) # py33: builtin and pure py version # os.path check ospath = check_follow_definition_types("from os.path import abspat") assert ospath == ['function'] # alias alias = check_follow_definition_types("import io as abcd; abcd") assert alias == ['module'] @cwd_at('test/completion/import_tree') def test_follow_definition_nested_import(): types = check_follow_definition_types("import pkg.mod1; pkg") assert types == ['module'] types = check_follow_definition_types("import pkg.mod1; pkg.mod1") assert types == ['module'] types = check_follow_definition_types("import pkg.mod1; pkg.mod1.a") assert types == ['instance'] def test_follow_definition_land_on_import(): types = check_follow_definition_types("import datetime; datetim") assert types == ['module'] jedi-0.9.0/test/test_api/test_call_signatures.py0000664000175000017500000002315012517736533022231 0ustar daviddavid00000000000000from textwrap import dedent import inspect from ..helpers import TestCase from jedi import Script from jedi import cache from jedi._compatibility import is_py33 class TestCallSignatures(TestCase): def _run(self, source, expected_name, expected_index=0, line=None, column=None): signatures = Script(source, line, column).call_signatures() assert len(signatures) <= 1 if not signatures: assert expected_name is None else: assert signatures[0].name == expected_name assert signatures[0].index == expected_index def _run_simple(self, source, name, index=0, column=None, line=1): self._run(source, name, index, line, column) def test_valid_call(self): self._run('str()', 'str', column=4) def 
test_simple(self): run = self._run_simple s7 = "str().upper().center(" s8 = "str(int[zip(" run(s7, 'center', 0) # simple s1 = "sorted(a, str(" run(s1, 'sorted', 0, 7) run(s1, 'sorted', 1, 9) run(s1, 'sorted', 1, 10) run(s1, 'sorted', 1, 11) run(s1, 'str', 0, 14) s2 = "abs(), " run(s2, 'abs', 0, 4) run(s2, None, column=5) run(s2, None) s3 = "abs()." run(s3, None, column=5) run(s3, None) # more complicated s4 = 'abs(zip(), , set,' run(s4, None, column=3) run(s4, 'abs', 0, 4) run(s4, 'zip', 0, 8) run(s4, 'abs', 0, 9) #run(s4, 'abs', 1, 10) s5 = "sorted(1,\nif 2:\n def a():" run(s5, 'sorted', 0, 7) run(s5, 'sorted', 1, 9) s6 = "str().center(" run(s6, 'center', 0) run(s6, 'str', 0, 4) s7 = "str().upper().center(" s8 = "str(int[zip(" run(s7, 'center', 0) run(s8, 'zip', 0) run(s8, 'str', 0, 8) run("import time; abc = time; abc.sleep(", 'sleep', 0) # jedi #57 s = "def func(alpha, beta): pass\n" \ "func(alpha='101'," run(s, 'func', 0, column=13, line=2) def test_flows(self): # jedi-vim #9 self._run_simple("with open(", 'open', 0) # jedi-vim #11 self._run_simple("for sorted(", 'sorted', 0) self._run_simple("for s in sorted(", 'sorted', 0) def test_complex(self): s = """ def abc(a,b): pass def a(self): abc( if 1: pass """ self._run(s, 'abc', 0, line=6, column=24) s = """ import re def huhu(it): re.compile( return it * 2 """ self._run(s, 'compile', 0, line=4, column=31) # jedi-vim #70 s = """def foo(""" assert Script(s).call_signatures() == [] # jedi-vim #116 s = """import itertools; test = getattr(itertools, 'chain'); test(""" self._run(s, 'chain', 0) def test_call_signature_on_module(self): """github issue #240""" s = 'import datetime; datetime(' # just don't throw an exception (if numpy doesn't exist, just ignore it) assert Script(s).call_signatures() == [] def test_call_signatures_empty_parentheses_pre_space(self): s = dedent("""\ def f(a, b): pass f( )""") self._run(s, 'f', 0, line=3, column=3) def test_multiple_signatures(self): s = dedent("""\ if x: def f(a, b): pass else: def f(a, b): pass f(""") assert len(Script(s).call_signatures()) == 2 def test_call_signatures_whitespace(self): s = dedent("""\ abs( def x(): pass """) self._run(s, 'abs', 0, line=1, column=5) def test_decorator_in_class(self): """ There's still an implicit param, with a decorator. Github issue #319. """ s = dedent("""\ def static(func): def wrapped(obj, *args): return f(type(obj), *args) return wrapped class C(object): @static def test(cls): return 10 C().test(""") signatures = Script(s).call_signatures() assert len(signatures) == 1 x = [p.description for p in signatures[0].params] assert x == ['*args'] def test_additional_brackets(self): self._run('str((', 'str', 0) def test_unterminated_strings(self): self._run('str(";', 'str', 0) def test_whitespace_before_bracket(self): self._run('str (', 'str', 0) self._run('str (";', 'str', 0) # TODO this is not actually valid Python, the newline token should be # ignored. self._run('str\n(', 'str', 0) def test_brackets_in_string_literals(self): self._run('str (" (', 'str', 0) self._run('str (" )', 'str', 0) def test_function_definitions_should_break(self): """ Function definitions (and other tokens that cannot exist within call signatures) should break and not be able to return a call signature. 
""" assert not Script('str(\ndef x').call_signatures() def test_flow_call(self): assert not Script('if (1').call_signatures() def test_chained_calls(self): source = dedent(''' class B(): def test2(self, arg): pass class A(): def test1(self): return B() A().test1().test2(''') self._run(source, 'test2', 0) def test_return(self): source = dedent(''' def foo(): return '.'.join()''') self._run(source, 'join', 0, column=len(" return '.'.join(")) class TestParams(TestCase): def params(self, source, line=None, column=None): signatures = Script(source, line, column).call_signatures() assert len(signatures) == 1 return signatures[0].params def test_param_name(self): if not is_py33: p = self.params('''int(''') # int is defined as: `int(x[, base])` assert p[0].name == 'x' # `int` docstring has been redefined: # http://bugs.python.org/issue14783 # TODO have multiple call signatures for int (like in the docstr) #assert p[1].name == 'base' p = self.params('''open(something,''') assert p[0].name in ['file', 'name'] assert p[1].name == 'mode' def test_builtins(self): """ The self keyword should be visible even for builtins, if not instantiated. """ p = self.params('str.endswith(') assert p[0].name == 'self' assert p[1].name == 'suffix' p = self.params('str().endswith(') assert p[0].name == 'suffix' def test_signature_is_definition(): """ Through inheritance, a call signature is a sub class of Definition. Check if the attributes match. """ s = """class Spam(): pass\nSpam""" signature = Script(s + '(').call_signatures()[0] definition = Script(s + '(').goto_definitions()[0] signature.line == 1 signature.column == 6 # Now compare all the attributes that a CallSignature must also have. for attr_name in dir(definition): dont_scan = ['defined_names', 'line_nr', 'start_pos', 'documentation', 'doc', 'parent', 'goto_assignments'] if attr_name.startswith('_') or attr_name in dont_scan: continue attribute = getattr(definition, attr_name) signature_attribute = getattr(signature, attr_name) if inspect.ismethod(attribute): assert attribute() == signature_attribute() else: assert attribute == signature_attribute def test_no_signature(): # str doesn't have a __call__ method assert Script('str()(').call_signatures() == [] s = dedent("""\ class X(): pass X()(""") assert Script(s).call_signatures() == [] assert len(Script(s, column=2).call_signatures()) == 1 def test_dict_literal_in_incomplete_call(): source = """\ import json def foo(): json.loads( json.load.return_value = {'foo': [], 'bar': True} c = Foo() """ script = Script(dedent(source), line=4, column=15) assert script.call_signatures() def test_completion_interference(): """Seems to cause problems, see also #396.""" cache.parser_cache.pop(None, None) assert Script('open(').call_signatures() # complete something usual, before doing the same call_signatures again. 
assert Script('from datetime import ').completions() assert Script('open(').call_signatures() def test_signature_index(): def get(source): return Script(source).call_signatures()[0] assert get('sorted([], key=a').index == 2 assert get('sorted([], no_key=a').index is None args_func = 'def foo(*kwargs): pass\n' assert get(args_func + 'foo(a').index == 0 assert get(args_func + 'foo(a, b').index == 0 kwargs_func = 'def foo(**kwargs): pass\n' assert get(kwargs_func + 'foo(a=2').index == 0 assert get(kwargs_func + 'foo(a=2, b=2').index == 0 both = 'def foo(*args, **kwargs): pass\n' assert get(both + 'foo(a=2').index == 1 assert get(both + 'foo(a=2, b=2').index == 1 assert get(both + 'foo(a, b, c').index == 0 def test_bracket_start(): def bracket_start(src): signatures = Script(src).call_signatures() assert len(signatures) == 1 return signatures[0].bracket_start assert bracket_start('str(') == (1, 3) jedi-0.9.0/test/test_api/test_api.py0000664000175000017500000000771212517736533017631 0ustar daviddavid00000000000000""" Test all things related to the ``jedi.api`` module. """ from textwrap import dedent from jedi import api from jedi._compatibility import is_py3 from pytest import raises def test_preload_modules(): def check_loaded(*modules): # +1 for None module (currently used) assert len(parser_cache) == len(modules) + 1 for i in modules: assert [i in k for k in parser_cache.keys() if k is not None] from jedi import cache temp_cache, cache.parser_cache = cache.parser_cache, {} parser_cache = cache.parser_cache api.preload_module('sys') check_loaded() # compiled (c_builtin) modules shouldn't be in the cache. api.preload_module('json', 'token') check_loaded('json', 'token') cache.parser_cache = temp_cache def test_empty_script(): assert api.Script('') def test_line_number_errors(): """ Script should raise a ValueError if line/column numbers are not in a valid range. """ s = 'hello' # lines with raises(ValueError): api.Script(s, 2, 0) with raises(ValueError): api.Script(s, 0, 0) # columns with raises(ValueError): api.Script(s, 1, len(s) + 1) with raises(ValueError): api.Script(s, 1, -1) # ok api.Script(s, 1, 0) api.Script(s, 1, len(s)) def _check_number(source, result='float'): completions = api.Script(source).completions() assert completions[0].parent().name == result def test_completion_on_number_literals(): # No completions on an int literal (is a float). assert api.Script('1.').completions() == [] # Multiple points after an int literal basically mean that there's a float # and a call after that. _check_number('1..') _check_number('1.0.') # power notation _check_number('1.e14.') _check_number('1.e-3.') _check_number('9e3.') assert api.Script('1.e3..').completions() == [] assert api.Script('1.e-13..').completions() == [] def test_completion_on_hex_literals(): assert api.Script('0x1..').completions() == [] _check_number('0x1.', 'int') # hexdecimal # Completing binary literals doesn't work if they are not actually binary # (invalid statements). 
assert api.Script('0b2.').completions() == [] _check_number('0b1.', 'int') # binary _check_number('0o7.', 'int') # octal _check_number('0x2e.', 'int') _check_number('0xE7.', 'int') _check_number('0xEa.', 'int') # theoretically, but people can just check for syntax errors: #assert api.Script('0x.').completions() == [] def test_completion_on_complex_literals(): assert api.Script('1j..').completions() == [] _check_number('1j.', 'complex') _check_number('44.j.', 'complex') _check_number('4.0j.', 'complex') # No dot no completion assert api.Script('4j').completions() == [] def test_goto_assignments_on_non_name(): assert api.Script('for').goto_assignments() == [] assert api.Script('assert').goto_assignments() == [] if is_py3: assert api.Script('True').goto_assignments() == [] else: # In Python 2.7 True is still a name. assert api.Script('True').goto_assignments()[0].description == 'class bool' def test_goto_definitions_on_non_name(): assert api.Script('import x', column=0).goto_definitions() == [] def test_goto_definition_not_multiple(): """ There should be only one Definition result if it leads back to the same origin (e.g. instance method) """ s = dedent('''\ import random class A(): def __init__(self, a): self.a = 3 def foo(self): pass if random.randint(0, 1): a = A(2) else: a = A(1) a''') assert len(api.Script(s).goto_definitions()) == 1 def test_usage_description(): descs = [u.description for u in api.Script("foo = ''; foo").usages()] assert set(descs) == set(["foo = ''", 'foo']) jedi-0.9.0/test/test_api/test_interpreter.py0000664000175000017500000000646212517736533021424 0ustar daviddavid00000000000000""" Tests of ``jedi.api.Interpreter``. """ from ..helpers import TestCase import jedi from jedi._compatibility import is_py33 class TestInterpreterAPI(TestCase): def check_interpreter_complete(self, source, namespace, completions, **kwds): script = jedi.Interpreter(source, [namespace], **kwds) cs = script.completions() actual = [c.name for c in cs] self.assertEqual(sorted(actual), sorted(completions)) def test_complete_raw_function(self): from os.path import join self.check_interpreter_complete('join().up', locals(), ['upper']) def test_complete_raw_function_different_name(self): from os.path import join as pjoin self.check_interpreter_complete('pjoin().up', locals(), ['upper']) def test_complete_raw_module(self): import os self.check_interpreter_complete('os.path.join().up', locals(), ['upper']) def test_complete_raw_instance(self): import datetime dt = datetime.datetime(2013, 1, 1) completions = ['time', 'timetz', 'timetuple'] if is_py33: completions += ['timestamp'] self.check_interpreter_complete('(dt - dt).ti', locals(), completions) def test_list(self): array = ['haha', 1] self.check_interpreter_complete('array[0].uppe', locals(), ['upper']) self.check_interpreter_complete('array[0].real', locals(), []) # something different, no index given, still just return the right self.check_interpreter_complete('array[int].real', locals(), ['real']) self.check_interpreter_complete('array[int()].real', locals(), ['real']) # inexistent index self.check_interpreter_complete('array[2].upper', locals(), ['upper']) def test_slice(self): class Foo(): bar = [] baz = 'xbarx' self.check_interpreter_complete('getattr(Foo, baz[1:-1]).append', locals(), ['append']) def test_getitem_side_effects(self): class Foo(): def __getitem__(self, index): # possible side effects here, should therefore not call this. 
return index foo = Foo() self.check_interpreter_complete('foo[0].', locals(), []) def test_property_error(self): class Foo(): @property def bar(self): raise ValueError foo = Foo() self.check_interpreter_complete('foo.bar', locals(), ['bar']) self.check_interpreter_complete('foo.bar.baz', locals(), []) jedi-0.9.0/test/test_api/test_full_name.py0000664000175000017500000000475112517736533021022 0ustar daviddavid00000000000000""" Tests for :attr:`.BaseDefinition.full_name`. There are three kinds of test: #. Test classes derived from :class:`MixinTestFullName`. Child class defines :attr:`.operation` to alter how the api definition instance is created. #. :class:`TestFullDefinedName` is to test combination of ``obj.full_name`` and ``jedi.defined_names``. #. Misc single-function tests. """ import textwrap import pytest import jedi from ..helpers import TestCase class MixinTestFullName(object): operation = None def check(self, source, desired): script = jedi.Script(textwrap.dedent(source)) definitions = getattr(script, type(self).operation)() for d in definitions: self.assertEqual(d.full_name, desired) def test_os_path_join(self): self.check('import os; os.path.join', 'os.path.join') def test_builtin(self): self.check('TypeError', 'TypeError') class TestFullNameWithGotoDefinitions(MixinTestFullName, TestCase): operation = 'goto_definitions' @pytest.mark.skipif('sys.version_info[0] < 3', reason='Python 2 also yields None.') def test_tuple_mapping(self): self.check(""" import re any_re = re.compile('.*') any_re""", '_sre.compile.SRE_Pattern') def test_from_import(self): self.check('from os import path', 'os.path') class TestFullNameWithCompletions(MixinTestFullName, TestCase): operation = 'completions' class TestFullDefinedName(TestCase): """ Test combination of ``obj.full_name`` and ``jedi.defined_names``. """ def check(self, source, desired): definitions = jedi.defined_names(textwrap.dedent(source)) full_names = [d.full_name for d in definitions] self.assertEqual(full_names, desired) def test_local_names(self): self.check(""" def f(): pass class C: pass """, ['f', 'C']) def test_imports(self): self.check(""" import os from os import path from os.path import join from os import path as opath """, ['os', 'os.path', 'os.path.join', 'os.path']) def test_sub_module(): """ ``full_name needs to check sys.path to actually find it's real path module path. """ defs = jedi.Script('from jedi.api import classes; classes').goto_definitions() assert [d.full_name for d in defs] == ['jedi.api.classes'] defs = jedi.Script('import jedi.api; jedi.api').goto_definitions() assert [d.full_name for d in defs] == ['jedi.api'] jedi-0.9.0/test/test_api/test_defined_names.py0000664000175000017500000000462012517736533021634 0ustar daviddavid00000000000000""" Tests for `api.defined_names`. """ import textwrap from jedi import api from ..helpers import TestCase class TestDefinedNames(TestCase): def assert_definition_names(self, definitions, names): assert [d.name for d in definitions] == names def check_defined_names(self, source, names): definitions = api.names(textwrap.dedent(source)) self.assert_definition_names(definitions, names) return definitions def test_get_definitions_flat(self): self.check_defined_names(""" import module class Class: pass def func(): pass data = None """, ['module', 'Class', 'func', 'data']) def test_dotted_assignment(self): self.check_defined_names(""" x = Class() x.y.z = None """, ['x', 'z']) # TODO is this behavior what we want? 
def test_multiple_assignment(self): self.check_defined_names(""" x = y = None """, ['x', 'y']) def test_multiple_imports(self): self.check_defined_names(""" from module import a, b from another_module import * """, ['a', 'b']) def test_nested_definitions(self): definitions = self.check_defined_names(""" class Class: def f(): pass def g(): pass """, ['Class']) subdefinitions = definitions[0].defined_names() self.assert_definition_names(subdefinitions, ['f', 'g']) self.assertEqual([d.full_name for d in subdefinitions], ['Class.f', 'Class.g']) def test_nested_class(self): definitions = self.check_defined_names(""" class L1: class L2: class L3: def f(): pass def f(): pass def f(): pass def f(): pass """, ['L1', 'f']) subdefs = definitions[0].defined_names() subsubdefs = subdefs[0].defined_names() self.assert_definition_names(subdefs, ['L2', 'f']) self.assert_definition_names(subsubdefs, ['L3', 'f']) self.assert_definition_names(subsubdefs[0].defined_names(), ['f']) def test_follow_imports(): # github issue #344 imp = api.defined_names('import datetime')[0] assert imp.name == 'datetime' datetime_names = [str(d.name) for d in imp.defined_names()] assert 'timedelta' in datetime_names jedi-0.9.0/test/test_parser/0000775000175000017500000000000012517736563016166 5ustar daviddavid00000000000000jedi-0.9.0/test/test_parser/__init__.py0000664000175000017500000000000012331540214020241 0ustar daviddavid00000000000000jedi-0.9.0/test/test_parser/test_get_code.py0000664000175000017500000000457512517736533021360 0ustar daviddavid00000000000000import difflib import pytest from jedi._compatibility import u from jedi.parser import Parser, load_grammar code_basic_features = u(''' """A mod docstring""" def a_function(a_argument, a_default = "default"): """A func docstring""" a_result = 3 * a_argument print(a_result) # a comment b = """ from to""" + "huhu" if a_default == "default": return str(a_result) else return None ''') def diff_code_assert(a, b, n=4): if a != b: diff = "\n".join(difflib.unified_diff( a.splitlines(), b.splitlines(), n=n, lineterm="" )) assert False, "Code does not match:\n%s\n\ncreated code:\n%s" % ( diff, b ) pass @pytest.mark.skipif('True', reason='Refactor a few parser things first.') def test_basic_parsing(): """Validate the parsing features""" prs = Parser(load_grammar(), code_basic_features) diff_code_assert( code_basic_features, prs.module.get_code() ) def test_operators(): src = u('5 * 3') prs = Parser(load_grammar(), src) diff_code_assert(src, prs.module.get_code()) def test_get_code(): """Use the same code that the parser also generates, to compare""" s = u('''"""a docstring""" class SomeClass(object, mixin): def __init__(self): self.xy = 3.0 """statement docstr""" def some_method(self): return 1 def yield_method(self): while hasattr(self, 'xy'): yield True for x in [1, 2]: yield x def empty(self): pass class Empty: pass class WithDocstring: """class docstr""" pass def method_with_docstring(): """class docstr""" pass ''') assert Parser(load_grammar(), s).module.get_code() == s def test_end_newlines(): """ The Python grammar explicitly needs a newline at the end. Jedi though still wants to be able, to return the exact same code without the additional new line the parser needs. 
""" def test(source, end_pos): module = Parser(load_grammar(), u(source)).module assert module.get_code() == source assert module.end_pos == end_pos test('a', (1, 1)) test('a\n', (2, 0)) test('a\nb', (2, 1)) test('a\n#comment\n', (3, 0)) test('a\n#comment', (2, 8)) test('a#comment', (1, 9)) test('def a():\n pass', (2, 5)) jedi-0.9.0/test/test_parser/test_fast_parser.py0000664000175000017500000002161412517736533022111 0ustar daviddavid00000000000000from textwrap import dedent import jedi from jedi._compatibility import u from jedi import cache from jedi.parser import load_grammar from jedi.parser.fast import FastParser def test_add_to_end(): """ fast_parser doesn't parse everything again. It just updates with the help of caches, this is an example that didn't work. """ a = dedent(""" class Abc(): def abc(self): self.x = 3 class Two(Abc): def h(self): self """) # ^ here is the first completion b = " def g(self):\n" \ " self." assert jedi.Script(a, 8, 12, 'example.py').completions() assert jedi.Script(a + b, path='example.py').completions() a = a[:-1] + '.\n' assert jedi.Script(a, 8, 13, 'example.py').completions() assert jedi.Script(a + b, path='example.py').completions() def test_class_in_docstr(): """ Regression test for a problem with classes in docstrings. """ a = '"\nclasses\n"' jedi.Script(a, 1, 0)._parser b = a + '\nimport os' assert jedi.Script(b, 4, 8).goto_assignments() def test_carriage_return_splitting(): source = u(dedent(''' "string" class Foo(): pass ''')) source = source.replace('\n', '\r\n') p = FastParser(load_grammar(), source) assert [n.value for lst in p.module.names_dict.values() for n in lst] == ['Foo'] def test_split_parts(): cache.parser_cache.pop(None, None) def splits(source): class Mock(FastParser): def __init__(self, *args): self.number_of_splits = 0 return tuple(FastParser._split_parts(Mock(None, None), source)) def test(*parts): assert splits(''.join(parts)) == parts test('a\n\n', 'def b(): pass\n', 'c\n') test('a\n', 'def b():\n pass\n', 'c\n') def check_fp(src, number_parsers_used, number_of_splits=None, number_of_misses=0): if number_of_splits is None: number_of_splits = number_parsers_used p = FastParser(load_grammar(), u(src)) cache.save_parser(None, p, pickling=False) # TODO Don't change get_code, the whole thing should be the same. # -> Need to refactor the parser first, though. assert src == p.module.get_code() assert p.number_of_splits == number_of_splits assert p.number_parsers_used == number_parsers_used assert p.number_of_misses == number_of_misses return p.module def test_change_and_undo(): # Empty the parser cache for the path None. cache.parser_cache.pop(None, None) func_before = 'def func():\n pass\n' # Parse the function and a. check_fp(func_before + 'a', 2) # Parse just b. check_fp(func_before + 'b', 1, 2) # b has changed to a again, so parse that. check_fp(func_before + 'a', 1, 2) # Same as before no parsers should be used. check_fp(func_before + 'a', 0, 2) # Getting rid of an old parser: Still no parsers used. check_fp('a', 0, 1) # Now the file has completely change and we need to parse. check_fp('b', 1, 1) # And again. check_fp('a', 1, 1) def test_positions(): # Empty the parser cache for the path None. 
cache.parser_cache.pop(None, None) func_before = 'class A:\n pass\n' m = check_fp(func_before + 'a', 2) assert m.start_pos == (1, 0) assert m.end_pos == (3, 1) m = check_fp('a', 0, 1) assert m.start_pos == (1, 0) assert m.end_pos == (1, 1) def test_if(): src = dedent('''\ def func(): x = 3 if x: def y(): return x return y() func() ''') # Two parsers needed, one for pass and one for the function. check_fp(src, 2) assert [d.name for d in jedi.Script(src, 8, 6).goto_definitions()] == ['int'] def test_if_simple(): src = dedent('''\ if 1: a = 3 ''') check_fp(src + 'a', 1) check_fp(src + "else:\n a = ''\na", 1) def test_for(): src = dedent("""\ for a in [1,2]: a for a1 in 1,"": a1 """) check_fp(src, 1) def test_class_with_class_var(): src = dedent("""\ class SuperClass: class_super = 3 def __init__(self): self.foo = 4 pass """) check_fp(src, 3) def test_func_with_if(): src = dedent("""\ def recursion(a): if foo: return recursion(a) else: if bar: return inexistent else: return a """) check_fp(src, 1) def test_decorator(): src = dedent("""\ class Decorator(): @memoize def dec(self, a): return a """) check_fp(src, 2) def test_nested_funcs(): src = dedent("""\ def memoize(func): def wrapper(*args, **kwargs): return func(*args, **kwargs) return wrapper """) check_fp(src, 3) def test_class_and_if(): src = dedent("""\ class V: def __init__(self): pass if 1: c = 3 def a_func(): return 1 # COMMENT a_func()""") check_fp(src, 5, 5) assert [d.name for d in jedi.Script(src).goto_definitions()] == ['int'] def test_func_with_for_and_comment(): # The first newline is important, leave it. It should not trigger another # parser split. src = dedent("""\ def func(): pass for a in [1]: # COMMENT a""") check_fp(src, 2) # We don't need to parse the for loop, but we need to parse the other two, # because the split is in a different place. check_fp('a\n' + src, 2, 3) def test_multi_line_params(): src = dedent("""\ def x(a, b): pass foo = 1 """) check_fp(src, 2) def test_one_statement_func(): src = dedent("""\ first def func(): a """) check_fp(src + 'second', 3) # Empty the parser cache, because we're not interested in modifications # here. cache.parser_cache.pop(None, None) check_fp(src + 'def second():\n a', 3) def test_class_func_if(): src = dedent("""\ class Class: def func(self): if 1: a else: b pass """) check_fp(src, 3) def test_for_on_one_line(): src = dedent("""\ foo = 1 for x in foo: pass def hi(): pass """) check_fp(src, 2) src = dedent("""\ def hi(): for x in foo: pass pass pass """) check_fp(src, 2) src = dedent("""\ def hi(): for x in foo: pass def nested(): pass """) check_fp(src, 2) def test_multi_line_for(): src = dedent("""\ for x in [1, 2]: pass pass """) check_fp(src, 1) def test_wrong_indentation(): src = dedent("""\ def func(): a b a """) check_fp(src, 1) src = dedent("""\ def complex(): def nested(): a b a def other(): pass """) check_fp(src, 3) def test_open_parentheses(): func = 'def func():\n a' p = FastParser(load_grammar(), u('isinstance(\n\n' + func)) # As you can see, the isinstance call cannot be seen anymore after # get_code, because it isn't valid code. assert p.module.get_code() == '\n\n' + func assert p.number_of_splits == 2 assert p.number_parsers_used == 2 cache.save_parser(None, p, pickling=False) # Now with a correct parser it should work perfectly well. 
check_fp('isinstance()\n' + func, 1, 2) def test_strange_parentheses(): src = dedent(""" class X(): a = (1 if 1 else 2) def x(): pass """) check_fp(src, 2) def test_backslash(): src = dedent(r""" a = 1\ if 1 else 2 def x(): pass """) check_fp(src, 2) src = dedent(r""" def x(): a = 1\ if 1 else 2 def y(): pass """) # The dangling if leads to not splitting where we theoretically could # split. check_fp(src, 2) src = dedent(r""" def first(): if foo \ and bar \ or baz: pass def second(): pass """) check_fp(src, 2) def test_fake_parentheses(): """ The fast parser splitting counts parentheses, but not as correct tokens. Therefore parentheses in string tokens are included as well. This needs to be accounted for. """ src = dedent(r""" def x(): a = (')' if 1 else 2) def y(): pass def z(): pass """) check_fp(src, 3, 2, 1) def test_incomplete_function(): source = '''return ImportErr''' script = jedi.Script(dedent(source), 1, 3) assert script.completions() def test_string_literals(): """Simplified case of jedi-vim#377.""" source = dedent(""" x = ur''' def foo(): pass """) script = jedi.Script(dedent(source)) assert script.completions() jedi-0.9.0/test/test_parser/test_parser.py0000664000175000017500000001475112517736533021100 0ustar daviddavid00000000000000# -*- coding: utf-8 -*- import sys import jedi from jedi._compatibility import u, is_py3 from jedi.parser import Parser, load_grammar from jedi.parser.user_context import UserContextParser from jedi.parser import tree as pt from textwrap import dedent def test_user_statement_on_import(): """github #285""" s = u("from datetime import (\n" " time)") for pos in [(2, 1), (2, 4)]: p = UserContextParser(load_grammar(), s, None, pos, None, lambda x: 1).user_stmt() assert isinstance(p, pt.Import) assert [str(n) for n in p.get_defined_names()] == ['time'] class TestCallAndName(): def get_call(self, source): # Get the simple_stmt and then the first one. 
simple_stmt = Parser(load_grammar(), u(source)).module.children[0] return simple_stmt.children[0] def test_name_and_call_positions(self): name = self.get_call('name\nsomething_else') assert str(name) == 'name' assert name.start_pos == (1, 0) assert name.end_pos == (1, 4) leaf = self.get_call('1.0\n') assert leaf.value == '1.0' assert leaf.eval() == 1.0 assert leaf.start_pos == (1, 0) assert leaf.end_pos == (1, 3) def test_call_type(self): call = self.get_call('hello') assert isinstance(call, pt.Name) def test_literal_type(self): literal = self.get_call('1.0') assert isinstance(literal, pt.Literal) assert type(literal.eval()) == float literal = self.get_call('1') assert isinstance(literal, pt.Literal) assert type(literal.eval()) == int literal = self.get_call('"hello"') assert isinstance(literal, pt.Literal) assert literal.eval() == 'hello' class TestSubscopes(): def get_sub(self, source): return Parser(load_grammar(), u(source)).module.subscopes[0] def test_subscope_names(self): name = self.get_sub('class Foo: pass').name assert name.start_pos == (1, len('class ')) assert name.end_pos == (1, len('class Foo')) assert str(name) == 'Foo' name = self.get_sub('def foo(): pass').name assert name.start_pos == (1, len('def ')) assert name.end_pos == (1, len('def foo')) assert str(name) == 'foo' class TestImports(): def get_import(self, source): return Parser(load_grammar(), source).module.imports[0] def test_import_names(self): imp = self.get_import(u('import math\n')) names = imp.get_defined_names() assert len(names) == 1 assert str(names[0]) == 'math' assert names[0].start_pos == (1, len('import ')) assert names[0].end_pos == (1, len('import math')) assert imp.start_pos == (1, 0) assert imp.end_pos == (1, len('import math')) def test_module(): module = Parser(load_grammar(), u('asdf'), 'example.py').module name = module.name assert str(name) == 'example' assert name.start_pos == (1, 0) assert name.end_pos == (1, 7) module = Parser(load_grammar(), u('asdf')).module name = module.name assert str(name) == '' assert name.start_pos == (1, 0) assert name.end_pos == (1, 0) def test_end_pos(): s = u(dedent(''' x = ['a', 'b', 'c'] def func(): y = None ''')) parser = Parser(load_grammar(), s) scope = parser.module.subscopes[0] assert scope.start_pos == (3, 0) assert scope.end_pos == (5, 0) def test_carriage_return_statements(): source = u(dedent(''' foo = 'ns1!' # this is a namespace package ''')) source = source.replace('\n', '\r\n') stmt = Parser(load_grammar(), source).module.statements[0] assert '#' not in stmt.get_code() def test_incomplete_list_comprehension(): """ Shouldn't raise an error, same bug as #418. """ # With the old parser this actually returned a statement. With the new # parser only valid statements generate one. assert Parser(load_grammar(), u('(1 for def')).module.statements == [] def test_hex_values_in_docstring(): source = r''' def foo(object): """ \xff """ return 1 ''' doc = Parser(load_grammar(), dedent(u(source))).module.subscopes[0].raw_doc if is_py3: assert doc == '\xff' else: assert doc == u('�') def test_error_correction_with(): source = """ with open() as f: try: f.""" comps = jedi.Script(source).completions() assert len(comps) > 30 # `open` completions have a closed attribute. 
assert [1 for c in comps if c.name == 'closed'] def test_newline_positions(): endmarker = Parser(load_grammar(), u('a\n')).module.children[-1] assert endmarker.end_pos == (2, 0) new_line = endmarker.get_previous() assert new_line.start_pos == (1, 1) assert new_line.end_pos == (2, 0) def test_end_pos_error_correction(): """ Source code without ending newline are given one, because the Python grammar needs it. However, they are removed again. We still want the right end_pos, even if something breaks in the parser (error correction). """ s = u('def x():\n .') m = Parser(load_grammar(), s).module func = m.children[0] assert func.type == 'funcdef' # This is not exactly correct, but ok, because it doesn't make a difference # at all. We just want to make sure that the module end_pos is correct! assert func.end_pos == (3, 0) assert m.end_pos == (2, 2) def test_param_splitting(): """ Jedi splits parameters into params, this is not what the grammar does, but Jedi does this to simplify argument parsing. """ def check(src, result): # Python 2 tuple params should be ignored for now. grammar = load_grammar('grammar%s.%s' % sys.version_info[:2]) m = Parser(grammar, u(src)).module if is_py3: assert not m.subscopes else: # We don't want b and c to be a part of the param enumeration. Just # ignore them, because it's not what we want to support in the # future. assert [str(param.name) for param in m.subscopes[0].params] == result check('def x(a, (b, c)):\n pass', ['a']) check('def x((b, c)):\n pass', []) def test_unicode_string(): s = pt.String(None, u('bö'), (0, 0)) assert repr(s) # Should not raise an Error! def test_backslash_dos_style(): grammar = load_grammar() m = Parser(grammar, u('\\\r\n')).module assert m jedi-0.9.0/test/test_parser/test_user_context.py0000664000175000017500000000022312363566726022317 0ustar daviddavid00000000000000import jedi def test_form_feed_characters(): s = "\f\nclass Test(object):\n pass" jedi.Script(s, line=2, column=18).call_signatures() jedi-0.9.0/test/test_parser/test_tokenize.py0000664000175000017500000001205712517736533021431 0ustar daviddavid00000000000000# -*- coding: utf-8 # This file contains Unicode characters. 
from io import StringIO from textwrap import dedent import pytest from jedi._compatibility import u, is_py3 from jedi.parser.token import NAME, OP, NEWLINE, STRING, INDENT from jedi.parser import Parser, load_grammar, tokenize from ..helpers import unittest class TokenTest(unittest.TestCase): def test_end_pos_one_line(self): parsed = Parser(load_grammar(), dedent(u(''' def testit(): a = "huhu" '''))) tok = parsed.module.subscopes[0].statements[0].children[2] assert tok.end_pos == (3, 14) def test_end_pos_multi_line(self): parsed = Parser(load_grammar(), dedent(u(''' def testit(): a = """huhu asdfasdf""" + "h" '''))) tok = parsed.module.subscopes[0].statements[0].children[2].children[0] assert tok.end_pos == (4, 11) def test_simple_no_whitespace(self): # Test a simple one line string, no preceding whitespace simple_docstring = u('"""simple one line docstring"""') simple_docstring_io = StringIO(simple_docstring) tokens = tokenize.generate_tokens(simple_docstring_io.readline) token_list = list(tokens) _, value, _, prefix = token_list[0] assert prefix == '' assert value == '"""simple one line docstring"""' def test_simple_with_whitespace(self): # Test a simple one line string with preceding whitespace and newline simple_docstring = u(' """simple one line docstring""" \r\n') simple_docstring_io = StringIO(simple_docstring) tokens = tokenize.generate_tokens(simple_docstring_io.readline) token_list = list(tokens) assert token_list[0][0] == INDENT typ, value, start_pos, prefix = token_list[1] assert prefix == ' ' assert value == '"""simple one line docstring"""' assert typ == STRING typ, value, start_pos, prefix = token_list[2] assert prefix == ' ' assert typ == NEWLINE def test_function_whitespace(self): # Test function definition whitespace identification fundef = dedent(u(''' def test_whitespace(*args, **kwargs): x = 1 if x > 0: print(True) ''')) fundef_io = StringIO(fundef) tokens = tokenize.generate_tokens(fundef_io.readline) token_list = list(tokens) for _, value, _, prefix in token_list: if value == 'test_whitespace': assert prefix == ' ' if value == '(': assert prefix == '' if value == '*': assert prefix == '' if value == '**': assert prefix == ' ' if value == 'print': assert prefix == ' ' if value == 'if': assert prefix == ' ' def test_identifier_contains_unicode(self): fundef = dedent(u(''' def 我あφ(): pass ''')) fundef_io = StringIO(fundef) tokens = tokenize.generate_tokens(fundef_io.readline) token_list = list(tokens) unicode_token = token_list[1] if is_py3: assert unicode_token[0] == NAME else: # Unicode tokens in Python 2 seem to be identified as operators. # They will be ignored in the parser, that's ok. assert unicode_token[0] == OP def test_quoted_strings(self): string_tokens = [ 'u"test"', 'u"""test"""', 'U"""test"""', "u'''test'''", "U'''test'''", ] for s in string_tokens: parsed = Parser(load_grammar(), u('''a = %s\n''' % s)) simple_stmt = parsed.module.children[0] expr_stmt = simple_stmt.children[0] assert len(expr_stmt.children) == 3 string_tok = expr_stmt.children[2] assert string_tok.type == 'string' assert string_tok.value == s assert string_tok.eval() == 'test' def test_tokenizer_with_string_literal_backslash(): import jedi c = jedi.Script("statement = u'foo\\\n'; statement").goto_definitions() assert c[0]._name.parent.obj == 'foo' def test_ur_literals(): """ Decided to parse `u''` literals regardless of Python version. This makes probably sense: - Python 3.2 doesn't support it and is still supported by Jedi, but might not be. 
While this is incorrect, it's just incorrect for one "old" and in the future not very important version. - All the other Python versions work very well with it. """ def check(literal): io = StringIO(u(literal)) tokens = tokenize.generate_tokens(io.readline) token_list = list(tokens) typ, result_literal, _, _ = token_list[0] assert typ == STRING assert result_literal == literal check('u""') check('ur""') check('Ur""') check('UR""') check('bR""') # Must be in the right order. with pytest.raises(AssertionError): check('Rb""') jedi-0.9.0/test/__init__.py0000664000175000017500000000000012143361723015715 0ustar daviddavid00000000000000jedi-0.9.0/test/test_new_parser.py0000664000175000017500000000063312517736533017410 0ustar daviddavid00000000000000from jedi._compatibility import u from jedi.parser import Parser, load_grammar def test_basic_parsing(): def compare(string): """Generates the AST object and then regenerates the code.""" assert Parser(load_grammar(), string).module.get_code() == string compare(u('\na #pass\n')) compare(u('wblabla* 1\t\n')) compare(u('def x(a, b:3): pass\n')) compare(u('assert foo\n')) jedi-0.9.0/test/test_integration_keyword.py0000664000175000017500000000137212517736533021333 0ustar daviddavid00000000000000""" Test of keywords and ``jedi.keywords`` """ from jedi._compatibility import is_py3 from jedi import Script def test_goto_assignments_keyword(): """ Bug: goto assignments on ``in`` used to raise AttributeError:: 'unicode' object has no attribute 'generate_call_path' """ Script('in').goto_assignments() def test_keyword(): """ github jedi-vim issue #44 """ defs = Script("print").goto_definitions() if is_py3: assert [d.doc for d in defs] else: assert defs == [] assert Script("import").goto_assignments() == [] completions = Script("import", 1, 1).completions() assert len(completions) > 10 and 'if' in [c.name for c in completions] assert Script("assert").goto_definitions() == [] jedi-0.9.0/test/test_utils.py0000664000175000017500000000674112517736533016411 0ustar daviddavid00000000000000try: import readline except ImportError: readline = False from jedi import utils from .helpers import unittest, cwd_at @unittest.skipIf(not readline, "readline not found") class TestSetupReadline(unittest.TestCase): class NameSpace(object): pass def __init__(self, *args, **kwargs): super(type(self), self).__init__(*args, **kwargs) self.namespace = self.NameSpace() utils.setup_readline(self.namespace) def completions(self, text): completer = readline.get_completer() i = 0 completions = [] while True: completion = completer(text, i) if completion is None: break completions.append(completion) i += 1 return completions def test_simple(self): assert self.completions('list') == ['list'] assert self.completions('importerror') == ['ImportError'] s = "print BaseE" assert self.completions(s) == [s + 'xception'] def test_nested(self): assert self.completions('list.Insert') == ['list.insert'] assert self.completions('list().Insert') == ['list().insert'] def test_magic_methods(self): assert self.completions('list.__getitem__') == ['list.__getitem__'] assert self.completions('list().__getitem__') == ['list().__getitem__'] def test_modules(self): import sys import os self.namespace.sys = sys self.namespace.os = os try: assert self.completions('os.path.join') == ['os.path.join'] assert self.completions('os.path.join().upper') == ['os.path.join().upper'] c = set(['os.' 
+ d for d in dir(os) if d.startswith('ch')]) assert set(self.completions('os.ch')) == set(c) finally: del self.namespace.sys del self.namespace.os def test_calls(self): s = 'str(bytes' assert self.completions(s) == [s, 'str(BytesWarning'] def test_import(self): s = 'from os.path import a' assert set(self.completions(s)) == set([s + 'ltsep', s + 'bspath']) assert self.completions('import keyword') == ['import keyword'] import os s = 'from os import ' goal = set([s + el for el in dir(os)]) # There are minor differences, e.g. the dir doesn't include deleted # items as well as items that are not only available on linux. assert len(set(self.completions(s)).symmetric_difference(goal)) < 20 @cwd_at('test') def test_local_import(self): s = 'import test_utils' assert self.completions(s) == [s] def test_preexisting_values(self): self.namespace.a = range(10) assert set(self.completions('a.')) == set(['a.' + n for n in dir(range(1))]) del self.namespace.a def test_colorama(self): """ Only test it if colorama library is available. This module is being tested because it uses ``setattr`` at some point, which Jedi doesn't understand, but it should still work in the REPL. """ try: # if colorama is installed import colorama except ImportError: pass else: self.namespace.colorama = colorama assert self.completions('colorama') assert self.completions('colorama.Fore.BLACK') == ['colorama.Fore.BLACK'] del self.namespace.colorama def test_version_info(): assert utils.version_info()[:2] > (0, 7) jedi-0.9.0/test/run.py0000775000175000017500000003141512517736533015015 0ustar daviddavid00000000000000#!/usr/bin/env python """ |jedi| is mostly being tested by what I would call "Blackbox Tests". These tests are just testing the interface and do input/output testing. This makes a lot of sense for |jedi|. Jedi supports so many different code structures, that it is just stupid to write 200'000 unittests in the manner of ``regression.py``. Also, it is impossible to do doctests/unittests on most of the internal data structures. That's why |jedi| uses mostly these kind of tests. There are different kind of tests: - completions / goto_definitions ``#?`` - goto_assignments: ``#!`` - usages: ``#<`` How to run tests? +++++++++++++++++ Jedi uses pytest_ to run unit and integration tests. To run tests, simply run ``py.test``. You can also use tox_ to run tests for multiple Python versions. .. _pytest: http://pytest.org .. _tox: http://testrun.org/tox Integration test cases are located in ``test/completion`` directory and each test cases are indicated by the comment ``#?`` (completions / definitions), ``#!`` (assignments) and ``#<`` (usages). There is also support for third party libraries. In a normal test run they are not being executed, you have to provide a ``--thirdparty`` option. In addition to standard `-k` and `-m` options in py.test, you can use `-T` (`--test-files`) option to specify integration test cases to run. It takes the format of ``FILE_NAME[:LINE[,LINE[,...]]]`` where ``FILE_NAME`` is a file in ``test/completion`` and ``LINE`` is a line number of the test comment. Here is some recipes: Run tests only in ``basic.py`` and ``imports.py``:: py.test test/test_integration.py -T basic.py -T imports.py Run test at line 4, 6, and 8 in ``basic.py``:: py.test test/test_integration.py -T basic.py:4,6,8 See ``py.test --help`` for more information. If you want to debug a test, just use the ``--pdb`` option. 
Alternate Test Runner +++++++++++++++++++++ If you don't like the output of ``py.test``, there's an alternate test runner that you can start by running ``./run.py``. The above example could be run by:: ./run.py basic 4 6 8 The advantage of this runner is simplicity and more customized error reports. Using both runners will help you to have a quicker overview of what's happening. Auto-Completion +++++++++++++++ Uses comments to specify a test in the next line. The comment says, which results are expected. The comment always begins with `#?`. The last row symbolizes the cursor. For example:: #? ['real'] a = 3; a.rea Because it follows ``a.rea`` and a is an ``int``, which has a ``real`` property. Goto Definitions ++++++++++++++++ Definition tests use the same symbols like completion tests. This is possible because the completion tests are defined with a list:: #? int() ab = 3; ab Goto Assignments ++++++++++++++++ Tests look like this:: abc = 1 #! ['abc=1'] abc Additionally it is possible to add a number which describes to position of the test (otherwise it's just end of line):: #! 2 ['abc=1'] abc Usages ++++++ Tests look like this:: abc = 1 #< abc@1,0 abc@3,0 abc """ import os import re from ast import literal_eval from io import StringIO from functools import reduce import jedi from jedi._compatibility import unicode, is_py3 TEST_COMPLETIONS = 0 TEST_DEFINITIONS = 1 TEST_ASSIGNMENTS = 2 TEST_USAGES = 3 class IntegrationTestCase(object): def __init__(self, test_type, correct, line_nr, column, start, line, path=None): self.test_type = test_type self.correct = correct self.line_nr = line_nr self.column = column self.start = start self.line = line self.path = path self.skip = None @property def module_name(self): return os.path.splitext(os.path.basename(self.path))[0] @property def line_nr_test(self): """The test is always defined on the line before.""" return self.line_nr - 1 def __repr__(self): return '<%s: %s:%s:%s>' % (self.__class__.__name__, self.module_name, self.line_nr_test, self.line.rstrip()) def script(self): return jedi.Script(self.source, self.line_nr, self.column, self.path) def run(self, compare_cb): testers = { TEST_COMPLETIONS: self.run_completion, TEST_DEFINITIONS: self.run_goto_definitions, TEST_ASSIGNMENTS: self.run_goto_assignments, TEST_USAGES: self.run_usages, } return testers[self.test_type](compare_cb) def run_completion(self, compare_cb): completions = self.script().completions() #import cProfile; cProfile.run('script.completions()') comp_str = set([c.name for c in completions]) return compare_cb(self, comp_str, set(literal_eval(self.correct))) def run_goto_definitions(self, compare_cb): def comparison(definition): suffix = '()' if definition.type == 'instance' else '' return definition.desc_with_module + suffix def definition(correct, correct_start, path): def defs(line_nr, indent): s = jedi.Script(self.source, line_nr, indent, path) return set(s.goto_definitions()) should_be = set() number = 0 for index in re.finditer('(?:[^ ]+)', correct): end = index.end() # +3 because of the comment start `#? ` end += 3 number += 1 try: should_be |= defs(self.line_nr - 1, end + correct_start) except Exception: print('could not resolve %s indent %s' % (self.line_nr - 1, end)) raise # because the objects have different ids, `repr`, then compare. 
should = set(comparison(r) for r in should_be) if len(should) < number: raise Exception('Solution @%s not right, too few test results: %s' % (self.line_nr - 1, should)) return should script = self.script() should = definition(self.correct, self.start, script.path) result = script.goto_definitions() is_str = set(comparison(r) for r in result) return compare_cb(self, is_str, should) def run_goto_assignments(self, compare_cb): result = self.script().goto_assignments() comp_str = str(sorted(str(r.description) for r in result)) return compare_cb(self, comp_str, self.correct) def run_usages(self, compare_cb): result = self.script().usages() self.correct = self.correct.strip() compare = sorted((r.module_name, r.line, r.column) for r in result) wanted = [] if not self.correct: positions = [] else: positions = literal_eval(self.correct) for pos_tup in positions: if type(pos_tup[0]) == str: # this means that there is a module specified wanted.append(pos_tup) else: line = pos_tup[0] if pos_tup[0] is not None: line += self.line_nr wanted.append((self.module_name, line, pos_tup[1])) return compare_cb(self, compare, sorted(wanted)) def collect_file_tests(lines, lines_to_execute): makecase = lambda t: IntegrationTestCase(t, correct, line_nr, column, start, line) start = None correct = None test_type = None for line_nr, line in enumerate(lines, 1): if correct is not None: r = re.match('^(\d+)\s*(.*)$', correct) if r: column = int(r.group(1)) correct = r.group(2) start += r.regs[2][0] # second group, start index else: column = len(line) - 1 # -1 for the \n if test_type == '!': yield makecase(TEST_ASSIGNMENTS) elif test_type == '<': yield makecase(TEST_USAGES) elif correct.startswith('['): yield makecase(TEST_COMPLETIONS) else: yield makecase(TEST_DEFINITIONS) correct = None else: try: r = re.search(r'(?:^|(?<=\s))#([?!<])\s*([^\n]*)', line) # test_type is ? for completion and ! for goto_assignments test_type = r.group(1) correct = r.group(2) # Quick hack to make everything work (not quite a bloody unicorn hack though). if correct == '': correct = ' ' start = r.start() except AttributeError: correct = None else: # skip the test, if this is not specified test if lines_to_execute and line_nr not in lines_to_execute: correct = None def collect_dir_tests(base_dir, test_files, check_thirdparty=False): for f_name in os.listdir(base_dir): files_to_execute = [a for a in test_files.items() if f_name.startswith(a[0])] lines_to_execute = reduce(lambda x, y: x + y[1], files_to_execute, []) if f_name.endswith(".py") and (not test_files or files_to_execute): skip = None if check_thirdparty: lib = f_name.replace('_.py', '') try: # there is always an underline at the end. # It looks like: completion/thirdparty/pylab_.py __import__(lib) except ImportError: skip = 'Thirdparty-Library %s not found.' % lib path = os.path.join(base_dir, f_name) source = open(path).read() if not is_py3: source = unicode(source, 'UTF-8') for case in collect_file_tests(StringIO(source), lines_to_execute): case.path = path case.source = source if skip: case.skip = skip yield case docoptstr = """ Using run.py to make debugging easier with integration tests. An alternative testing format, which is much more hacky, but very nice to work with. Usage: run.py [--pdb] [--debug] [--thirdparty] [...] run.py --help Options: -h --help Show this screen. --pdb Enable pdb debugging on fail. -d, --debug Enable text output debugging (please install ``colorama``). --thirdparty Also run thirdparty tests (in ``completion/thirdparty``). 
""" if __name__ == '__main__': import docopt arguments = docopt.docopt(docoptstr) import time t_start = time.time() # Sorry I didn't use argparse here. It's because argparse is not in the # stdlib in 2.5. import sys if arguments['--debug']: jedi.set_debug_function() # get test list, that should be executed test_files = {} last = None for arg in arguments['']: if arg.isdigit(): if last is None: continue test_files[last].append(int(arg)) else: test_files[arg] = [] last = arg # completion tests: completion_test_dir = '../test/completion' summary = [] tests_fail = 0 # execute tests cases = list(collect_dir_tests(completion_test_dir, test_files)) if test_files or arguments['--thirdparty']: completion_test_dir += '/thirdparty' cases += collect_dir_tests(completion_test_dir, test_files, True) def file_change(current, tests, fails): if current is not None: current = os.path.basename(current) print('%s \t\t %s tests and %s fails.' % (current, tests, fails)) def report(case, actual, desired): if actual == desired: return 0 else: print("\ttest fail @%d, actual = %s, desired = %s" % (case.line_nr - 1, actual, desired)) return 1 import traceback current = cases[0].path if cases else None count = fails = 0 for c in cases: if current != c.path: file_change(current, count, fails) current = c.path count = fails = 0 try: if c.run(report): tests_fail += 1 fails += 1 except Exception: traceback.print_exc() print("\ttest fail @%d" % (c.line_nr - 1)) tests_fail += 1 fails += 1 if arguments['--pdb']: import pdb pdb.post_mortem() count += 1 file_change(current, count, fails) print('\nSummary: (%s fails of %s tests) in %.3fs' % (tests_fail, len(cases), time.time() - t_start)) for s in summary: print(s) exit_code = 1 if tests_fail else 0 sys.exit(exit_code) jedi-0.9.0/test/speed/0000775000175000017500000000000012517736563014733 5ustar daviddavid00000000000000jedi-0.9.0/test/speed/precedence.py0000664000175000017500000000120712331540214017356 0ustar daviddavid00000000000000def marks(code): if '.' in code: another(code[:code.index(',') - 1] + '!') else: another(code + '.') def another(code2): call(numbers(code2 + 'haha')) marks('start1 ') marks('start2 ') def alphabet(code4): if 1: if 2: return code4 + 'a' else: return code4 + 'b' else: if 2: return code4 + 'c' else: return code4 + 'd' def numbers(code5): if 2: return alphabet(code5 + '1') else: return alphabet(code5 + '2') def call(code3): code3 = numbers(numbers('end')) + numbers(code3) code3.partition jedi-0.9.0/test/test_cache.py0000664000175000017500000000615212517736533016310 0ustar daviddavid00000000000000""" Test all things related to the ``jedi.cache`` module. """ import time import pytest import jedi from jedi import settings, cache from jedi.cache import ParserCacheItem, ParserPickling ParserPicklingCls = type(ParserPickling) ParserPickling = ParserPicklingCls() def test_modulepickling_change_cache_dir(monkeypatch, tmpdir): """ ParserPickling should not save old cache when cache_directory is changed. 
See: `#168 `_ """ dir_1 = str(tmpdir.mkdir('first')) dir_2 = str(tmpdir.mkdir('second')) item_1 = ParserCacheItem('fake parser 1') item_2 = ParserCacheItem('fake parser 2') path_1 = 'fake path 1' path_2 = 'fake path 2' monkeypatch.setattr(settings, 'cache_directory', dir_1) ParserPickling.save_parser(path_1, item_1) cached = load_stored_item(ParserPickling, path_1, item_1) assert cached == item_1.parser monkeypatch.setattr(settings, 'cache_directory', dir_2) ParserPickling.save_parser(path_2, item_2) cached = load_stored_item(ParserPickling, path_1, item_1) assert cached is None def load_stored_item(cache, path, item): """Load `item` stored at `path` in `cache`.""" return cache.load_parser(path, item.change_time - 1) @pytest.mark.usefixtures("isolated_jedi_cache") def test_modulepickling_delete_incompatible_cache(): item = ParserCacheItem('fake parser') path = 'fake path' cache1 = ParserPicklingCls() cache1.version = 1 cache1.save_parser(path, item) cached1 = load_stored_item(cache1, path, item) assert cached1 == item.parser cache2 = ParserPicklingCls() cache2.version = 2 cached2 = load_stored_item(cache2, path, item) assert cached2 is None @pytest.mark.skipif('True', message='Currently the star import cache is not enabled.') def test_star_import_cache_duration(): new = 0.01 old, jedi.settings.star_import_cache_validity = \ jedi.settings.star_import_cache_validity, new dct = cache._time_caches['star_import_cache_validity'] old_dct = dict(dct) dct.clear() # first empty... # path needs to be not-None (otherwise caching effects are not visible) jedi.Script('', 1, 0, '').completions() time.sleep(2 * new) jedi.Script('', 1, 0, '').completions() # reset values jedi.settings.star_import_cache_validity = old assert len(dct) == 1 dct = old_dct cache._star_import_cache = {} def test_cache_call_signatures(): """ See github issue #390. """ def check(column, call_name, path=None): assert jedi.Script(s, 1, column, path).call_signatures()[0].name == call_name s = 'str(int())' for i in range(3): check(8, 'int') check(4, 'str') # Can keep doing these calls and always get the right result. # Now lets specify a source_path of boo and alternate these calls, it # should still work. for i in range(3): check(8, 'int', 'boo') check(4, 'str', 'boo') def test_cache_line_split_issues(): """Should still work even if there's a newline.""" assert jedi.Script('int(\n').call_signatures()[0].name == 'int' jedi-0.9.0/test/static_analysis/0000775000175000017500000000000012517736563017025 5ustar daviddavid00000000000000jedi-0.9.0/test/static_analysis/star_arguments.py0000664000175000017500000000427512517736533022442 0ustar daviddavid00000000000000# ----------------- # *args # ----------------- def simple(a): return a def nested(*args): return simple(*args) nested(1) #! 6 type-error-too-few-arguments nested() def nested_no_call_to_function(*args): return simple(1, *args) def simple2(a, b, c): return b def nested(*args): return simple2(1, *args) def nested_twice(*args1): return nested(*args1) nested_twice(2, 3) #! 13 type-error-too-few-arguments nested_twice(2) #! 19 type-error-too-many-arguments nested_twice(2, 3, 4) # A named argument can be located before *args. def star_args_with_named(*args): return simple2(c='', *args) star_args_with_named(1, 2) # ----------------- # **kwargs # ----------------- def kwargs_test(**kwargs): return simple2(1, **kwargs) kwargs_test(c=3, b=2) #! 12 type-error-too-few-arguments kwargs_test(c=3) #! 12 type-error-too-few-arguments kwargs_test(b=2) #! 
22 type-error-keyword-argument kwargs_test(b=2, c=3, d=4) #! 12 type-error-multiple-values kwargs_test(b=2, c=3, a=4) def kwargs_nested(**kwargs): return kwargs_test(b=2, **kwargs) kwargs_nested(c=3) #! 13 type-error-too-few-arguments kwargs_nested() #! 19 type-error-keyword-argument kwargs_nested(c=2, d=4) #! 14 type-error-multiple-values kwargs_nested(c=2, a=4) # TODO reenable ##! 14 type-error-multiple-values #kwargs_nested(b=3, c=2) # ----------------- # mixed *args/**kwargs # ----------------- def simple_mixed(a, b, c): return b def mixed(*args, **kwargs): return simple_mixed(1, *args, **kwargs) mixed(1, 2) mixed(1, c=2) mixed(b=2, c=3) mixed(c=4, b='') # need separate functions, otherwise these might swallow the errors def mixed2(*args, **kwargs): return simple_mixed(1, *args, **kwargs) #! 7 type-error-too-few-arguments mixed2(c=2) #! 7 type-error-too-few-arguments mixed2(3) #! 13 type-error-too-many-arguments mixed2(3, 4, 5) # TODO reenable ##! 13 type-error-too-many-arguments #mixed2(3, 4, c=5) #! 7 type-error-multiple-values mixed2(3, b=5) # ----------------- # plain wrong arguments # ----------------- #! 12 type-error-star-star simple(1, **[]) #! 12 type-error-star-star simple(1, **1) class A(): pass #! 12 type-error-star-star simple(1, **A()) #! 11 type-error-star simple(1, *1) jedi-0.9.0/test/static_analysis/operations.py0000664000175000017500000000020612363566726021561 0ustar daviddavid00000000000000-1 + 1 1 + 1.0 #! 2 type-error-operation 1 + '1' #! 2 type-error-operation 1 - '1' -1 - - 1 -1 - int() int() - float() float() - 3.0 jedi-0.9.0/test/static_analysis/descriptors.py0000664000175000017500000000036512363566726021745 0ustar daviddavid00000000000000# classmethod class TarFile(): @classmethod def open(cls, name, **kwargs): return cls.taropen(name, **kwargs) @classmethod def taropen(cls, name, **kwargs): return name # should just work TarFile.open('hallo') jedi-0.9.0/test/static_analysis/arguments.py0000664000175000017500000000217612517736533021407 0ustar daviddavid00000000000000# ----------------- # normal arguments (no keywords) # ----------------- def simple(a): return a simple(1) #! 6 type-error-too-few-arguments simple() #! 10 type-error-too-many-arguments simple(1, 2) #! 10 type-error-too-many-arguments simple(1, 2, 3) # ----------------- # keyword arguments # ----------------- simple(a=1) #! 7 type-error-keyword-argument simple(b=1) #! 10 type-error-too-many-arguments simple(1, a=1) def two_params(x, y): return y two_params(y=2, x=1) two_params(1, y=2) #! 11 type-error-multiple-values two_params(1, x=2) #! 17 type-error-too-many-arguments two_params(1, 2, y=3) # ----------------- # default arguments # ----------------- def default(x, y=1, z=2): return x #! 7 type-error-too-few-arguments default() default(1) default(1, 2) default(1, 2, 3) #! 17 type-error-too-many-arguments default(1, 2, 3, 4) default(x=1) # ----------------- # class arguments # ----------------- class Instance(): def __init__(self, foo): self.foo = foo Instance(1).foo Instance(foo=1).foo #! 12 type-error-too-many-arguments Instance(1, 2).foo #! 8 type-error-too-few-arguments Instance().foo jedi-0.9.0/test/static_analysis/import_tree/0000775000175000017500000000000012517736563021356 5ustar daviddavid00000000000000jedi-0.9.0/test/static_analysis/import_tree/a.py0000664000175000017500000000002012363566726022141 0ustar daviddavid00000000000000from . 
import b jedi-0.9.0/test/static_analysis/import_tree/__init__.py0000664000175000017500000000014212363566726023465 0ustar daviddavid00000000000000""" Another import tree, this time not for completion, but static analysis. """ from .a import * jedi-0.9.0/test/static_analysis/import_tree/b.py0000664000175000017500000000000012363566726022140 0ustar daviddavid00000000000000jedi-0.9.0/test/static_analysis/try_except.py0000664000175000017500000000263012363566726021567 0ustar daviddavid00000000000000try: #! 4 attribute-error str.not_existing except TypeError: pass try: str.not_existing except AttributeError: #! 4 attribute-error str.not_existing pass try: import not_existing_import except ImportError: pass try: #! 7 import-error import not_existing_import except AttributeError: pass # ----------------- # multi except # ----------------- try: str.not_existing except (TypeError, AttributeError): pass try: str.not_existing except ImportError: pass except (NotImplementedError, AttributeError): pass try: #! 4 attribute-error str.not_existing except (TypeError, NotImplementedError): pass # ----------------- # detailed except # ----------------- try: str.not_existing except ((AttributeError)): pass try: #! 4 attribute-error str.not_existing except [AttributeError]: pass # Should be able to detect errors in except statement as well. try: pass #! 7 name-error except Undefined: pass # ----------------- # inheritance # ----------------- try: undefined except Exception: pass # should catch everything try: undefined except: pass # ----------------- # kind of similar: hasattr # ----------------- if hasattr(str, 'undefined'): str.undefined str.upper #! 4 attribute-error str.undefined2 #! 4 attribute-error int.undefined else: str.upper #! 4 attribute-error str.undefined jedi-0.9.0/test/static_analysis/attribute_warnings.py0000664000175000017500000000134212363566726023313 0ustar daviddavid00000000000000""" Jedi issues warnings for possible errors if ``__getattr__``, ``__getattribute__`` or ``setattr`` are used. """ # ----------------- # __getattr*__ # ----------------- class Cls(): def __getattr__(self, name): return getattr(str, name) Cls().upper #! 6 warning attribute-error Cls().undefined class Inherited(Cls): pass Inherited().upper #! 12 warning attribute-error Inherited().undefined # ----------------- # setattr # ----------------- class SetattrCls(): def __init__(self, dct): # Jedi doesn't even try to understand such code for k, v in dct: setattr(self, k, v) self.defined = 3 c = SetattrCls({'a': 'b'}) c.defined #! 2 warning attribute-error c.undefined jedi-0.9.0/test/static_analysis/attribute_error.py0000664000175000017500000000433512517736533022615 0ustar daviddavid00000000000000class Cls(): class_attr = '' def __init__(self, input): self.instance_attr = 3 self.input = input def f(self): #! 12 attribute-error return self.not_existing def undefined_object(self, obj): """ Uses an arbitrary object and performs an operation on it, shouldn't be a problem. """ obj.arbitrary_lookup def defined_lookup(self, obj): """ `obj` is defined by a call into this function. """ obj.upper #! 4 attribute-error obj.arbitrary_lookup #! 13 name-error class_attr = a Cls(1).defined_lookup('') c = Cls(1) c.class_attr Cls.class_attr #! 4 attribute-error Cls.class_attr_error c.instance_attr #! 2 attribute-error c.instance_attr_error c.something = None #! 12 name-error something = a something # ----------------- # Unused array variables should still raise attribute errors. # ----------------- # should not raise anything. 
for loop_variable in [1, 2]: #! 4 name-error x = undefined loop_variable #! 28 name-error for loop_variable in [1, 2, undefined]: pass #! 7 attribute-error [1, ''.undefined_attr] def return_one(something): return 1 #! 14 attribute-error return_one(''.undefined_attribute) #! 12 name-error [r for r in undefined] #! 1 name-error [undefined for r in [1, 2]] [r for r in [1, 2]] # some random error that showed up class NotCalled(): def match_something(self, param): seems_to_need_an_assignment = param return [value.match_something() for value in []] # ----------------- # decorators # ----------------- #! 1 name-error @undefined_decorator def func(): return 1 # ----------------- # operators # ----------------- string = '%s %s' % (1, 2) # Shouldn't raise an error, because `string` is really just a string, not an # array or something. string.upper # ----------------- # imports # ----------------- # Star imports and the like in modules should not cause attribute errors in # this module. import import_tree import_tree.a import_tree.b # This is something that raised an error, because it was using a complex # mixture of Jedi fakes and compiled objects. import _sre #! 15 attribute-error _sre.compile().not_existing jedi-0.9.0/test/static_analysis/generators.py0000664000175000017500000000013612517736533021545 0ustar daviddavid00000000000000def generator(): yield 1 #! 12 type-error-generator generator()[0] list(generator())[0] jedi-0.9.0/test/static_analysis/imports.py0000664000175000017500000000057212517736533021075 0ustar daviddavid00000000000000 #! 7 import-error import not_existing import os from os.path import abspath #! 20 import-error from os.path import not_existing from datetime import date date.today #! 5 attribute-error date.not_existing_attribute #! 14 import-error from datetime.date import today #! 16 import-error import datetime.date #! 7 import-error import not_existing_nested.date import os.path jedi-0.9.0/test/test_integration.py0000664000175000017500000000275712363566726017603 0ustar daviddavid00000000000000import os import pytest from . import helpers def assert_case_equal(case, actual, desired): """ Assert ``actual == desired`` with formatted message. This is not needed for typical py.test use case, but as we need ``--assert=plain`` (see ../pytest.ini) to workaround some issue due to py.test magic, let's format the message by hand. """ assert actual == desired, """ Test %r failed. actual = %s desired = %s """ % (case, actual, desired) def assert_static_analysis(case, actual, desired): """A nicer formatting for static analysis tests.""" a = set(actual) d = set(desired) assert actual == desired, """ Test %r failed. not raised = %s unspecified = %s """ % (case, sorted(d - a), sorted(a - d)) def test_completion(case, monkeypatch): if case.skip is not None: pytest.skip(case.skip) repo_root = helpers.root_dir monkeypatch.chdir(os.path.join(repo_root, 'jedi')) case.run(assert_case_equal) def test_static_analysis(static_analysis_case): static_analysis_case.run(assert_static_analysis) def test_refactor(refactor_case): """ Run refactoring test case. 
:type refactor_case: :class:`.refactor.RefactoringCase` """ if 0: # TODO Refactoring is not relevant at the moment, it will be changed # significantly in the future, but maybe we can use these tests: refactor_case.run() assert_case_equal(refactor_case, refactor_case.result, refactor_case.desired) jedi-0.9.0/test/completion/0000775000175000017500000000000012517736563016004 5ustar daviddavid00000000000000jedi-0.9.0/test/completion/descriptors.py0000664000175000017500000000650712517736533020724 0ustar daviddavid00000000000000class RevealAccess(object): """ A data descriptor that sets and returns values normally and prints a message logging their access. """ def __init__(self, initval=None, name='var'): self.val = initval self.name = name def __get__(self, obj, objtype): print('Retrieving', self.name) return self.val def __set__(self, obj, val): print('Updating', self.name) self.val = val def just_a_method(self): pass class C(object): x = RevealAccess(10, 'var "x"') #? RevealAccess() x #? ['just_a_method'] x.just_a_method y = 5.0 def __init__(self): #? int() self.x #? [] self.just_a_method #? [] C.just_a_method m = C() #? int() m.x #? float() m.y #? int() C.x #? [] m.just_a_method #? [] C.just_a_method # ----------------- # properties # ----------------- class B(): @property def r(self): return 1 @r.setter def r(self, value): return '' def t(self): return '' p = property(t) #? [] B().r() #? int() B().r #? str() B().p #? [] B().p() class PropClass(): def __init__(self, a): self.a = a @property def ret(self): return self.a @ret.setter def ret(self, value): return 1.0 def ret2(self): return self.a ret2 = property(ret2) @property def nested(self): """ causes recusions in properties, should work """ return self.ret @property def nested2(self): """ causes recusions in properties, should not work """ return self.nested2 @property def join1(self): """ mutual recusion """ return self.join2 @property def join2(self): """ mutual recusion """ return self.join1 #? str() PropClass("").ret #? [] PropClass().ret. #? str() PropClass("").ret2 #? PropClass().ret2 #? int() PropClass(1).nested #? [] PropClass().nested. #? PropClass(1).nested2 #? [] PropClass().nested2. #? PropClass(1).join1 # ----------------- # staticmethod/classmethod # ----------------- class E(object): a = '' def __init__(self, a): self.a = a def f(x): return x f = staticmethod(f) @staticmethod def g(x): return x def s(cls, x): return x s = classmethod(s) @classmethod def t(cls, x): return x @classmethod def u(cls, x): return cls.a e = E(1) #? int() e.f(1) #? int() E.f(1) #? int() e.g(1) #? int() E.g(1) #? int() e.s(1) #? int() E.s(1) #? int() e.t(1) #? int() E.t(1) #? str() e.u(1) #? str() E.u(1) # ----------------- # Conditions # ----------------- from functools import partial class Memoize(): def __init__(self, func): self.func = func def __get__(self, obj, objtype): if obj is None: return self.func return partial(self, obj) def __call__(self, *args, **kwargs): # We don't do caching here, but that's what would normally happen. return self.func(*args, **kwargs) class MemoizeTest(): def __init__(self, x): self.x = x @Memoize def some_func(self): return self.x #? int() MemoizeTest(10).some_func() # Now also call the same function over the class (see if clause above). #? float() MemoizeTest.some_func(MemoizeTest(10.0)) jedi-0.9.0/test/completion/functions.py0000664000175000017500000001423212517736533020365 0ustar daviddavid00000000000000def array(first_param): #? ['first_param'] first_param return list() #? [] array.first_param #? 
[] array.first_param. func = array #? [] func.first_param #? list() array() #? ['array'] arr def inputs(param): return param #? list inputs(list) def variable_middle(): var = 3 return var #? int() variable_middle() def variable_rename(param): var = param return var #? int() variable_rename(1) def multi_line_func(a, # comment blabla b): return b #? str() multi_line_func(1,'') def multi_line_call(b): return b multi_line_call( #? int() b=1) # nothing after comma def asdf(a): return a x = asdf(a=1, ) #? int() x # ----------------- # double execution # ----------------- def double_exe(param): return param #? str() variable_rename(double_exe)("") # -> shouldn't work (and throw no error) #? [] variable_rename(list())(). #? [] variable_rename(1)(). # ----------------- # recursions (should ignore) # ----------------- def recursion(a, b): if a: return b else: return recursion(a+".", b+1) # Does not also return int anymore, because we now support operators in simple cases. #? float() recursion("a", 1.0) def other(a): return recursion2(a) def recursion2(a): if random.choice([0, 1]): return other(a) else: if random.choice([0, 1]): return recursion2("") else: return a #? int() str() recursion2(1) # ----------------- # ordering # ----------------- def a(): #? int() b() return b() def b(): return 1 #? int() a() # ----------------- # keyword arguments # ----------------- def func(a=1, b=''): return a, b exe = func(b=list, a=tuple) #? tuple exe[0] #? list exe[1] # ----------------- # default arguments # ----------------- #? int() func()[0] #? str() func()[1] #? float() func(1.0)[0] #? str() func(1.0)[1] #? float() func(a=1.0)[0] #? str() func(a=1.0)[1] #? int() func(b=1.0)[0] #? float() func(b=1.0)[1] #? list func(a=list, b=set)[0] #? set func(a=list, b=set)[1] def func_default(a, b=1): return a, b def nested_default(**kwargs): return func_default(**kwargs) #? float() nested_default(a=1.0)[0] #? int() nested_default(a=1.0)[1] #? str() nested_default(a=1.0, b='')[1] # ----------------- # closures # ----------------- def a(): l = 3 def func_b(): #? str() l = '' #? ['func_b'] func_b #? int() l # ----------------- # *args # ----------------- def args_func(*args): #? tuple() return args exe = args_func(1, "") #? int() exe[0] #? str() exe[1] # illegal args (TypeError) #? args_func(*1)[0] # iterator #? int() args_func(*iter([1]))[0] # different types e = args_func(*[1+"", {}]) #? int() str() e[0] #? dict() e[1] _list = [1,""] exe2 = args_func(_list)[0] #? str() exe2[1] exe3 = args_func([1,""])[0] #? str() exe3[1] def args_func(arg1, *args): return arg1, args exe = args_func(1, "", list) #? int() exe[0] #? tuple() exe[1] #? list exe[1][1] # In a dynamic search, both inputs should be given. def simple(a): #? int() str() return a def xargs(*args): return simple(*args) xargs(1) xargs('') # *args without a self symbol def memoize(func): def wrapper(*args, **kwargs): return func(*args, **kwargs) return wrapper class Something(): @memoize def x(self, a, b=1): return a #? int() Something().x(1) # ----------------- # ** kwargs # ----------------- def kwargs_func(**kwargs): #? ['keys'] kwargs.keys #? dict() return kwargs exe = kwargs_func(a=3,b=4.0) #? dict() exe #? int() exe['a'] #? float() exe['b'] #? int() float() exe['c'] a = 'a' exe2 = kwargs_func(**{a:3, 'b':4.0}) #? int() exe2['a'] #? float() exe2['b'] #? int() float() exe2['c'] # ----------------- # *args / ** kwargs # ----------------- def func_without_call(*args, **kwargs): #? tuple() args #? 
dict() kwargs def fu(a=1, b="", *args, **kwargs): return a, b, args, kwargs exe = fu(list, 1, "", c=set, d="") #? list exe[0] #? int() exe[1] #? tuple() exe[2] #? str() exe[2][0] #? dict() exe[3] #? set exe[3]['c'] # ----------------- # nested *args # ----------------- def function_args(a, b, c): return b def nested_args(*args): return function_args(*args) def nested_args2(*args, **kwargs): return nested_args(*args) #? int() nested_args('', 1, 1.0, list) #? [] nested_args('') #? int() nested_args2('', 1, 1.0) #? [] nested_args2('') # ----------------- # nested **kwargs # ----------------- def nested_kw(**kwargs1): return function_args(**kwargs1) def nested_kw2(**kwargs2): return nested_kw(**kwargs2) # invalid command, doesn't need to return anything #? nested_kw(b=1, c=1.0, list) #? int() nested_kw(b=1) # invalid command, doesn't need to return anything #? nested_kw(d=1.0, b=1, list) #? int() nested_kw(a=3.0, b=1) #? int() nested_kw(b=1, a=r"") #? [] nested_kw(1, '') #? [] nested_kw(a='') #? int() nested_kw2(b=1) #? int() nested_kw2(b=1, c=1.0) #? int() nested_kw2(c=1.0, b=1) #? [] nested_kw2('') #? [] nested_kw2(a='') #? [] nested_kw2('', b=1). # ----------------- # nested *args/**kwargs # ----------------- def nested_both(*args, **kwargs): return function_args(*args, **kwargs) def nested_both2(*args, **kwargs): return nested_both(*args, **kwargs) # invalid commands, may return whatever. #? list nested_both('', b=1, c=1.0, list) #? list nested_both('', c=1.0, b=1, list) #? [] nested_both('') #? int() nested_both2('', b=1, c=1.0) #? int() nested_both2('', c=1.0, b=1) #? [] nested_both2('') # ----------------- # nested *args/**kwargs with a default arg # ----------------- def function_def(a, b, c): return a, b def nested_def(a, *args, **kwargs): return function_def(a, *args, **kwargs) def nested_def2(*args, **kwargs): return nested_def(*args, **kwargs) #? str() nested_def2('', 1, 1.0)[0] #? str() nested_def2('', b=1, c=1.0)[0] #? str() nested_def2('', c=1.0, b=1)[0] #? int() nested_def2('', 1, 1.0)[1] #? int() nested_def2('', b=1, c=1.0)[1] #? int() nested_def2('', c=1.0, b=1)[1] #? [] nested_def2('')[1] # ----------------- # magic methods # ----------------- def a(): pass #? ['__closure__'] a.__closure__ jedi-0.9.0/test/completion/__init__.py0000664000175000017500000000011512143361723020075 0ustar daviddavid00000000000000""" needed for some modules to test against packages. """ some_variable = 1 jedi-0.9.0/test/completion/complex.py0000664000175000017500000000030512331540214017777 0ustar daviddavid00000000000000""" Mostly for stupid error reports of @dbrgn. :-) """ import time class Foo(object): global time asdf = time def asdfy(): return Foo xorz = getattr(asdfy()(), 'asdf') #? time xorz jedi-0.9.0/test/completion/ordering.py0000664000175000017500000000403712517736533020170 0ustar daviddavid00000000000000# ----------------- # normal # ----------------- a = "" a = 1 #? int() a #? [] a.append a = list b = 1; b = "" #? str() b # temp should not be accessible before definition #? [] temp a = 1 temp = b; b = a a = temp #? int() b #? int() b #? str() a a = tuple if 1: a = list #? ['append'] a.append #? ['index'] a.index # ----------------- # tuples exchanges # ----------------- a, b = 1, "" #? int() a #? str() b b, a = a, b #? int() b #? str() a b, a = a, b #? int() a #? str() b # ----------------- # function # ----------------- def a(a=3): #? int() a #? [] a.func return a #? int() a(2) #? [] a(2).func a_param = 3 def func(a_param): # should not be int #? [] a_param. 
from os import path # should not return a function, because `a` is a function above def f(b, a): return a #? [] f(b=3) # ----------------- # closure # ----------------- def x(): a = 0 def x(): return a a = 3.0 return x() #? float() x() # ----------------- # class # ----------------- class A(object): a = "" a = 3 #? int() a a = list() def __init__(self): self.b = "" def before(self): self.b = 3 # TODO should this be so? include entries after cursor? #? int() str() list self.b self.b = list self.a = 1 #? str() int() self.a #? ['after'] self.after self.c = 3 #? int() self.c def after(self): self.a = '' c = set() #? list() A.a a = A() #? ['after'] a.after #? [] a.upper #? [] a.append #? [] a.real #? str() int() a.a a = 3 class a(): def __init__(self, a): self.a = a #? float() a(1.0).a #? a().a # ----------------- # imports # ----------------- math = 3 import math #? ['cosh'] math.cosh #? [] math.real math = 3 #? int() math #? [] math.cos # do the same for star imports cosh = 3 from math import * # cosh doesn't work, but that's not a problem, star imports should be at the # start of EVERY script! cosh.real cosh = 3 #? int() cosh jedi-0.9.0/test/completion/goto.py0000664000175000017500000000544312517736533017331 0ustar daviddavid00000000000000# goto_assignments command tests are different in syntax definition = 3 #! 0 ['a = definition'] a = definition #! [] b #! ['a = definition'] a b = a c = b #! ['c = b'] c cd = 1 #! 1 ['cd = c'] cd = c #! 0 ['cd = e'] cd = e #! ['module math'] import math #! ['import math'] math #! ['import math'] b = math #! ['b = math'] b class C(object): def b(self): #! ['b = math'] b #! ['def b'] self.b #! 14 ['def b'] self.b() #! 11 ['self'] self.b return 1 #! ['def b'] b #! ['b = math'] b #! ['def b'] C.b #! ['def b'] C().b #! 0 ['class C'] C().b #! 0 ['class C'] C().b D = C #! ['def b'] D.b #! ['def b'] D().b #! 0 ['D = C'] D().b #! 0 ['D = C'] D().b def c(): return '' #! ['def c'] c #! 0 ['def c'] c() class ClassVar(): x = 3 #! ['x = 3'] ClassVar.x #! ['x = 3'] ClassVar().x # before assignments #! 10 ['x = 3'] ClassVar.x = '' #! 12 ['x = 3'] ClassVar().x = '' # Recurring use of the same var name, github #315 def f(t=None): #! 9 ['t=None'] t = t or 1 # ----------------- # imports # ----------------- #! ['module import_tree'] import import_tree #! ["a = ''"] import_tree.a #! ['module mod1'] import import_tree.mod1 #! ['a = 1'] import_tree.mod1.a #! ['module pkg'] import import_tree.pkg #! ['a = list'] import_tree.pkg.a #! ['module mod1'] import import_tree.pkg.mod1 #! ['a = 1.0'] import_tree.pkg.mod1.a #! ["a = ''"] import_tree.a #! ['module mod1'] from import_tree.pkg import mod1 #! ['a = 1.0'] mod1.a #! ['module mod1'] from import_tree import mod1 #! ['a = 1'] mod1.a #! ['a = 1.0'] from import_tree.pkg.mod1 import a #! ['import os'] from .imports import os #! ['some_variable = 1'] from . import some_variable # ----------------- # anonymous classes # ----------------- def func(): class A(): def b(self): return 1 return A() #! 8 ['def b'] func().b() # ----------------- # on itself # ----------------- #! 7 ['class ClassDef'] class ClassDef(): """ abc """ pass # ----------------- # params # ----------------- param = ClassDef #! 8 ['param'] def ab1(param): pass #! 9 ['param'] def ab2(param): pass #! 11 ['param = ClassDef'] def ab3(a=param): pass ab1(ClassDef);ab2(ClassDef);ab3(ClassDef) # ----------------- # for loops # ----------------- for i in range(1): #! ['for i in range(1): i'] i for key, value in [(1,2)]: #! ['for key, value in [(1,2)]: key'] key for i in []: #! 
['for i in []: i'] i # ----------------- # decorator # ----------------- def dec(dec_param=3): pass #! 8 ['dec_param=3'] @dec(dec_param=5) def y(): pass class ClassDec(): def class_func(func): return func #! 14 ['def class_func'] @ClassDec.class_func def x(): pass #! 2 ['class ClassDec'] @ClassDec.class_func def z(): pass jedi-0.9.0/test/completion/types.py0000664000175000017500000000224712517736533017524 0ustar daviddavid00000000000000# ----------------- # non array # ----------------- #? ['imag'] int.imag #? [] int.is_integer #? ['is_integer'] float.is_int #? ['is_integer'] 1.0.is_integer #? ['upper'] "".upper #? ['upper'] r"".upper # strangely this didn't work, because the = is used for assignments #? ['upper'] "=".upper a = "=" #? ['upper'] a.upper # ----------------- # lists # ----------------- arr = [] #? ['append'] arr.app #? ['append'] list().app #? ['append'] [].append arr2 = [1,2,3] #? ['append'] arr2.app #? int() arr.count(1) x = [] #? x.pop() x = [3] #? int() x.pop() x = [] x.append(1.0) #? float() x.pop() # ----------------- # dicts # ----------------- dic = {} #? ['copy', 'clear'] dic.c dic2 = dict(a=1, b=2) #? ['pop', 'popitem'] dic2.p #? ['popitem'] {}.popitem dic2 = {'asdf': 3} #? ['popitem'] dic2.popitem #? int() dic2['asdf'] # ----------------- # set # ----------------- set_t = {1,2} #? ['clear', 'copy'] set_t.c set_t2 = set() #? ['clear', 'copy'] set_t2.c # ----------------- # tuples # ----------------- tup = ('',2) #? ['count'] tup.c tup2 = tuple() #? ['index'] tup2.i #? ['index'] ().i tup3 = 1,"" #? ['index'] tup3.index tup4 = 1,"" #? ['index'] tup4.index jedi-0.9.0/test/completion/basic.py0000664000175000017500000000655412517736533017446 0ustar daviddavid00000000000000# ----------------- # cursor position # ----------------- #? 0 int int() #? 3 int int() #? 4 str int(str) # ----------------- # should not complete # ----------------- #? [] . #? [] str.. #? [] a(0):. # ----------------- # if/else/elif # ----------------- if (random.choice([0, 1])): 1 elif(random.choice([0, 1])): a = 3 else: a = '' #? int() str() a def func(): if random.choice([0, 1]): 1 elif(random.choice([0, 1])): a = 3 else: a = '' #? int() str() return a #? int() str() func() # ----------------- # keywords # ----------------- #? list() assert [] def focus_return(): #? list() return [] # ----------------- # for loops # ----------------- for a in [1,2]: #? int() a for a1 in 1,"": #? int() str() a1 for a3, b3 in (1,""), (1,""), (1,""): #? int() a3 #? str() b3 for a4, (b4, c4) in (1,("", list)), (1,("", list)): #? int() a4 #? str() b4 #? list c4 a = [] for i in [1,'']: #? int() str() i a += [i] #? int() str() a[0] for i in list([1,'']): #? int() str() i #? int() str() for x in [1,'']: x a = [] b = [1.0,''] for i in b: a += [i] #? float() str() a[0] for i in [1,2,3]: #? int() i else: i # ----------------- # range() # ----------------- for i in range(10): #? int() i # ----------------- # ternary operator # ----------------- a = 3 b = '' if a else set() #? str() set() b def ret(a): return ['' if a else set()] #? str() set() ret(1)[0] #? str() set() ret()[0] # ----------------- # with statements # ----------------- with open('') as f: #? ['closed'] f.closed for line in f: #? str() line with open('') as f1, open('') as f2: #? ['closed'] f1.closed #? ['closed'] f2.closed # ----------------- # global vars # ----------------- def global_define(): global global_var_in_func global_var_in_func = 3 #? int() global_var_in_func # ----------------- # within docstrs # ----------------- def a(): """ #? 
['global_define'] global_define """ pass #? # str literals in comment """ upper # ----------------- # magic methods # ----------------- class A(object): pass class B(): pass #? ['__init__'] A.__init__ #? ['__init__'] B.__init__ #? ['__init__'] int().__init__ # ----------------- # comments # ----------------- class A(): def __init__(self): self.hello = {} # comment shouldn't be a string #? dict() A().hello # ----------------- # unicode # ----------------- a = 'smörbröd' #? str() a xyz = 'smörbröd.py' if 1: #? str() xyz # ----------------- # exceptions # ----------------- try: import math except ImportError as i_a: #? ['i_a'] i_a #? ImportError() i_a try: import math except ImportError, i_b: # TODO check this only in Python2 ##? ['i_b'] i_b ##? ImportError() i_b class MyException(Exception): def __init__(self, my_attr): self.my_attr = my_attr try: raise MyException(1) except MyException as e: #? ['my_attr'] e.my_attr #? 22 ['my_attr'] for x in e.my_attr: pass # ----------------- # continuations # ----------------- foo = \ 1 #? int() foo # ----------------- # module attributes # ----------------- # Don't move this to imports.py, because there's a star import. #? str() __file__ #? ['__file__'] __file__ jedi-0.9.0/test/completion/classes.py0000664000175000017500000001615512517736533020020 0ustar daviddavid00000000000000def find_class(): """ This scope is special, because its in front of TestClass """ #? ['ret'] TestClass.ret if 1: #? ['ret'] TestClass.ret class FindClass(): #? [] TestClass.ret if a: #? [] TestClass.ret def find_class(self): #? ['ret'] TestClass.ret if 1: #? ['ret'] TestClass.ret #? [] FindClass().find_class.self #? [] FindClass().find_class.self.find_class # set variables, which should not be included, because they don't belong to the # class second = 1 second = "" class TestClass(object): var_class = TestClass(1) def __init__(self2, first_param, second_param, third=1.0): self2.var_inst = first_param self2.second = second_param self2.first = first_param a = 3 def var_func(self): return 1 def get_first(self): # traversal self.second_new = self.second return self.var_inst def values(self): self.var_local = 3 #? ['var_class', 'var_func', 'var_inst', 'var_local'] self.var_ def ret(self, a1): # should not know any class functions! #? [] values #? ['return'] ret return a1 # should not work #? [] var_local #? [] var_inst #? [] var_func # instance inst = TestClass(1) #? ['var_class', 'var_func', 'var_inst', 'var_local'] inst.var #? ['var_class', 'var_func'] TestClass.var #? int() inst.var_local #? [] TestClass.var_local. #? int() TestClass().ret(1) #? int() inst.ret(1) myclass = TestClass(1, '', 3.0) #? int() myclass.get_first() #? [] myclass.get_first.real # too many params #? int() TestClass(1,1,1).var_inst # too few params #? int() TestClass(1).first #? [] TestClass(1).second. # complicated variable settings in class #? str() myclass.second #? str() myclass.second_new # multiple classes / ordering ints = TestClass(1, 1.0) strs = TestClass("", '') #? float() ints.second #? str() strs.second #? ['var_class'] TestClass.var_class.var_class.var_class.var_class # operations (+, *, etc) shouldn't be InstanceElements - #246 class A(): def __init__(self): self.addition = 1 + 2 #? int() A().addition # should also work before `=` #? 8 int() A().addition = None #? 8 int() A(1).addition = None a = A() #? 
8 int() a.addition = None # ----------------- # inheritance # ----------------- class Base(object): def method_base(self): return 1 class SuperClass(Base): class_super = 3 def __init__(self): self.var_super = '' def method_super(self): self.var2_super = list class Mixin(SuperClass): def method_mixin(self): return int class SubClass(SuperClass): class_sub = 3 def __init__(self): self.var_sub = '' def method_sub(self): self.var_sub = list return tuple instance = SubClass() #? ['method_base', 'method_sub', 'method_super'] instance.method_ #? ['var2_super', 'var_sub', 'var_super'] instance.var #? ['class_sub', 'class_super'] instance.class_ #? ['method_base', 'method_sub', 'method_super'] SubClass.method_ #? [] SubClass.var #? ['class_sub', 'class_super'] SubClass.class_ # ----------------- # inheritance of builtins # ----------------- class Base(str): pass #? ['upper'] Base.upper #? ['upper'] Base().upper # ----------------- # dynamic inheritance # ----------------- class Angry(object): def shout(self): return 'THIS IS MALARKEY!' def classgetter(): return Angry class Dude(classgetter()): def react(self): #? ['shout'] self.s # ----------------- # __call__ # ----------------- class CallClass(): def __call__(self): return 1 #? int() CallClass()() # ----------------- # variable assignments # ----------------- class V: def __init__(self, a): self.a = a def ret(self): return self.a d = b b = ret if 1: c = b #? int() V(1).b() #? int() V(1).c() #? [] V(1).d() # ----------------- # ordering # ----------------- class A(): def b(self): #? int() a_func() #? str() self.a_func() return a_func() def a_func(self): return "" def a_func(): return 1 #? int() A().b() #? str() A().a_func() # ----------------- # nested classes # ----------------- class A(): class B(): pass def b(self): return 1.0 #? float() A().b() class A(): def b(self): class B(): def b(self): return [] return B().b() #? list() A().b() # ----------------- # recursions # ----------------- def Recursion(): def recurse(self): self.a = self.a self.b = self.b.recurse() #? Recursion().a #? Recursion().b # ----------------- # ducktyping # ----------------- def meth(self): return self.a, self.b class WithoutMethod(): a = 1 def __init__(self): self.b = 1.0 def blub(self): return self.b m = meth class B(): b = '' a = WithoutMethod().m() #? int() a[0] #? float() a[1] #? float() WithoutMethod.blub(WithoutMethod()) #? str() WithoutMethod.blub(B()) # ----------------- # __getattr__ / getattr() / __getattribute__ # ----------------- #? str().upper getattr(str(), 'upper') #? str.upper getattr(str, 'upper') # some strange getattr calls #? getattr(str, 1) #? getattr() #? getattr(str) #? getattr(getattr, 1) #? getattr(str, []) class Base(): def ret(self, b): return b class Wrapper(): def __init__(self, obj): self.obj = obj def __getattr__(self, name): return getattr(self.obj, name) class Wrapper2(): def __getattribute__(self, name): return getattr(Base(), name) #? int() Wrapper(Base()).ret(3) #? int() Wrapper2(Base()).ret(3) class GetattrArray(): def __getattr__(self, name): return [1] #? int() GetattrArray().something[0] # ----------------- # private vars # ----------------- class PrivateVar(): def __init__(self): self.__var = 1 #? int() self.__var #? ['__var'] self.__var #? [] PrivateVar().__var #? PrivateVar().__var # ----------------- # super # ----------------- class Super(object): a = 3 def return_sup(self): return 1 class TestSuper(Super): #? super() def test(self): #? Super() super() #? ['a'] super().a if 1: #? Super() super() def a(): #? 
super() def return_sup(self): #? int() return super().return_sup() #? int() TestSuper().return_sup() # ----------------- # if flow at class level # ----------------- class TestX(object): def normal_method(self): return 1 if True: def conditional_method(self): var = self.normal_method() #? int() var return 2 def other_method(self): var = self.conditional_method() #? int() var # ----------------- # mro method # ----------------- class A(object): a = 3 #? ['mro'] A.mro #? [] A().mro # ----------------- # mro resolution # ----------------- class B(A()): b = 3 #? B.a #? B().a #? int() B.b #? int() B().b jedi-0.9.0/test/completion/precedence.py0000664000175000017500000000346212517736533020455 0ustar daviddavid00000000000000""" Test Jedi's operation understanding. Jedi should understand simple additions, multiplications, etc. """ # ----------------- # numbers # ----------------- x = [1, 'a', 1.0] #? int() str() float() x[12] #? float() x[1 + 1] index = 0 + 1 #? str() x[index] #? int() x[1 + (-1)] def calculate(number): return number + constant constant = 1 #? float() x[calculate(1)] def calculate(number): return number + constant # ----------------- # strings # ----------------- x = 'upp' + 'e' #? str.upper getattr(str, x + 'r') a = "a"*3 #? str() a a = 3 * "a" #? str() a # ----------------- # assignments # ----------------- x = [1, 'a', 1.0] i = 0 i += 1 i += 1 #? float() x[i] i = 1 i += 1 i -= 3 i += 1 #? int() x[i] # ----------------- # in # ----------------- if 'X' in 'Y': a = 3 else: a = '' # For now don't really check for truth values. So in should return both # results. #? str() int() a # ----------------- # for flow assignments # ----------------- class FooBar(object): fuu = 0.1 raboof = 'fourtytwo' # targets should be working target = '' for char in ['f', 'u', 'u']: target += char #? float() getattr(FooBar, target) # github #24 target = u'' for char in reversed(['f', 'o', 'o', 'b', 'a', 'r']): target += char #? str() getattr(FooBar, target) # ----------------- # repetition problems -> could be very slow and memory expensive - shouldn't # be. # ----------------- b = [str(1)] l = list for x in [l(0), l(1), l(2), l(3), l(4), l(5), l(6), l(7), l(8), l(9), l(10), l(11), l(12), l(13), l(14), l(15), l(16), l(17), l(18), l(19), l(20), l(21), l(22), l(23), l(24), l(25), l(26), l(27), l(28), l(29)]: b += x #? str() b[1] # ----------------- # undefined names # ----------------- a = foobarbaz + 'hello' #? int() float() {'hello': 1, 'bar': 1.0}[a] jedi-0.9.0/test/completion/keywords.py0000664000175000017500000000006712143361723020213 0ustar daviddavid00000000000000 #? ['raise'] raise #? ['except', 'Exception'] except jedi-0.9.0/test/completion/import_tree/0000775000175000017500000000000012517736563020335 5ustar daviddavid00000000000000jedi-0.9.0/test/completion/import_tree/random.py0000664000175000017500000000007612143361723022155 0ustar daviddavid00000000000000""" Here because random is also a builtin module. """ a = set jedi-0.9.0/test/completion/import_tree/pkg/0000775000175000017500000000000012517736563021116 5ustar daviddavid00000000000000jedi-0.9.0/test/completion/import_tree/pkg/__init__.py0000664000175000017500000000003512143361723023210 0ustar daviddavid00000000000000a = list from math import * jedi-0.9.0/test/completion/import_tree/pkg/mod1.py0000664000175000017500000000001012143361723022302 0ustar daviddavid00000000000000a = 1.0 jedi-0.9.0/test/completion/import_tree/__init__.py0000664000175000017500000000012012517736533022434 0ustar daviddavid00000000000000a = '' from . 
import invisible_pkg the_pkg = invisible_pkg invisible_pkg = 1 jedi-0.9.0/test/completion/import_tree/recurse_class2.py0000664000175000017500000000007312204171717023611 0ustar daviddavid00000000000000import recurse_class1 class C(recurse_class1.C): pass jedi-0.9.0/test/completion/import_tree/rename1.py0000664000175000017500000000005112143361723022216 0ustar daviddavid00000000000000""" used for renaming tests """ abc = 3 jedi-0.9.0/test/completion/import_tree/invisible_pkg.py0000664000175000017500000000030112517736533023523 0ustar daviddavid00000000000000""" It should not be possible to import this pkg except for the import_tree itself, because it is overwritten there. (It would be possible with a sys.path modification, though). """ foo = 1.0 jedi-0.9.0/test/completion/import_tree/mod1.py0000664000175000017500000000007512517736533021546 0ustar daviddavid00000000000000a = 1 from import_tree.random import a as c foobarbaz = 3.0 jedi-0.9.0/test/completion/import_tree/recurse_class1.py0000664000175000017500000000012012204171717023601 0ustar daviddavid00000000000000import recurse_class2 class C(recurse_class2.C): def a(self): pass jedi-0.9.0/test/completion/import_tree/rename2.py0000664000175000017500000000007712143361723022227 0ustar daviddavid00000000000000""" used for renaming tests """ from rename1 import abc abc jedi-0.9.0/test/completion/import_tree/mod2.py0000664000175000017500000000003312204171717021527 0ustar daviddavid00000000000000from . import mod1 as fake jedi-0.9.0/test/completion/lambdas.py0000664000175000017500000000333012517736533017755 0ustar daviddavid00000000000000# ----------------- # lambdas # ----------------- a = lambda: 3 #? int() a() x = [] a = lambda x: x #? int() a(0) #? float() (lambda x: x)(3.0) arg_l = lambda x, y: y, x #? float() arg_l[0]('', 1.0) #? list() arg_l[1] arg_l = lambda x, y: (y, x) args = 1,"" result = arg_l(*args) #? tuple() result #? str() result[0] #? int() result[1] def with_lambda(callable_lambda, *args, **kwargs): return callable_lambda(1, *args, **kwargs) #? int() with_lambda(arg_l, 1.0)[1] #? float() with_lambda(arg_l, 1.0)[0] #? float() with_lambda(arg_l, y=1.0)[0] #? int() with_lambda(lambda x: x) #? float() with_lambda(lambda x, y: y, y=1.0) arg_func = lambda *args, **kwargs: (args[0], kwargs['a']) #? int() arg_func(1, 2, a='', b=10)[0] #? list() arg_func(1, 2, a=[], b=10)[1] # magic method a = lambda: 3 #? ['__closure__'] a.__closure__ class C(): def __init__(self, foo=1.0): self.a = lambda: 1 self.foo = foo def ret(self): return lambda: self.foo def with_param(self): return lambda x: x + self.a() #? int() C().a() #? str() C('foo').ret()() index = C().with_param()(1) #? float() ['', 1, 1.0][index] def xy(param): def ret(a, b): return a + b return lambda b: ret(param, b) #? int() xy(1)(2) # ----------------- # lambda param (#379) # ----------------- class Test(object): def __init__(self, pred=lambda a, b: a): self.a = 1 #? int() self.a #? float() pred(1.0, 2) # ----------------- # test_nocond in grammar (happens in list comprehensions with `if`) # ----------------- # Doesn't need to do anything yet. It should just not raise an error. These # nocond lambdas make no sense at all. #? 
int() [a for a in [1,2] if lambda: 3][0] jedi-0.9.0/test/completion/decorators.py0000664000175000017500000001160112517736533020517 0ustar daviddavid00000000000000# ----------------- # normal decorators # ----------------- def decorator(func): def wrapper(*args): return func(1, *args) return wrapper @decorator def decorated(a,b): return a,b exe = decorated(set, '') #? set exe[1] #? int() exe[0] # more complicated with args/kwargs def dec(func): def wrapper(*args, **kwargs): return func(*args, **kwargs) return wrapper @dec def fu(a, b, c, *args, **kwargs): return a, b, c, args, kwargs exe = fu(list, c=set, b=3, d='') #? list exe[0] #? int() exe[1] #? set exe[2] #? [] exe[3][0] #? str() exe[4]['d'] exe = fu(list, set, 3, '', d='') #? str() exe[3][0] # ----------------- # multiple decorators # ----------------- def dec2(func2): def wrapper2(first_arg, *args2, **kwargs2): return func2(first_arg, *args2, **kwargs2) return wrapper2 @dec2 @dec def fu2(a, b, c, *args, **kwargs): return a, b, c, args, kwargs exe = fu2(list, c=set, b=3, d='str') #? list exe[0] #? int() exe[1] #? set exe[2] #? [] exe[3][0] #? str() exe[4]['d'] # ----------------- # Decorator is a class # ----------------- class Decorator(object): def __init__(self, func): self.func = func def __call__(self, *args, **kwargs): return self.func(1, *args, **kwargs) @Decorator def nothing(a,b,c): return a,b,c #? int() nothing("")[0] #? str() nothing("")[1] @Decorator def nothing(a,b,c): return a,b,c class MethodDecoratorAsClass(): class_var = 3 @Decorator def func_without_self(arg, arg2): return arg, arg2 @Decorator def func_with_self(self, arg): return self.class_var #? int() MethodDecoratorAsClass().func_without_self('')[0] #? str() MethodDecoratorAsClass().func_without_self('')[1] #? MethodDecoratorAsClass().func_with_self(1) class SelfVars(): """Init decorator problem as an instance, #247""" @Decorator def __init__(self): """ __init__ decorators should be ignored when looking up variables in the class. """ self.c = list @Decorator def shouldnt_expose_var(not_self): """ Even though in real Python this shouldn't expose the variable, in this case Jedi exposes the variable, because these kind of decorators are normally descriptors, which SHOULD be exposed (at least 90%). """ not_self.b = 1.0 def other_method(self): #? float() self.b #? list self.c # ----------------- # not found decorators (are just ignored) # ----------------- @not_found_decorator def just_a_func(): return 1 #? int() just_a_func() #? ['__closure__'] just_a_func.__closure__ class JustAClass: @not_found_decorator2 def a(self): return 1 #? ['__closure__'] JustAClass().a.__closure__ #? int() JustAClass().a() #? ['__closure__'] JustAClass.a.__closure__ #? int() JustAClass.a() # ----------------- # illegal decorators # ----------------- class DecoratorWithoutCall(): def __init__(self, func): self.func = func @DecoratorWithoutCall def f(): return 1 # cannot be resolved - should be ignored @DecoratorWithoutCall(None) def g(): return 1 #? f() #? int() g() class X(): @str def x(self): pass def y(self): #? str() self.x #? self.x() # ----------------- # method decorators # ----------------- def dec(f): def wrapper(s): return f(s) return wrapper class MethodDecorators(): _class_var = 1 def __init__(self): self._method_var = '' @dec def constant(self): return 1.0 @dec def class_var(self): return self._class_var @dec def method_var(self): return self._method_var #? float() MethodDecorators().constant() #? int() MethodDecorators().class_var() #? 
str() MethodDecorators().method_var() class Base(): @not_existing def __init__(self): pass @not_existing def b(self): return '' @dec def c(self): return 1 class MethodDecoratorDoesntExist(Base): """#272 github: combination of method decorators and super()""" def a(self): #? super().__init__() #? str() super().b() #? int() super().c() #? float() self.d() @doesnt_exist def d(self): return 1.0 # ----------------- # others # ----------------- def memoize(function): def wrapper(*args): if random.choice([0, 1]): pass else: rv = function(*args) return rv return wrapper @memoize def follow_statement(stmt): return stmt # here we had problems with the else clause, because the parent was not right. #? int() follow_statement(1) # ----------------- # class decorators # ----------------- # class decorators should just be ignored @should_ignore class A(): def ret(self): return 1 #? int() A().ret() jedi-0.9.0/test/completion/named_param.py0000664000175000017500000000033312331540214020575 0ustar daviddavid00000000000000""" Named Params: >>> def a(abc): pass ... >>> a(abc=3) # <- this stuff (abc) """ def a(abc): pass #? 5 ['abc'] a(abc) def a(*some_args, **some_kwargs): pass #? 11 [] a(some_args) #? 13 [] a(some_kwargs) jedi-0.9.0/test/completion/parser.py0000664000175000017500000000116312363566726017654 0ustar daviddavid00000000000000""" Issues with the parser not the completion engine should be here. """ class IndentIssues(): """ issue jedi-vim#288 Which is really a fast parser issue. It used to start a new block at the parentheses, because it had problems with the indentation. """ def one_param( self, ): return 1 def with_param( self, y): return y #? int() IndentIssues().one_param() #? str() IndentIssues().with_param('') """ Just because there's a def keyword, doesn't mean it should not be able to complete to definition. """ definition = 0 #? ['definition', 'def'] str(def jedi-0.9.0/test/completion/flow_analysis.py0000664000175000017500000000556612517736533021241 0ustar daviddavid00000000000000def foo(x): if 1.0: return 1 else: return '' #? int() foo(1) # Exceptions are not analyzed. So check both if branches def try_except(x): try: if 0: return 1 else: return '' except AttributeError: return 1.0 #? float() str() try_except(1) # Exceptions are not analyzed. So check both if branches def try_except(x): try: if 0: return 1 else: return '' except AttributeError: return 1.0 #? float() str() try_except(1) # ----------------- # elif # ----------------- def elif_flows1(x): if False: return 1 elif True: return 1.0 else: return '' #? float() elif_flows1(1) def elif_flows2(x): try: if False: return 1 elif 0: return 1.0 else: return '' except ValueError: return set #? str() set elif_flows2(1) def elif_flows3(x): try: if True: return 1 elif 0: return 1.0 else: return '' except ValueError: return set #? int() set elif_flows3(1) # ----------------- # mid-difficulty if statements # ----------------- def check(a): if a is None: return 1 return '' return set #? int() check(None) #? str() check('asb') a = list if 2 == True: a = set elif 1 == True: a = 0 #? int() a if check != 1: a = '' #? str() a if check == check: a = list #? list a if check != check: a = set else: a = dict #? dict a if not (check is not check): a = 1 #? int() a # ----------------- # name resolution # ----------------- a = list def elif_name(x): try: if True: a = 1 elif 0: a = 1.0 else: return '' except ValueError: a = x return a #? int() set elif_name(set) if 0: a = '' else: a = int #? 
int a # ----------------- # isinstance # ----------------- class A(): pass def isinst(x): if isinstance(x, A): return dict elif isinstance(x, int) and x == 1 or x is True: return set elif isinstance(x, (float, reversed)): return list elif not isinstance(x, str): return tuple return 1 #? dict isinst(A()) #? set isinst(True) #? set isinst(1) #? tuple isinst(2) #? list isinst(1.0) #? tuple isinst(False) #? int() isinst('') # ----------------- # flows that are not reachable should be able to access parent scopes. # ----------------- foobar = '' if 0: within_flow = 1.0 #? float() within_flow #? str() foobar if 0: nested = 1 #? int() nested #? float() within_flow #? str() foobar #? nested # ----------------- # True objects like modules # ----------------- class X(): pass if X: a = 1 else: a = '' #? int() a jedi-0.9.0/test/completion/docstring.py0000664000175000017500000000511212517736533020346 0ustar daviddavid00000000000000""" Test docstrings in functions and classes, which are used to infer types """ # ----------------- # sphinx style # ----------------- def sphinxy(a, b, c, d, x): """ asdfasdf :param a: blablabla :type a: str :type b: (str, int) :type c: random.Random :type d: :class:`random.Random` :param str x: blablabla :rtype: dict """ #? str() a #? str() b[0] #? int() b[1] #? ['seed'] c.seed #? ['seed'] d.seed #? ['lower'] x.lower #? dict() sphinxy() # wrong declarations def sphinxy2(a, b, x): """ :param a: Forgot type declaration :type a: :param b: Just something :type b: `` :param x: Just something without type :rtype: """ #? a #? b #? x #? sphinxy2() # local classes -> github #370 class ProgramNode(): pass def local_classes(node, node2): """ :type node: ProgramNode ... and the class definition after this func definition: :type node2: ProgramNode2 """ #? ProgramNode() node #? ProgramNode2() node2 class ProgramNode2(): pass def list_with_non_imports(lst): """ Should be able to work with tuples and lists and still import stuff. :type lst: (random.Random, [collections.defaultdict, ...]) """ #? ['seed'] lst[0].seed import collections as col # use some weird index #? col.defaultdict() lst[1][10] def two_dots(a): """ :type a: json.decoder.JSONDecoder """ #? ['raw_decode'] a.raw_decode # sphinx returns def return_module_object(): """ :rtype: :class:`random.Random` """ #? ['seed'] return_module_object().seed # ----------------- # epydoc style # ----------------- def epydoc(a, b): """ asdfasdf @type a: str @param a: blablabla @type b: (str, int) @param b: blablah @rtype: list """ #? str() a #? str() b[0] #? int() b[1] #? list() epydoc() # Returns with param type only def rparam(a,b): """ @type a: str """ return a #? str() rparam() # Composite types def composite(): """ @rtype: (str, int, dict) """ x, y, z = composite() #? str() x #? int() y #? dict() z # Both docstring and calculated return type def both(): """ @rtype: str """ return 23 #? str() int() both() class Test(object): def __init__(self): self.teststr = "" """ # jedi issue #210 """ def test(self): #? ['teststr'] self.teststr # ----------------- # statement docstrings # ----------------- d = '' """ bsdf """ #? str() d.upper() jedi-0.9.0/test/completion/on_import.py0000664000175000017500000000333212517736533020362 0ustar daviddavid00000000000000def from_names(): #? ['mod1'] from import_tree.pkg. #? ['path'] from os. def from_names_goto(): from import_tree import pkg #? pkg from import_tree.pkg def builtin_test(): #? ['math'] import math # ----------------- # completions within imports # ----------------- #? ['sqlite3'] import sqlite3 #? 
['classes'] import classes #? ['timedelta'] from datetime import timedel #? 21 [] from datetime.timedel import timedel # should not be possible, because names can only be looked up 1 level deep. #? [] from datetime.timedelta import resolution #? [] from datetime.timedelta import #? ['Cursor'] from sqlite3 import Cursor #? ['some_variable'] from . import some_variable #? ['arrays'] from . import arrays #? [] from . import import_tree as ren import os #? os.path.join from os.path import join # ----------------- # special positions -> edge cases # ----------------- import datetime #? 6 datetime from datetime.time import time #? [] import datetime. #? [] import datetime.date #? 21 ['import'] from import_tree.pkg import pkg #? 22 ['mod1'] from import_tree.pkg. import mod1 #? 17 ['mod1', 'mod2', 'random', 'pkg', 'rename1', 'rename2', 'recurse_class1', 'recurse_class2', 'invisible_pkg'] from import_tree. import pkg #? 18 ['pkg'] from import_tree.p import pkg #? 17 ['import_tree'] from .import_tree import #? 10 ['run'] from ..run import #? ['run'] from ..run #? 10 ['run'] from ..run. #? [] from ..run. #? ['run'] from .. import run #? [] from not_a_module import #137 import json #? 23 json.dump from json import load, dump #? 17 json.load from json import load, dump # without the from clause: import json, datetime #? 7 json import json, datetime #? 13 datetime import json, datetime jedi-0.9.0/test/completion/usages.py0000664000175000017500000001170112517736533017642 0ustar daviddavid00000000000000""" Renaming tests. This means search for usages. I always leave a little bit of space to add room for additions, because the results always contain position informations. """ #< 4 (0,4), (3,0), (5,0), (17,0) def abc(): pass #< 0 (-3,4), (0,0), (2,0), (14,0) abc.d.a.bsaasd.abc.d abc # unicode chars shouldn't be a problem. x['smörbröd'].abc # With the new parser these statements are not recognized as stateents, because # they are not valid Python. if 1: abc = else: (abc) = abc = #< (-17,4), (-14,0), (-12,0), (0,0) abc abc = 5 Abc = 3 #< 6 (0,6), (2,4), (5,8), (17,0) class Abc(): #< (-2,6), (0,4), (3,8), (15,0) Abc def Abc(self): Abc; self.c = 3 #< 17 (0,16), (2,8) def a(self, Abc): #< 10 (-2,16), (0,8) Abc #< 19 (0,18), (2,8) def self_test(self): #< 12 (-2,18), (0,8) self.b Abc.d.Abc #< 4 (0,4), (5,1) def blubi(): pass #< (-5,4), (0,1) @blubi def a(): pass #< 0 (0,0), (1,0) set_object_var = object() set_object_var.var = 1 response = 5 #< 0 (0,0), (1,0), (2,0), (4,0) response = HttpResponse(mimetype='application/pdf') response['Content-Disposition'] = 'attachment; filename=%s.pdf' % id response.write(pdf) #< (-4,0), (-3,0), (-2,0), (0,0) response # ----------------- # imports # ----------------- #< (0,7), (3,0) import module_not_exists #< (-3,7), (0,0) module_not_exists #< ('rename1', 1,0), (0,24), (3,0), (6,17), ('rename2', 4,5), (10,17), (13,17), ('imports', 70, 16) from import_tree import rename1 #< (0,8), ('rename1',3,0), ('rename2',4,20), ('rename2',6,0), (3,32), (7,32), (4,0) rename1.abc #< (-3,8), ('rename1', 3,0), ('rename2', 4,20), ('rename2', 6,0), (0,32), (4,32), (1,0) from import_tree.rename1 import abc abc #< 20 ('rename1', 1,0), ('rename2', 4,5), (-10,24), (-7,0), (-4,17), (0,17), (3,17), ('imports', 70, 16) from import_tree.rename1 import abc #< (0, 32), from import_tree.rename1 import not_existing # Shouldn't raise an error or do anything weird. 
from not_existing import * # ----------------- # classes # ----------------- class TestMethods(object): #< 8 (0,8), (2,13) def a_method(self): #< 13 (-2,8), (0,13) self.a_method() #< 13 (2,8), (0,13), (3,13) self.b_method() def b_method(self): self.b_method class TestClassVar(object): #< 4 (0,4), (5,13), (7,21) class_v = 1 def a(self): class_v = 1 #< (-5,4), (0,13), (2,21) self.class_v #< (-7,4), (-2,13), (0,21) TestClassVar.class_v #< (0,8), (-7, 8) class_v class TestInstanceVar(): def a(self): #< 13 (4,13), (0,13) self._instance_var = 3 def b(self): #< (-4,13), (0,13) self._instance_var # A call to self used to trigger an error, because it's also a trailer # with two children. self() class NestedClass(): def __getattr__(self, name): return self # Shouldn't find a definition, because there's other `instance`. # TODO reenable that test ##< (0, 14), NestedClass().instance # ----------------- # inheritance # ----------------- class Super(object): #< 4 (0,4), (23,18), (25,13) base_class = 1 #< 4 (0,4), class_var = 1 #< 8 (0,8), def base_method(self): #< 13 (0,13), (20,13) self.base_var = 1 #< 13 (0,13), self.instance_var = 1 #< 8 (0,8), def just_a_method(self): pass #< 20 (0,16), (-18,6) class TestClass(Super): #< 4 (0,4), class_var = 1 def x_method(self): #< (0,18), (2,13), (-23,4) TestClass.base_class #< (-2,18), (0,13), (-25,4) self.base_class #< (-20,13), (0,13) self.base_var #< TestClass.base_var #< 13 (5,13), (0,13) self.instance_var = 3 #< 9 (0,8), def just_a_method(self): #< (-5,13), (0,13) self.instance_var # ----------------- # properties # ----------------- class TestProperty: @property #< 10 (0,8), (5,13) def prop(self): return 1 def a(self): #< 13 (-5,8), (0,13) self.prop @property #< 13 (0,8), (4,5) def rw_prop(self): return self._rw_prop #< 8 (-4,8), (0,5) @rw_prop.setter #< 8 (0,8), (5,13) def rw_prop(self, value): self._rw_prop = value def b(self): #< 13 (-5,8), (0,13) self.rw_prop # ----------------- # *args, **kwargs # ----------------- #< 11 (1,11), (0,8) def f(**kwargs): return kwargs # ----------------- # No result # ----------------- if isinstance(j, int): #< j # ----------------- # Dynamic Param Search # ----------------- class DynamicParam(): def foo(self): return def check(instance): #< 13 (-5,8), (0,13) instance.foo() check(DynamicParam()) # ----------------- # Compiled Objects # ----------------- import _sre #< 0 (-3,7), (0,0), ('_sre', None, None) _sre # ----------------- # on syntax # ----------------- #< 0 import undefined jedi-0.9.0/test/completion/comprehensions.py0000664000175000017500000000346412517736533021416 0ustar daviddavid00000000000000# ----------------- # list comprehensions # ----------------- # basics: a = ['' for a in [1]] #? str() a[0] #? ['insert'] a.insert a = [a for a in [1]] #? int() a[0] y = 1.0 # Should not leak. [y for y in [3]] #? float() y a = [a for a in (1, 2)] #? int() a[0] a = [a for a,b in [(1,'')]] #? int() a[0] arr = [1,''] a = [a for a in arr] #? int() str() a[0] a = [a if 1.0 else '' for a in [1] if [1.0]] #? int() str() a[0] # name resolve should be correct left, right = 'a', 'b' left, right = [x for x in (left, right)] #? str() left # with a dict literal #? str() [a for a in {1:'x'}][0] ##? str() {a-1:b for a,b in {1:'a', 3:1.0}.items()}[0] # list comprehensions should also work in combination with functions def listen(arg): for x in arg: #? str() x listen(['' for x in [1]]) #? str ([str for x in []])[0] # ----------------- # nested list comprehensions # ----------------- b = [a for arr in [[1]] for a in arr] #? 
int() b[0] b = [a for arr in [[1]] if '' for a in arr if ''] #? int() b[0] b = [b for arr in [[[1.0]]] for a in arr for b in a] #? float() b[0] # jedi issue #26 #? list() a = [[int(v) for v in line.strip().split() if v] for line in ["123", "123", "123"] if line] #? list() a[0] #? int() a[0][0] # ----------------- # generator comprehensions # ----------------- left, right = (i for i in (1, '')) #? int() left gen = (i for i in (1,)) #? int() next(gen) #? gen[0] gen = (a for arr in [[1.0]] for a in arr) #? float() next(gen) #? int() (i for i in (1,)).send() # issues with different formats left, right = (i for i in ('1', '2')) #? str() left # ----------------- # name resolution in comprehensions. # ----------------- def x(): """Should not try to resolve to the if hio, which was a bug.""" #? 22 [a for a in h if hio] if hio: pass jedi-0.9.0/test/completion/thirdparty/0000775000175000017500000000000012517736563020176 5ustar daviddavid00000000000000jedi-0.9.0/test/completion/thirdparty/django_.py0000664000175000017500000000032412143361723022133 0ustar daviddavid00000000000000#! ['class ObjectDoesNotExist'] from django.core.exceptions import ObjectDoesNotExist import django #? ['get_version'] django.get_version from django.conf import settings #? ['configured'] settings.configured jedi-0.9.0/test/completion/thirdparty/PyQt4_.py0000664000175000017500000000050012143361723021646 0ustar daviddavid00000000000000from PyQt4.QtCore import * from PyQt4.QtGui import * #? ['QActionGroup'] QActionGroup #? ['currentText'] QStyleOptionComboBox().currentText #? [] QStyleOptionComboBox().currentText. from PyQt4 import QtGui #? ['currentText'] QtGui.QStyleOptionComboBox().currentText #? [] QtGui.QStyleOptionComboBox().currentText. jedi-0.9.0/test/completion/thirdparty/jedi_.py0000664000175000017500000000270212204171717021606 0ustar daviddavid00000000000000 from jedi import functions, evaluate, parsing el = functions.completions()[0] #? ['description'] el.description #? str() el.description scopes, path, dot, like = \ api._prepare_goto(source, row, column, path, True) # has problems with that (sometimes) very deep nesting. #? set() el = scopes # get_names_for_scope is also recursion stuff #? tuple() el = list(evaluate.get_names_for_scope())[0] #? int() parsing.Module() el = list(evaluate.get_names_for_scope(1))[0][0] #? parsing.Module() el = list(evaluate.get_names_for_scope())[0][0] #? list() el = list(evaluate.get_names_for_scope(1))[0][1] #? list() el = list(evaluate.get_names_for_scope())[0][1] #? list() parsing.Scope((0,0)).get_set_vars() #? parsing.Import() parsing.Name() parsing.Scope((0,0)).get_set_vars()[0] # TODO access parent is not possible, because that is not set in the class ## parsing.Class() parsing.Scope((0,0)).get_set_vars()[0].parent #? parsing.Import() parsing.Name() el = list(evaluate.get_names_for_scope())[0][1][0] #? evaluate.Array() evaluate.Class() evaluate.Function() evaluate.Instance() list(evaluate.follow_call())[0] # With the right recursion settings, this should be possible (and maybe more): # Array Class Function Generator Instance Module # However, this was produced with the recursion settings 10/350/10000, and # lasted 18.5 seconds. So we just have to be content with the results. #? evaluate.Class() evaluate.Function() evaluate.get_scopes_for_name()[0] jedi-0.9.0/test/completion/thirdparty/psycopg2_.py0000664000175000017500000000020612143361723022436 0ustar daviddavid00000000000000import psycopg2 conn = psycopg2.connect('dbname=test') #? ['cursor'] conn.cursor cur = conn.cursor() #? 
['fetchall'] cur.fetchall jedi-0.9.0/test/completion/thirdparty/pylab_.py0000664000175000017500000000104012143361723021774 0ustar daviddavid00000000000000import pylab # two gotos #! ['module numpy'] import numpy #! ['module random'] import numpy.random #? ['array2string'] numpy.array2string #? ['shape'] numpy.matrix().shape #? ['random_integers'] pylab.random_integers #? [] numpy.random_integers #? ['random_integers'] numpy.random.random_integers #? ['sample'] numpy.random.sample import numpy na = numpy.array([1,2]) #? ['shape'] na.shape # shouldn't raise an error #29, jedi-vim # doesn't return something, because matplotlib uses __import__ fig = pylab.figure() #? fig.add_subplot jedi-0.9.0/test/completion/definition.py0000664000175000017500000000171712204171717020477 0ustar daviddavid00000000000000""" Fallback to callee definition when definition not found. - https://github.com/davidhalter/jedi/issues/131 - https://github.com/davidhalter/jedi/pull/149 """ """Parenthesis closed at next line.""" #? isinstance isinstance( ) #? isinstance isinstance( ) #? isinstance isinstance(None, ) #? isinstance isinstance(None, ) """Parenthesis closed at same line.""" # Note: len('isinstance(') == 11 #? 11 isinstance isinstance() # Note: len('isinstance(None,') == 16 ##? 16 isinstance isinstance(None,) # Note: len('isinstance(None,') == 16 ##? 16 isinstance isinstance(None, ) # Note: len('isinstance(None, ') == 17 ##? 17 isinstance isinstance(None, ) # Note: len('isinstance( ') == 12 ##? 12 isinstance isinstance( ) """Unclosed parenthesis.""" #? isinstance isinstance( def x(): pass # acts like EOF ##? isinstance isinstance( def x(): pass # acts like EOF #? isinstance isinstance(None, def x(): pass # acts like EOF ##? isinstance isinstance(None, jedi-0.9.0/test/completion/isinstance.py0000664000175000017500000000231512517736533020514 0ustar daviddavid00000000000000if isinstance(i, str): #? str() i if isinstance(j, (str, int)): #? str() int() j while isinstance(k, (str, int)): #? str() int() k if not isinstance(k, (str, int)): #? k while not isinstance(k, (str, int)): #? k assert isinstance(ass, int) #? int() ass assert isinstance(ass, str) assert not isinstance(ass, int) if 2: #? str() ass # ----------------- # in functions # ----------------- import datetime def fooooo(obj): if isinstance(obj, datetime.datetime): #? datetime.datetime() obj def fooooo2(obj): if isinstance(obj, datetime.date): return obj else: return 1 a # In earlier versions of Jedi, this returned both datetime and int, but now # Jedi does flow checks and realizes that the top return isn't executed. #? int() fooooo2('') # ----------------- # Names with multiple indices. # ----------------- class Test(): def __init__(self, testing): if isinstance(testing, str): self.testing = testing else: self.testing = 10 def boo(self): if isinstance(self.testing, str): #? str() self.testing #? Test() self jedi-0.9.0/test/completion/invalid.py0000664000175000017500000000513512517736533020005 0ustar daviddavid00000000000000""" This file is less about the results and much more about the fact, that no exception should be thrown. Basically this file could change depending on the current implementation. But there should never be any errors. """ # wait until keywords are out of definitions (pydoc function). ##? 5 's'() #? [] str()).upper # ----------------- # funcs # ----------------- def asdf(a or b): # multiple param names return a #? asdf(2) asdf = '' from a import (b def blub(): return 0 def wrong_indents(): asdf = 3 asdf asdf( #? 
int() asdf def openbrace(): asdf = 3 asdf( #? int() asdf return 1 #? int() openbrace() blub([ #? int() openbrace() def indentfault(): asd( indentback #? [] indentfault(). def openbrace2(): asd( def normalfunc(): return 1 #? int() normalfunc() # dots in param def f(seq1...=None): return seq1 #? f(1) @ def test_empty_decorator(): return 1 #? int() test_empty_decorator() def invalid_param(param=): #? param # ----------------- # flows # ----------------- # first part not complete (raised errors) if a a else: #? ['AttributeError'] AttributeError try #? ['AttributeError'] except AttributeError pass finally: pass #? ['isinstance'] if isi try: except TypeError: #? str() "" def break(): pass # wrong ternary expression a = '' a = 1 if #? str() a # No completions for for loops without the right syntax for for_local in : for_local #? [] for_local #? for_local # ----------------- # list comprehensions # ----------------- a2 = [for a2 in [0]] #? a2[0] a3 = [for xyz in] #? a3[0] a3 = [a4 for in 'b'] #? a3[0] a3 = [a4 for a in for x in y] #? a3[0] a = [for a in def break(): pass #? a[0] a = [a for a in [1,2] def break(): pass #? a[0] #? [] int()).real # ----------------- # keywords # ----------------- #! [] as def empty_assert(): x = 3 assert #? int() x import datetime as # ----------------- # statements # ----------------- call = '' invalid = .call #? invalid invalid = call?.call #? invalid # comma invalid = ,call #? invalid # ----------------- # classes # ----------------- class BrokenPartsOfClass(): def foo(self): # This construct contains two places where Jedi with Python 3 can fail. # It should just ignore those constructs and still execute `bar`. pass if 2: try: pass except ValueError, e: raise TypeError, e else: pass def bar(self): self.x = 3 return '' #? str() BrokenPartsOfClass().bar() jedi-0.9.0/test/completion/sys_path.py0000664000175000017500000000065712331540214020174 0ustar daviddavid00000000000000 import sys import os from os import dirname sys.path.insert(0, '../../jedi') sys.path.append(dirname(os.path.abspath('thirdparty' + os.path.sep + 'asdf'))) # modifications, that should fail: # because of sys module sys.path.append(sys.path[1] + '/thirdparty') # syntax err sys.path.append('a' +* '/thirdparty') #? ['evaluate'] import evaluate #? ['Evaluator'] evaluate.Evaluator #? ['jedi_'] import jedi_ #? ['el'] jedi_.el jedi-0.9.0/test/completion/dynamic_arrays.py0000664000175000017500000001016012517736533021356 0ustar daviddavid00000000000000""" Checking for ``list.append`` and all the other possible array modifications. """ # ----------------- # list.append # ----------------- arr = [] for a in [1,2]: arr.append(a); arr.append # should not cause an exception arr.append() # should not cause an exception #? int() arr[10] arr = [tuple()] for a in [1,2]: arr.append(a); #? int() tuple() arr[10] #? int() arr[10].index() arr = list([]) arr.append(1) #? int() arr[0] # ----------------- # list.insert # ----------------- arr = [""] arr.insert(0, 1.0) # on exception due to this, please! arr.insert(0) arr.insert() #? float() str() arr[10] for a in arr: #? float() str() a #? float() str() list(arr)[10] # ----------------- # set.add # ----------------- st = {1.0} for a in [1,2]: st.add(a) st.append('') # lists should not have an influence st.add # should not cause an exception st.add() # ----------------- # list.extend / set.update # ----------------- arr = [1.0] arr.extend([1,2,3]) arr.extend([]) arr.extend("") # should ignore #? float() int() arr[100] a = set(arr) a.update(list(["", 1])) #? 
float() int() str() list(a)[0] # ----------------- # set/list initialized as functions # ----------------- st = set() st.add(1) #? int() for s in st: s lst = list() lst.append(1) #? int() for i in lst: i # ----------------- # renames / type changes # ----------------- arr = [] arr2 = arr arr2.append('') #? str() arr2[0] st = {1.0} st.add(1) lst = list(st) lst.append('') #? float() int() str() lst[0] lst = [1] lst.append(1.0) s = set(lst) s.add("") lst = list(s) lst.append({}) #? dict() int() float() str() lst[0] # should work with tuple conversion, too. #? dict() int() float() str() tuple(lst)[0] # but not with an iterator #? iter(lst)[0] # ----------------- # complex including += # ----------------- class C(): pass class D(): pass class E(): pass lst = [1] lst.append(1.0) lst += [C] s = set(lst) s.add("") s += [D] lst = list(s) lst.append({}) lst += [E] ##? dict() int() float() str() C D E lst[0] # ----------------- # functions # ----------------- def arr_append(arr4, a): arr4.append(a) def add_to_arr(arr2, a): arr2.append(a) return arr2 def app(a): arr3.append(a) arr3 = [1.0] res = add_to_arr(arr3, 1) arr_append(arr3, 'str') app(set()) #? float() str() int() set() arr3[10] #? float() str() int() set() res[10] # ----------------- # returns, special because the module dicts are not correct here. # ----------------- def blub(): a = [] a.append(1.0) #? float() a[0] return a #? float() blub()[0] # list with default def blub(): a = list([1]) a.append(1.0) return a #? int() float() blub()[0] # empty list def blub(): a = list() a.append(1.0) return a #? float() blub()[0] # with if def blub(): if 1: a = [] a.append(1.0) return a #? float() blub()[0] # with else clause def blub(): if random.choice([0, 1]): 1 else: a = [] a.append(1) return a #? int() blub()[0] # ----------------- # returns, the same for classes # ----------------- class C(): def blub(self, b): if 1: a = [] a.append(b) return a def blub2(self): """ mapper function """ a = self.blub(1.0) #? float() a[0] return a def literal_arr(self, el): self.a = [] self.a.append(el) #? int() self.a[0] return self.a def list_arr(self, el): self.b = list([]) self.b.append(el) #? float() self.b[0] return self.b #? int() C().blub(1)[0] #? float() C().blub2(1)[0] #? int() C().a[0] #? int() C().literal_arr(1)[0] #? float() C().b[0] #? float() C().list_arr(1.0)[0] # ----------------- # array recursions # ----------------- a = set([1.0]) a.update(a) a.update([1]) #? float() int() list(a)[0] def first(a): b = [] b.append(a) b.extend(second(a)) return list(b) def second(a): b = [] b.extend(first(a)) return list(b) #? float() first(1.0)[0] def third(): b = [] b.extend extend() b.extend(first()) return list(b) #? third()[0] jedi-0.9.0/test/completion/generators.py0000664000175000017500000000422412517736533020526 0ustar daviddavid00000000000000# ----------------- # yield statement # ----------------- def gen(): if random.choice([0, 1]): yield 1 else: yield "" gen_exe = gen() #? int() str() next(gen_exe) #? int() str() list next(gen_exe, list) def gen_ret(value): yield value #? int() next(gen_ret(1)) #? [] next(gen_ret()) # generators evaluate to true if cast by bool. a = '' if gen_ret(): a = 3 #? int() a # ----------------- # generators should not be indexable # ----------------- def get(param): if random.choice([0, 1]): yield 1 else: yield "" #? [] get()[0] # ----------------- # __iter__ # ----------------- for a in get(): #? int() str() a class Get(): def __iter__(self): if random.choice([0, 1]): yield 1 else: yield "" b = [] for a in Get(): #? 
int() str() a b += [a] #? list() b #? int() str() b[0] g = iter(Get()) #? int() str() next(g) g = iter([1.0]) #? float() next(g) # ----------------- # __next__ # ----------------- class Counter: def __init__(self, low, high): self.current = low self.high = high def __iter__(self): return self def next(self): """ need to have both __next__ and next, because of py2/3 testing """ return self.__next__() def __next__(self): if self.current > self.high: raise StopIteration else: self.current += 1 return self.current - 1 for c in Counter(3, 8): #? int() print c # ----------------- # tuples # ----------------- def gen(): if random.choice([0,1]): yield 1, "" else: yield 2, 1.0 a, b = next(gen()) #? int() a #? str() float() b def simple(): if random.choice([0, 1]): yield 1 else: yield "" a, b = simple() #? int() a #? str() b # ----------------- # More complicated access # ----------------- # `close` is a method wrapper. #? ['__call__'] gen().close.__call__ #? gen().throw() #? ['co_consts'] gen().gi_code.co_consts #? [] gen.gi_code.co_consts # `send` is also a method wrapper. #? ['__call__'] gen().send.__call__ #? tuple() gen().send() #? gen()() jedi-0.9.0/test/completion/arrays.py0000664000175000017500000001046112517736533017656 0ustar daviddavid00000000000000# ----------------- # basic array lookups # ----------------- #? int() [1,""][0] #? str() [1,""][1] #? int() str() [1,""][2] #? int() str() [1,""][20] #? int() str() [1,""][str(hello)] a = list() #? list() [a][0] #? list() [[a,a,a]][2][100] c = [[a,""]] #? str() c[0][1] b = [6,7] #? int() b[8-7] # ----------------- # Slices # ----------------- #? list() b[8:] #? list() b[int():] class _StrangeSlice(): def __getitem__(self, slice): return slice # Should not result in an error, just because the slice itself is returned. #? [] _StrangeSlice()[1:2] # ----------------- # iterable multiplication # ----------------- a = ['']*2 #? list() a # ----------------- # tuple assignments # ----------------- a1, b1 = (1, "") #? int() a1 #? str() b1 (a2, b2) = (1, "") #? int() a2 #? str() b2 # list assignment [list1, list2] = (1, "") #? int() list1 #? str() list2 [list3, list4] = [1, ""] #? int() list3 #? str() list4 # ----------------- # subtuple assignment # ----------------- (a3, (b3, c3)) = (1, ("", list)) #? list c3 a4, (b4, c4) = (1, ("", list)) #? list c4 #? int() a4 #? str() b4 # ----------------- # multiple assignments # ----------------- a = b = 1 #? int() a #? int() b (a, b) = (c, (e, f)) = ('2', (3, 4)) #? str() a #? tuple() b #? str() c #? int() e #? int() f # ----------------- # unnessecary braces # ----------------- #? int() (1) #? int() ((1)) #? int() ((1)+1) u, v = 1, "" #? int() u ((u1, v1)) = 1, "" #? int() u1 #? int() (u1) (a), b = 1, '' #? int() a def a(): return '' #? str() (a)() #? str() (a)().replace() #? int() (tuple).index() #? int() (tuple)().index() class C(): def __init__(self): self.a = (str()).upper() #? str() C().a # ----------------- # imbalanced sides # ----------------- (f, g) = (1,) #? int() f #? [] g. (f, g, h) = (1,'') #? int() f #? str() g #? [] h. (f1, g1) = 1 #? [] f1. #? [] g1. (f, g) = (1,'',1.0) #? int() f #? str() g # ----------------- # dicts # ----------------- dic2 = {'asdf': 3, 'b': 'str'} #? int() dic2['asdf'] # string literal #? int() dic2[r'asdf'] #? int() dic2[r'asdf'] #? int() str() dic2['just_something'] def f(): """ github #83 """ r = {} r['status'] = (200, 'ok') return r #? dict() f() # completion within dicts #? 
9 ['str'] {str: str} # iteration problem (detected with sith) d = dict({'a':''}) def y(a): return a #? y(**d) # problem with more complicated casts dic = {str(key): ''} #? str() dic[''] # ----------------- # with variable as index # ----------------- a = (1, "") index = 1 #? str() a[index] # these should just ouput the whole array index = int #? int() str() a[index] index = int() #? int() str() a[index] # dicts index = 'asdf' dic2 = {'asdf': 3, 'b': 'str'} #? int() dic2[index] # ----------------- # __getitem__ # ----------------- class GetItem(): def __getitem__(self, index): return 1.0 #? float() GetItem()[0] class GetItem(): def __init__(self, el): self.el = el def __getitem__(self, index): return self.el #? str() GetItem("")[1] class GetItemWithList(): def __getitem__(self, index): return [1, 1.0, 's'][index] #? float() GetItemWithList()[1] for i in 0, 2: #? int() str() GetItemWithList()[i] # ----------------- # conversions # ----------------- a = [1, ""] #? int() str() list(a)[1] #? int() str() list(a)[0] #? set(a)[0] #? int() str() list(set(a))[1] #? int() str() list(list(set(a)))[1] # does not yet work, because the recursion catching is not good enough (catches # to much) #? int() str() list(set(list(set(a))))[1] #? int() str() list(set(set(a)))[1] # frozenset #? int() str() list(frozenset(a))[1] #? int() str() list(set(frozenset(a)))[1] # iter #? int() str() list(iter(a))[1] #? int() str() list(iter(list(set(a))))[1] # tuple #? int() str() tuple(a)[1] #? int() str() tuple(list(set(a)))[1] #? int() tuple({1})[0] #? int() tuple((1,))[0] # implementation detail for lists, should not be visible #? [] list().__iterable # With a list comprehension. for i in set(a for a in [1]): #? int() i # ----------------- # Recursions # ----------------- def to_list(iterable): return list(set(iterable)) def recursion1(foo): return to_list(to_list(foo)) + recursion1(foo) #? int() recursion1([1,2])[0] # ----------------- # Merged Arrays # ----------------- for x in [1] + ['']: #? int() str() x jedi-0.9.0/test/completion/dynamic_params.py0000664000175000017500000000360612517736533021347 0ustar daviddavid00000000000000""" This is used for dynamic object completion. Jedi tries to guess param types with a backtracking approach. """ def func(a, default_arg=2): #? int() default_arg #? int() str() return a #? int() func(1) func int(1) + (int(2))+ func('') # Again the same function, but with another call. def func(a): #? float() return a func(1.0) # Again the same function, but with no call. def func(a): #? return a def func(a): #? float() return a str(func(1.0)) # ----------------- # *args, **args # ----------------- def arg(*args): #? tuple() args #? int() args[0] arg(1,"") # ----------------- # decorators # ----------------- def def_func(f): def wrapper(*args, **kwargs): return f(*args, **kwargs) return wrapper @def_func def func(c): #? str() return c #? str() func("str") @def_func def func(c=1): #? int() float() return c func(1.0) def tricky_decorator(func): def wrapper(*args): return func(1, *args) return wrapper @tricky_decorator def func(a, b): #? int() a #? float() b func(1.0) # Needs to be here, because in this case func is an import -> shouldn't lead to # exceptions. import sys as func func.sys # ----------------- # classes # ----------------- class A(): def __init__(self, a): #? str() a A("s") class A(): def __init__(self, a): #? int() a self.a = a def test(self, a): #? float() a self.c = self.test2() def test2(self): #? int() return self.a def test3(self): #? int() self.test2() #? 
int() self.c A(3).test(2.0) A(3).test2() # ----------------- # list comprehensions # ----------------- def from_comprehension(foo): #? int() float() return foo [from_comprehension(1.0) for n in (1,)] [from_comprehension(n) for n in (1,)] jedi-0.9.0/test/completion/imports.py0000664000175000017500000001113712517736533020053 0ustar daviddavid00000000000000# ----------------- # own structure # ----------------- # do separate scopes def scope_basic(): from import_tree import mod1 #? int() mod1.a #? [] import_tree.a #? [] import_tree.mod1 import import_tree #? str() import_tree.a def scope_pkg(): import import_tree.mod1 #? str() import_tree.a #? ['mod1'] import_tree.mod1 #? int() import_tree.mod1.a def scope_nested(): import import_tree.pkg.mod1 #? str() import_tree.a #? list import_tree.pkg.a #? ['sqrt'] import_tree.pkg.sqrt #? ['pkg'] import_tree.p #? float() import_tree.pkg.mod1.a import import_tree.random #? set import_tree.random.a def scope_nested2(): """Multiple modules should be indexable, if imported""" import import_tree.mod1 import import_tree.pkg #? ['mod1'] import_tree.mod1 #? ['pkg'] import_tree.pkg # With the latest changes this completion also works, because submodules # are always included (some nested import structures lead to this, # typically). #? ['rename1'] import_tree.rename1 def scope_from_import_variable(): """ All of them shouldn't work, because "fake" imports don't work in python without the use of ``sys.modules`` modifications (e.g. ``os.path`` see also github issue #213 for clarification. """ #? from import_tree.mod2.fake import a #? from import_tree.mod2.fake import c #? a #? c def scope_from_import_variable_with_parenthesis(): from import_tree.mod2.fake import ( a, foobarbaz ) #? a #? foobarbaz # shouldn't complete, should still list the name though. #? ['foobarbaz'] foobarbaz def as_imports(): from import_tree.mod1 import a as xyz #? int() xyz import not_existant, import_tree.mod1 as foo #? int() foo.a import import_tree.mod1 as bar #? int() bar.a def test_import_priorities(): """ It's possible to overwrite import paths in an ``__init__.py`` file, by just assigining something there. See also #536. """ from import_tree import the_pkg, invisible_pkg #? int() invisible_pkg # In real Python, this would be the module, but it's not, because Jedi # doesn't care about most stateful issues such as __dict__, which it would # need to, to do this in a correct way. #? int() the_pkg # Importing foo is still possible, even though inivisible_pkg got changed. #? float() from import_tree.invisible_pkg import foo # ----------------- # std lib modules # ----------------- import tokenize #? ['tok_name'] tokenize.tok_name from pyclbr import * #? ['readmodule_ex'] readmodule_ex import os #? ['dirname'] os.path.dirname from os.path import ( expanduser ) #? os.path.expanduser expanduser from itertools import (tee, islice) #? ['islice'] islice from functools import (partial, wraps) #? ['wraps'] wraps from keyword import kwlist, \ iskeyword #? ['kwlist'] kwlist #? [] from keyword import not_existing1, not_existing2 from tokenize import io tokenize.generate_tokens # ----------------- # builtins # ----------------- import sys #? ['prefix'] sys.prefix #? ['append'] sys.path.append from math import * #? ['cos', 'cosh'] cos def func_with_import(): import time return time #? ['sleep'] func_with_import().sleep # ----------------- # relative imports # ----------------- from .import_tree import mod1 #? int() mod1.a from ..import_tree import mod1 #? mod1.a from .......import_tree import mod1 #? 
mod1.a from .. import helpers #? int() helpers.sample_int from ..helpers import sample_int as f #? int() f from . import run #? [] run. from . import import_tree as imp_tree #? str() imp_tree.a from . import datetime as mod1 #? [] mod1. # self import # this can cause recursions from imports import * # ----------------- # packages # ----------------- from import_tree.mod1 import c #? set c from import_tree import recurse_class1 #? ['a'] recurse_class1.C.a # github #239 RecursionError #? ['a'] recurse_class1.C().a # ----------------- # Jedi debugging # ----------------- # memoizing issues (check git history for the fix) import not_existing_import if not_existing_import: a = not_existing_import else: a = not_existing_import #? a # ----------------- # module underscore descriptors # ----------------- def underscore(): import keyword #? ['__file__'] keyword.__file__ #? str() keyword.__file__ # Does that also work for the our own module? #? ['__file__'] __file__ jedi-0.9.0/test/completion/stdlib.py0000664000175000017500000000453112517736533017637 0ustar daviddavid00000000000000""" std library stuff """ # ----------------- # builtins # ----------------- arr = [''] #? str() sorted(arr)[0] #? str() next(reversed(arr)) next(reversed(arr)) # should not fail if there's no return value. def yielder(): yield None #? None next(reversed(yielder())) # empty reversed should not raise an error #? next(reversed()) #? str() next(open('')) #? int() {'a':2}.setdefault('a', 3) # Compiled classes should have the meta class attributes. #? ['__itemsize__'] tuple.__itemsize__ # ----------------- # re # ----------------- import re c = re.compile(r'a') # re.compile should not return str -> issue #68 #? [] c.startswith #? int() c.match().start() #? int() re.match(r'a', 'a').start() for a in re.finditer('a', 'a'): #? int() a.start() #? str() re.sub('a', 'a') # ----------------- # ref # ----------------- import weakref #? int() weakref.proxy(1) #? weakref.ref() weakref.ref(1) #? int() weakref.ref(1)() # ----------------- # functools # ----------------- import functools basetwo = functools.partial(int, base=2) #? int() basetwo() def function(a, b): return a, b a = functools.partial(function, 0) #? int() a('')[0] #? str() a('')[1] kw = functools.partial(function, b=1.0) tup = kw(1) #? int() tup[0] #? float() tup[1] def my_decorator(f): @functools.wraps(f) def wrapper(*args, **kwds): return f(*args, **kwds) return wrapper @my_decorator def example(a): return a #? str() example('') # ----------------- # sqlite3 (#84) # ----------------- import sqlite3 #? sqlite3.Connection() con = sqlite3.connect() #? sqlite3.Cursor() c = con.cursor() #? sqlite3.Row() row = c.fetchall()[0] #? str() row.keys()[0] def huhu(db): """ :type db: sqlite3.Connection :param db: the db connection """ #? sqlite3.Connection() db # ----------------- # hashlib # ----------------- import hashlib #? ['md5'] hashlib.md5 # ----------------- # copy # ----------------- import copy #? int() copy.deepcopy(1) #? copy.copy() # ----------------- # json # ----------------- # We don't want any results for json, because it depends on IO. import json #? json.load('asdf') #? json.loads('[1]') # ----------------- # random # ----------------- import random class A(object): def say(self): pass class B(object): def shout(self): pass cls = random.choice([A, B]) #? 
['say', 'shout'] cls().s jedi-0.9.0/test/test_jedi_system.py0000664000175000017500000000351212204171717017547 0ustar daviddavid00000000000000""" Test the Jedi "System" which means for example to test if imports are correctly used. """ import os import inspect import jedi def test_settings_module(): """ jedi.settings and jedi.cache.settings must be the same module. """ from jedi import cache from jedi import settings assert cache.settings is settings def test_no_duplicate_modules(): """ Make sure that import hack works as expected. Jedi does an import hack (see: jedi/__init__.py) to have submodules with circular dependencies. The modules in this circular dependency "loop" must be imported by ``import `` rather than normal ``from jedi import `` (or ``from . jedi ...``). This test make sure that this is satisfied. See also: - `#160 `_ - `#161 `_ """ import sys jedipath = os.path.dirname(os.path.abspath(jedi.__file__)) def is_submodule(m): try: filepath = m.__file__ except AttributeError: return False return os.path.abspath(filepath).startswith(jedipath) modules = list(filter(is_submodule, sys.modules.values())) top_modules = [m for m in modules if not m.__name__.startswith('jedi.')] for m in modules: if m is jedi: # py.test automatically improts `jedi.*` when --doctest-modules # is given. So this test cannot succeeds. continue for tm in top_modules: try: imported = getattr(m, tm.__name__) except AttributeError: continue if inspect.ismodule(imported): # module could have a function with the same name, e.g. # `keywords.keywords`. assert imported is tm jedi-0.9.0/test/test_integration_stdlib.py0000664000175000017500000000250212517736533021124 0ustar daviddavid00000000000000""" Tests of various stdlib related things that could not be tested with "Black Box Tests". """ import pytest from jedi import Script from jedi._compatibility import is_py26 # The namedtuple is different for different Python2.7 versions. Some versions # are missing the attribute `_class_template`. pytestmark = pytest.mark.skipif('sys.version_info[0] < 3') @pytest.mark.parametrize(['letter', 'expected'], [ ('n', ['name']), ('s', ['smart']), ]) def test_namedtuple_str(letter, expected): source = "import collections\n" + \ "Person = collections.namedtuple('Person', 'name smart')\n" + \ "dave = Person('Dave', False)\n" + \ "dave.%s" % letter result = Script(source).completions() completions = set(r.name for r in result) if is_py26: assert completions == set() else: assert completions == set(expected) def test_namedtuple_list(): source = "import collections\n" + \ "Cat = collections.namedtuple('Person', ['legs', u'length', 'large'])\n" + \ "garfield = Cat(4, '85cm', True)\n" + \ "garfield.l" result = Script(source).completions() completions = set(r.name for r in result) if is_py26: assert completions == set() else: assert completions == set(['legs', 'length', 'large']) jedi-0.9.0/test/test_integration_import.py0000664000175000017500000000577712517736533021176 0ustar daviddavid00000000000000""" Tests of various import related things that could not be tested with "Black Box Tests". 
""" from jedi import Script from .helpers import cwd_at from jedi._compatibility import is_py26 import pytest def test_goto_definition_on_import(): assert Script("import sys_blabla", 1, 8).goto_definitions() == [] assert len(Script("import sys", 1, 8).goto_definitions()) == 1 @cwd_at('jedi') def test_complete_on_empty_import(): assert Script("from datetime import").completions()[0].name == 'import' # should just list the files in the directory assert 10 < len(Script("from .", path='').completions()) < 30 # Global import assert len(Script("from . import", 1, 5, '').completions()) > 30 # relative import assert 10 < len(Script("from . import", 1, 6, '').completions()) < 30 # Global import assert len(Script("from . import classes", 1, 5, '').completions()) > 30 # relative import assert 10 < len(Script("from . import classes", 1, 6, '').completions()) < 30 wanted = set(['ImportError', 'import', 'ImportWarning']) assert set([c.name for c in Script("import").completions()]) == wanted if not is_py26: # python 2.6 doesn't always come with a library `import*`. assert len(Script("import import", path='').completions()) > 0 # 111 assert Script("from datetime import").completions()[0].name == 'import' assert Script("from datetime import ").completions() def test_imports_on_global_namespace_without_path(): """If the path is None, there shouldn't be any import problem""" completions = Script("import operator").completions() assert [c.name for c in completions] == ['operator'] completions = Script("import operator", path='example.py').completions() assert [c.name for c in completions] == ['operator'] # the first one has a path the second doesn't completions = Script("import keyword", path='example.py').completions() assert [c.name for c in completions] == ['keyword'] completions = Script("import keyword").completions() assert [c.name for c in completions] == ['keyword'] def test_named_import(): """named import - jedi-vim issue #8""" s = "import time as dt" assert len(Script(s, 1, 15, '/').goto_definitions()) == 1 assert len(Script(s, 1, 10, '/').goto_definitions()) == 1 @pytest.mark.skipif('True', reason='The nested import stuff is still very messy.') def test_goto_following_on_imports(): s = "import multiprocessing.dummy; multiprocessing.dummy" g = Script(s).goto_assignments() assert len(g) == 1 assert (g[0].line, g[0].column) != (0, 0) def test_after_from(): def check(source, result, column=None): completions = Script(source, column=column).completions() assert [c.name for c in completions] == result check('\nfrom os. ', ['path']) check('\nfrom os ', ['import']) check('from os ', ['import']) check('\nfrom os import whatever', ['import'], len('from os im')) check('from os\\\n', ['import']) check('from os \\\n', ['import']) jedi-0.9.0/test/test_speed.py0000664000175000017500000000351612363566726016352 0ustar daviddavid00000000000000""" Speed tests of Jedi. To prove that certain things don't take longer than they should. """ import time import functools from .helpers import TestCase, cwd_at import jedi class TestSpeed(TestCase): def _check_speed(time_per_run, number=4, run_warm=True): """ Speed checks should typically be very tolerant. Some machines are faster than others, but the tests should still pass. 
These tests are here to assure that certain effects that kill jedi performance are not reintroduced to Jedi.""" def decorated(func): @functools.wraps(func) def wrapper(self): if run_warm: func(self) first = time.time() for i in range(number): func(self) single_time = (time.time() - first) / number print('\nspeed', func, single_time) assert single_time < time_per_run return wrapper return decorated @_check_speed(0.2) def test_os_path_join(self): s = "from posixpath import join; join('', '')." assert len(jedi.Script(s).completions()) > 10 # is a str completion @_check_speed(0.15) def test_scipy_speed(self): s = 'import scipy.weave; scipy.weave.inline(' script = jedi.Script(s, 1, len(s), '') script.call_signatures() #print(jedi.imports.imports_processed) @_check_speed(0.8) @cwd_at('test') def test_precedence_slowdown(self): """ Precedence calculation can slow down things significantly in edge cases. Having strange recursion structures increases the problem. """ with open('speed/precedence.py') as f: line = len(f.read().splitlines()) assert jedi.Script(line=line, path='speed/precedence.py').goto_definitions() jedi-0.9.0/test/test_debug.py0000664000175000017500000000032112331540214016302 0ustar daviddavid00000000000000import jedi from jedi import debug def test_simple(): jedi.set_debug_function() debug.speed('foo') debug.dbg('bar') debug.warning('baz') jedi.set_debug_function(None, False, False, False) jedi-0.9.0/test/test_regression.py0000664000175000017500000001240312517736533017421 0ustar daviddavid00000000000000""" Unit tests to avoid errors of the past. These are also all tests that didn't found a good place in any other testing module. """ import os import sys import textwrap from .helpers import TestCase, cwd_at import pytest import jedi from jedi._compatibility import u from jedi import Script from jedi import api from jedi.evaluate import imports from jedi.parser import Parser, load_grammar #jedi.set_debug_function() class TestRegression(TestCase): def test_goto_definition_cursor(self): s = ("class A():\n" " def _something(self):\n" " return\n" " def different_line(self,\n" " b):\n" " return\n" "A._something\n" "A.different_line" ) in_name = 2, 9 under_score = 2, 8 cls = 2, 7 should1 = 7, 10 diff_line = 4, 10 should2 = 8, 10 def get_def(pos): return [d.description for d in Script(s, *pos).goto_definitions()] in_name = get_def(in_name) under_score = get_def(under_score) should1 = get_def(should1) should2 = get_def(should2) diff_line = get_def(diff_line) assert should1 == in_name assert should1 == under_score assert should2 == diff_line assert get_def(cls) == [] @pytest.mark.skipif('True', reason='Skip for now, test case is not really supported.') @cwd_at('jedi') def test_add_dynamic_mods(self): fname = '__main__.py' api.settings.additional_dynamic_modules = [fname] # Fictional module that defines a function. src1 = "def r(a): return a" # Other fictional modules in another place in the fs. src2 = 'from .. import setup; setup.r(1)' imports.load_module(os.path.abspath(fname), src2) result = Script(src1, path='../setup.py').goto_definitions() assert len(result) == 1 assert result[0].description == 'class int' def test_os_nowait(self): """ github issue #45 """ s = Script("import os; os.P_").completions() assert 'P_NOWAIT' in [i.name for i in s] def test_points_in_completion(self): """At some point, points were inserted into the completions, this caused problems, sometimes. 
""" c = Script("if IndentationErr").completions() assert c[0].name == 'IndentationError' self.assertEqual(c[0].complete, 'or') def test_no_statement_parent(self): source = textwrap.dedent(""" def f(): pass class C: pass variable = f if random.choice([0, 1]) else C""") defs = Script(source, column=3).goto_definitions() defs = sorted(defs, key=lambda d: d.line) self.assertEqual([d.description for d in defs], ['def f', 'class C']) def test_end_pos_line(self): # jedi issue #150 s = u("x()\nx( )\nx( )\nx ( )") parser = Parser(load_grammar(), s) for i, s in enumerate(parser.module.statements): assert s.end_pos == (i + 1, i + 3) def check_definition_by_marker(self, source, after_cursor, names): r""" Find definitions specified by `after_cursor` and check what found For example, for the following configuration, you can pass ``after_cursor = 'y)'``.:: function( x, y) \ `- You want cursor to be here """ source = textwrap.dedent(source) for (i, line) in enumerate(source.splitlines()): if after_cursor in line: break column = len(line) - len(after_cursor) defs = Script(source, i + 1, column).goto_definitions() print(defs) assert [d.name for d in defs] == names def test_backslash_continuation(self): """ Test that ModuleWithCursor.get_path_until_cursor handles continuation """ self.check_definition_by_marker(r""" x = 0 a = \ [1, 2, 3, 4, 5, 6, 7, 8, 9, x] # <-- here """, '] # <-- here', ['int']) # completion in whitespace s = 'asdfxyxxxxxxxx sds\\\n hello' assert Script(s, 2, 4).goto_assignments() == [] def test_backslash_continuation_and_bracket(self): self.check_definition_by_marker(r""" x = 0 a = \ [1, 2, 3, 4, 5, 6, 7, 8, 9, (x)] # <-- here """, '(x)] # <-- here', []) def test_generator(self): # Did have some problems with the usage of generator completions this # way. s = "def abc():\n" \ " yield 1\n" \ "abc()." assert Script(s).completions() def test_loading_unicode_files_with_bad_global_charset(monkeypatch, tmpdir): dirname = str(tmpdir.mkdir('jedi-test')) filename1 = os.path.join(dirname, 'test1.py') filename2 = os.path.join(dirname, 'test2.py') if sys.version_info < (3, 0): data = "# coding: latin-1\nfoo = 'm\xf6p'\n" else: data = "# coding: latin-1\nfoo = 'm\xf6p'\n".encode("latin-1") with open(filename1, "wb") as f: f.write(data) s = Script("from test1 import foo\nfoo.", line=2, column=4, path=filename2) s.completions() jedi-0.9.0/test/refactor.py0000775000175000017500000000662612331540214016003 0ustar daviddavid00000000000000#!/usr/bin/env python """ Refactoring tests work a little bit similar to Black Box tests. But the idea is here to compare two versions of code. 
**Note: Refactoring is currently not in active development (and was never stable), the tests are therefore not really valuable - just ignore them.** """ from __future__ import with_statement import os import re from functools import reduce import jedi from jedi import refactoring class RefactoringCase(object): def __init__(self, name, source, line_nr, index, path, new_name, start_line_test, desired): self.name = name self.source = source self.line_nr = line_nr self.index = index self.path = path self.new_name = new_name self.start_line_test = start_line_test self.desired = desired def refactor(self): script = jedi.Script(self.source, self.line_nr, self.index, self.path) f_name = os.path.basename(self.path) refactor_func = getattr(refactoring, f_name.replace('.py', '')) args = (self.new_name,) if self.new_name else () return refactor_func(script, *args) def run(self): refactor_object = self.refactor() # try to get the right excerpt of the newfile f = refactor_object.new_files()[self.path] lines = f.splitlines()[self.start_line_test:] end = self.start_line_test + len(lines) pop_start = None for i, l in enumerate(lines): if l.startswith('# +++'): end = i break elif '#? ' in l: pop_start = i lines.pop(pop_start) self.result = '\n'.join(lines[:end - 1]).strip() return self.result def check(self): return self.run() == self.desired def __repr__(self): return '<%s: %s:%s>' % (self.__class__.__name__, self.name, self.line_nr - 1) def collect_file_tests(source, path, lines_to_execute): r = r'^# --- ?([^\n]*)\n((?:(?!\n# \+\+\+).)*)' \ r'\n# \+\+\+((?:(?!\n# ---).)*)' for match in re.finditer(r, source, re.DOTALL | re.MULTILINE): name = match.group(1).strip() first = match.group(2).strip() second = match.group(3).strip() start_line_test = source[:match.start()].count('\n') + 1 # get the line with the position of the operation p = re.match(r'((?:(?!#\?).)*)#\? 
(\d*) ?([^\n]*)', first, re.DOTALL) if p is None: print("Please add a test start.") continue until = p.group(1) index = int(p.group(2)) new_name = p.group(3) line_nr = start_line_test + until.count('\n') + 2 if lines_to_execute and line_nr - 1 not in lines_to_execute: continue yield RefactoringCase(name, source, line_nr, index, path, new_name, start_line_test, second) def collect_dir_tests(base_dir, test_files): for f_name in os.listdir(base_dir): files_to_execute = [a for a in test_files.items() if a[0] in f_name] lines_to_execute = reduce(lambda x, y: x + y[1], files_to_execute, []) if f_name.endswith(".py") and (not test_files or files_to_execute): path = os.path.join(base_dir, f_name) with open(path) as f: source = f.read() for case in collect_file_tests(source, path, lines_to_execute): yield case jedi-0.9.0/test/test_evaluate/0000775000175000017500000000000012517736563016500 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/test_pyc.py0000664000175000017500000000322412331540214020661 0ustar daviddavid00000000000000""" Test completions from *.pyc files: - generate a dummy python module - compile the dummy module to generate a *.pyc - delete the pure python dummy module - try jedi on the generated *.pyc """ import os import shutil import sys import pytest import jedi from ..helpers import cwd_at SRC = """class Foo: pass class Bar: pass """ def generate_pyc(): os.mkdir("dummy_package") with open("dummy_package/__init__.py", 'w'): pass with open("dummy_package/dummy.py", 'w') as f: f.write(SRC) import compileall compileall.compile_file("dummy_package/dummy.py") os.remove("dummy_package/dummy.py") if sys.version_info[0] == 3: # Python3 specific: # To import pyc modules, we must move them out of the __pycache__ # directory and rename them to remove ".cpython-%s%d" # see: http://stackoverflow.com/questions/11648440/python-does-not-detect-pyc-files for f in os.listdir("dummy_package/__pycache__"): dst = f.replace('.cpython-%s%s' % sys.version_info[:2], "") dst = os.path.join("dummy_package", dst) shutil.copy(os.path.join("dummy_package/__pycache__", f), dst) # Python 2.6 does not necessarily come with `compileall.compile_file`. @pytest.mark.skipif("sys.version_info > (2,6)") @cwd_at('test/test_evaluate') def test_pyc(): """ The list of completion must be greater than 2. 
""" try: generate_pyc() s = jedi.Script("from dummy_package import dummy; dummy.", path='blub.py') assert len(s.completions()) >= 2 finally: shutil.rmtree("dummy_package") if __name__ == "__main__": test_pyc() jedi-0.9.0/test/test_evaluate/test_namespace_package.py0000664000175000017500000000441412517736533023520 0ustar daviddavid00000000000000import jedi import sys from os.path import dirname, join def test_namespace_package(): sys.path.insert(0, join(dirname(__file__), 'namespace_package/ns1')) sys.path.insert(1, join(dirname(__file__), 'namespace_package/ns2')) try: # goto definition assert jedi.Script('from pkg import ns1_file').goto_definitions() assert jedi.Script('from pkg import ns2_file').goto_definitions() assert not jedi.Script('from pkg import ns3_file').goto_definitions() # goto assignment tests = { 'from pkg.ns2_folder.nested import foo': 'nested!', 'from pkg.ns2_folder import foo': 'ns2_folder!', 'from pkg.ns2_file import foo': 'ns2_file!', 'from pkg.ns1_folder import foo': 'ns1_folder!', 'from pkg.ns1_file import foo': 'ns1_file!', 'from pkg import foo': 'ns1!', } for source, solution in tests.items(): ass = jedi.Script(source).goto_assignments() assert len(ass) == 1 assert ass[0].description == "foo = '%s'" % solution # completion completions = jedi.Script('from pkg import ').completions() names = [str(c.name) for c in completions] # str because of unicode compare = ['foo', 'ns1_file', 'ns1_folder', 'ns2_folder', 'ns2_file', 'pkg_resources', 'pkgutil', '__name__', '__path__', '__package__', '__file__', '__doc__'] # must at least contain these items, other items are not important assert set(compare) == set(names) tests = { 'from pkg import ns2_folder as x': 'ns2_folder!', 'from pkg import ns2_file as x': 'ns2_file!', 'from pkg.ns2_folder import nested as x': 'nested!', 'from pkg import ns1_folder as x': 'ns1_folder!', 'from pkg import ns1_file as x': 'ns1_file!', 'import pkg as x': 'ns1!', } for source, solution in tests.items(): for c in jedi.Script(source + '; x.').completions(): if c.name == 'foo': completion = c solution = "statement: foo = '%s'" % solution assert completion.description == solution finally: sys.path.pop(0) sys.path.pop(0) jedi-0.9.0/test/test_evaluate/__init__.py0000664000175000017500000000000012331540214020553 0ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/test_buildout_detection.py0000664000175000017500000000527712517736533024006 0ustar daviddavid00000000000000import os from textwrap import dedent from jedi._compatibility import u from jedi.evaluate.sys_path import (_get_parent_dir_with_file, _get_buildout_scripts, sys_path_with_modifications, _check_module) from jedi.evaluate import Evaluator from jedi.parser import Parser, load_grammar from ..helpers import cwd_at @cwd_at('test/test_evaluate/buildout_project/src/proj_name') def test_parent_dir_with_file(): parent = _get_parent_dir_with_file( os.path.abspath(os.curdir), 'buildout.cfg') assert parent is not None assert parent.endswith(os.path.join('test', 'test_evaluate', 'buildout_project')) @cwd_at('test/test_evaluate/buildout_project/src/proj_name') def test_buildout_detection(): scripts = _get_buildout_scripts(os.path.abspath('./module_name.py')) assert len(scripts) == 1 curdir = os.path.abspath(os.curdir) appdir_path = os.path.normpath(os.path.join(curdir, '../../bin/app')) assert scripts[0] == appdir_path def test_append_on_non_sys_path(): SRC = dedent(u(""" class Dummy(object): path = [] d = Dummy() d.path.append('foo')""")) grammar = load_grammar() p = Parser(grammar, SRC) paths 
= _check_module(Evaluator(grammar), p.module) assert len(paths) > 0 assert 'foo' not in paths def test_path_from_invalid_sys_path_assignment(): SRC = dedent(u(""" import sys sys.path = 'invalid'""")) grammar = load_grammar() p = Parser(grammar, SRC) paths = _check_module(Evaluator(grammar), p.module) assert len(paths) > 0 assert 'invalid' not in paths @cwd_at('test/test_evaluate/buildout_project/src/proj_name/') def test_sys_path_with_modifications(): SRC = dedent(u(""" import os """)) grammar = load_grammar() p = Parser(grammar, SRC) p.module.path = os.path.abspath(os.path.join(os.curdir, 'module_name.py')) paths = sys_path_with_modifications(Evaluator(grammar), p.module) assert '/tmp/.buildout/eggs/important_package.egg' in paths def test_path_from_sys_path_assignment(): SRC = dedent(u(""" #!/usr/bin/python import sys sys.path[0:0] = [ '/usr/lib/python3.4/site-packages', '/home/test/.buildout/eggs/important_package.egg' ] path[0:0] = [1] import important_package if __name__ == '__main__': sys.exit(important_package.main())""")) grammar = load_grammar() p = Parser(grammar, SRC) paths = _check_module(Evaluator(grammar), p.module) assert 1 not in paths assert '/home/test/.buildout/eggs/important_package.egg' in paths jedi-0.9.0/test/test_evaluate/init_extension_module/0000775000175000017500000000000012517736563023104 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/init_extension_module/__init__.cpython-34m.so0000775000175000017500000004015512517736533027277 0ustar daviddavid00000000000000[ELF 64-bit shared object (compiled init_extension_module test fixture); binary contents omitted]
jedi-0.9.0/test/test_evaluate/init_extension_module/setup.py0000664000175000017500000000037212517736533024615 0ustar daviddavid00000000000000from distutils.core import
setup, Extension setup(name='init_extension_module', version='0.0', description='', ext_modules=[ Extension('init_extension_module.__init__', sources=['module.c']) ] ) jedi-0.9.0/test/test_evaluate/init_extension_module/module.c0000664000175000017500000000044612517736533024536 0ustar daviddavid00000000000000#include "Python.h" static struct PyModuleDef module = { PyModuleDef_HEAD_INIT, "init_extension_module", NULL, -1, NULL }; PyMODINIT_FUNC PyInit_init_extension_module(void){ PyObject *m = PyModule_Create(&module); PyModule_AddObject(m, "foo", Py_None); return m; } jedi-0.9.0/test/test_evaluate/namespace_package/0000775000175000017500000000000012517736563022107 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/namespace_package/ns1/0000775000175000017500000000000012517736563022610 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/namespace_package/ns1/pkg/0000775000175000017500000000000012517736563023371 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/namespace_package/ns1/pkg/__init__.py0000664000175000017500000000032612331540214025457 0ustar daviddavid00000000000000foo = 'ns1!' # this is a namespace package try: import pkg_resources pkg_resources.declare_namespace(__name__) except ImportError: import pkgutil __path__ = pkgutil.extend_path(__path__, __name__) jedi-0.9.0/test/test_evaluate/namespace_package/ns1/pkg/ns1_folder/0000775000175000017500000000000012517736563025425 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/namespace_package/ns1/pkg/ns1_folder/__init__.py0000664000175000017500000000002412331540214027506 0ustar daviddavid00000000000000foo = 'ns1_folder!' jedi-0.9.0/test/test_evaluate/namespace_package/ns1/pkg/ns1_file.py0000664000175000017500000000002212331540214025411 0ustar daviddavid00000000000000foo = 'ns1_file!' jedi-0.9.0/test/test_evaluate/namespace_package/ns2/0000775000175000017500000000000012517736563022611 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/namespace_package/ns2/pkg/0000775000175000017500000000000012517736563023372 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/namespace_package/ns2/pkg/ns2_folder/0000775000175000017500000000000012517736563025427 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/namespace_package/ns2/pkg/ns2_folder/__init__.py0000664000175000017500000000002412331540214027510 0ustar daviddavid00000000000000foo = 'ns2_folder!' jedi-0.9.0/test/test_evaluate/namespace_package/ns2/pkg/ns2_folder/nested/0000775000175000017500000000000012517736563026711 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/namespace_package/ns2/pkg/ns2_folder/nested/__init__.py0000664000175000017500000000002012331540214030766 0ustar daviddavid00000000000000foo = 'nested!' jedi-0.9.0/test/test_evaluate/namespace_package/ns2/pkg/ns2_file.py0000664000175000017500000000002212331540214025413 0ustar daviddavid00000000000000foo = 'ns2_file!' 
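# The ns1/ and ns2/ trees above are fixtures for old-style namespace packages:
# both contain a ``pkg`` directory, and ns1/pkg/__init__.py merges the portions
# via pkg_resources.declare_namespace() or pkgutil.extend_path().  A minimal
# sketch of how such a split package is consumed once both roots are on
# sys.path; the ``root`` path below is a hypothetical location of the fixtures
# and not part of the test suite:
import sys
from os.path import dirname, join

root = dirname(__file__)  # assumed: directory that contains namespace_package/
sys.path[0:0] = [join(root, 'namespace_package/ns1'),
                 join(root, 'namespace_package/ns2')]

import pkg                          # resolves to ns1/pkg, whose __init__ extends __path__
from pkg import ns1_file, ns2_file  # submodules come from different roots
print(pkg.foo, ns1_file.foo, ns2_file.foo)  # -> ns1! ns1_file! ns2_file!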
jedi-0.9.0/test/test_evaluate/test_compiled.py0000664000175000017500000000341312517736533021703 0ustar daviddavid00000000000000from jedi._compatibility import builtins, is_py3 from jedi.parser import load_grammar from jedi.parser.tree import Function from jedi.evaluate import compiled, representation from jedi.evaluate import Evaluator from jedi import Script def test_simple(): e = Evaluator(load_grammar()) bltn = compiled.CompiledObject(builtins) obj = compiled.CompiledObject('_str_', bltn) upper = e.find_types(obj, 'upper') assert len(upper) == 1 objs = list(e.execute(upper[0])) assert len(objs) == 1 assert isinstance(objs[0], representation.Instance) def test_fake_loading(): assert isinstance(compiled.create(Evaluator(load_grammar()), next), Function) string = compiled.builtin.get_subscope_by_name('str') from_name = compiled._create_from_name( compiled.builtin, string, '__init__' ) assert isinstance(from_name, Function) def test_fake_docstr(): assert compiled.create(Evaluator(load_grammar()), next).raw_doc == next.__doc__ def test_parse_function_doc_illegal_docstr(): docstr = """ test_func(o doesn't have a closing bracket. """ assert ('', '') == compiled._parse_function_doc(docstr) def test_doc(): """ Even CompiledObject docs always return empty docstrings - not None, that's just a Jedi API definition. """ obj = compiled.CompiledObject(''.__getnewargs__) assert obj.doc == '' def test_string_literals(): def typ(string): d = Script(string).goto_definitions()[0] return d.name assert typ('""') == 'str' assert typ('r""') == 'str' if is_py3: assert typ('br""') == 'bytes' assert typ('b""') == 'bytes' assert typ('u""') == 'str' else: assert typ('b""') == 'str' assert typ('u""') == 'unicode' jedi-0.9.0/test/test_evaluate/absolute_import/0000775000175000017500000000000012517736563021710 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/absolute_import/unittest.py0000664000175000017500000000066512331540214024124 0ustar daviddavid00000000000000""" This is a module that shadows a builtin (intentionally). It imports a local module, which in turn imports stdlib unittest (the name shadowed by this module). If that is properly resolved, there's no problem. However, if jedi doesn't understand absolute_imports, it will get this module again, causing infinite recursion. """ from local_module import Assertions class TestCase(Assertions): def test(self): self.assertT jedi-0.9.0/test/test_evaluate/absolute_import/local_module.py0000664000175000017500000000057712331540214024706 0ustar daviddavid00000000000000""" This is a module that imports the *standard library* unittest, despite there being a local "unittest" module. It specifies that it wants the stdlib one with the ``absolute_import`` __future__ import. The twisted equivalent of this module is ``twisted.trial._synctest``. """ from __future__ import absolute_import import unittest class Assertions(unittest.TestCase): pass jedi-0.9.0/test/test_evaluate/test_docstring.py0000664000175000017500000001045012517736533022102 0ustar daviddavid00000000000000""" Testing of docstring related issues and especially ``jedi.docstrings``. 
""" from textwrap import dedent import jedi from ..helpers import unittest try: import numpydoc except ImportError: numpydoc_unavailable = True else: numpydoc_unavailable = False class TestDocstring(unittest.TestCase): def test_function_doc(self): defs = jedi.Script(""" def func(): '''Docstring of `func`.''' func""").goto_definitions() self.assertEqual(defs[0].raw_doc, 'Docstring of `func`.') @unittest.skip('need evaluator class for that') def test_attribute_docstring(self): defs = jedi.Script(""" x = None '''Docstring of `x`.''' x""").goto_definitions() self.assertEqual(defs[0].raw_doc, 'Docstring of `x`.') @unittest.skip('need evaluator class for that') def test_multiple_docstrings(self): defs = jedi.Script(""" def func(): '''Original docstring.''' x = func '''Docstring of `x`.''' x""").goto_definitions() docs = [d.raw_doc for d in defs] self.assertEqual(docs, ['Original docstring.', 'Docstring of `x`.']) def test_completion(self): assert jedi.Script(''' class DocstringCompletion(): #? [] """ asdfas """''').completions() def test_docstrings_type_dotted_import(self): s = """ def func(arg): ''' :type arg: random.Random ''' arg.""" names = [c.name for c in jedi.Script(s).completions()] assert 'seed' in names def test_docstrings_param_type(self): s = """ def func(arg): ''' :param str arg: some description ''' arg.""" names = [c.name for c in jedi.Script(s).completions()] assert 'join' in names def test_docstrings_type_str(self): s = """ def func(arg): ''' :type arg: str ''' arg.""" names = [c.name for c in jedi.Script(s).completions()] assert 'join' in names def test_docstring_instance(self): # The types hint that it's a certain kind s = dedent(""" class A: def __init__(self,a): ''' :type a: threading.Thread ''' if a is not None: a.start() self.a = a def method_b(c): ''' :type c: A ''' c.""") names = [c.name for c in jedi.Script(s).completions()] assert 'a' in names assert '__init__' in names assert 'mro' not in names # Exists only for types. 
@unittest.skipIf(numpydoc_unavailable, 'numpydoc module is unavailable') def test_numpydoc_docstring(self): s = dedent(''' def foobar(x, y): """ Parameters ---------- x : int y : str """ y.''') names = [c.name for c in jedi.Script(s).completions()] assert 'isupper' in names assert 'capitalize' in names @unittest.skipIf(numpydoc_unavailable, 'numpydoc module is unavailable') def test_numpydoc_docstring_set_of_values(self): s = dedent(''' def foobar(x, y): """ Parameters ---------- x : {'foo', 'bar', 100500}, optional """ x.''') names = [c.name for c in jedi.Script(s).completions()] assert 'isupper' in names assert 'capitalize' in names assert 'numerator' in names @unittest.skipIf(numpydoc_unavailable, 'numpydoc module is unavailable') def test_numpydoc_alternative_types(self): s = dedent(''' def foobar(x, y): """ Parameters ---------- x : int or str or list """ x.''') names = [c.name for c in jedi.Script(s).completions()] assert 'isupper' in names assert 'capitalize' in names assert 'numerator' in names assert 'append' in names jedi-0.9.0/test/test_evaluate/buildout_project/0000775000175000017500000000000012517736563022055 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/buildout_project/src/0000775000175000017500000000000012517736563022644 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/buildout_project/src/proj_name/0000775000175000017500000000000012517736563024616 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/buildout_project/src/proj_name/module_name.py0000664000175000017500000000000012363566726027444 0ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/buildout_project/buildout.cfg0000664000175000017500000000000012363566726024354 0ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/buildout_project/bin/0000775000175000017500000000000012517736563022625 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/buildout_project/bin/empty_file0000664000175000017500000000000012363566726024674 0ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/buildout_project/bin/app0000664000175000017500000000034312363566726023331 0ustar daviddavid00000000000000#!/usr/bin/python import sys sys.path[0:0] = [ '/usr/lib/python3.4/site-packages', '/tmp/.buildout/eggs/important_package.egg' ] import important_package if __name__ == '__main__': sys.exit(important_package.main()) jedi-0.9.0/test/test_evaluate/test_extension.py0000664000175000017500000000322012517736533022117 0ustar daviddavid00000000000000""" Test compiled module """ import os import jedi from ..helpers import cwd_at import pytest def test_completions(): s = jedi.Script('import _ctypes; _ctypes.') assert len(s.completions()) >= 15 def test_call_signatures_extension(): if os.name == 'nt': func = 'LoadLibrary' params = 1 else: func = 'dlopen' params = 2 s = jedi.Script('import _ctypes; _ctypes.%s(' % (func,)) sigs = s.call_signatures() assert len(sigs) == 1 assert len(sigs[0].params) == params def test_call_signatures_stdlib(): s = jedi.Script('import math; math.cos(') sigs = s.call_signatures() assert len(sigs) == 1 assert len(sigs[0].params) == 1 # Check only on linux 64 bit platform and Python3.4. @pytest.mark.skipif('sys.platform != "linux" or sys.maxsize <= 2**32 or sys.version_info[:2] != (3, 4)') @cwd_at('test/test_evaluate') def test_init_extension_module(): """ ``__init__`` extension modules are also packages and Jedi should understand that. Originally coming from #472. 
This test was built by the module.c and setup.py combination you can find in the init_extension_module folder. You can easily build the `__init__.cpython-34m.so` by compiling it (create a virtualenv and run `setup.py install`. This is also why this test only runs on certain systems (and Python 3.4). """ s = jedi.Script('import init_extension_module as i\ni.', path='not_existing.py') assert 'foo' in [c.name for c in s.completions()] s = jedi.Script('from init_extension_module import foo\nfoo', path='not_existing.py') assert ['foo'] == [c.name for c in s.completions()] jedi-0.9.0/test/test_evaluate/test_absolute_import.py0000664000175000017500000000236512517736533023324 0ustar daviddavid00000000000000""" Tests ``from __future__ import absolute_import`` (only important for Python 2.X) """ import jedi from jedi._compatibility import u from jedi.parser import Parser, load_grammar from .. import helpers def test_explicit_absolute_imports(): """ Detect modules with ``from __future__ import absolute_import``. """ parser = Parser(load_grammar(), u("from __future__ import absolute_import"), "test.py") assert parser.module.has_explicit_absolute_import def test_no_explicit_absolute_imports(): """ Detect modules without ``from __future__ import absolute_import``. """ parser = Parser(load_grammar(), u("1"), "test.py") assert not parser.module.has_explicit_absolute_import def test_dont_break_imports_without_namespaces(): """ The code checking for ``from __future__ import absolute_import`` shouldn't assume that all imports have non-``None`` namespaces. """ src = u("from __future__ import absolute_import\nimport xyzzy") parser = Parser(load_grammar(), src, "test.py") assert parser.module.has_explicit_absolute_import @helpers.cwd_at("test/test_evaluate/absolute_import") def test_can_complete_when_shadowing(): script = jedi.Script(path="unittest.py") assert script.completions() jedi-0.9.0/test/test_evaluate/test_representation.py0000664000175000017500000000170412517736533023152 0ustar daviddavid00000000000000from textwrap import dedent from jedi import Script def get_definition_and_evaluator(source): d = Script(dedent(source)).goto_definitions()[0] return d._name.parent, d._evaluator def test_function_execution(): """ We've been having an issue of a mutable list that was changed inside the function execution. Test if an execution always returns the same result. """ s = """ def x(): return str() x""" func, evaluator = get_definition_and_evaluator(s) # Now just use the internals of the result (easiest way to get a fully # usable function). # Should return the same result both times. assert len(evaluator.execute(func)) == 1 assert len(evaluator.execute(func)) == 1 def test_class_mro(): s = """ class X(object): pass X""" cls, evaluator = get_definition_and_evaluator(s) mro = cls.py__mro__(evaluator) assert [str(c.name) for c in mro] == ['X', 'object'] jedi-0.9.0/test/test_evaluate/test_annotations.py0000664000175000017500000000116412517736533022445 0ustar daviddavid00000000000000from textwrap import dedent import jedi import pytest @pytest.mark.skipif('sys.version_info[0] < 3') def test_simple_annotations(): """ Annotations only exist in Python 3. At the moment we ignore them. So they should be parsed and not interfere with anything. 
""" source = dedent("""\ def annot(a:3): return a annot('')""") assert [d.name for d in jedi.Script(source, ).goto_definitions()] == ['str'] source = dedent("""\ def annot_ret(a:3) -> 3: return a annot_ret('')""") assert [d.name for d in jedi.Script(source, ).goto_definitions()] == ['str'] jedi-0.9.0/test/test_evaluate/not_in_sys_path/0000775000175000017500000000000012517736563021700 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/not_in_sys_path/not_in_sys_path_package/0000775000175000017500000000000012517736563026553 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/not_in_sys_path/not_in_sys_path_package/__init__.py0000664000175000017500000000002212331540214030632 0ustar daviddavid00000000000000value = 'package' jedi-0.9.0/test/test_evaluate/not_in_sys_path/not_in_sys_path_package/module.py0000664000175000017500000000003112331540214030360 0ustar daviddavid00000000000000value = 'package.module' jedi-0.9.0/test/test_evaluate/not_in_sys_path/pkg/0000775000175000017500000000000012517736563022461 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/not_in_sys_path/pkg/__init__.py0000664000175000017500000000000012517736533024555 0ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/not_in_sys_path/pkg/module.py0000664000175000017500000000033512517736533024316 0ustar daviddavid00000000000000from not_in_sys_path import not_in_sys_path from not_in_sys_path import not_in_sys_path_package from not_in_sys_path.not_in_sys_path_package import module not_in_sys_path.value not_in_sys_path_package.value module.value jedi-0.9.0/test/test_evaluate/not_in_sys_path/__init__.py0000664000175000017500000000000012517736533023774 0ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/not_in_sys_path/not_in_sys_path.py0000664000175000017500000000001212331540214025417 0ustar daviddavid00000000000000value = 3 jedi-0.9.0/test/test_evaluate/flask-site-packages/0000775000175000017500000000000012517736563022316 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/flask-site-packages/flask_foo.py0000664000175000017500000000003412517736533024625 0ustar daviddavid00000000000000class Foo(object): pass jedi-0.9.0/test/test_evaluate/flask-site-packages/flask_baz/0000775000175000017500000000000012517736563024252 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/flask-site-packages/flask_baz/__init__.py0000664000175000017500000000001012517736533026347 0ustar daviddavid00000000000000Baz = 1 jedi-0.9.0/test/test_evaluate/flask-site-packages/flaskext/0000775000175000017500000000000012517736563024137 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/flask-site-packages/flaskext/__init__.py0000664000175000017500000000000012517736533026233 0ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/flask-site-packages/flaskext/moo/0000775000175000017500000000000012517736563024731 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/flask-site-packages/flaskext/moo/__init__.py0000664000175000017500000000001012517736533027026 0ustar daviddavid00000000000000Moo = 1 jedi-0.9.0/test/test_evaluate/flask-site-packages/flaskext/bar.py0000664000175000017500000000003412517736533025247 0ustar daviddavid00000000000000class Bar(object): pass jedi-0.9.0/test/test_evaluate/flask-site-packages/flask/0000775000175000017500000000000012517736563023416 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/flask-site-packages/flask/__init__.py0000664000175000017500000000002512517736533025521 0ustar daviddavid00000000000000 
jedi-0.9.0/test/test_evaluate/flask-site-packages/flask/ext/0000775000175000017500000000000012517736563024216 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/flask-site-packages/flask/ext/__init__.py0000664000175000017500000000000112517736533026313 0ustar daviddavid00000000000000 jedi-0.9.0/test/test_evaluate/test_imports.py0000664000175000017500000000372712517736533021614 0ustar daviddavid00000000000000import os import sys import pytest import jedi from jedi._compatibility import find_module_py33 from ..helpers import cwd_at @pytest.mark.skipif('sys.version_info < (3,3)') def test_find_module_py33(): """Needs to work like the old find_module.""" assert find_module_py33('_io') == (None, '_io', False) @cwd_at('test/test_evaluate/not_in_sys_path/pkg') def test_import_not_in_sys_path(): """ non-direct imports (not in sys.path) """ a = jedi.Script(path='module.py', line=5).goto_definitions() assert a[0].name == 'int' a = jedi.Script(path='module.py', line=6).goto_definitions() assert a[0].name == 'str' a = jedi.Script(path='module.py', line=7).goto_definitions() assert a[0].name == 'str' @pytest.mark.parametrize("script,name", [ ("from flask.ext import foo; foo.", "Foo"), # flask_foo.py ("from flask.ext import bar; bar.", "Bar"), # flaskext/bar.py ("from flask.ext import baz; baz.", "Baz"), # flask_baz/__init__.py ("from flask.ext import moo; moo.", "Moo"), # flaskext/moo/__init__.py ("from flask.ext.", "foo"), ("from flask.ext.", "bar"), ("from flask.ext.", "baz"), ("from flask.ext.", "moo"), pytest.mark.xfail(("import flask.ext.foo; flask.ext.foo.", "Foo")), pytest.mark.xfail(("import flask.ext.bar; flask.ext.bar.", "Foo")), pytest.mark.xfail(("import flask.ext.baz; flask.ext.baz.", "Foo")), pytest.mark.xfail(("import flask.ext.moo; flask.ext.moo.", "Foo")), ]) def test_flask_ext(script, name): """flask.ext.foo is really imported from flaskext.foo or flask_foo. """ path = os.path.join(os.path.dirname(__file__), 'flask-site-packages') sys.path.append(path) try: assert name in [c.name for c in jedi.Script(script).completions()] finally: sys.path.remove(path) @cwd_at('test/test_evaluate/') def test_not_importable_file(): src = 'import not_importable_file as x; x.' 
assert not jedi.Script(src, path='example.py').completions() jedi-0.9.0/test/test_evaluate/egg-link/0000775000175000017500000000000012517736563020175 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/egg-link/venv/0000775000175000017500000000000012517736563021153 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/egg-link/venv/lib/0000775000175000017500000000000012517736563021721 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/egg-link/venv/lib/python3.4/0000775000175000017500000000000012517736563023467 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/egg-link/venv/lib/python3.4/site-packages/0000775000175000017500000000000012517736563026207 5ustar daviddavid00000000000000jedi-0.9.0/test/test_evaluate/egg-link/venv/lib/python3.4/site-packages/egg_link.egg-link0000664000175000017500000000002412517736533031376 0ustar daviddavid00000000000000/path/from/egg-link jedi-0.9.0/test/test_evaluate/test_sys_path.py0000664000175000017500000000212212517736533021735 0ustar daviddavid00000000000000import os from jedi._compatibility import unicode from jedi.parser import Parser, load_grammar from jedi.evaluate import sys_path, Evaluator def test_paths_from_assignment(): def paths(src): grammar = load_grammar() stmt = Parser(grammar, unicode(src)).module.statements[0] return list(sys_path._paths_from_assignment(Evaluator(grammar), stmt)) assert paths('sys.path[0:0] = ["a"]') == ['a'] assert paths('sys.path = ["b", 1, x + 3, y, "c"]') == ['b', 'c'] assert paths('sys.path = a = ["a"]') == ['a'] # Fail for complicated examples. assert paths('sys.path, other = ["a"], 2') == [] def test_get_sys_path(monkeypatch): monkeypatch.setenv('VIRTUAL_ENV', os.path.join(os.path.dirname(__file__), 'egg-link', 'venv')) def sitepackages_dir(venv): return os.path.join(venv, 'lib', 'python3.4', 'site-packages') monkeypatch.setattr('jedi.evaluate.sys_path._get_venv_sitepackages', sitepackages_dir) assert '/path/from/egg-link' in sys_path.get_sys_path() jedi-0.9.0/test/helpers.py0000664000175000017500000000200512204171717015627 0ustar daviddavid00000000000000""" A helper module for testing, improves compatibility for testing (as ``jedi._compatibility``) as well as introducing helper functions. """ import sys if sys.hexversion < 0x02070000: import unittest2 as unittest else: import unittest TestCase = unittest.TestCase import os from os.path import abspath, dirname import functools test_dir = dirname(abspath(__file__)) root_dir = dirname(test_dir) sample_int = 1 # This is used in completion/imports.py def cwd_at(path): """ Decorator to run function at `path`. :type path: str :arg path: relative path from repository root (e.g., ``'jedi'``). """ def decorator(func): @functools.wraps(func) def wrapper(*args, **kwds): try: oldcwd = os.getcwd() repo_root = os.path.dirname(test_dir) os.chdir(os.path.join(repo_root, path)) return func(*args, **kwds) finally: os.chdir(oldcwd) return wrapper return decorator jedi-0.9.0/test/conftest.py0000664000175000017500000001200512363566726016031 0ustar daviddavid00000000000000import os import shutil import re import tempfile import pytest from . import helpers from . import run from . 
import refactor import jedi from jedi.evaluate.analysis import Warning def pytest_addoption(parser): parser.addoption( "--integration-case-dir", default=os.path.join(helpers.test_dir, 'completion'), help="Directory in which integration test case files locate.") parser.addoption( "--refactor-case-dir", default=os.path.join(helpers.test_dir, 'refactor'), help="Directory in which refactoring test case files locate.") parser.addoption( "--test-files", "-T", default=[], action='append', help=( "Specify test files using FILE_NAME[:LINE[,LINE[,...]]]. " "For example: -T generators.py:10,13,19. " "Note that you can use -m to specify the test case by id.")) parser.addoption( "--thirdparty", action='store_true', help="Include integration tests that requires third party modules.") def parse_test_files_option(opt): """ Parse option passed to --test-files into a key-value pair. >>> parse_test_files_option('generators.py:10,13,19') ('generators.py', [10, 13, 19]) """ opt = str(opt) if ':' in opt: (f_name, rest) = opt.split(':', 1) return (f_name, list(map(int, rest.split(',')))) else: return (opt, []) def pytest_generate_tests(metafunc): """ :type metafunc: _pytest.python.Metafunc """ test_files = dict(map(parse_test_files_option, metafunc.config.option.test_files)) if 'case' in metafunc.fixturenames: base_dir = metafunc.config.option.integration_case_dir thirdparty = metafunc.config.option.thirdparty cases = list(run.collect_dir_tests(base_dir, test_files)) if thirdparty: cases.extend(run.collect_dir_tests( os.path.join(base_dir, 'thirdparty'), test_files, True)) ids = ["%s:%s" % (c.module_name, c.line_nr_test) for c in cases] metafunc.parametrize('case', cases, ids=ids) if 'refactor_case' in metafunc.fixturenames: base_dir = metafunc.config.option.refactor_case_dir metafunc.parametrize( 'refactor_case', refactor.collect_dir_tests(base_dir, test_files)) if 'static_analysis_case' in metafunc.fixturenames: base_dir = os.path.join(os.path.dirname(__file__), 'static_analysis') metafunc.parametrize( 'static_analysis_case', collect_static_analysis_tests(base_dir, test_files)) def collect_static_analysis_tests(base_dir, test_files): for f_name in os.listdir(base_dir): files_to_execute = [a for a in test_files.items() if a[0] in f_name] if f_name.endswith(".py") and (not test_files or files_to_execute): path = os.path.join(base_dir, f_name) yield StaticAnalysisCase(path) class StaticAnalysisCase(object): """ Static Analysis cases lie in the static_analysis folder. The tests also start with `#!`, like the goto_definition tests. """ def __init__(self, path): self.skip = False self._path = path with open(path) as f: self._source = f.read() def collect_comparison(self): cases = [] for line_nr, line in enumerate(self._source.splitlines(), 1): match = re.match(r'(\s*)#! (\d+ )?(.*)$', line) if match is not None: column = int(match.group(2) or 0) + len(match.group(1)) cases.append((line_nr + 1, column, match.group(3))) return cases def run(self, compare_cb): analysis = jedi.Script(self._source, path=self._path)._analysis() typ_str = lambda inst: 'warning ' if isinstance(inst, Warning) else '' analysis = [(r.line, r.column, typ_str(r) + r.name) for r in analysis] compare_cb(self, analysis, self.collect_comparison()) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, os.path.basename(self._path)) @pytest.fixture() def isolated_jedi_cache(monkeypatch, tmpdir): """ Set `jedi.settings.cache_directory` to a temporary directory during test. 
Same as `clean_jedi_cache`, but create the temporary directory for each test case (scope='function'). """ from jedi import settings monkeypatch.setattr(settings, 'cache_directory', str(tmpdir)) @pytest.fixture(scope='session') def clean_jedi_cache(request): """ Set `jedi.settings.cache_directory` to a temporary directory during test. Note that you can't use built-in `tmpdir` and `monkeypatch` fixture here because their scope is 'function', which is not used in 'session' scope fixture. This fixture is activated in ../pytest.ini. """ from jedi import settings old = settings.cache_directory tmp = tempfile.mkdtemp(prefix='jedi-test-') settings.cache_directory = tmp @request.addfinalizer def restore(): settings.cache_directory = old shutil.rmtree(tmp) jedi-0.9.0/test/test_integration_analysis.py0000664000175000017500000000053212517736533021467 0ustar daviddavid00000000000000""" Test of keywords and ``jedi.keywords`` """ from jedi import Script def test_issue436(): code = "bar = 0\nbar += 'foo' + 4" errors = set(repr(e) for e in Script(code)._analysis()) assert len(errors) == 2 assert '' in errors assert '' in errors jedi-0.9.0/jedi/0000775000175000017500000000000012517736563013567 5ustar daviddavid00000000000000jedi-0.9.0/jedi/evaluate/0000775000175000017500000000000012517736563015375 5ustar daviddavid00000000000000jedi-0.9.0/jedi/evaluate/recursion.py0000664000175000017500000001173412517736533017763 0ustar daviddavid00000000000000""" Recursions are the recipe of |jedi| to conquer Python code. However, someone must stop recursions going mad. Some settings are here to make |jedi| stop at the right time. You can read more about them :ref:`here `. Next to :mod:`jedi.evaluate.cache` this module also makes |jedi| not thread-safe. Why? ``execution_recursion_decorator`` uses class variables to count the function calls. """ from jedi import debug from jedi import settings from jedi.evaluate import compiled from jedi.evaluate import iterable def recursion_decorator(func): def run(evaluator, stmt, *args, **kwargs): rec_detect = evaluator.recursion_detector # print stmt, len(self.node_statements()) if rec_detect.push_stmt(stmt): return [] else: result = func(evaluator, stmt, *args, **kwargs) rec_detect.pop_stmt() return result return run class RecursionDetector(object): """ A decorator to detect recursions in statements. In a recursion a statement at the same place, in the same module may not be executed two times. """ def __init__(self): self.top = None self.current = None def push_stmt(self, stmt): self.current = _RecursionNode(stmt, self.current) check = self._check_recursion() if check: debug.warning('catched stmt recursion: %s against %s @%s', stmt, check.stmt, stmt.start_pos) self.pop_stmt() return True return False def pop_stmt(self): if self.current is not None: # I don't know how current can be None, but sometimes it happens # with Python3. self.current = self.current.parent def _check_recursion(self): test = self.current while True: test = test.parent if self.current == test: return test if not test: return False def node_statements(self): result = [] n = self.current while n: result.insert(0, n.stmt) n = n.parent return result class _RecursionNode(object): """ A node of the RecursionDecorator. """ def __init__(self, stmt, parent): self.script = stmt.get_parent_until() self.position = stmt.start_pos self.parent = parent self.stmt = stmt # Don't check param instances, they are not causing recursions # The same's true for the builtins, because the builtins are really # simple. 
self.is_ignored = self.script == compiled.builtin def __eq__(self, other): if not other: return None return self.script == other.script \ and self.position == other.position \ and not self.is_ignored and not other.is_ignored def execution_recursion_decorator(func): def run(execution, **kwargs): detector = execution._evaluator.execution_recursion_detector if detector.push_execution(execution): result = [] else: result = func(execution, **kwargs) detector.pop_execution() return result return run class ExecutionRecursionDetector(object): """ Catches recursions of executions. It is designed like a Singelton. Only one instance should exist. """ def __init__(self): self.recursion_level = 0 self.parent_execution_funcs = [] self.execution_funcs = set() self.execution_count = 0 def __call__(self, execution): debug.dbg('Execution recursions: %s', execution, self.recursion_level, self.execution_count, len(self.execution_funcs)) if self.check_recursion(execution): result = [] else: result = self.func(execution) self.pop_execution() return result def pop_execution(cls): cls.parent_execution_funcs.pop() cls.recursion_level -= 1 def push_execution(cls, execution): in_par_execution_funcs = execution.base in cls.parent_execution_funcs in_execution_funcs = execution.base in cls.execution_funcs cls.recursion_level += 1 cls.execution_count += 1 cls.execution_funcs.add(execution.base) cls.parent_execution_funcs.append(execution.base) if cls.execution_count > settings.max_executions: return True if isinstance(execution.base, (iterable.Array, iterable.Generator)): return False module = execution.get_parent_until() if module == compiled.builtin: return False if in_par_execution_funcs: if cls.recursion_level > settings.max_function_recursion_level: return True if in_execution_funcs and \ len(cls.execution_funcs) > settings.max_until_execution_unique: return True if cls.execution_count > settings.max_executions_without_builtins: return True return False jedi-0.9.0/jedi/evaluate/__init__.py0000664000175000017500000004005612517736533017510 0ustar daviddavid00000000000000""" Evaluation of Python code in |jedi| is based on three assumptions: * The code uses as least side effects as possible. Jedi understands certain list/tuple/set modifications, but there's no guarantee that Jedi detects everything (list.append in different modules for example). * No magic is being used: - metaclasses - ``setattr()`` / ``__import__()`` - writing to ``globals()``, ``locals()``, ``object.__dict__`` * The programmer is not a total dick, e.g. like `this `_ :-) The actual algorithm is based on a principle called lazy evaluation. If you don't know about it, google it. That said, the typical entry point for static analysis is calling ``eval_statement``. There's separate logic for autocompletion in the API, the evaluator is all about evaluating an expression. Now you need to understand what follows after ``eval_statement``. Let's make an example:: import datetime datetime.date.toda# <-- cursor here First of all, this module doesn't care about completion. It really just cares about ``datetime.date``. At the end of the procedure ``eval_statement`` will return the ``date`` class. To *visualize* this (simplified): - ``Evaluator.eval_statement`` doesn't do much, because there's no assignment. - ``Evaluator.eval_element`` cares for resolving the dotted path - ``Evaluator.find_types`` searches for global definitions of datetime, which it finds in the definition of an import, by scanning the syntax tree. 
- Using the import logic, the datetime module is found. - Now ``find_types`` is called again by ``eval_element`` to find ``date`` inside the datetime module. Now what would happen if we wanted ``datetime.date.foo.bar``? Two more calls to ``find_types``. However the second call would be ignored, because the first one would return nothing (there's no foo attribute in ``date``). What if the import would contain another ``ExprStmt`` like this:: from foo import bar Date = bar.baz Well... You get it. Just another ``eval_statement`` recursion. It's really easy. Python can obviously get way more complicated then this. To understand tuple assignments, list comprehensions and everything else, a lot more code had to be written. Jedi has been tested very well, so you can just start modifying code. It's best to write your own test first for your "new" feature. Don't be scared of breaking stuff. As long as the tests pass, you're most likely to be fine. I need to mention now that lazy evaluation is really good because it only *evaluates* what needs to be *evaluated*. All the statements and modules that are not used are just being ignored. """ import copy from itertools import chain from jedi.parser import tree from jedi import debug from jedi.evaluate import representation as er from jedi.evaluate import imports from jedi.evaluate import recursion from jedi.evaluate import iterable from jedi.evaluate.cache import memoize_default from jedi.evaluate import stdlib from jedi.evaluate import finder from jedi.evaluate import compiled from jedi.evaluate import precedence from jedi.evaluate import param from jedi.evaluate import helpers class Evaluator(object): def __init__(self, grammar): self.grammar = grammar self.memoize_cache = {} # for memoize decorators # To memorize modules -> equals `sys.modules`. self.modules = {} # like `sys.modules`. self.compiled_cache = {} # see `compiled.create()` self.recursion_detector = recursion.RecursionDetector() self.execution_recursion_detector = recursion.ExecutionRecursionDetector() self.analysis = [] def wrap(self, element): if isinstance(element, tree.Class): return er.Class(self, element) elif isinstance(element, tree.Function): if isinstance(element, tree.Lambda): return er.LambdaWrapper(self, element) else: return er.Function(self, element) elif isinstance(element, (tree.Module)) \ and not isinstance(element, er.ModuleWrapper): return er.ModuleWrapper(self, element) else: return element def find_types(self, scope, name_str, position=None, search_global=False, is_goto=False): """ This is the search function. The most important part to debug. `remove_statements` and `filter_statements` really are the core part of this completion. :param position: Position of the last statement -> tuple of line, column :return: List of Names. Their parents are the types. """ f = finder.NameFinder(self, scope, name_str, position) scopes = f.scopes(search_global) if is_goto: return f.filter_name(scopes) return f.find(scopes, search_global) @memoize_default(default=[], evaluator_is_first_arg=True) @recursion.recursion_decorator @debug.increase_indent def eval_statement(self, stmt, seek_name=None): """ The starting point of the completion. A statement always owns a call list, which are the calls, that a statement does. In case multiple names are defined in the statement, `seek_name` returns the result for this name. :param stmt: A `tree.ExprStmt`. 
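        A rough illustration (``Date = bar.baz`` is the example from the
        module docstring above; the names are only for demonstration and
        nothing here is executed)::

            eval_statement(<ExprStmt: Date = bar.baz>)
            # -> the possible types of the right-hand side, ``bar.baz``

        When a statement defines several names (a tuple assignment, for
        example), passing ``seek_name`` narrows the result to that name.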
""" debug.dbg('eval_statement %s (%s)', stmt, seek_name) types = self.eval_element(stmt.get_rhs()) if seek_name: types = finder.check_tuple_assignments(types, seek_name) first_operation = stmt.first_operation() if first_operation not in ('=', None) and not isinstance(stmt, er.InstanceElement): # TODO don't check for this. # `=` is always the last character in aug assignments -> -1 operator = copy.copy(first_operation) operator.value = operator.value[:-1] name = str(stmt.get_defined_names()[0]) parent = self.wrap(stmt.get_parent_scope()) left = self.find_types(parent, name, stmt.start_pos, search_global=True) if isinstance(stmt.get_parent_until(tree.ForStmt), tree.ForStmt): # Iterate through result and add the values, that's possible # only in for loops without clutter, because they are # predictable. for r in types: left = precedence.calculate(self, left, operator, [r]) types = left else: types = precedence.calculate(self, left, operator, types) debug.dbg('eval_statement result %s', types) return types @memoize_default(evaluator_is_first_arg=True) def eval_element(self, element): if isinstance(element, iterable.AlreadyEvaluated): return list(element) elif isinstance(element, iterable.MergedNodes): return iterable.unite(self.eval_element(e) for e in element) debug.dbg('eval_element %s@%s', element, element.start_pos) if isinstance(element, (tree.Name, tree.Literal)) or tree.is_node(element, 'atom'): return self._eval_atom(element) elif isinstance(element, tree.Keyword): # For False/True/None if element.value in ('False', 'True', 'None'): return [compiled.builtin.get_by_name(element.value)] else: return [] elif element.isinstance(tree.Lambda): return [er.LambdaWrapper(self, element)] elif element.isinstance(er.LambdaWrapper): return [element] # TODO this is no real evaluation. elif element.type == 'expr_stmt': return self.eval_statement(element) elif element.type == 'power': types = self._eval_atom(element.children[0]) for trailer in element.children[1:]: if trailer == '**': # has a power operation. raise NotImplementedError types = self.eval_trailer(types, trailer) return types elif element.type in ('testlist_star_expr', 'testlist',): # The implicit tuple in statements. return [iterable.ImplicitTuple(self, element)] elif element.type in ('not_test', 'factor'): types = self.eval_element(element.children[-1]) for operator in element.children[:-1]: types = list(precedence.factor_calculate(self, types, operator)) return types elif element.type == 'test': # `x if foo else y` case. return (self.eval_element(element.children[0]) + self.eval_element(element.children[-1])) elif element.type == 'operator': # Must be an ellipsis, other operators are not evaluated. return [] # Ignore for now. elif element.type == 'dotted_name': types = self._eval_atom(element.children[0]) for next_name in element.children[2::2]: types = list(chain.from_iterable(self.find_types(typ, next_name) for typ in types)) return types else: return precedence.calculate_children(self, element.children) def _eval_atom(self, atom): """ Basically to process ``atom`` nodes. The parser sometimes doesn't generate the node (because it has just one child). In that case an atom might be a name or a literal as well. """ if isinstance(atom, tree.Name): # This is the first global lookup. 
stmt = atom.get_definition() scope = stmt.get_parent_until(tree.IsScope, include_current=True) if isinstance(stmt, tree.CompFor): stmt = stmt.get_parent_until((tree.ClassOrFunc, tree.ExprStmt)) if stmt.type != 'expr_stmt': # We only need to adjust the start_pos for statements, because # there the name cannot be used. stmt = atom return self.find_types(scope, atom, stmt.start_pos, search_global=True) elif isinstance(atom, tree.Literal): return [compiled.create(self, atom.eval())] else: c = atom.children # Parentheses without commas are not tuples. if c[0] == '(' and not len(c) == 2 \ and not(tree.is_node(c[1], 'testlist_comp') and len(c[1].children) > 1): return self.eval_element(c[1]) try: comp_for = c[1].children[1] except (IndexError, AttributeError): pass else: if isinstance(comp_for, tree.CompFor): return [iterable.Comprehension.from_atom(self, atom)] return [iterable.Array(self, atom)] def eval_trailer(self, types, trailer): trailer_op, node = trailer.children[:2] if node == ')': # `arglist` is optional. node = () new_types = [] for typ in types: debug.dbg('eval_trailer: %s in scope %s', trailer, typ) if trailer_op == '.': new_types += self.find_types(typ, node) elif trailer_op == '(': new_types += self.execute(typ, node, trailer) elif trailer_op == '[': try: get = typ.get_index_types except AttributeError: debug.warning("TypeError: '%s' object is not subscriptable" % typ) else: new_types += get(self, node) return new_types def execute_evaluated(self, obj, *args): """ Execute a function with already executed arguments. """ args = [iterable.AlreadyEvaluated([arg]) for arg in args] return self.execute(obj, args) @debug.increase_indent def execute(self, obj, arguments=(), trailer=None): if not isinstance(arguments, param.Arguments): arguments = param.Arguments(self, arguments, trailer) if obj.isinstance(er.Function): obj = obj.get_decorated_func() debug.dbg('execute: %s %s', obj, arguments) try: # Some stdlib functions like super(), namedtuple(), etc. have been # hard-coded in Jedi to support them. return stdlib.execute(self, obj, arguments) except stdlib.NotInStdLib: pass try: func = obj.py__call__ except AttributeError: debug.warning("no execution possible %s", obj) return [] else: types = func(self, arguments) debug.dbg('execute result: %s in %s', types, obj) return types def goto_definition(self, name): def_ = name.get_definition() if def_.type == 'expr_stmt' and name in def_.get_defined_names(): return self.eval_statement(def_, name) call = helpers.call_of_name(name) return self.eval_element(call) def goto(self, name): def resolve_implicit_imports(names): for name in names: if isinstance(name.parent, helpers.FakeImport): # Those are implicit imports. s = imports.ImportWrapper(self, name) for n in s.follow(is_goto=True): yield n else: yield name stmt = name.get_definition() par = name.parent if par.type == 'argument' and par.children[1] == '=' and par.children[0] == name: # Named param goto. 
trailer = par.parent if trailer.type == 'arglist': trailer = trailer.parent if trailer.type != 'classdef': if trailer.type == 'decorator': types = self.eval_element(trailer.children[1]) else: i = trailer.parent.children.index(trailer) to_evaluate = trailer.parent.children[:i] types = self.eval_element(to_evaluate[0]) for trailer in to_evaluate[1:]: types = self.eval_trailer(types, trailer) param_names = [] for typ in types: try: params = typ.params except AttributeError: pass else: param_names += [param.name for param in params if param.name.value == name.value] return param_names elif isinstance(par, tree.ExprStmt) and name in par.get_defined_names(): # Only take the parent, because if it's more complicated than just # a name it's something you can "goto" again. return [name] elif isinstance(par, (tree.Param, tree.Function, tree.Class)) and par.name is name: return [name] elif isinstance(stmt, tree.Import): modules = imports.ImportWrapper(self, name).follow(is_goto=True) return list(resolve_implicit_imports(modules)) elif par.type == 'dotted_name': # Is a decorator. index = par.children.index(name) if index > 0: new_dotted = helpers.deep_ast_copy(par) new_dotted.children[index - 1:] = [] types = self.eval_element(new_dotted) return resolve_implicit_imports(iterable.unite( self.find_types(typ, name, is_goto=True) for typ in types )) scope = name.get_parent_scope() if tree.is_node(name.parent, 'trailer'): call = helpers.call_of_name(name, cut_own_trailer=True) types = self.eval_element(call) return resolve_implicit_imports(iterable.unite( self.find_types(typ, name, is_goto=True) for typ in types )) else: if stmt.type != 'expr_stmt': # We only need to adjust the start_pos for statements, because # there the name cannot be used. stmt = name return self.find_types(scope, name, stmt.start_pos, search_global=True, is_goto=True) jedi-0.9.0/jedi/evaluate/precedence.py0000664000175000017500000001372512517736533020051 0ustar daviddavid00000000000000""" Handles operator precedence. """ import operator from jedi._compatibility import unicode from jedi.parser import tree from jedi import debug from jedi.evaluate.compiled import (CompiledObject, create, builtin, keyword_from_value, true_obj, false_obj) from jedi.evaluate import analysis # Maps Python syntax to the operator module. COMPARISON_OPERATORS = { '==': operator.eq, '!=': operator.ne, 'is': operator.is_, 'is not': operator.is_not, '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, } def _literals_to_types(evaluator, result): # Changes literals ('a', 1, 1.0, etc) to its type instances (str(), # int(), float(), etc). for i, r in enumerate(result): if is_literal(r): # Literals are only valid as long as the operations are # correct. Otherwise add a value-free instance. cls = builtin.get_by_name(r.name.get_code()) result[i] = evaluator.execute(cls)[0] return list(set(result)) def calculate_children(evaluator, children): """ Calculate a list of children with operators. """ iterator = iter(children) types = evaluator.eval_element(next(iterator)) for operator in iterator: right = next(iterator) if tree.is_node(operator, 'comp_op'): # not in / is not operator = ' '.join(str(c.value) for c in operator.children) # handle lazy evaluation of and/or here. 
if operator in ('and', 'or'): left_bools = set([left.py__bool__() for left in types]) if left_bools == set([True]): if operator == 'and': types = evaluator.eval_element(right) elif left_bools == set([False]): if operator != 'and': types = evaluator.eval_element(right) # Otherwise continue, because of uncertainty. else: types = calculate(evaluator, types, operator, evaluator.eval_element(right)) debug.dbg('calculate_children types %s', types) return types def calculate(evaluator, left_result, operator, right_result): result = [] if not left_result or not right_result: # illegal slices e.g. cause left/right_result to be None result = (left_result or []) + (right_result or []) result = _literals_to_types(evaluator, result) else: # I don't think there's a reasonable chance that a string # operation is still correct, once we pass something like six # objects. if len(left_result) * len(right_result) > 6: result = _literals_to_types(evaluator, left_result + right_result) else: for left in left_result: for right in right_result: result += _element_calculate(evaluator, left, operator, right) return result def factor_calculate(evaluator, types, operator): """ Calculates `+`, `-`, `~` and `not` prefixes. """ for typ in types: if operator == '-': if _is_number(typ): yield create(evaluator, -typ.obj) elif operator == 'not': value = typ.py__bool__() if value is None: # Uncertainty. return yield keyword_from_value(not value) else: yield typ def _is_number(obj): return isinstance(obj, CompiledObject) \ and isinstance(obj.obj, (int, float)) def is_string(obj): return isinstance(obj, CompiledObject) \ and isinstance(obj.obj, (str, unicode)) def is_literal(obj): return _is_number(obj) or is_string(obj) def _is_tuple(obj): from jedi.evaluate import iterable return isinstance(obj, iterable.Array) and obj.type == 'tuple' def _is_list(obj): from jedi.evaluate import iterable return isinstance(obj, iterable.Array) and obj.type == 'list' def _element_calculate(evaluator, left, operator, right): from jedi.evaluate import iterable, representation as er l_is_num = _is_number(left) r_is_num = _is_number(right) if operator == '*': # for iterables, ignore * operations if isinstance(left, iterable.Array) or is_string(left): return [left] elif isinstance(right, iterable.Array) or is_string(right): return [right] elif operator == '+': if l_is_num and r_is_num or is_string(left) and is_string(right): return [create(evaluator, left.obj + right.obj)] elif _is_tuple(left) and _is_tuple(right) or _is_list(left) and _is_list(right): return [iterable.MergedArray(evaluator, (left, right))] elif operator == '-': if l_is_num and r_is_num: return [create(evaluator, left.obj - right.obj)] elif operator == '%': # With strings and numbers the left type typically remains. Except for # `int() % float()`. return [left] elif operator in COMPARISON_OPERATORS: operation = COMPARISON_OPERATORS[operator] if isinstance(left, CompiledObject) and isinstance(right, CompiledObject): # Possible, because the return is not an option. Just compare. left = left.obj right = right.obj try: return [keyword_from_value(operation(left, right))] except TypeError: # Could be True or False. return [true_obj, false_obj] elif operator == 'in': return [] def check(obj): """Checks if a Jedi object is either a float or an int.""" return isinstance(obj, er.Instance) and obj.name.get_code() in ('int', 'float') # Static analysis, one is a number, the other one is not. 
if operator in ('+', '-') and l_is_num != r_is_num \ and not (check(left) or check(right)): message = "TypeError: unsupported operand type(s) for +: %s and %s" analysis.add(evaluator, 'type-error-operation', operator, message % (left, right)) return [left, right] jedi-0.9.0/jedi/evaluate/representation.py0000664000175000017500000007573412517736533021026 0ustar daviddavid00000000000000""" Like described in the :mod:`jedi.parser.tree` module, there's a need for an ast like module to represent the states of parsed modules. But now there are also structures in Python that need a little bit more than that. An ``Instance`` for example is only a ``Class`` before it is instantiated. This class represents these cases. So, why is there also a ``Class`` class here? Well, there are decorators and they change classes in Python 3. Representation modules also define "magic methods". Those methods look like ``py__foo__`` and are typically mappable to the Python equivalents ``__call__`` and others. Here's a list: ====================================== ======================================== **Method** **Description** -------------------------------------- ---------------------------------------- py__call__(evaluator, params: Array) On callable objects, returns types. py__bool__() Returns True/False/None; None means that there's no certainty. py__bases__(evaluator) Returns a list of base classes. py__mro__(evaluator) Returns a list of classes (the mro). py__getattribute__(evaluator, name) Returns a list of attribute values. The name can be str or Name. ====================================== ======================================== __ """ import os import pkgutil import imp import re from itertools import chain from jedi._compatibility import use_metaclass, unicode, Python3Method from jedi.parser import tree from jedi import debug from jedi import common from jedi.cache import underscore_memoization, cache_star_import from jedi.evaluate.cache import memoize_default, CachedMetaClass, NO_DEFAULT from jedi.evaluate import compiled from jedi.evaluate import recursion from jedi.evaluate import iterable from jedi.evaluate import docstrings from jedi.evaluate import helpers from jedi.evaluate import param from jedi.evaluate import flow_analysis from jedi.evaluate import imports class Executed(tree.Base): """ An instance is also an executable - because __init__ is called :param var_args: The param input array, consist of a parser node or a list. """ def __init__(self, evaluator, base, var_args=()): self._evaluator = evaluator self.base = base self.var_args = var_args def is_scope(self): return True def get_parent_until(self, *args, **kwargs): return tree.Base.get_parent_until(self, *args, **kwargs) @common.safe_property def parent(self): return self.base.parent class Instance(use_metaclass(CachedMetaClass, Executed)): """ This class is used to evaluate instances. """ def __init__(self, evaluator, base, var_args, is_generated=False): super(Instance, self).__init__(evaluator, base, var_args) self.decorates = None # Generated instances are classes that are just generated by self # (No var_args) used. self.is_generated = is_generated if base.name.get_code() in ['list', 'set'] \ and compiled.builtin == base.get_parent_until(): # compare the module path with the builtin name. self.var_args = iterable.check_array_instances(evaluator, self) elif not is_generated: # Need to execute the __init__ function, because the dynamic param # searching needs it. 
try: method = self.get_subscope_by_name('__init__') except KeyError: pass else: evaluator.execute(method, self.var_args) @property def py__call__(self): def actual(evaluator, params): return evaluator.execute(method, params) try: method = self.get_subscope_by_name('__call__') except KeyError: # Means the Instance is not callable. raise AttributeError return actual def py__class__(self, evaluator): return self.base def py__bool__(self): # Signalize that we don't know about the bool type. return None @memoize_default() def _get_method_execution(self, func): func = get_instance_el(self._evaluator, self, func, True) return FunctionExecution(self._evaluator, func, self.var_args) def _get_func_self_name(self, func): """ Returns the name of the first param in a class method (which is normally self. """ try: return str(func.params[0].name) except IndexError: return None def _self_names_dict(self, add_mro=True): names = {} # This loop adds the names of the self object, copies them and removes # the self. for sub in self.base.subscopes: if isinstance(sub, tree.Class): continue # Get the self name, if there's one. self_name = self._get_func_self_name(sub) if self_name is None: continue if sub.name.value == '__init__' and not self.is_generated: # ``__init__`` is special because the params need are injected # this way. Therefore an execution is necessary. if not sub.get_decorators(): # __init__ decorators should generally just be ignored, # because to follow them and their self variables is too # complicated. sub = self._get_method_execution(sub) for name_list in sub.names_dict.values(): for name in name_list: if name.value == self_name and name.prev_sibling() is None: trailer = name.next_sibling() if tree.is_node(trailer, 'trailer') \ and len(trailer.children) == 2 \ and trailer.children[0] == '.': name = trailer.children[1] # After dot. if name.is_definition(): arr = names.setdefault(name.value, []) arr.append(get_instance_el(self._evaluator, self, name)) return names def get_subscope_by_name(self, name): sub = self.base.get_subscope_by_name(name) return get_instance_el(self._evaluator, self, sub, True) def execute_subscope_by_name(self, name, *args): method = self.get_subscope_by_name(name) return self._evaluator.execute_evaluated(method, *args) def get_descriptor_returns(self, obj): """ Throws a KeyError if there's no method. """ # Arguments in __get__ descriptors are obj, class. # `method` is the new parent of the array, don't know if that's good. args = [obj, obj.base] if isinstance(obj, Instance) else [compiled.none_obj, obj] try: return self.execute_subscope_by_name('__get__', *args) except KeyError: return [self] @memoize_default() def names_dicts(self, search_global): yield self._self_names_dict() for s in self.base.py__mro__(self._evaluator)[1:]: if not isinstance(s, compiled.CompiledObject): # Compiled objects don't have `self.` names. for inst in self._evaluator.execute(s): yield inst._self_names_dict(add_mro=False) for names_dict in self.base.names_dicts(search_global=False, is_instance=True): yield LazyInstanceDict(self._evaluator, self, names_dict) def get_index_types(self, evaluator, index_array): indexes = iterable.create_indexes_or_slices(self._evaluator, index_array) if any([isinstance(i, iterable.Slice) for i in indexes]): # Slice support in Jedi is very marginal, at the moment, so just # ignore them in case of __getitem__. # TODO support slices in a more general way. 
indexes = [] try: method = self.get_subscope_by_name('__getitem__') except KeyError: debug.warning('No __getitem__, cannot access the array.') return [] else: return self._evaluator.execute(method, [iterable.AlreadyEvaluated(indexes)]) @property @underscore_memoization def name(self): name = self.base.name return helpers.FakeName(unicode(name), self, name.start_pos) def __getattr__(self, name): if name not in ['start_pos', 'end_pos', 'get_imports', 'type', 'doc', 'raw_doc']: raise AttributeError("Instance %s: Don't touch this (%s)!" % (self, name)) return getattr(self.base, name) def __repr__(self): dec = '' if self.decorates is not None: dec = " decorates " + repr(self.decorates) return "" % (type(self).__name__, self.base, self.var_args, dec) class LazyInstanceDict(object): def __init__(self, evaluator, instance, dct): self._evaluator = evaluator self._instance = instance self._dct = dct def __getitem__(self, name): return [get_instance_el(self._evaluator, self._instance, var, True) for var in self._dct[name]] def values(self): return [self[key] for key in self._dct] class InstanceName(tree.Name): def __init__(self, origin_name, parent): super(InstanceName, self).__init__(tree.zero_position_modifier, origin_name.value, origin_name.start_pos) self._origin_name = origin_name self.parent = parent def is_definition(self): return self._origin_name.is_definition() def get_instance_el(evaluator, instance, var, is_class_var=False): """ Returns an InstanceElement if it makes sense, otherwise leaves the object untouched. Basically having an InstanceElement is context information. That is needed in quite a lot of cases, which includes Nodes like ``power``, that need to know where a self name comes from for example. """ if isinstance(var, tree.Name): parent = get_instance_el(evaluator, instance, var.parent, is_class_var) return InstanceName(var, parent) elif var.type != 'funcdef' \ and isinstance(var, (Instance, compiled.CompiledObject, tree.Leaf, tree.Module, FunctionExecution)): return var var = evaluator.wrap(var) return InstanceElement(evaluator, instance, var, is_class_var) class InstanceElement(use_metaclass(CachedMetaClass, tree.Base)): """ InstanceElement is a wrapper for any object, that is used as an instance variable (e.g. self.variable or class methods). """ def __init__(self, evaluator, instance, var, is_class_var): self._evaluator = evaluator self.instance = instance self.var = var self.is_class_var = is_class_var @common.safe_property @memoize_default() def parent(self): par = self.var.parent if isinstance(par, Class) and par == self.instance.base \ or isinstance(par, tree.Class) \ and par == self.instance.base.base: par = self.instance else: par = get_instance_el(self._evaluator, self.instance, par, self.is_class_var) return par def get_parent_until(self, *args, **kwargs): return tree.BaseNode.get_parent_until(self, *args, **kwargs) def get_definition(self): return self.get_parent_until((tree.ExprStmt, tree.IsScope, tree.Import)) def get_decorated_func(self): """ Needed because the InstanceElement should not be stripped """ func = self.var.get_decorated_func() func = get_instance_el(self._evaluator, self.instance, func) return func def get_rhs(self): return get_instance_el(self._evaluator, self.instance, self.var.get_rhs(), self.is_class_var) def is_definition(self): return self.var.is_definition() @property def children(self): # Copy and modify the array. 
return [get_instance_el(self._evaluator, self.instance, command, self.is_class_var) for command in self.var.children] @property @memoize_default() def name(self): name = self.var.name return helpers.FakeName(unicode(name), self, name.start_pos) def __iter__(self): for el in self.var.__iter__(): yield get_instance_el(self._evaluator, self.instance, el, self.is_class_var) def __getitem__(self, index): return get_instance_el(self._evaluator, self.instance, self.var[index], self.is_class_var) def __getattr__(self, name): return getattr(self.var, name) def isinstance(self, *cls): return isinstance(self.var, cls) def is_scope(self): """ Since we inherit from Base, it would overwrite the action we want here. """ return self.var.is_scope() def py__call__(self, evaluator, params): if isinstance(self.var, compiled.CompiledObject): # This check is a bit strange, but CompiledObject itself is a bit # more complicated than we would it actually like to be. return self.var.py__call__(evaluator, params) else: return Function.py__call__(self, evaluator, params) def __repr__(self): return "<%s of %s>" % (type(self).__name__, self.var) class Wrapper(tree.Base): def is_scope(self): return True def is_class(self): return False def py__bool__(self): """ Since Wrapper is a super class for classes, functions and modules, the return value will always be true. """ return True @property @underscore_memoization def name(self): name = self.base.name return helpers.FakeName(unicode(name), self, name.start_pos) class Class(use_metaclass(CachedMetaClass, Wrapper)): """ This class is not only important to extend `tree.Class`, it is also a important for descriptors (if the descriptor methods are evaluated or not). """ def __init__(self, evaluator, base): self._evaluator = evaluator self.base = base @memoize_default(default=()) def py__mro__(self, evaluator): def add(cls): if cls not in mro: mro.append(cls) mro = [self] # TODO Do a proper mro resolution. Currently we are just listing # classes. However, it's a complicated algorithm. for cls in self.py__bases__(self._evaluator): # TODO detect for TypeError: duplicate base class str, # e.g. 
`class X(str, str): pass` try: mro_method = cls.py__mro__ except AttributeError: # TODO add a TypeError like: """ >>> class Y(lambda: test): pass Traceback (most recent call last): File "", line 1, in TypeError: function() argument 1 must be code, not str >>> class Y(1): pass Traceback (most recent call last): File "", line 1, in TypeError: int() takes at most 2 arguments (3 given) """ pass else: add(cls) for cls_new in mro_method(evaluator): add(cls_new) return tuple(mro) @memoize_default(default=()) def py__bases__(self, evaluator): arglist = self.base.get_super_arglist() if arglist: args = param.Arguments(self._evaluator, arglist) return list(chain.from_iterable(args.eval_args())) else: return [compiled.object_obj] def py__call__(self, evaluator, params): return [Instance(evaluator, self, params)] def py__getattribute__(self, name): return self._evaluator.find_types(self, name) @property def params(self): return self.get_subscope_by_name('__init__').params def names_dicts(self, search_global, is_instance=False): if search_global: yield self.names_dict else: for scope in self.py__mro__(self._evaluator): if isinstance(scope, compiled.CompiledObject): yield scope.names_dicts(False, is_instance)[0] else: yield scope.names_dict def is_class(self): return True def get_subscope_by_name(self, name): for s in self.py__mro__(self._evaluator): for sub in reversed(s.subscopes): if sub.name.value == name: return sub raise KeyError("Couldn't find subscope.") def __getattr__(self, name): if name not in ['start_pos', 'end_pos', 'parent', 'raw_doc', 'doc', 'get_imports', 'get_parent_until', 'get_code', 'subscopes', 'names_dict', 'type']: raise AttributeError("Don't touch this: %s of %s !" % (name, self)) return getattr(self.base, name) def __repr__(self): return "" % (type(self).__name__, self.base) class Function(use_metaclass(CachedMetaClass, Wrapper)): """ Needed because of decorators. Decorators are evaluated here. """ def __init__(self, evaluator, func, is_decorated=False): """ This should not be called directly """ self._evaluator = evaluator self.base = self.base_func = func self.is_decorated = is_decorated # A property that is set by the decorator resolution. self.decorates = None @memoize_default() def get_decorated_func(self): """ Returns the function, that should to be executed in the end. This is also the places where the decorators are processed. """ f = self.base_func decorators = self.base_func.get_decorators() if not decorators or self.is_decorated: return self # Only enter it, if has not already been processed. if not self.is_decorated: for dec in reversed(decorators): debug.dbg('decorator: %s %s', dec, f) dec_results = self._evaluator.eval_element(dec.children[1]) trailer = dec.children[2:-1] if trailer: # Create a trailer and evaluate it. trailer = tree.Node('trailer', trailer) trailer.parent = dec dec_results = self._evaluator.eval_trailer(dec_results, trailer) if not len(dec_results): debug.warning('decorator not found: %s on %s', dec, self.base_func) return self decorator = dec_results.pop() if dec_results: debug.warning('multiple decorators found %s %s', self.base_func, dec_results) # Create param array. if isinstance(f, Function): old_func = f # TODO this is just hacky. change. 
else: old_func = Function(self._evaluator, f, is_decorated=True) wrappers = self._evaluator.execute_evaluated(decorator, old_func) if not len(wrappers): debug.warning('no wrappers found %s', self.base_func) return self if len(wrappers) > 1: # TODO resolve issue with multiple wrappers -> multiple types debug.warning('multiple wrappers found %s %s', self.base_func, wrappers) f = wrappers[0] if isinstance(f, (Instance, Function)): f.decorates = self debug.dbg('decorator end %s', f) return f def names_dicts(self, search_global): if search_global: yield self.names_dict else: for names_dict in compiled.magic_function_class.names_dicts(False): yield names_dict @Python3Method def py__call__(self, evaluator, params): if self.base.is_generator(): return [iterable.Generator(evaluator, self, params)] else: return FunctionExecution(evaluator, self, params).get_return_types() def __getattr__(self, name): return getattr(self.base_func, name) def __repr__(self): dec = '' if self.decorates is not None: dec = " decorates " + repr(self.decorates) return "" % (type(self).__name__, self.base_func, dec) class LambdaWrapper(Function): def get_decorated_func(self): return self class FunctionExecution(Executed): """ This class is used to evaluate functions and their returns. This is the most complicated class, because it contains the logic to transfer parameters. It is even more complicated, because there may be multiple calls to functions and recursion has to be avoided. But this is responsibility of the decorators. """ type = 'funcdef' def __init__(self, evaluator, base, *args, **kwargs): super(FunctionExecution, self).__init__(evaluator, base, *args, **kwargs) self._copy_dict = {} new_func = helpers.deep_ast_copy(base.base_func, self, self._copy_dict) self.children = new_func.children self.names_dict = new_func.names_dict @memoize_default(default=()) @recursion.execution_recursion_decorator def get_return_types(self, check_yields=False): func = self.base if func.isinstance(LambdaWrapper): return self._evaluator.eval_element(self.children[-1]) if func.listeners: # Feed the listeners, with the params. for listener in func.listeners: listener.execute(self._get_params()) # If we do have listeners, that means that there's not a regular # execution ongoing. In this case Jedi is interested in the # inserted params, not in the actual execution of the function. return [] if check_yields: types = [] returns = self.yields else: returns = self.returns types = list(docstrings.find_return_types(self._evaluator, func)) for r in returns: check = flow_analysis.break_check(self._evaluator, self, r) if check is flow_analysis.UNREACHABLE: debug.dbg('Return unreachable: %s', r) else: types += self._evaluator.eval_element(r.children[1]) if check is flow_analysis.REACHABLE: debug.dbg('Return reachable: %s', r) break return types def names_dicts(self, search_global): yield self.names_dict @memoize_default(default=NO_DEFAULT) def _get_params(self): """ This returns the params for an TODO and is injected as a 'hack' into the tree.Function class. This needs to be here, because Instance can have __init__ functions, which act the same way as normal functions. """ return param.get_params(self._evaluator, self.base, self.var_args) def param_by_name(self, name): return [n for n in self._get_params() if str(n) == name][0] def name_for_position(self, position): return tree.Function.name_for_position(self, position) def _copy_list(self, lst): """ Copies a list attribute of a parser Function. 
Copying is very expensive, because it is something like `copy.deepcopy`. However, these copied objects can be used for the executions, as if they were in the execution. """ objects = [] for element in lst: self._scope_copy(element.parent) copied = helpers.deep_ast_copy(element, self._copy_dict) objects.append(copied) return objects def __getattr__(self, name): if name not in ['start_pos', 'end_pos', 'imports', 'name', 'type']: raise AttributeError('Tried to access %s: %s. Why?' % (name, self)) return getattr(self.base, name) def _scope_copy(self, scope): raise NotImplementedError """ Copies a scope (e.g. `if foo:`) in an execution """ if scope != self.base.base_func: # Just make sure the parents been copied. self._scope_copy(scope.parent) helpers.deep_ast_copy(scope, self._copy_dict) @common.safe_property @memoize_default([]) def returns(self): return tree.Scope._search_in_scope(self, tree.ReturnStmt) @common.safe_property @memoize_default([]) def yields(self): return tree.Scope._search_in_scope(self, tree.YieldExpr) @common.safe_property @memoize_default([]) def statements(self): return tree.Scope._search_in_scope(self, tree.ExprStmt) @common.safe_property @memoize_default([]) def subscopes(self): return tree.Scope._search_in_scope(self, tree.Scope) def __repr__(self): return "<%s of %s>" % (type(self).__name__, self.base) class GlobalName(helpers.FakeName): def __init__(self, name): """ We need to mark global names somehow. Otherwise they are just normal names that are not definitions. """ super(GlobalName, self).__init__(name.value, name.parent, name.start_pos, is_definition=True) class ModuleWrapper(use_metaclass(CachedMetaClass, tree.Module, Wrapper)): def __init__(self, evaluator, module): self._evaluator = evaluator self.base = self._module = module def names_dicts(self, search_global): yield self.base.names_dict yield self._module_attributes_dict() for star_module in self.star_imports(): yield star_module.names_dict yield dict((str(n), [GlobalName(n)]) for n in self.base.global_names) yield self._sub_modules_dict() # I'm not sure if the star import cache is really that effective anymore # with all the other really fast import caches. Recheck. Also we would need # to push the star imports into Evaluator.modules, if we reenable this. #@cache_star_import @memoize_default([]) def star_imports(self): modules = [] for i in self.base.imports: if i.is_star_import(): name = i.star_import_name() new = imports.ImportWrapper(self._evaluator, name).follow() for module in new: if isinstance(module, tree.Module): modules += module.star_imports() modules += new return modules @memoize_default() def _module_attributes_dict(self): def parent_callback(): return self._evaluator.execute(compiled.create(self._evaluator, str))[0] names = ['__file__', '__package__', '__doc__', '__name__'] # All the additional module attributes are strings. return dict((n, [helpers.LazyName(n, parent_callback, is_definition=True)]) for n in names) @property @memoize_default() def name(self): return helpers.FakeName(unicode(self.base.name), self, (1, 0)) def _get_init_directory(self): for suffix, _, _ in imp.get_suffixes(): ending = '__init__' + suffix if self.py__file__().endswith(ending): # Remove the ending, including the separator. return self.py__file__()[:-len(ending) - 1] return None def py__name__(self): for name, module in self._evaluator.modules.items(): if module == self: return name return '__main__' def py__file__(self): """ In contrast to Python's __file__ can be None. 
""" if self._module.path is None: return None return os.path.abspath(self._module.path) def py__package__(self): if self._get_init_directory() is None: return re.sub(r'\.?[^\.]+$', '', self.py__name__()) else: return self.py__name__() @property def py__path__(self): """ Not seen here, since it's a property. The callback actually uses a variable, so use it like:: foo.py__path__(sys_path) In case of a package, this returns Python's __path__ attribute, which is a list of paths (strings). Raises an AttributeError if the module is not a package. """ def return_value(search_path): init_path = self.py__file__() if os.path.basename(init_path) == '__init__.py': with open(init_path, 'rb') as f: content = common.source_to_unicode(f.read()) # these are strings that need to be used for namespace packages, # the first one is ``pkgutil``, the second ``pkg_resources``. options = ('declare_namespace(__name__)', 'extend_path(__path__') if options[0] in content or options[1] in content: # It is a namespace, now try to find the rest of the # modules on sys_path or whatever the search_path is. paths = set() for s in search_path: other = os.path.join(s, unicode(self.name)) if os.path.isdir(other): paths.add(other) return list(paths) # Default to this. return [path] path = self._get_init_directory() if path is None: raise AttributeError('Only packages have __path__ attributes.') else: return return_value @memoize_default() def _sub_modules_dict(self): """ Lists modules in the directory of this module (if this module is a package). """ path = self._module.path names = {} if path is not None and path.endswith(os.path.sep + '__init__.py'): mods = pkgutil.iter_modules([os.path.dirname(path)]) for module_loader, name, is_pkg in mods: fake_n = helpers.FakeName(name) # It's obviously a relative import to the current module. imp = helpers.FakeImport(fake_n, self, level=1) fake_n.parent = imp names[name] = [fake_n] # TODO add something like this in the future, its cleaner than the # import hacks. # ``os.path`` is a hardcoded exception, because it's a # ``sys.modules`` modification. #if str(self.name) == 'os': # names.append(helpers.FakeName('path', parent=self)) return names def __getattr__(self, name): return getattr(self._module, name) def __repr__(self): return "<%s: %s>" % (type(self).__name__, self._module) jedi-0.9.0/jedi/evaluate/analysis.py0000664000175000017500000002507412517736533017577 0ustar daviddavid00000000000000""" Module for statical analysis. 
""" from jedi import debug from jedi.parser import tree from jedi.evaluate.compiled import CompiledObject CODES = { 'attribute-error': (1, AttributeError, 'Potential AttributeError.'), 'name-error': (2, NameError, 'Potential NameError.'), 'import-error': (3, ImportError, 'Potential ImportError.'), 'type-error-generator': (4, TypeError, "TypeError: 'generator' object is not subscriptable."), 'type-error-too-many-arguments': (5, TypeError, None), 'type-error-too-few-arguments': (6, TypeError, None), 'type-error-keyword-argument': (7, TypeError, None), 'type-error-multiple-values': (8, TypeError, None), 'type-error-star-star': (9, TypeError, None), 'type-error-star': (10, TypeError, None), 'type-error-operation': (11, TypeError, None), } class Error(object): def __init__(self, name, module_path, start_pos, message=None): self.path = module_path self._start_pos = start_pos self.name = name if message is None: message = CODES[self.name][2] self.message = message @property def line(self): return self._start_pos[0] @property def column(self): return self._start_pos[1] @property def code(self): # The class name start first = self.__class__.__name__[0] return first + str(CODES[self.name][0]) def __unicode__(self): return '%s:%s:%s: %s %s' % (self.path, self.line, self.column, self.code, self.message) def __str__(self): return self.__unicode__() def __eq__(self, other): return (self.path == other.path and self.name == other.name and self._start_pos == other._start_pos) def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash((self.path, self._start_pos, self.name)) def __repr__(self): return '<%s %s: %s@%s,%s>' % (self.__class__.__name__, self.name, self.path, self._start_pos[0], self._start_pos[1]) class Warning(Error): pass def add(evaluator, name, jedi_obj, message=None, typ=Error, payload=None): from jedi.evaluate.iterable import MergedNodes while isinstance(jedi_obj, MergedNodes): if len(jedi_obj) != 1: # TODO is this kosher? return jedi_obj = list(jedi_obj)[0] exception = CODES[name][1] if _check_for_exception_catch(evaluator, jedi_obj, exception, payload): return module_path = jedi_obj.get_parent_until().path instance = typ(name, module_path, jedi_obj.start_pos, message) debug.warning(str(instance)) evaluator.analysis.append(instance) def _check_for_setattr(instance): """ Check if there's any setattr method inside an instance. If so, return True. """ module = instance.get_parent_until() try: stmts = module.used_names['setattr'] except KeyError: return False return any(instance.start_pos < stmt.start_pos < instance.end_pos for stmt in stmts) def add_attribute_error(evaluator, scope, name): message = ('AttributeError: %s has no attribute %s.' % (scope, name)) from jedi.evaluate.representation import Instance # Check for __getattr__/__getattribute__ existance and issue a warning # instead of an error, if that happens. if isinstance(scope, Instance): typ = Warning try: scope.get_subscope_by_name('__getattr__') except KeyError: try: scope.get_subscope_by_name('__getattribute__') except KeyError: if not _check_for_setattr(scope): typ = Error else: typ = Error payload = scope, name add(evaluator, 'attribute-error', name, message, typ, payload) def _check_for_exception_catch(evaluator, jedi_obj, exception, payload=None): """ Checks if a jedi object (e.g. `Statement`) sits inside a try/catch and doesn't count as an error (if equal to `exception`). Also checks `hasattr` for AttributeErrors and uses the `payload` to compare it. Returns True if the exception was catched. 
""" def check_match(cls, exception): try: return isinstance(cls, CompiledObject) and issubclass(exception, cls.obj) except TypeError: return False def check_try_for_except(obj, exception): # Only nodes in try iterator = iter(obj.children) for branch_type in iterator: colon = next(iterator) suite = next(iterator) if branch_type == 'try' \ and not (branch_type.start_pos < jedi_obj.start_pos <= suite.end_pos): return False for node in obj.except_clauses(): if node is None: return True # An exception block that catches everything. else: except_classes = evaluator.eval_element(node) for cls in except_classes: from jedi.evaluate import iterable if isinstance(cls, iterable.Array) and cls.type == 'tuple': # multiple exceptions for c in cls.values(): if check_match(c, exception): return True else: if check_match(cls, exception): return True def check_hasattr(node, suite): try: assert suite.start_pos <= jedi_obj.start_pos < suite.end_pos assert node.type == 'power' base = node.children[0] assert base.type == 'name' and base.value == 'hasattr' trailer = node.children[1] assert trailer.type == 'trailer' arglist = trailer.children[1] assert arglist.type == 'arglist' from jedi.evaluate.param import Arguments args = list(Arguments(evaluator, arglist).unpack()) # Arguments should be very simple assert len(args) == 2 # Check name key, values = args[1] assert len(values) == 1 names = evaluator.eval_element(values[0]) assert len(names) == 1 and isinstance(names[0], CompiledObject) assert names[0].obj == str(payload[1]) # Check objects key, values = args[0] assert len(values) == 1 objects = evaluator.eval_element(values[0]) return payload[0] in objects except AssertionError: return False obj = jedi_obj while obj is not None and not obj.isinstance(tree.Function, tree.Class): if obj.isinstance(tree.Flow): # try/except catch check if obj.isinstance(tree.TryStmt) and check_try_for_except(obj, exception): return True # hasattr check if exception == AttributeError and obj.isinstance(tree.IfStmt, tree.WhileStmt): if check_hasattr(obj.children[1], obj.children[3]): return True obj = obj.parent return False def get_module_statements(module): """ Returns the statements used in a module. All these statements should be evaluated to check for potential exceptions. """ def check_children(node): try: children = node.children except AttributeError: return [] else: nodes = [] for child in children: nodes += check_children(child) if child.type == 'trailer': c = child.children if c[0] == '(' and c[1] != ')': if c[1].type != 'arglist': if c[1].type == 'argument': nodes.append(c[1].children[-1]) else: nodes.append(c[1]) else: for argument in c[1].children: if argument.type == 'argument': nodes.append(argument.children[-1]) elif argument.type != 'operator': nodes.append(argument) return nodes def add_nodes(nodes): new = set() for node in nodes: if isinstance(node, tree.Flow): children = node.children if node.type == 'for_stmt': children = children[2:] # Don't want to include the names. # Pick the suite/simple_stmt. 
new |= add_nodes(children) elif node.type in ('simple_stmt', 'suite'): new |= add_nodes(node.children) elif node.type in ('return_stmt', 'yield_expr'): try: new.add(node.children[1]) except IndexError: pass elif node.type not in ('whitespace', 'operator', 'keyword', 'parameters', 'decorated', 'except_clause') \ and not isinstance(node, (tree.ClassOrFunc, tree.Import)): new.add(node) try: children = node.children except AttributeError: pass else: for next_node in children: new.update(check_children(node)) if next_node.type != 'keyword' and node.type != 'expr_stmt': new.add(node) return new nodes = set() import_names = set() decorated_funcs = [] for scope in module.walk(): for imp in set(scope.imports): import_names |= set(imp.get_defined_names()) if imp.is_nested(): import_names |= set(path[-1] for path in imp.paths()) children = scope.children if isinstance(scope, tree.ClassOrFunc): children = children[2:] # We don't want to include the class name. nodes |= add_nodes(children) for flow in scope.flows: if flow.type == 'for_stmt': nodes.add(flow.children[3]) elif flow.type == 'try_stmt': nodes.update(e for e in flow.except_clauses() if e is not None) try: decorators = scope.get_decorators() except AttributeError: pass else: if decorators: decorated_funcs.append(scope) return nodes, import_names, decorated_funcs jedi-0.9.0/jedi/evaluate/flow_analysis.py0000664000175000017500000000551312517736533020622 0ustar daviddavid00000000000000from jedi.parser import tree class Status(object): lookup_table = {} def __init__(self, value, name): self._value = value self._name = name Status.lookup_table[value] = self def invert(self): if self is REACHABLE: return UNREACHABLE elif self is UNREACHABLE: return REACHABLE else: return UNSURE def __and__(self, other): if UNSURE in (self, other): return UNSURE else: return REACHABLE if self._value and other._value else UNREACHABLE def __repr__(self): return '<%s: %s>' % (type(self).__name__, self._name) REACHABLE = Status(True, 'reachable') UNREACHABLE = Status(False, 'unreachable') UNSURE = Status(None, 'unsure') def break_check(evaluator, base_scope, stmt, origin_scope=None): element_scope = evaluator.wrap(stmt.get_parent_scope(include_flows=True)) # Direct parents get resolved, we filter scopes that are separate branches. # This makes sense for autocompletion and static analysis. For actual # Python it doesn't matter, because we're talking about potentially # unreachable code. # e.g. `if 0:` would cause all name lookup within the flow make # unaccessible. This is not a "problem" in Python, because the code is # never called. In Jedi though, we still want to infer types. while origin_scope is not None: if element_scope == origin_scope: return REACHABLE origin_scope = origin_scope.parent return _break_check(evaluator, stmt, base_scope, element_scope) def _break_check(evaluator, stmt, base_scope, element_scope): element_scope = evaluator.wrap(element_scope) base_scope = evaluator.wrap(base_scope) reachable = REACHABLE if isinstance(element_scope, tree.IfStmt): if element_scope.node_after_else(stmt): for check_node in element_scope.check_nodes(): reachable = _check_if(evaluator, check_node) if reachable in (REACHABLE, UNSURE): break reachable = reachable.invert() else: node = element_scope.node_in_which_check_node(stmt) reachable = _check_if(evaluator, node) elif isinstance(element_scope, (tree.TryStmt, tree.WhileStmt)): return UNSURE # Only reachable branches need to be examined further. 
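# For example, a statement nested inside ``if 0:`` evaluates the check node
# to a single False and becomes UNREACHABLE, while a condition like
# ``if foo():`` usually yields several (or no) distinct boolean values and
# therefore stays UNSURE.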
if reachable in (UNREACHABLE, UNSURE): return reachable if base_scope != element_scope and base_scope != element_scope.parent: return reachable & _break_check(evaluator, stmt, base_scope, element_scope.parent) return reachable def _check_if(evaluator, node): types = evaluator.eval_element(node) values = set(x.py__bool__() for x in types) if len(values) == 1: return Status.lookup_table[values.pop()] else: return UNSURE jedi-0.9.0/jedi/evaluate/compiled/0000775000175000017500000000000012517736563017171 5ustar daviddavid00000000000000jedi-0.9.0/jedi/evaluate/compiled/__init__.py0000664000175000017500000004031612517736533021303 0ustar daviddavid00000000000000""" Imitate the parser representation. """ import inspect import re import sys import os from functools import partial from jedi._compatibility import builtins as _builtins, unicode from jedi import debug from jedi.cache import underscore_memoization, memoize_method from jedi.evaluate.sys_path import get_sys_path from jedi.parser.tree import Param, Base, Operator, zero_position_modifier from jedi.evaluate.helpers import FakeName from . import fake _sep = os.path.sep if os.path.altsep is not None: _sep += os.path.altsep _path_re = re.compile('(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep))) del _sep class CheckAttribute(object): """Raises an AttributeError if the attribute X isn't available.""" def __init__(self, func): self.func = func # Remove the py in front of e.g. py__call__. self.check_name = func.__name__[2:] def __get__(self, instance, owner): # This might raise an AttributeError. That's wanted. getattr(instance.obj, self.check_name) return partial(self.func, instance) class CompiledObject(Base): # comply with the parser start_pos = 0, 0 path = None # modules have this attribute - set it to None. used_names = {} # To be consistent with modules. def __init__(self, obj, parent=None): self.obj = obj self.parent = parent @property def py__call__(self): def actual(evaluator, params): if inspect.isclass(self.obj): from jedi.evaluate.representation import Instance return [Instance(evaluator, self, params)] else: return list(self._execute_function(evaluator, params)) # Might raise an AttributeError, which is intentional. 
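# Accessing ``__call__`` below is only a capability probe: for objects that
# are not callable the AttributeError propagates, so this wrapper behaves as
# if it had no ``py__call__`` attribute at all.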
self.obj.__call__ return actual @CheckAttribute def py__class__(self, evaluator): return CompiledObject(self.obj.__class__, parent=self.parent) @CheckAttribute def py__mro__(self, evaluator): return tuple(create(evaluator, cls, self.parent) for cls in self.obj.__mro__) @CheckAttribute def py__bases__(self, evaluator): return tuple(create(evaluator, cls) for cls in self.obj.__bases__) def py__bool__(self): return bool(self.obj) def py__file__(self): return self.obj.__file__ def is_class(self): return inspect.isclass(self.obj) @property def doc(self): return inspect.getdoc(self.obj) or '' @property def params(self): params_str, ret = self._parse_function_doc() tokens = params_str.split(',') if inspect.ismethoddescriptor(self._cls().obj): tokens.insert(0, 'self') params = [] for p in tokens: parts = [FakeName(part) for part in p.strip().split('=')] if len(parts) > 1: parts.insert(1, Operator(zero_position_modifier, '=', (0, 0))) params.append(Param(parts, self)) return params def __repr__(self): return '<%s: %s>' % (type(self).__name__, repr(self.obj)) @underscore_memoization def _parse_function_doc(self): if self.doc is None: return '', '' return _parse_function_doc(self.doc) def api_type(self): if fake.is_class_instance(self.obj): return 'instance' cls = self._cls().obj if inspect.isclass(cls): return 'class' elif inspect.ismodule(cls): return 'module' elif inspect.isbuiltin(cls) or inspect.ismethod(cls) \ or inspect.ismethoddescriptor(cls): return 'function' @property def type(self): """Imitate the tree.Node.type values.""" cls = self._cls().obj if inspect.isclass(cls): return 'classdef' elif inspect.ismodule(cls): return 'file_input' elif inspect.isbuiltin(cls) or inspect.ismethod(cls) \ or inspect.ismethoddescriptor(cls): return 'funcdef' @underscore_memoization def _cls(self): # Ensures that a CompiledObject is returned that is not an instance (like list) if fake.is_class_instance(self.obj): try: c = self.obj.__class__ except AttributeError: # happens with numpy.core.umath._UFUNC_API (you get it # automatically by doing `import numpy`. c = type(None) return CompiledObject(c, self.parent) return self @property def names_dict(self): # For compatibility with `representation.Class`. return self.names_dicts(False)[0] def names_dicts(self, search_global, is_instance=False): return self._names_dict_ensure_one_dict(is_instance) @memoize_method def _names_dict_ensure_one_dict(self, is_instance): """ search_global shouldn't change the fact that there's one dict, this way there's only one `object`. """ return [LazyNamesDict(self._cls(), is_instance)] def get_subscope_by_name(self, name): if name in dir(self._cls().obj): return CompiledName(self._cls(), name).parent else: raise KeyError("CompiledObject doesn't have an attribute '%s'." % name) def get_index_types(self, evaluator, index_array=()): # If the object doesn't have `__getitem__`, just raise the # AttributeError. if not hasattr(self.obj, '__getitem__'): debug.warning('Tried to call __getitem__ on non-iterable.') return [] if type(self.obj) not in (str, list, tuple, unicode, bytes, bytearray, dict): # Get rid of side effects, we won't call custom `__getitem__`s. return [] result = [] from jedi.evaluate.iterable import create_indexes_or_slices for typ in create_indexes_or_slices(evaluator, index_array): index = None try: index = typ.obj new = self.obj[index] except (KeyError, IndexError, TypeError, AttributeError): # Just try, we don't care if it fails, except for slices. 
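# Slices are special-cased below: slicing a compiled sequence returns an
# object of the same type, so ``self`` is used as the result even when the
# concrete lookup failed.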
if isinstance(index, slice): result.append(self) else: result.append(CompiledObject(new)) if not result: try: for obj in self.obj: result.append(CompiledObject(obj)) except TypeError: pass # self.obj maynot have an __iter__ method. return result @property def name(self): # might not exist sometimes (raises AttributeError) return FakeName(self._cls().obj.__name__, self) def _execute_function(self, evaluator, params): if self.type != 'funcdef': return for name in self._parse_function_doc()[1].split(): try: bltn_obj = _create_from_name(builtin, builtin, name) except AttributeError: continue else: if isinstance(bltn_obj, CompiledObject) and bltn_obj.obj is None: # We want everything except None. continue for result in evaluator.execute(bltn_obj, params): yield result @property @underscore_memoization def subscopes(self): """ Returns only the faked scopes - the other ones are not important for internal analysis. """ module = self.get_parent_until() faked_subscopes = [] for name in dir(self._cls().obj): f = fake.get_faked(module.obj, self.obj, name) if f: f.parent = self faked_subscopes.append(f) return faked_subscopes def is_scope(self): return True def get_self_attributes(self): return [] # Instance compatibility def get_imports(self): return [] # Builtins don't have imports class LazyNamesDict(object): """ A names_dict instance for compiled objects, resembles the parser.tree. """ def __init__(self, compiled_obj, is_instance): self._compiled_obj = compiled_obj self._is_instance = is_instance def __iter__(self): return (v[0].value for v in self.values()) @memoize_method def __getitem__(self, name): try: getattr(self._compiled_obj.obj, name) except AttributeError: raise KeyError('%s in %s not found.' % (name, self._compiled_obj)) return [CompiledName(self._compiled_obj, name)] def values(self): obj = self._compiled_obj.obj values = [] for name in dir(obj): try: values.append(self[name]) except KeyError: # The dir function can be wrong. pass # dir doesn't include the type names. if not inspect.ismodule(obj) and obj != type and not self._is_instance: values += _type_names_dict.values() return values class CompiledName(FakeName): def __init__(self, obj, name): super(CompiledName, self).__init__(name) self._obj = obj self.name = name def __repr__(self): try: name = self._obj.name # __name__ is not defined all the time except AttributeError: name = None return '<%s: (%s).%s>' % (type(self).__name__, name, self.name) def is_definition(self): return True @property @underscore_memoization def parent(self): module = self._obj.get_parent_until() return _create_from_name(module, self._obj, self.name) @parent.setter def parent(self, value): pass # Just ignore this, FakeName tries to overwrite the parent attribute. def dotted_from_fs_path(fs_path, sys_path=None): """ Changes `/usr/lib/python3.4/email/utils.py` to `email.utils`. I.e. compares the path with sys.path and then returns the dotted_path. If the path is not in the sys.path, just returns None. """ if sys_path is None: sys_path = get_sys_path() if os.path.basename(fs_path).startswith('__init__.'): # We are calculating the path. __init__ files are not interesting. 
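# e.g. ``.../email/__init__.py`` becomes ``.../email``, so the package maps
# to ``email`` rather than ``email.__init__``.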
fs_path = os.path.dirname(fs_path) # prefer # - UNIX # /path/to/pythonX.Y/lib-dynload # /path/to/pythonX.Y/site-packages # - Windows # C:\path\to\DLLs # C:\path\to\Lib\site-packages # over # - UNIX # /path/to/pythonX.Y # - Windows # C:\path\to\Lib path = '' for s in sys_path: if (fs_path.startswith(s) and len(path) < len(s)): path = s return _path_re.sub('', fs_path[len(path):].lstrip(os.path.sep)).replace(os.path.sep, '.') def load_module(path=None, name=None): if path is not None: dotted_path = dotted_from_fs_path(path) else: dotted_path = name sys_path = get_sys_path() if dotted_path is None: p, _, dotted_path = path.partition(os.path.sep) sys_path.insert(0, p) temp, sys.path = sys.path, sys_path try: __import__(dotted_path) except RuntimeError: if 'PySide' in dotted_path or 'PyQt' in dotted_path: # RuntimeError: the PyQt4.QtCore and PyQt5.QtCore modules both wrap # the QObject class. # See https://github.com/davidhalter/jedi/pull/483 return None raise except ImportError: # If a module is "corrupt" or not really a Python module or whatever. debug.warning('Module %s not importable.', path) return None finally: sys.path = temp # Just access the cache after import, because of #59 as well as the very # complicated import structure of Python. module = sys.modules[dotted_path] return CompiledObject(module) docstr_defaults = { 'floating point number': 'float', 'character': 'str', 'integer': 'int', 'dictionary': 'dict', 'string': 'str', } def _parse_function_doc(doc): """ Takes a function and returns the params and return value as a tuple. This is nothing more than a docstring parser. TODO docstrings like utime(path, (atime, mtime)) and a(b [, b]) -> None TODO docstrings like 'tuple of integers' """ # parse round parentheses: def func(a, (b,c)) try: count = 0 start = doc.index('(') for i, s in enumerate(doc[start:]): if s == '(': count += 1 elif s == ')': count -= 1 if count == 0: end = start + i break param_str = doc[start + 1:end] except (ValueError, UnboundLocalError): # ValueError for doc.index # UnboundLocalError for undefined end in last line debug.dbg('no brackets found - no param') end = 0 param_str = '' else: # remove square brackets, that show an optional param ( = None) def change_options(m): args = m.group(1).split(',') for i, a in enumerate(args): if a and '=' not in a: args[i] += '=None' return ','.join(args) while True: param_str, changes = re.subn(r' ?\[([^\[\]]+)\]', change_options, param_str) if changes == 0: break param_str = param_str.replace('-', '_') # see: isinstance.__doc__ # parse return value r = re.search('-[>-]* ', doc[end:end + 7]) if r is None: ret = '' else: index = end + r.end() # get result type, which can contain newlines pattern = re.compile(r'(,\n|[^\n-])+') ret_str = pattern.match(doc, index).group(0).strip() # New object -> object() ret_str = re.sub(r'[nN]ew (.*)', r'\1()', ret_str) ret = docstr_defaults.get(ret_str, ret_str) return param_str, ret class Builtin(CompiledObject): @memoize_method def get_by_name(self, name): return self.names_dict[name][0].parent def _a_generator(foo): """Used to have an object to return for generators.""" yield 42 yield foo def _create_from_name(module, parent, name): faked = fake.get_faked(module.obj, parent.obj, name) # only functions are necessary. if faked is not None: faked.parent = parent return faked try: obj = getattr(parent.obj, name) except AttributeError: # happens e.g. 
in properties of # PyQt4.QtGui.QStyleOptionComboBox.currentText # -> just set it to None obj = None return CompiledObject(obj, parent) builtin = Builtin(_builtins) magic_function_class = CompiledObject(type(load_module), parent=builtin) generator_obj = CompiledObject(_a_generator(1.0)) _type_names_dict = builtin.get_by_name('type').names_dict none_obj = builtin.get_by_name('None') false_obj = builtin.get_by_name('False') true_obj = builtin.get_by_name('True') object_obj = builtin.get_by_name('object') def keyword_from_value(obj): if obj is None: return none_obj elif obj is False: return false_obj elif obj is True: return true_obj else: raise NotImplementedError def compiled_objects_cache(func): def wrapper(evaluator, obj, parent=builtin, module=None): # Do a very cheap form of caching here. key = id(obj), id(parent), id(module) try: return evaluator.compiled_cache[key][0] except KeyError: result = func(evaluator, obj, parent, module) # Need to cache all of them, otherwise the id could be overwritten. evaluator.compiled_cache[key] = result, obj, parent, module return result return wrapper @compiled_objects_cache def create(evaluator, obj, parent=builtin, module=None): """ A very weird interface class to this module. The more options provided the more acurate loading compiled objects is. """ if not inspect.ismodule(obj): faked = fake.get_faked(module and module.obj, obj) if faked is not None: faked.parent = parent return faked try: if parent == builtin and obj.__module__ in ('builtins', '__builtin__'): return builtin.get_by_name(obj.__name__) except AttributeError: pass return CompiledObject(obj, parent) jedi-0.9.0/jedi/evaluate/compiled/fake/0000775000175000017500000000000012517736563020077 5ustar daviddavid00000000000000jedi-0.9.0/jedi/evaluate/compiled/fake/datetime.pym0000664000175000017500000000011512331540214022373 0ustar daviddavid00000000000000class datetime(): @staticmethod def now(): return datetime() jedi-0.9.0/jedi/evaluate/compiled/fake/builtins.pym0000664000175000017500000001162512517736533022461 0ustar daviddavid00000000000000""" Pure Python implementation of some builtins. This code is not going to be executed anywhere. These implementations are not always correct, but should work as good as possible for the auto completion. """ def next(iterator, default=None): if random.choice([0, 1]): if hasattr("next"): return iterator.next() else: return iterator.__next__() else: if default is not None: return default def iter(collection, sentinel=None): if sentinel: yield collection() else: for c in collection: yield c def range(start, stop=None, step=1): return [0] class file(): def __iter__(self): yield '' def next(self): return '' class xrange(): # Attention: this function doesn't exist in Py3k (there it is range). 
def __iter__(self): yield 1 def count(self): return 1 def index(self): return 1 def open(file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True): import io return io.TextIOWrapper(file, mode, buffering, encoding, errors, newline, closefd) def open_python2(name, mode=None, buffering=None): return file(name, mode, buffering) #-------------------------------------------------------- # descriptors #-------------------------------------------------------- class property(): def __init__(self, fget, fset=None, fdel=None, doc=None): self.fget = fget self.fset = fset self.fdel = fdel self.__doc__ = doc def __get__(self, obj, cls): return self.fget(obj) def __set__(self, obj, value): self.fset(obj, value) def __delete__(self, obj): self.fdel(obj) def setter(self, func): self.fset = func return self def getter(self, func): self.fget = func return self def deleter(self, func): self.fdel = func return self class staticmethod(): def __init__(self, func): self.__func = func def __get__(self, obj, cls): return self.__func class classmethod(): def __init__(self, func): self.__func = func def __get__(self, obj, cls): def _method(*args, **kwargs): return self.__func(cls, *args, **kwargs) return _method #-------------------------------------------------------- # array stuff #-------------------------------------------------------- class list(): def __init__(self, iterable=[]): self.__iterable = [] for i in iterable: self.__iterable += [i] def __iter__(self): for i in self.__iterable: yield i def __getitem__(self, y): return self.__iterable[y] def pop(self): return self.__iterable[-1] class tuple(): def __init__(self, iterable=[]): self.__iterable = [] for i in iterable: self.__iterable += [i] def __iter__(self): for i in self.__iterable: yield i def __getitem__(self, y): return self.__iterable[y] def index(self): return 1 def count(self): return 1 class set(): def __init__(self, iterable=[]): self.__iterable = iterable def __iter__(self): for i in self.__iterable: yield i def pop(self): return list(self.__iterable)[-1] def copy(self): return self def difference(self, other): return self - other def intersection(self, other): return self & other def symmetric_difference(self, other): return self ^ other def union(self, other): return self | other class frozenset(): def __init__(self, iterable=[]): self.__iterable = iterable def __iter__(self): for i in self.__iterable: yield i def copy(self): return self class dict(): def __init__(self, **elements): self.__elements = elements def clear(self): # has a strange docstr pass def get(self, k, d=None): # TODO implement try: #return self.__elements[k] pass except KeyError: return d def setdefault(self, k, d): # TODO maybe also return the content return d class reversed(): def __init__(self, sequence): self.__sequence = sequence def __iter__(self): for i in self.__sequence: yield i def __next__(self): return next(self.__iter__()) def next(self): return next(self.__iter__()) def sorted(iterable, cmp=None, key=None, reverse=False): return iterable #-------------------------------------------------------- # basic types #-------------------------------------------------------- class int(): def __init__(self, x, base=None): pass class str(): def __init__(self, obj): pass class type(): def mro(): return [object] jedi-0.9.0/jedi/evaluate/compiled/fake/_weakref.pym0000664000175000017500000000031112331540214022360 0ustar daviddavid00000000000000def proxy(object, callback=None): return object class weakref(): def __init__(self, object, 
callback=None):
        self.__object = object

    def __call__(self):
        return self.__object
jedi-0.9.0/jedi/evaluate/compiled/fake/_sre.pym0000664000175000017500000000571312363566726021565 0ustar daviddavid00000000000000def compile():
    class SRE_Match():
        endpos = int()
        lastgroup = int()
        lastindex = int()
        pos = int()
        string = str()
        regs = ((int(), int()),)

        def __init__(self, pattern):
            self.re = pattern

        def start(self):
            return int()

        def end(self):
            return int()

        def span(self):
            return int(), int()

        def expand(self):
            return str()

        def group(self, nr):
            return str()

        def groupdict(self):
            return {str(): str()}

        def groups(self):
            return (str(),)

    class SRE_Pattern():
        flags = int()
        groupindex = {}
        groups = int()
        pattern = str()

        def findall(self, string, pos=None, endpos=None):
            """
            findall(string[, pos[, endpos]]) --> list.
            Return a list of all non-overlapping matches of pattern in string.
            """
            return [str()]

        def finditer(self, string, pos=None, endpos=None):
            """
            finditer(string[, pos[, endpos]]) --> iterator.
            Return an iterator over all non-overlapping matches for the
            RE pattern in string. For each match, the iterator returns a
            match object.
            """
            yield SRE_Match(self)

        def match(self, string, pos=None, endpos=None):
            """
            match(string[, pos[, endpos]]) --> match object or None.
            Matches zero or more characters at the beginning of the string
            pattern
            """
            return SRE_Match(self)

        def scanner(self, string, pos=None, endpos=None):
            pass

        def search(self, string, pos=None, endpos=None):
            """
            search(string[, pos[, endpos]]) --> match object or None.
            Scan through string looking for a match, and return a
            corresponding MatchObject instance. Return None if no position in
            the string matches.
            """
            return SRE_Match(self)

        def split(self, string, maxsplit=0):
            """
            split(string[, maxsplit = 0]) --> list.
            Split string by the occurrences of pattern.
            """
            return [str()]

        def sub(self, repl, string, count=0):
            """
            sub(repl, string[, count = 0]) --> newstring
            Return the string obtained by replacing the leftmost
            non-overlapping occurrences of pattern in string by the
            replacement repl.
""" return (str(), int()) return SRE_Pattern() jedi-0.9.0/jedi/evaluate/compiled/fake/_sqlite3.pym0000664000175000017500000000075112331540214022330 0ustar daviddavid00000000000000def connect(database, timeout=None, isolation_level=None, detect_types=None, factory=None): return Connection() class Connection(): def cursor(self): return Cursor() class Cursor(): def cursor(self): return Cursor() def fetchone(self): return Row() def fetchmany(self, size=cursor.arraysize): return [self.fetchone()] def fetchall(self): return [self.fetchone()] class Row(): def keys(self): return [''] jedi-0.9.0/jedi/evaluate/compiled/fake/_functools.pym0000664000175000017500000000050512517736533022776 0ustar daviddavid00000000000000class partial(): def __init__(self, func, *args, **keywords): self.__func = func self.__args = args self.__keywords = keywords def __call__(self, *args, **kwargs): # TODO should be **dict(self.__keywords, **kwargs) return self.__func(*(self.__args + args), **self.__keywords) jedi-0.9.0/jedi/evaluate/compiled/fake/posix.pym0000664000175000017500000000007212331540214021743 0ustar daviddavid00000000000000def getcwd(): return '' def getcwdu(): return '' jedi-0.9.0/jedi/evaluate/compiled/fake/io.pym0000664000175000017500000000016112517736533021230 0ustar daviddavid00000000000000class TextIOWrapper(): def __next__(self): return str() def __iter__(self): yield str() jedi-0.9.0/jedi/evaluate/compiled/fake.py0000664000175000017500000000774412517736533020462 0ustar daviddavid00000000000000""" Loads functions that are mixed in to the standard library. E.g. builtins are written in C (binaries), but my autocompletion only understands Python code. By mixing in Python code, the autocompletion should work much better for builtins. """ import os import inspect from jedi._compatibility import is_py3, builtins, unicode from jedi.parser import Parser, load_grammar from jedi.parser import tree as pt from jedi.evaluate.helpers import FakeName modules = {} def _load_faked_module(module): module_name = module.__name__ if module_name == '__builtin__' and not is_py3: module_name = 'builtins' try: return modules[module_name] except KeyError: path = os.path.dirname(os.path.abspath(__file__)) try: with open(os.path.join(path, 'fake', module_name) + '.pym') as f: source = f.read() except IOError: modules[module_name] = None return grammar = load_grammar('grammar3.4') module = Parser(grammar, unicode(source), module_name).module modules[module_name] = module if module_name == 'builtins' and not is_py3: # There are two implementations of `open` for either python 2/3. # -> Rename the python2 version (`look at fake/builtins.pym`). open_func = search_scope(module, 'open') open_func.children[1] = FakeName('open_python3') open_func = search_scope(module, 'open_python2') open_func.children[1] = FakeName('open') return module def search_scope(scope, obj_name): for s in scope.subscopes: if str(s.name) == obj_name: return s def get_module(obj): if inspect.ismodule(obj): return obj try: obj = obj.__objclass__ except AttributeError: pass try: imp_plz = obj.__module__ except AttributeError: # Unfortunately in some cases like `int` there's no __module__ return builtins else: return __import__(imp_plz) def _faked(module, obj, name): # Crazy underscore actions to try to escape all the internal madness. if module is None: module = get_module(obj) faked_mod = _load_faked_module(module) if faked_mod is None: return # Having the module as a `parser.representation.module`, we need to scan # for methods. 
if name is None: if inspect.isbuiltin(obj): return search_scope(faked_mod, obj.__name__) elif not inspect.isclass(obj): # object is a method or descriptor cls = search_scope(faked_mod, obj.__objclass__.__name__) if cls is None: return return search_scope(cls, obj.__name__) else: if obj == module: return search_scope(faked_mod, name) else: cls = search_scope(faked_mod, obj.__name__) if cls is None: return return search_scope(cls, name) def get_faked(module, obj, name=None): obj = obj.__class__ if is_class_instance(obj) else obj result = _faked(module, obj, name) if result is None or isinstance(result, pt.Class): # We're not interested in classes. What we want is functions. return None else: # Set the docstr which was previously not set (faked modules don't # contain it). doc = '"""%s"""' % obj.__doc__ # TODO need escapes. suite = result.children[-1] string = pt.String(pt.zero_position_modifier, doc, (0, 0), '') new_line = pt.Whitespace('\n', (0, 0), '') docstr_node = pt.Node('simple_stmt', [string, new_line]) suite.children.insert(2, docstr_node) return result def is_class_instance(obj): """Like inspect.* methods.""" return not (inspect.isclass(obj) or inspect.ismodule(obj) or inspect.isbuiltin(obj) or inspect.ismethod(obj) or inspect.ismethoddescriptor(obj) or inspect.iscode(obj) or inspect.isgenerator(obj)) jedi-0.9.0/jedi/evaluate/helpers.py0000664000175000017500000001261512517736533017413 0ustar daviddavid00000000000000import copy from itertools import chain from jedi.parser import tree def deep_ast_copy(obj, parent=None, new_elements=None): """ Much, much faster than copy.deepcopy, but just for Parser elements (Doesn't copy parents). """ if new_elements is None: new_elements = {} def copy_node(obj): # If it's already in the cache, just return it. try: return new_elements[obj] except KeyError: # Actually copy and set attributes. new_obj = copy.copy(obj) new_elements[obj] = new_obj # Copy children new_children = [] for child in obj.children: typ = child.type if typ in ('whitespace', 'operator', 'keyword', 'number', 'string'): # At the moment we're not actually copying those primitive # elements, because there's really no need to. The parents are # obviously wrong, but that's not an issue. new_child = child elif typ == 'name': new_elements[child] = new_child = copy.copy(child) new_child.parent = new_obj else: # Is a BaseNode. new_child = copy_node(child) new_child.parent = new_obj new_children.append(new_child) new_obj.children = new_children # Copy the names_dict (if there is one). try: names_dict = obj.names_dict except AttributeError: pass else: try: new_obj.names_dict = new_names_dict = {} except AttributeError: # Impossible to set CompFor.names_dict pass else: for string, names in names_dict.items(): new_names_dict[string] = [new_elements[n] for n in names] return new_obj if obj.type == 'name': # Special case of a Name object. new_elements[obj] = new_obj = copy.copy(obj) if parent is not None: new_obj.parent = parent elif isinstance(obj, tree.BaseNode): new_obj = copy_node(obj) if parent is not None: for child in new_obj.children: if isinstance(child, (tree.Name, tree.BaseNode)): child.parent = parent else: # String literals and so on. new_obj = obj # Good enough, don't need to copy anything. return new_obj def call_of_name(name, cut_own_trailer=False): """ Creates a "call" node that consist of all ``trailer`` and ``power`` objects. E.g. if you call it with ``append``:: list([]).append(3) or None You would get a node with the content ``list([]).append`` back. 
This generates a copy of the original ast node. """ par = name if tree.is_node(par.parent, 'trailer'): par = par.parent power = par.parent if tree.is_node(power, 'power') and power.children[0] != name \ and not (power.children[-2] == '**' and name.start_pos > power.children[-1].start_pos): par = power # Now the name must be part of a trailer index = par.children.index(name.parent) if index != len(par.children) - 1 or cut_own_trailer: # Now we have to cut the other trailers away. par = deep_ast_copy(par) if not cut_own_trailer: # Normally we would remove just the stuff after the index, but # if the option is set remove the index as well. (for goto) index = index + 1 par.children[index:] = [] return par def get_module_names(module, all_scopes): """ Returns a dictionary with name parts as keys and their call paths as values. """ if all_scopes: dct = module.used_names else: dct = module.names_dict return chain.from_iterable(dct.values()) class FakeImport(tree.ImportName): def __init__(self, name, parent, level=0): super(FakeImport, self).__init__([]) self.parent = parent self._level = level self.name = name def get_defined_names(self): return [self.name] def aliases(self): return {} @property def level(self): return self._level @property def start_pos(self): return 0, 0 def paths(self): return [[self.name]] def is_definition(self): return True class FakeName(tree.Name): def __init__(self, name_str, parent=None, start_pos=(0, 0), is_definition=None): """ In case is_definition is defined (not None), that bool value will be returned. """ super(FakeName, self).__init__(tree.zero_position_modifier, name_str, start_pos) self.parent = parent self._is_definition = is_definition def get_definition(self): return self.parent def is_definition(self): if self._is_definition is None: return super(FakeName, self).is_definition() else: return self._is_definition class LazyName(FakeName): def __init__(self, name, parent_callback, is_definition=None): super(LazyName, self).__init__(name, is_definition=is_definition) self._parent_callback = parent_callback @property def parent(self): return self._parent_callback() @parent.setter def parent(self, value): pass # Do nothing, super classes can try to set the parent. jedi-0.9.0/jedi/evaluate/docstrings.py0000664000175000017500000001514212517736533020126 0ustar daviddavid00000000000000""" Docstrings are another source of information for functions and classes. :mod:`jedi.evaluate.dynamic` tries to find all executions of functions, while the docstring parsing is much easier. There are two different types of docstrings that |jedi| understands: - `Sphinx `_ - `Epydoc `_ For example, the sphinx annotation ``:type foo: str`` clearly states that the type of ``foo`` is ``str``. As an addition to parameter searching, this module also provides return annotations. 
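Return annotations such as ``:rtype: str`` (Sphinx) or ``@rtype: str``
(Epydoc) are evaluated the same way, so the result of calling such a
function is completed as if it were a ``str``.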
""" from ast import literal_eval import re from itertools import chain from textwrap import dedent from jedi.evaluate.cache import memoize_default from jedi.parser import Parser, load_grammar from jedi.common import indent_block from jedi.evaluate.iterable import Array, FakeSequence, AlreadyEvaluated DOCSTRING_PARAM_PATTERNS = [ r'\s*:type\s+%s:\s*([^\n]+)', # Sphinx r'\s*:param\s+(\w+)\s+%s:[^\n]+', # Sphinx param with type r'\s*@type\s+%s:\s*([^\n]+)', # Epydoc ] DOCSTRING_RETURN_PATTERNS = [ re.compile(r'\s*:rtype:\s*([^\n]+)', re.M), # Sphinx re.compile(r'\s*@rtype:\s*([^\n]+)', re.M), # Epydoc ] REST_ROLE_PATTERN = re.compile(r':[^`]+:`([^`]+)`') try: from numpydoc.docscrape import NumpyDocString except ImportError: def _search_param_in_numpydocstr(docstr, param_str): return [] else: def _search_param_in_numpydocstr(docstr, param_str): """Search `docstr` (in numpydoc format) for type(-s) of `param_str`.""" params = NumpyDocString(docstr)._parsed_data['Parameters'] for p_name, p_type, p_descr in params: if p_name == param_str: m = re.match('([^,]+(,[^,]+)*?)(,[ ]*optional)?$', p_type) if m: p_type = m.group(1) if p_type.startswith('{'): types = set(type(x).__name__ for x in literal_eval(p_type)) return list(types) else: return [p_type] return [] def _search_param_in_docstr(docstr, param_str): """ Search `docstr` for type(-s) of `param_str`. >>> _search_param_in_docstr(':type param: int', 'param') ['int'] >>> _search_param_in_docstr('@type param: int', 'param') ['int'] >>> _search_param_in_docstr( ... ':type param: :class:`threading.Thread`', 'param') ['threading.Thread'] >>> bool(_search_param_in_docstr('no document', 'param')) False >>> _search_param_in_docstr(':param int param: some description', 'param') ['int'] """ # look at #40 to see definitions of those params patterns = [re.compile(p % re.escape(param_str)) for p in DOCSTRING_PARAM_PATTERNS] for pattern in patterns: match = pattern.search(docstr) if match: return [_strip_rst_role(match.group(1))] return (_search_param_in_numpydocstr(docstr, param_str) or []) def _strip_rst_role(type_str): """ Strip off the part looks like a ReST role in `type_str`. >>> _strip_rst_role(':class:`ClassName`') # strip off :class: 'ClassName' >>> _strip_rst_role(':py:obj:`module.Object`') # works with domain 'module.Object' >>> _strip_rst_role('ClassName') # do nothing when not ReST role 'ClassName' See also: http://sphinx-doc.org/domains.html#cross-referencing-python-objects """ match = REST_ROLE_PATTERN.match(type_str) if match: return match.group(1) else: return type_str def _evaluate_for_statement_string(evaluator, string, module): code = dedent(""" def pseudo_docstring_stuff(): # Create a pseudo function for docstring statements. %s """) if string is None: return [] for element in re.findall('((?:\w+\.)*\w+)\.', string): # Try to import module part in dotted name. # (e.g., 'threading' in 'threading.Thread'). string = 'import %s\n' % element + string # Take the default grammar here, if we load the Python 2.7 grammar here, it # will be impossible to use `...` (Ellipsis) as a token. Docstring types # don't need to conform with the current grammar. p = Parser(load_grammar(), code % indent_block(string)) try: pseudo_cls = p.module.subscopes[0] # First pick suite, then simple_stmt (-2 for DEDENT) and then the node, # which is also not the last item, because there's a newline. stmt = pseudo_cls.children[-1].children[-2].children[-2] except (AttributeError, IndexError): return [] # Use the module of the param. 
# TODO this module is not the module of the param in case of a function # call. In that case it's the module of the function call. # stuffed with content from a function call. pseudo_cls.parent = module return list(_execute_types_in_stmt(evaluator, stmt)) def _execute_types_in_stmt(evaluator, stmt): """ Executing all types or general elements that we find in a statement. This doesn't include tuple, list and dict literals, because the stuff they contain is executed. (Used as type information). """ definitions = evaluator.eval_element(stmt) return chain.from_iterable(_execute_array_values(evaluator, d) for d in definitions) def _execute_array_values(evaluator, array): """ Tuples indicate that there's not just one return value, but the listed ones. `(str, int)` means that it returns a tuple with both types. """ if isinstance(array, Array): values = [] for typ in array.values(): objects = _execute_array_values(evaluator, typ) values.append(AlreadyEvaluated(objects)) return [FakeSequence(evaluator, values, array.type)] else: return evaluator.execute(array) @memoize_default(None, evaluator_is_first_arg=True) def follow_param(evaluator, param): func = param.parent_function return [p for param_str in _search_param_in_docstr(func.raw_doc, str(param.name)) for p in _evaluate_for_statement_string(evaluator, param_str, param.get_parent_until())] @memoize_default(None, evaluator_is_first_arg=True) def find_return_types(evaluator, func): def search_return_in_docstr(code): for p in DOCSTRING_RETURN_PATTERNS: match = p.search(code) if match: return _strip_rst_role(match.group(1)) type_str = search_return_in_docstr(func.raw_doc) return _evaluate_for_statement_string(evaluator, type_str, func.get_parent_until()) jedi-0.9.0/jedi/evaluate/sys_path.py0000664000175000017500000002072612517736533017605 0ustar daviddavid00000000000000import glob import os import sys from jedi._compatibility import exec_function, unicode from jedi.parser import tree from jedi.parser import Parser from jedi.evaluate.cache import memoize_default from jedi import debug from jedi import common from jedi import cache def get_sys_path(): def check_virtual_env(sys_path): """ Add virtualenv's site-packages to the `sys.path`.""" venv = os.getenv('VIRTUAL_ENV') if not venv: return venv = os.path.abspath(venv) p = _get_venv_sitepackages(venv) if p not in sys_path: sys_path.insert(0, p) # Add all egg-links from the virtualenv. for egg_link in glob.glob(os.path.join(p, '*.egg-link')): with open(egg_link) as fd: sys_path.insert(0, fd.readline().rstrip()) check_virtual_env(sys.path) return [p for p in sys.path if p != ""] def _get_venv_sitepackages(venv): if os.name == 'nt': p = os.path.join(venv, 'lib', 'site-packages') else: p = os.path.join(venv, 'lib', 'python%d.%d' % sys.version_info[:2], 'site-packages') return p def _execute_code(module_path, code): c = "import os; from os.path import *; result=%s" variables = {'__file__': module_path} try: exec_function(c % code, variables) except Exception: debug.warning('sys.path manipulation detected, but failed to evaluate.') else: try: res = variables['result'] if isinstance(res, str): return [os.path.abspath(res)] except KeyError: pass return [] def _paths_from_assignment(evaluator, expr_stmt): """ Extracts the assigned strings from an assignment that looks as follows:: >>> sys.path[0:0] = ['module/path', 'another/module/path'] This function is in general pretty tolerant (and therefore 'buggy'). 
However, it's not a big issue usually to add more paths to Jedi's sys_path, because it will only affect Jedi in very random situations and by adding more paths than necessary, it usually benefits the general user. """ for assignee, operator in zip(expr_stmt.children[::2], expr_stmt.children[1::2]): try: assert operator in ['=', '+='] assert tree.is_node(assignee, 'power') and len(assignee.children) > 1 c = assignee.children assert c[0].type == 'name' and c[0].value == 'sys' trailer = c[1] assert trailer.children[0] == '.' and trailer.children[1].value == 'path' # TODO Essentially we're not checking details on sys.path # manipulation. Both assigment of the sys.path and changing/adding # parts of the sys.path are the same: They get added to the current # sys.path. """ execution = c[2] assert execution.children[0] == '[' subscript = execution.children[1] assert subscript.type == 'subscript' assert ':' in subscript.children """ except AssertionError: continue from jedi.evaluate.iterable import get_iterator_types from jedi.evaluate.precedence import is_string for val in get_iterator_types(evaluator.eval_statement(expr_stmt)): if is_string(val): yield val.obj def _paths_from_list_modifications(module_path, trailer1, trailer2): """ extract the path from either "sys.path.append" or "sys.path.insert" """ # Guarantee that both are trailers, the first one a name and the second one # a function execution with at least one param. if not (tree.is_node(trailer1, 'trailer') and trailer1.children[0] == '.' and tree.is_node(trailer2, 'trailer') and trailer2.children[0] == '(' and len(trailer2.children) == 3): return [] name = trailer1.children[1].value if name not in ['insert', 'append']: return [] arg = trailer2.children[1] if name == 'insert' and len(arg.children) in (3, 4): # Possible trailing comma. arg = arg.children[2] return _execute_code(module_path, arg.get_code()) def _check_module(evaluator, module): def get_sys_path_powers(names): for name in names: power = name.parent.parent if tree.is_node(power, 'power'): c = power.children if isinstance(c[0], tree.Name) and c[0].value == 'sys' \ and tree.is_node(c[1], 'trailer'): n = c[1].children[1] if isinstance(n, tree.Name) and n.value == 'path': yield name, power sys_path = list(get_sys_path()) # copy try: possible_names = module.used_names['path'] except KeyError: pass else: for name, power in get_sys_path_powers(possible_names): stmt = name.get_definition() if len(power.children) >= 4: sys_path.extend(_paths_from_list_modifications(module.path, *power.children[2:4])) elif name.get_definition().type == 'expr_stmt': sys_path.extend(_paths_from_assignment(evaluator, stmt)) return sys_path @memoize_default(evaluator_is_first_arg=True, default=[]) def sys_path_with_modifications(evaluator, module): if module.path is None: # Support for modules without a path is bad, therefore return the # normal path. 
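        # (A Script created directly from source, without a path, is a typical
        # case where ``module.path`` is None.)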
return list(get_sys_path()) curdir = os.path.abspath(os.curdir) with common.ignored(OSError): os.chdir(os.path.dirname(module.path)) buildout_script_paths = set() result = _check_module(evaluator, module) result += _detect_django_path(module.path) for buildout_script in _get_buildout_scripts(module.path): for path in _get_paths_from_buildout_script(evaluator, buildout_script): buildout_script_paths.add(path) # cleanup, back to old directory os.chdir(curdir) return list(result) + list(buildout_script_paths) def _get_paths_from_buildout_script(evaluator, buildout_script): def load(buildout_script): try: with open(buildout_script, 'rb') as f: source = common.source_to_unicode(f.read()) except IOError: debug.dbg('Error trying to read buildout_script: %s', buildout_script) return p = Parser(evaluator.grammar, source, buildout_script) cache.save_parser(buildout_script, p) return p.module cached = cache.load_parser(buildout_script) module = cached and cached.module or load(buildout_script) if not module: return for path in _check_module(evaluator, module): yield path def traverse_parents(path): while True: new = os.path.dirname(path) if new == path: return path = new yield path def _get_parent_dir_with_file(path, filename): for parent in traverse_parents(path): if os.path.isfile(os.path.join(parent, filename)): return parent return None def _detect_django_path(module_path): """ Detects the path of the very well known Django library (if used) """ result = [] for parent in traverse_parents(module_path): with common.ignored(IOError): with open(parent + os.path.sep + 'manage.py'): debug.dbg('Found django path: %s', module_path) result.append(parent) return result def _get_buildout_scripts(module_path): """ if there is a 'buildout.cfg' file in one of the parent directories of the given module it will return a list of all files in the buildout bin directory that look like python files. :param module_path: absolute path to the module. :type module_path: str """ project_root = _get_parent_dir_with_file(module_path, 'buildout.cfg') if not project_root: return [] bin_path = os.path.join(project_root, 'bin') if not os.path.exists(bin_path): return [] extra_module_paths = [] for filename in os.listdir(bin_path): try: filepath = os.path.join(bin_path, filename) with open(filepath, 'r') as f: firstline = f.readline() if firstline.startswith('#!') and 'python' in firstline: extra_module_paths.append(filepath) except IOError as e: # either permission error or race cond. because file got deleted # ignore debug.warning(unicode(e)) continue return extra_module_paths jedi-0.9.0/jedi/evaluate/finder.py0000664000175000017500000005144112517736533017220 0ustar daviddavid00000000000000""" Searching for names with given scope and name. This is very central in Jedi and Python. The name resolution is quite complicated with descripter, ``__getattribute__``, ``__getattr__``, ``global``, etc. Flow checks +++++++++++ Flow checks are not really mature. There's only a check for ``isinstance``. It would check whether a flow has the form of ``if isinstance(a, type_or_tuple)``. Unfortunately every other thing is being ignored (e.g. a == '' would be easy to check for -> a is a string). There's big potential in these checks. 
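A rough sketch of the only check that exists today (``a`` is a made-up name)::

    if isinstance(a, str):
        a.  # completion here is narrowed down to ``str`` attributes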
""" from itertools import chain from jedi._compatibility import unicode, u from jedi.parser import tree from jedi import debug from jedi import common from jedi import settings from jedi.evaluate import representation as er from jedi.evaluate import dynamic from jedi.evaluate import compiled from jedi.evaluate import docstrings from jedi.evaluate import iterable from jedi.evaluate import imports from jedi.evaluate import analysis from jedi.evaluate import flow_analysis from jedi.evaluate import param from jedi.evaluate import helpers from jedi.evaluate.cache import memoize_default def filter_after_position(names, position): """ Removes all names after a certain position. If position is None, just returns the names list. """ if position is None: return names names_new = [] for n in names: # Filter positions and also allow list comprehensions and lambdas. if n.start_pos[0] is not None and n.start_pos < position \ or isinstance(n.get_definition(), (tree.CompFor, tree.Lambda)): names_new.append(n) return names_new def filter_definition_names(names, origin, position=None): """ Filter names that are actual definitions in a scope. Names that are just used will be ignored. """ # Just calculate the scope from the first stmt = names[0].get_definition() scope = stmt.get_parent_scope() if not (isinstance(scope, er.FunctionExecution) and isinstance(scope.base, er.LambdaWrapper)): names = filter_after_position(names, position) names = [name for name in names if name.is_definition()] # Private name mangling (compile.c) disallows access on names # preceeded by two underscores `__` if used outside of the class. Names # that also end with two underscores (e.g. __id__) are not affected. for name in list(names): if name.value.startswith('__') and not name.value.endswith('__'): if filter_private_variable(scope, origin): names.remove(name) return names class NameFinder(object): def __init__(self, evaluator, scope, name_str, position=None): self._evaluator = evaluator # Make sure that it's not just a syntax tree node. self.scope = evaluator.wrap(scope) self.name_str = name_str self.position = position @debug.increase_indent def find(self, scopes, search_global=False): # TODO rename scopes to names_dicts names = self.filter_name(scopes) types = self._names_to_types(names, search_global) if not names and not types \ and not (isinstance(self.name_str, tree.Name) and isinstance(self.name_str.parent.parent, tree.Param)): if not isinstance(self.name_str, (str, unicode)): # TODO Remove? if search_global: message = ("NameError: name '%s' is not defined." % self.name_str) analysis.add(self._evaluator, 'name-error', self.name_str, message) else: analysis.add_attribute_error(self._evaluator, self.scope, self.name_str) debug.dbg('finder._names_to_types: %s -> %s', names, types) return types def scopes(self, search_global=False): if search_global: return global_names_dict_generator(self._evaluator, self.scope, self.position) else: return ((n, None) for n in self.scope.names_dicts(search_global)) def names_dict_lookup(self, names_dict, position): def get_param(scope, el): if isinstance(el.get_parent_until(tree.Param), tree.Param): return scope.param_by_name(str(el)) return el search_str = str(self.name_str) try: names = names_dict[search_str] if not names: # We want names, otherwise stop. return [] except KeyError: return [] names = filter_definition_names(names, self.name_str, position) name_scope = None # Only the names defined in the last position are valid definitions. 
last_names = [] for name in reversed(sorted(names, key=lambda name: name.start_pos)): stmt = name.get_definition() name_scope = self._evaluator.wrap(stmt.get_parent_scope()) if isinstance(self.scope, er.Instance) and not isinstance(name_scope, er.Instance): # Instances should not be checked for positioning, because we # don't know in which order the functions are called. last_names.append(name) continue if isinstance(name_scope, compiled.CompiledObject): # Let's test this. TODO need comment. shouldn't this be # filtered before? last_names.append(name) continue if isinstance(name, compiled.CompiledName) \ or isinstance(name, er.InstanceName) and isinstance(name._origin_name, compiled.CompiledName): last_names.append(name) continue if isinstance(self.name_str, tree.Name): origin_scope = self.name_str.get_parent_until(tree.Scope, reverse=True) else: origin_scope = None if isinstance(stmt.parent, compiled.CompiledObject): # TODO seriously? this is stupid. continue check = flow_analysis.break_check(self._evaluator, name_scope, stmt, origin_scope) if check is not flow_analysis.UNREACHABLE: last_names.append(name) if check is flow_analysis.REACHABLE: break if isinstance(name_scope, er.FunctionExecution): # Replace params return [get_param(name_scope, n) for n in last_names] return last_names def filter_name(self, names_dicts): """ Searches names that are defined in a scope (the different `names_dicts`), until a name fits. """ names = [] for names_dict, position in names_dicts: names = self.names_dict_lookup(names_dict, position) if names: break debug.dbg('finder.filter_name "%s" in (%s): %s@%s', self.name_str, self.scope, u(names), self.position) return list(self._clean_names(names)) def _clean_names(self, names): """ ``NameFinder.filter_name`` should only output names with correct wrapper parents. We don't want to see AST classes out in the evaluation, so remove them already here! """ for n in names: definition = n.parent if isinstance(definition, (tree.Function, tree.Class, tree.Module)): yield self._evaluator.wrap(definition).name else: yield n def _check_getattr(self, inst): """Checks for both __getattr__ and __getattribute__ methods""" result = [] # str is important, because it shouldn't be `Name`! name = compiled.create(self._evaluator, str(self.name_str)) with common.ignored(KeyError): result = inst.execute_subscope_by_name('__getattr__', name) if not result: # this is a little bit special. `__getattribute__` is executed # before anything else. But: I know no use case, where this # could be practical and the jedi would return wrong types. If # you ever have something, let me know! with common.ignored(KeyError): result = inst.execute_subscope_by_name('__getattribute__', name) return result def _names_to_types(self, names, search_global): types = [] # Add isinstance and other if/assert knowledge. if isinstance(self.name_str, tree.Name): # Ignore FunctionExecution parents for now. 
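            # Walk outwards from the name through the surrounding flows
            # (if/while/assert) and ask each one for isinstance information.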
flow_scope = self.name_str until = flow_scope.get_parent_until(er.FunctionExecution) while not isinstance(until, er.FunctionExecution): flow_scope = flow_scope.get_parent_scope(include_flows=True) if flow_scope is None: break # TODO check if result is in scope -> no evaluation necessary n = check_flow_information(self._evaluator, flow_scope, self.name_str, self.position) if n: return n for name in names: new_types = _name_to_types(self._evaluator, name, self.scope) if isinstance(self.scope, (er.Class, er.Instance)) and not search_global: types += self._resolve_descriptors(name, new_types) else: types += new_types if not names and isinstance(self.scope, er.Instance): # handling __getattr__ / __getattribute__ types = self._check_getattr(self.scope) return types def _resolve_descriptors(self, name, types): # The name must not be in the dictionary, but part of the class # definition. __get__ is only called if the descriptor is defined in # the class dictionary. name_scope = name.get_definition().get_parent_scope() if not isinstance(name_scope, (er.Instance, tree.Class)): return types result = [] for r in types: try: desc_return = r.get_descriptor_returns except AttributeError: result.append(r) else: result += desc_return(self.scope) return result @memoize_default([], evaluator_is_first_arg=True) def _name_to_types(evaluator, name, scope): types = [] typ = name.get_definition() if typ.isinstance(tree.ForStmt): for_types = evaluator.eval_element(typ.children[3]) for_types = iterable.get_iterator_types(for_types) types += check_tuple_assignments(for_types, name) elif typ.isinstance(tree.CompFor): for_types = evaluator.eval_element(typ.children[3]) for_types = iterable.get_iterator_types(for_types) types += check_tuple_assignments(for_types, name) elif isinstance(typ, tree.Param): types += _eval_param(evaluator, typ, scope) elif typ.isinstance(tree.ExprStmt): types += _remove_statements(evaluator, typ, name) elif typ.isinstance(tree.WithStmt): types += evaluator.eval_element(typ.node_from_name(name)) elif isinstance(typ, tree.Import): types += imports.ImportWrapper(evaluator, name).follow() elif isinstance(typ, tree.GlobalStmt): # TODO theoretically we shouldn't be using search_global here, it # doesn't make sense, because it's a local search (for that name)! # However, globals are not that important and resolving them doesn't # guarantee correctness in any way, because we don't check for when # something is executed. types += evaluator.find_types(typ.get_parent_scope(), str(name), search_global=True) elif isinstance(typ, tree.TryStmt): # TODO an exception can also be a tuple. Check for those. # TODO check for types that are not classes and add it to # the static analysis report. exceptions = evaluator.eval_element(name.prev_sibling().prev_sibling()) types = list(chain.from_iterable( evaluator.execute(t) for t in exceptions)) else: if typ.isinstance(er.Function): typ = typ.get_decorated_func() types.append(typ) return types def _remove_statements(evaluator, stmt, name): """ This is the part where statements are being stripped. Due to lazy evaluation, statements like a = func; b = a; b() have to be evaluated. """ types = [] # Remove the statement docstr stuff for now, that has to be # implemented with the evaluator class. 
#if stmt.docstr: #res_new.append(stmt) check_instance = None if isinstance(stmt, er.InstanceElement) and stmt.is_class_var: check_instance = stmt.instance stmt = stmt.var types += evaluator.eval_statement(stmt, seek_name=name) if check_instance is not None: # class renames types = [er.get_instance_el(evaluator, check_instance, a, True) if isinstance(a, (er.Function, tree.Function)) else a for a in types] return types def _eval_param(evaluator, param, scope): res_new = [] func = param.get_parent_scope() cls = func.parent.get_parent_until((tree.Class, tree.Function)) from jedi.evaluate.param import ExecutedParam, Arguments if isinstance(cls, tree.Class) and param.position_nr == 0 \ and not isinstance(param, ExecutedParam): # This is where we add self - if it has never been # instantiated. if isinstance(scope, er.InstanceElement): res_new.append(scope.instance) else: inst = er.Instance(evaluator, evaluator.wrap(cls), Arguments(evaluator, ()), is_generated=True) res_new.append(inst) return res_new # Instances are typically faked, if the instance is not called from # outside. Here we check it for __init__ functions and return. if isinstance(func, er.InstanceElement) \ and func.instance.is_generated and str(func.name) == '__init__': param = func.var.params[param.position_nr] # Add docstring knowledge. doc_params = docstrings.follow_param(evaluator, param) if doc_params: return doc_params if isinstance(param, ExecutedParam): return res_new + param.eval(evaluator) else: # Param owns no information itself. res_new += dynamic.search_params(evaluator, param) if not res_new: if param.stars: t = 'tuple' if param.stars == 1 else 'dict' typ = evaluator.find_types(compiled.builtin, t)[0] res_new = evaluator.execute(typ) if param.default: res_new += evaluator.eval_element(param.default) return res_new def check_flow_information(evaluator, flow, search_name, pos): """ Try to find out the type of a variable just with the information that is given by the flows: e.g. It is also responsible for assert checks.:: if isinstance(k, str): k. # <- completion here ensures that `k` is a string. """ if not settings.dynamic_flow_information: return None result = [] if flow.is_scope(): # Check for asserts. try: names = reversed(flow.names_dict[search_name.value]) except (KeyError, AttributeError): names = [] for name in names: ass = name.get_parent_until(tree.AssertStmt) if isinstance(ass, tree.AssertStmt) and pos is not None and ass.start_pos < pos: result = _check_isinstance_type(evaluator, ass.assertion(), search_name) if result: break if isinstance(flow, (tree.IfStmt, tree.WhileStmt)): element = flow.children[1] result = _check_isinstance_type(evaluator, element, search_name) return result def _check_isinstance_type(evaluator, element, search_name): try: assert element.type == 'power' # this might be removed if we analyze and, etc assert len(element.children) == 2 first, trailer = element.children assert isinstance(first, tree.Name) and first.value == 'isinstance' assert trailer.type == 'trailer' and trailer.children[0] == '(' assert len(trailer.children) == 3 # arglist stuff arglist = trailer.children[1] args = param.Arguments(evaluator, arglist, trailer) lst = list(args.unpack()) # Disallow keyword arguments assert len(lst) == 2 and lst[0][0] is None and lst[1][0] is None name = lst[0][1][0] # first argument, values, first value # Do a simple get_code comparison. They should just have the same code, # and everything will be all right. 
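            # (e.g. for ``if isinstance(k, str): k.`` both the first argument
            # of the call and the completed name produce the code ``k``.)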
classes = lst[1][1][0] call = helpers.call_of_name(search_name) assert name.get_code() == call.get_code() except AssertionError: return [] result = [] for typ in evaluator.eval_element(classes): for typ in (typ.values() if isinstance(typ, iterable.Array) else [typ]): result += evaluator.execute(typ) return result def global_names_dict_generator(evaluator, scope, position): """ For global name lookups. Yields tuples of (names_dict, position). If the position is None, the position does not matter anymore in that scope. This function is used to include names from outer scopes. For example, when the current scope is function: >>> from jedi._compatibility import u, no_unicode_pprint >>> from jedi.parser import Parser, load_grammar >>> parser = Parser(load_grammar(), u(''' ... x = ['a', 'b', 'c'] ... def func(): ... y = None ... ''')) >>> scope = parser.module.subscopes[0] >>> scope `global_names_dict_generator` is a generator. First it yields names from most inner scope. >>> from jedi.evaluate import Evaluator >>> evaluator = Evaluator(load_grammar()) >>> scope = evaluator.wrap(scope) >>> pairs = list(global_names_dict_generator(evaluator, scope, (4, 0))) >>> no_unicode_pprint(pairs[0]) ({'func': [], 'y': []}, (4, 0)) Then it yields the names from one level "lower". In this example, this is the most outer scope. As you can see, the position in the tuple is now None, because typically the whole module is loaded before the function is called. >>> no_unicode_pprint(pairs[1]) ({'func': [], 'x': []}, None) After that we have a few underscore names that are part of the module. >>> sorted(pairs[2][0].keys()) ['__doc__', '__file__', '__name__', '__package__'] >>> pairs[3] # global names -> there are none in our example. ({}, None) >>> pairs[4] # package modules -> Also none. ({}, None) Finally, it yields names from builtin, if `include_builtin` is true (default). >>> pairs[5][0].values() #doctest: +ELLIPSIS [[], ...] """ in_func = False while scope is not None: if not (scope.type == 'classdef' and in_func): # Names in methods cannot be resolved within the class. for names_dict in scope.names_dicts(True): yield names_dict, position if scope.type == 'funcdef': # The position should be reset if the current scope is a function. in_func = True position = None scope = evaluator.wrap(scope.get_parent_scope()) # Add builtins to the global scope. for names_dict in compiled.builtin.names_dicts(True): yield names_dict, None def check_tuple_assignments(types, name): """ Checks if tuples are assigned. """ for index in name.assignment_indexes(): new_types = [] for r in types: try: func = r.get_exact_index_types except AttributeError: debug.warning("Invalid tuple lookup #%s of result %s in %s", index, types, name) else: try: new_types += func(index) except IndexError: pass types = new_types return types def filter_private_variable(scope, origin_node): """Check if a variable is defined inside the same class or outside.""" instance = scope.get_parent_scope() coming_from = origin_node while coming_from is not None \ and not isinstance(coming_from, (tree.Class, compiled.CompiledObject)): coming_from = coming_from.get_parent_scope() # CompiledObjects don't have double underscore attributes, but Jedi abuses # those for fakes (builtins.pym -> list). 
if isinstance(instance, compiled.CompiledObject): return instance != coming_from else: return isinstance(instance, er.Instance) and instance.base.base != coming_from jedi-0.9.0/jedi/evaluate/param.py0000664000175000017500000004101712517736533017047 0ustar daviddavid00000000000000from collections import defaultdict from itertools import chain from jedi._compatibility import unicode, zip_longest from jedi import debug from jedi import common from jedi.parser import tree from jedi.evaluate import iterable from jedi.evaluate import analysis from jedi.evaluate import precedence from jedi.evaluate.helpers import FakeName from jedi.cache import underscore_memoization class Arguments(tree.Base): def __init__(self, evaluator, argument_node, trailer=None): """ The argument_node is either a parser node or a list of evaluated objects. Those evaluated objects may be lists of evaluated objects themselves (one list for the first argument, one for the second, etc). :param argument_node: May be an argument_node or a list of nodes. """ self.argument_node = argument_node self._evaluator = evaluator self.trailer = trailer # Can be None, e.g. in a class definition. def _split(self): if isinstance(self.argument_node, (tuple, list)): for el in self.argument_node: yield 0, el else: if not tree.is_node(self.argument_node, 'arglist'): yield 0, self.argument_node return iterator = iter(self.argument_node.children) for child in iterator: if child == ',': continue elif child in ('*', '**'): yield len(child.value), next(iterator) else: yield 0, child def get_parent_until(self, *args, **kwargs): if self.trailer is None: try: element = self.argument_node[0] from jedi.evaluate.iterable import AlreadyEvaluated if isinstance(element, AlreadyEvaluated): element = self._evaluator.eval_element(element)[0] except IndexError: return None else: return element.get_parent_until(*args, **kwargs) else: return self.trailer.get_parent_until(*args, **kwargs) def as_tuple(self): for stars, argument in self._split(): if tree.is_node(argument, 'argument'): argument, default = argument.children[::2] else: default = None yield argument, default, stars def unpack(self, func=None): named_args = [] for stars, el in self._split(): if stars == 1: arrays = self._evaluator.eval_element(el) iterators = [_iterate_star_args(self._evaluator, a, el, func) for a in arrays] iterators = list(iterators) for values in list(zip_longest(*iterators)): yield None, [v for v in values if v is not None] elif stars == 2: arrays = self._evaluator.eval_element(el) dicts = [_star_star_dict(self._evaluator, a, el, func) for a in arrays] for dct in dicts: for key, values in dct.items(): yield key, values else: if tree.is_node(el, 'argument'): c = el.children if len(c) == 3: # Keyword argument. named_args.append((c[0].value, (c[2],))) else: # Generator comprehension. # Include the brackets with the parent. comp = iterable.GeneratorComprehension( self._evaluator, self.argument_node.parent) yield None, (iterable.AlreadyEvaluated([comp]),) elif isinstance(el, (list, tuple)): yield None, el else: yield None, (el,) # Reordering var_args is necessary, because star args sometimes appear # after named argument, but in the actual order it's prepended. 
for key_arg in named_args: yield key_arg def _reorder_var_args(var_args): named_index = None new_args = [] for i, stmt in enumerate(var_args): if isinstance(stmt, tree.ExprStmt): if named_index is None and stmt.assignment_details: named_index = i if named_index is not None: expression_list = stmt.expression_list() if expression_list and expression_list[0] == '*': new_args.insert(named_index, stmt) named_index += 1 continue new_args.append(stmt) return new_args def eval_argument_clinic(self, arguments): """Uses a list with argument clinic information (see PEP 436).""" iterator = self.unpack() for i, (name, optional, allow_kwargs) in enumerate(arguments): key, va_values = next(iterator, (None, [])) if key is not None: raise NotImplementedError if not va_values and not optional: debug.warning('TypeError: %s expected at least %s arguments, got %s', name, len(arguments), i) raise ValueError values = list(chain.from_iterable(self._evaluator.eval_element(el) for el in va_values)) if not values and not optional: # For the stdlib we always want values. If we don't get them, # that's ok, maybe something is too hard to resolve, however, # we will not proceed with the evaluation of that function. debug.warning('argument_clinic "%s" not resolvable.', name) raise ValueError yield values def scope(self): # Returns the scope in which the arguments are used. return (self.trailer or self.argument_node).get_parent_until(tree.IsScope) def eval_args(self): # TODO this method doesn't work with named args and a lot of other # things. Use unpack. return [self._evaluator.eval_element(el) for stars, el in self._split()] def __repr__(self): return '<%s: %s>' % (type(self).__name__, self.argument_node) def get_calling_var_args(self): if tree.is_node(self.argument_node, 'arglist', 'argument') \ or self.argument_node == () and self.trailer is not None: return _get_calling_var_args(self._evaluator, self) else: return None class ExecutedParam(tree.Param): """Fake a param and give it values.""" def __init__(self, original_param, var_args, values): self._original_param = original_param self.var_args = var_args self._values = values def eval(self, evaluator): types = [] for v in self._values: types += evaluator.eval_element(v) return types @property def position_nr(self): # Need to use the original logic here, because it uses the parent. return self._original_param.position_nr @property @underscore_memoization def name(self): return FakeName(str(self._original_param.name), self, self.start_pos) def __getattr__(self, name): return getattr(self._original_param, name) def _get_calling_var_args(evaluator, var_args): old_var_args = None while var_args != old_var_args: old_var_args = var_args for name, default, stars in reversed(list(var_args.as_tuple())): if not stars or not isinstance(name, tree.Name): continue names = evaluator.goto(name) if len(names) != 1: break param = names[0].get_definition() if not isinstance(param, ExecutedParam): if isinstance(param, tree.Param): # There is no calling var_args in this case - there's just # a param without any input. return None break # We never want var_args to be a tuple. This should be enough for # now, we can change it later, if we need to. 
if isinstance(param.var_args, Arguments): var_args = param.var_args return var_args.argument_node or var_args.trailer def get_params(evaluator, func, var_args): param_names = [] param_dict = {} for param in func.params: param_dict[str(param.name)] = param unpacked_va = list(var_args.unpack(func)) from jedi.evaluate.representation import InstanceElement if isinstance(func, InstanceElement): # Include self at this place. unpacked_va.insert(0, (None, [iterable.AlreadyEvaluated([func.instance])])) var_arg_iterator = common.PushBackIterator(iter(unpacked_va)) non_matching_keys = defaultdict(lambda: []) keys_used = {} keys_only = False had_multiple_value_error = False for param in func.params: # The value and key can both be null. There, the defaults apply. # args / kwargs will just be empty arrays / dicts, respectively. # Wrong value count is just ignored. If you try to test cases that are # not allowed in Python, Jedi will maybe not show any completions. default = [] if param.default is None else [param.default] key, va_values = next(var_arg_iterator, (None, default)) while key is not None: keys_only = True k = unicode(key) try: key_param = param_dict[unicode(key)] except KeyError: non_matching_keys[key] += va_values else: param_names.append(ExecutedParam(key_param, var_args, va_values).name) if k in keys_used: had_multiple_value_error = True m = ("TypeError: %s() got multiple values for keyword argument '%s'." % (func.name, k)) calling_va = _get_calling_var_args(evaluator, var_args) if calling_va is not None: analysis.add(evaluator, 'type-error-multiple-values', calling_va, message=m) else: try: keys_used[k] = param_names[-1] except IndexError: # TODO this is wrong stupid and whatever. pass key, va_values = next(var_arg_iterator, (None, ())) values = [] if param.stars == 1: # *args param lst_values = [iterable.MergedNodes(va_values)] if va_values else [] for key, va_values in var_arg_iterator: # Iterate until a key argument is found. if key: var_arg_iterator.push_back((key, va_values)) break if va_values: lst_values.append(iterable.MergedNodes(va_values)) seq = iterable.FakeSequence(evaluator, lst_values, 'tuple') values = [iterable.AlreadyEvaluated([seq])] elif param.stars == 2: # **kwargs param dct = iterable.FakeDict(evaluator, dict(non_matching_keys)) values = [iterable.AlreadyEvaluated([dct])] non_matching_keys = {} else: # normal param if va_values: values = va_values else: # No value: Return an empty container values = [] if not keys_only: calling_va = var_args.get_calling_var_args() if calling_va is not None: m = _error_argument_count(func, len(unpacked_va)) analysis.add(evaluator, 'type-error-too-few-arguments', calling_va, message=m) # Now add to result if it's not one of the previously covered cases. if (not keys_only or param.stars == 2): param_names.append(ExecutedParam(param, var_args, values).name) keys_used[unicode(param.name)] = param_names[-1] if keys_only: # All arguments should be handed over to the next function. It's not # about the values inside, it's about the names. Jedi needs to now that # there's nothing to find for certain names. for k in set(param_dict) - set(keys_used): param = param_dict[k] values = [] if param.default is None else [param.default] param_names.append(ExecutedParam(param, var_args, values).name) if not (non_matching_keys or had_multiple_value_error or param.stars or param.default): # add a warning only if there's not another one. 
calling_va = _get_calling_var_args(evaluator, var_args) if calling_va is not None: m = _error_argument_count(func, len(unpacked_va)) analysis.add(evaluator, 'type-error-too-few-arguments', calling_va, message=m) for key, va_values in non_matching_keys.items(): m = "TypeError: %s() got an unexpected keyword argument '%s'." \ % (func.name, key) for value in va_values: analysis.add(evaluator, 'type-error-keyword-argument', value.parent, message=m) remaining_params = list(var_arg_iterator) if remaining_params: m = _error_argument_count(func, len(unpacked_va)) # Just report an error for the first param that is not needed (like # cPython). first_key, first_values = remaining_params[0] for v in first_values: if first_key is not None: # Is a keyword argument, return the whole thing instead of just # the value node. v = v.parent try: non_kw_param = keys_used[first_key] except KeyError: pass else: origin_args = non_kw_param.parent.var_args.argument_node # TODO calculate the var_args tree and check if it's in # the tree (if not continue). # print('\t\tnonkw', non_kw_param.parent.var_args.argument_node, ) if origin_args not in [f.parent.parent for f in first_values]: continue analysis.add(evaluator, 'type-error-too-many-arguments', v, message=m) return param_names def _iterate_star_args(evaluator, array, input_node, func=None): from jedi.evaluate.representation import Instance if isinstance(array, iterable.Array): for field_stmt in array: # yield from plz! yield field_stmt elif isinstance(array, iterable.Generator): for field_stmt in array.iter_content(): yield iterable.AlreadyEvaluated([field_stmt]) elif isinstance(array, Instance) and array.name.get_code() == 'tuple': debug.warning('Ignored a tuple *args input %s' % array) else: if func is not None: m = "TypeError: %s() argument after * must be a sequence, not %s" \ % (func.name.value, array) analysis.add(evaluator, 'type-error-star', input_node, message=m) def _star_star_dict(evaluator, array, input_node, func): dct = defaultdict(lambda: []) from jedi.evaluate.representation import Instance if isinstance(array, Instance) and array.name.get_code() == 'dict': # For now ignore this case. In the future add proper iterators and just # make one call without crazy isinstance checks. return {} if isinstance(array, iterable.FakeDict): return array._dct elif isinstance(array, iterable.Array) and array.type == 'dict': # TODO bad call to non-public API for key_node, values in array._items(): for key in evaluator.eval_element(key_node): if precedence.is_string(key): dct[key.obj] += values else: if func is not None: m = "TypeError: %s argument after ** must be a mapping, not %s" \ % (func.name.value, array) analysis.add(evaluator, 'type-error-star-star', input_node, message=m) return dict(dct) def _error_argument_count(func, actual_count): default_arguments = sum(1 for p in func.params if p.default or p.stars) if default_arguments == 0: before = 'exactly ' else: before = 'from %s to ' % (len(func.params) - default_arguments) return ('TypeError: %s() takes %s%s arguments (%s given).' % (func.name, before, len(func.params), actual_count)) jedi-0.9.0/jedi/evaluate/dynamic.py0000664000175000017500000001151012517736533017366 0ustar daviddavid00000000000000""" One of the really important features of |jedi| is to have an option to understand code like this:: def foo(bar): bar. # completion here foo(1) There's no doubt wheter bar is an ``int`` or not, but if there's also a call like ``foo('str')``, what would happen? Well, we'll just show both. 
Because that's what a human would expect. It works as follows: - |Jedi| sees a param - search for function calls named ``foo`` - execute these calls and check the input. This work with a ``ParamListener``. """ from itertools import chain from jedi._compatibility import unicode from jedi.parser import tree from jedi import settings from jedi import debug from jedi.evaluate.cache import memoize_default from jedi.evaluate import imports class ParamListener(object): """ This listener is used to get the params for a function. """ def __init__(self): self.param_possibilities = [] def execute(self, params): self.param_possibilities += params @debug.increase_indent def search_params(evaluator, param): """ A dynamic search for param values. If you try to complete a type: >>> def func(foo): ... foo >>> func(1) >>> func("") It is not known what the type ``foo`` without analysing the whole code. You have to look for all calls to ``func`` to find out what ``foo`` possibly is. """ if not settings.dynamic_params: return [] func = param.get_parent_until(tree.Function) debug.dbg('Dynamic param search for %s in %s.', param, str(func.name)) # Compare the param names. names = [n for n in search_function_call(evaluator, func) if n.value == param.name.value] # Evaluate the ExecutedParams to types. result = list(chain.from_iterable(n.parent.eval(evaluator) for n in names)) debug.dbg('Dynamic param result %s', result) return result @memoize_default([], evaluator_is_first_arg=True) def search_function_call(evaluator, func): """ Returns a list of param names. """ from jedi.evaluate import representation as er def get_params_for_module(module): """ Returns the values of a param, or an empty array. """ @memoize_default([], evaluator_is_first_arg=True) def get_posibilities(evaluator, module, func_name): try: names = module.used_names[func_name] except KeyError: return [] for name in names: parent = name.parent if tree.is_node(parent, 'trailer'): parent = parent.parent trailer = None if tree.is_node(parent, 'power'): for t in parent.children[1:]: if t == '**': break if t.start_pos > name.start_pos and t.children[0] == '(': trailer = t break if trailer is not None: types = evaluator.goto_definition(name) # We have to remove decorators, because they are not the # "original" functions, this way we can easily compare. # At the same time we also have to remove InstanceElements. undec = [] for escope in types: if escope.isinstance(er.Function, er.Instance) \ and escope.decorates is not None: undec.append(escope.decorates) elif isinstance(escope, er.InstanceElement): undec.append(escope.var) else: undec.append(escope) if evaluator.wrap(compare) in undec: # Only if we have the correct function we execute # it, otherwise just ignore it. evaluator.eval_trailer(types, trailer) return listener.param_possibilities return get_posibilities(evaluator, module, func_name) current_module = func.get_parent_until() func_name = unicode(func.name) compare = func if func_name == '__init__': cls = func.get_parent_scope() if isinstance(cls, tree.Class): func_name = unicode(cls.name) compare = cls # add the listener listener = ParamListener() func.listeners.add(listener) try: result = [] # This is like backtracking: Get the first possible result. for mod in imports.get_modules_containing_name(evaluator, [current_module], func_name): result = get_params_for_module(mod) if result: break finally: # cleanup: remove the listener; important: should not stick. 
func.listeners.remove(listener) return result jedi-0.9.0/jedi/evaluate/imports.py0000664000175000017500000004660512517736533017454 0ustar daviddavid00000000000000""" :mod:`jedi.evaluate.imports` is here to resolve import statements and return the modules/classes/functions/whatever, which they stand for. However there's not any actual importing done. This module is about finding modules in the filesystem. This can be quite tricky sometimes, because Python imports are not always that simple. This module uses imp for python up to 3.2 and importlib for python 3.3 on; the correct implementation is delegated to _compatibility. This module also supports import autocompletion, which means to complete statements like ``from datetim`` (curser at the end would return ``datetime``). """ import imp import os import pkgutil import sys from itertools import chain from jedi._compatibility import find_module, unicode from jedi import common from jedi import debug from jedi import cache from jedi.parser import fast from jedi.parser import tree from jedi.evaluate import sys_path from jedi.evaluate import helpers from jedi import settings from jedi.common import source_to_unicode from jedi.evaluate import compiled from jedi.evaluate import analysis from jedi.evaluate.cache import memoize_default, NO_DEFAULT def completion_names(evaluator, imp, pos): name = imp.name_for_position(pos) module = evaluator.wrap(imp.get_parent_until()) if name is None: level = 0 for node in imp.children: if node.end_pos <= pos: if node in ('.', '...'): level += len(node.value) import_path = [] else: # Completion on an existing name. # The import path needs to be reduced by one, because we're completing. import_path = imp.path_for_name(name)[:-1] level = imp.level importer = Importer(evaluator, tuple(import_path), module, level) if isinstance(imp, tree.ImportFrom): c = imp.children only_modules = c[c.index('import')].start_pos >= pos else: only_modules = True return importer.completion_names(evaluator, only_modules) class ImportWrapper(tree.Base): def __init__(self, evaluator, name): self._evaluator = evaluator self._name = name self._import = name.get_parent_until(tree.Import) self.import_path = self._import.path_for_name(name) @memoize_default() def follow(self, is_goto=False): if self._evaluator.recursion_detector.push_stmt(self._import): # check recursion return [] try: module = self._evaluator.wrap(self._import.get_parent_until()) import_path = self._import.path_for_name(self._name) from_import_name = None try: from_names = self._import.get_from_names() except AttributeError: # Is an import_name pass else: if len(from_names) + 1 == len(import_path): # We have to fetch the from_names part first and then check # if from_names exists in the modules. 
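                    # (e.g. for ``from os import path`` the importer resolves
                    # ``os`` first and then looks ``path`` up inside it.)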
from_import_name = import_path[-1] import_path = from_names importer = Importer(self._evaluator, tuple(import_path), module, self._import.level) types = importer.follow() #if self._import.is_nested() and not self.nested_resolve: # scopes = [NestedImportModule(module, self._import)] if from_import_name is not None: types = list(chain.from_iterable( self._evaluator.find_types(t, unicode(from_import_name), is_goto=is_goto) for t in types)) if not types: path = import_path + [from_import_name] importer = Importer(self._evaluator, tuple(path), module, self._import.level) types = importer.follow() # goto only accepts `Name` if is_goto: types = [s.name for s in types] else: # goto only accepts `Name` if is_goto: types = [s.name for s in types] debug.dbg('after import: %s', types) finally: self._evaluator.recursion_detector.pop_stmt() return types class NestedImportModule(tree.Module): """ TODO while there's no use case for nested import module right now, we might be able to use them for static analysis checks later on. """ def __init__(self, module, nested_import): self._module = module self._nested_import = nested_import def _get_nested_import_name(self): """ Generates an Import statement, that can be used to fake nested imports. """ i = self._nested_import # This is not an existing Import statement. Therefore, set position to # 0 (0 is not a valid line number). zero = (0, 0) names = [unicode(name) for name in i.namespace_names[1:]] name = helpers.FakeName(names, self._nested_import) new = tree.Import(i._sub_module, zero, zero, name) new.parent = self._module debug.dbg('Generated a nested import: %s', new) return helpers.FakeName(str(i.namespace_names[1]), new) def __getattr__(self, name): return getattr(self._module, name) def __repr__(self): return "<%s: %s of %s>" % (self.__class__.__name__, self._module, self._nested_import) def _add_error(evaluator, name, message=None): if hasattr(name, 'parent'): # Should be a name, not a string! analysis.add(evaluator, 'import-error', name, message) def get_init_path(directory_path): """ The __init__ file can be searched in a directory. If found return it, else None. """ for suffix, _, _ in imp.get_suffixes(): path = os.path.join(directory_path, '__init__' + suffix) if os.path.exists(path): return path return None class Importer(object): def __init__(self, evaluator, import_path, module, level=0): """ An implementation similar to ``__import__``. Use `follow` to actually follow the imports. *level* specifies whether to use absolute or relative imports. 0 (the default) means only perform absolute imports. Positive values for level indicate the number of parent directories to search relative to the directory of the module calling ``__import__()`` (see PEP 328 for the details). :param import_path: List of namespaces (strings or Names). """ debug.speed('import %s' % (import_path,)) self._evaluator = evaluator self.level = level self.module = module try: self.file_path = module.py__file__() except AttributeError: # Can be None for certain compiled modules like 'builtins'. self.file_path = None if level: base = module.py__package__().split('.') if base == ['']: base = [] if level > len(base): path = module.py__file__() import_path = list(import_path) for i in range(level): path = os.path.dirname(path) dir_name = os.path.basename(path) # This is not the proper way to do relative imports. However, since # Jedi cannot be sure about the entry point, we just calculate an # absolute path here. 
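                # (i.e. ``os.path.dirname`` is applied ``level`` times to the
                # module's file path and the basename of the result is
                # prepended to the import path.)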
if dir_name: import_path.insert(0, dir_name) else: _add_error(self._evaluator, import_path[-1]) import_path = [] # TODO add import error. debug.warning('Attempted relative import beyond top-level package.') else: # Here we basically rewrite the level to 0. import_path = tuple(base) + import_path self.import_path = import_path @property def str_import_path(self): """Returns the import path as pure strings instead of `Name`.""" return tuple(str(name) for name in self.import_path) @memoize_default() def sys_path_with_modifications(self): in_path = [] sys_path_mod = list(sys_path.sys_path_with_modifications(self._evaluator, self.module)) if self.file_path is not None: # If you edit e.g. gunicorn, there will be imports like this: # `from gunicorn import something`. But gunicorn is not in the # sys.path. Therefore look if gunicorn is a parent directory, #56. if self.import_path: # TODO is this check really needed? for path in sys_path.traverse_parents(self.file_path): if os.path.basename(path) == self.str_import_path[0]: in_path.append(os.path.dirname(path)) # Since we know nothing about the call location of the sys.path, # it's a possibility that the current directory is the origin of # the Python execution. sys_path_mod.insert(0, os.path.dirname(self.file_path)) return in_path + sys_path_mod @memoize_default(NO_DEFAULT) def follow(self): if not self.import_path: return [] return self._do_import(self.import_path, self.sys_path_with_modifications()) def _do_import(self, import_path, sys_path): """ This method is very similar to importlib's `_gcd_import`. """ import_parts = [str(i) for i in import_path] # Handle "magic" Flask extension imports: # ``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``. if len(import_path) > 2 and import_parts[:2] == ['flask', 'ext']: # New style. ipath = ('flask_' + str(import_parts[2]),) + import_path[3:] modules = self._do_import(ipath, sys_path) if modules: return modules else: # Old style return self._do_import(('flaskext',) + import_path[2:], sys_path) module_name = '.'.join(import_parts) try: return [self._evaluator.modules[module_name]] except KeyError: pass if len(import_path) > 1: # This is a recursive way of importing that works great with # the module cache. bases = self._do_import(import_path[:-1], sys_path) if not bases: return [] # We can take the first element, because only the os special # case yields multiple modules, which is not important for # further imports. base = bases[0] # This is a huge exception, we follow a nested import # ``os.path``, because it's a very important one in Python # that is being achieved by messing with ``sys.modules`` in # ``os``. if [str(i) for i in import_path] == ['os', 'path']: return self._evaluator.find_types(base, 'path') try: # It's possible that by giving it always the sys path (and not # the __path__ attribute of the parent, we get wrong results # and nested namespace packages don't work. But I'm not sure. paths = base.py__path__(sys_path) except AttributeError: # The module is not a package. _add_error(self._evaluator, import_path[-1]) return [] else: debug.dbg('search_module %s in paths %s', module_name, paths) for path in paths: # At the moment we are only using one path. So this is # not important to be correct. 
try: module_file, module_path, is_pkg = \ find_module(import_parts[-1], [path]) break except ImportError: module_path = None if module_path is None: _add_error(self._evaluator, import_path[-1]) return [] else: try: debug.dbg('search_module %s in %s', import_parts[-1], self.file_path) # Override the sys.path. It works only good that way. # Injecting the path directly into `find_module` did not work. sys.path, temp = sys_path, sys.path try: module_file, module_path, is_pkg = \ find_module(import_parts[-1]) finally: sys.path = temp except ImportError: # The module is not a package. _add_error(self._evaluator, import_path[-1]) return [] source = None if is_pkg: # In this case, we don't have a file yet. Search for the # __init__ file. module_path = get_init_path(module_path) elif module_file: source = module_file.read() module_file.close() if module_file is None and not module_path.endswith('.py'): module = compiled.load_module(module_path) else: module = _load_module(self._evaluator, module_path, source, sys_path) self._evaluator.modules[module_name] = module return [module] def _generate_name(self, name): return helpers.FakeName(name, parent=self.module) def _get_module_names(self, search_path=None): """ Get the names of all modules in the search_path. This means file names and not names defined in the files. """ names = [] # add builtin module names if search_path is None: names += [self._generate_name(name) for name in sys.builtin_module_names] if search_path is None: search_path = self.sys_path_with_modifications() for module_loader, name, is_pkg in pkgutil.iter_modules(search_path): names.append(self._generate_name(name)) return names def completion_names(self, evaluator, only_modules=False): """ :param only_modules: Indicates wheter it's possible to import a definition that is not defined in a module. """ from jedi.evaluate import finder names = [] if self.import_path: # flask if self.str_import_path == ('flask', 'ext'): # List Flask extensions like ``flask_foo`` for mod in self._get_module_names(): modname = str(mod) if modname.startswith('flask_'): extname = modname[len('flask_'):] names.append(self._generate_name(extname)) # Now the old style: ``flaskext.foo`` for dir in self.sys_path_with_modifications(): flaskext = os.path.join(dir, 'flaskext') if os.path.isdir(flaskext): names += self._get_module_names([flaskext]) for scope in self.follow(): # Non-modules are not completable. if not scope.type == 'file_input': # not a module continue # namespace packages if isinstance(scope, tree.Module) and scope.path.endswith('__init__.py'): paths = scope.py__path__(self.sys_path_with_modifications()) names += self._get_module_names(paths) if only_modules: # In the case of an import like `from x.` we don't need to # add all the variables. if ('os',) == self.str_import_path and not self.level: # os.path is a hardcoded exception, because it's a # ``sys.modules`` modification. 
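                        # (i.e. ``path`` is offered below ``os`` even though
                        # it is not a real submodule on disk.)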
names.append(self._generate_name('path')) continue for names_dict in scope.names_dicts(search_global=False): _names = list(chain.from_iterable(names_dict.values())) if not _names: continue _names = finder.filter_definition_names(_names, scope) names += _names else: # Empty import path=completion after import if not self.level: names += self._get_module_names() if self.file_path is not None: path = os.path.abspath(self.file_path) for i in range(self.level - 1): path = os.path.dirname(path) names += self._get_module_names([path]) return names def _load_module(evaluator, path=None, source=None, sys_path=None): def load(source): dotted_path = path and compiled.dotted_from_fs_path(path, sys_path) if path is not None and path.endswith('.py') \ and not dotted_path in settings.auto_import_modules: if source is None: with open(path, 'rb') as f: source = f.read() else: return compiled.load_module(path) p = path p = fast.FastParser(evaluator.grammar, common.source_to_unicode(source), p) cache.save_parser(path, p) return p.module cached = cache.load_parser(path) module = load(source) if cached is None else cached.module module = evaluator.wrap(module) return module def add_module(evaluator, module_name, module): if '.' not in module_name: # We cannot add paths with dots, because that would collide with # the sepatator dots for nested packages. Therefore we return # `__main__` in ModuleWrapper.py__name__(), which is similar to # Python behavior. evaluator.modules[module_name] = module def get_modules_containing_name(evaluator, mods, name): """ Search a name in the directories of modules. """ def check_python_file(path): try: return cache.parser_cache[path].parser.module except KeyError: try: return check_fs(path) except IOError: return None def check_fs(path): with open(path, 'rb') as f: source = source_to_unicode(f.read()) if name in source: module_name = os.path.basename(path)[:-3] # Remove `.py`. module = _load_module(evaluator, path, source) add_module(evaluator, module_name, module) return module # skip non python modules mods = set(m for m in mods if not isinstance(m, compiled.CompiledObject)) mod_paths = set() for m in mods: mod_paths.add(m.path) yield m if settings.dynamic_params_for_other_modules: paths = set(settings.additional_dynamic_modules) for p in mod_paths: if p is not None: d = os.path.dirname(p) for entry in os.listdir(d): if entry not in mod_paths: if entry.endswith('.py'): paths.add(d + os.path.sep + entry) for p in sorted(paths): # make testing easier, sort it - same results on every interpreter c = check_python_file(p) if c is not None and c not in mods and not isinstance(c, compiled.CompiledObject): yield c jedi-0.9.0/jedi/evaluate/iterable.py0000664000175000017500000005377612517736533017555 0ustar daviddavid00000000000000""" Contains all classes and functions to deal with lists, dicts, generators and iterators in general. Array modifications ******************* If the content of an array (``set``/``list``) is requested somewhere, the current module will be checked for appearances of ``arr.append``, ``arr.insert``, etc. If the ``arr`` name points to an actual array, the content will be added This can be really cpu intensive, as you can imagine. Because |jedi| has to follow **every** ``append`` and check wheter it's the right array. However this works pretty good, because in *slow* cases, the recursion detector and other settings will stop this process. It is important to note that: 1. Array modfications work only in the current module. 2. 
Jedi only checks Array additions; ``list.pop``, etc are ignored. """ from itertools import chain from jedi import common from jedi import debug from jedi import settings from jedi._compatibility import use_metaclass, is_py3, unicode from jedi.parser import tree from jedi.evaluate import compiled from jedi.evaluate import helpers from jedi.evaluate.cache import CachedMetaClass, memoize_default from jedi.evaluate import analysis def unite(iterable): """Turns a two dimensional array into a one dimensional.""" return list(chain.from_iterable(iterable)) class IterableWrapper(tree.Base): def is_class(self): return False class GeneratorMixin(object): @memoize_default() def names_dicts(self, search_global=False): # is always False dct = {} executes_generator = '__next__', 'send', 'next' for names in compiled.generator_obj.names_dict.values(): for name in names: if name.value in executes_generator: parent = GeneratorMethod(self, name.parent) dct[name.value] = [helpers.FakeName(name.name, parent, is_definition=True)] else: dct[name.value] = [name] yield dct def get_index_types(self, evaluator, index_array): #debug.warning('Tried to get array access on a generator: %s', self) analysis.add(self._evaluator, 'type-error-generator', index_array) return [] def get_exact_index_types(self, index): """ Exact lookups are used for tuple lookups, which are perfectly fine if used with generators. """ return [self.iter_content()[index]] def py__bool__(self): return True class Generator(use_metaclass(CachedMetaClass, IterableWrapper, GeneratorMixin)): """Handling of `yield` functions.""" def __init__(self, evaluator, func, var_args): super(Generator, self).__init__() self._evaluator = evaluator self.func = func self.var_args = var_args def iter_content(self): """ returns the content of __iter__ """ # Directly execute it, because with a normal call to py__call__ a # Generator will be returned. from jedi.evaluate.representation import FunctionExecution f = FunctionExecution(self._evaluator, self.func, self.var_args) return f.get_return_types(check_yields=True) def __getattr__(self, name): if name not in ['start_pos', 'end_pos', 'parent', 'get_imports', 'doc', 'docstr', 'get_parent_until', 'get_code', 'subscopes']: raise AttributeError("Accessing %s of %s is not allowed." % (self, name)) return getattr(self.func, name) def __repr__(self): return "<%s of %s>" % (type(self).__name__, self.func) class GeneratorMethod(IterableWrapper): """``__next__`` and ``send`` methods.""" def __init__(self, generator, builtin_func): self._builtin_func = builtin_func self._generator = generator def py__call__(self, evaluator, params): # TODO add TypeError if params are given. return self._generator.iter_content() def __getattr__(self, name): return getattr(self._builtin_func, name) class Comprehension(IterableWrapper): @staticmethod def from_atom(evaluator, atom): mapping = { '(': GeneratorComprehension, '[': ListComprehension } return mapping[atom.children[0]](evaluator, atom) def __init__(self, evaluator, atom): self._evaluator = evaluator self._atom = atom @memoize_default() def eval_node(self): """ The first part `x + 1` of the list comprehension: [x + 1 for x in foo] """ comprehension = self._atom.children[1] # For nested comprehensions we need to search the last one. 
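# ---------------------------------------------------------------------------
# Editor's illustrative sketch of the behaviour described in the module
# docstring above: because the ``arr.append(...)`` call in the same module is
# followed, completing on an element of ``arr`` should offer str methods.
# Assumes jedi is importable; Script(source, line, column, path) is the call
# signature documented in jedi/__init__.py.
import jedi

_source = '''arr = []
arr.append("hello")
arr[0].up'''
_script = jedi.Script(_source, 3, len('arr[0].up'), 'example.py')
# 'upper' is expected to be among the completion names printed here.
print([c.name for c in _script.completions()])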
last = comprehension.children[-1] last_comp = comprehension.children[1] while True: if isinstance(last, tree.CompFor): last_comp = last elif not tree.is_node(last, 'comp_if'): break last = last.children[-1] return helpers.deep_ast_copy(comprehension.children[0], parent=last_comp) def get_exact_index_types(self, index): return [self._evaluator.eval_element(self.eval_node())[index]] def __repr__(self): return "" % (type(self).__name__, self._atom) class ArrayMixin(object): @memoize_default() def names_dicts(self, search_global=False): # Always False. # `array.type` is a string with the type, e.g. 'list'. scope = self._evaluator.find_types(compiled.builtin, self.type)[0] # builtins only have one class -> [0] scope = self._evaluator.execute(scope, (AlreadyEvaluated((self,)),))[0] return scope.names_dicts(search_global) def py__bool__(self): return None # We don't know the length, because of appends. class ListComprehension(Comprehension, ArrayMixin): type = 'list' def get_index_types(self, evaluator, index): return self.iter_content() def iter_content(self): return self._evaluator.eval_element(self.eval_node()) @property def name(self): return FakeSequence(self._evaluator, [], 'list').name class GeneratorComprehension(Comprehension, GeneratorMixin): def iter_content(self): return self._evaluator.eval_element(self.eval_node()) class Array(IterableWrapper, ArrayMixin): mapping = {'(': 'tuple', '[': 'list', '{': 'dict'} def __init__(self, evaluator, atom): self._evaluator = evaluator self.atom = atom self.type = Array.mapping[atom.children[0]] """The builtin name of the array (list, set, tuple or dict).""" c = self.atom.children array_node = c[1] if self.type == 'dict' and array_node != '}' \ and (not hasattr(array_node, 'children') or ':' not in array_node.children): self.type = 'set' @property def name(self): return helpers.FakeName(self.type, parent=self) @memoize_default() def get_index_types(self, evaluator, index=()): """ Get the types of a specific index or all, if not given. :param index: A subscriptlist node (or subnode). """ indexes = create_indexes_or_slices(evaluator, index) lookup_done = False types = [] for index in indexes: if isinstance(index, Slice): types += [self] lookup_done = True elif isinstance(index, compiled.CompiledObject) \ and isinstance(index.obj, (int, str, unicode)): with common.ignored(KeyError, IndexError, TypeError): types += self.get_exact_index_types(index.obj) lookup_done = True return types if lookup_done else self.values() @memoize_default() def values(self): result = unite(self._evaluator.eval_element(v) for v in self._values()) result += check_array_additions(self._evaluator, self) return result def get_exact_index_types(self, mixed_index): """ Here the index is an int/str. Raises IndexError/KeyError """ if self.type == 'dict': for key, values in self._items(): # Because we only want the key to be a string. keys = self._evaluator.eval_element(key) for k in keys: if isinstance(k, compiled.CompiledObject) \ and mixed_index == k.obj: for value in values: return self._evaluator.eval_element(value) raise KeyError('No key found in dictionary %s.' 
% self) # Can raise an IndexError return self._evaluator.eval_element(self._items()[mixed_index]) def iter_content(self): return self.values() @common.safe_property def parent(self): return compiled.builtin def get_parent_until(self): return compiled.builtin def __getattr__(self, name): if name not in ['start_pos', 'get_only_subelement', 'parent', 'get_parent_until', 'items']: raise AttributeError('Strange access on %s: %s.' % (self, name)) return getattr(self.atom, name) def _values(self): """Returns a list of a list of node.""" if self.type == 'dict': return list(chain.from_iterable(v for k, v in self._items())) else: return self._items() def _items(self): c = self.atom.children array_node = c[1] if array_node in (']', '}', ')'): return [] # Direct closing bracket, doesn't contain items. if tree.is_node(array_node, 'testlist_comp'): return array_node.children[::2] elif tree.is_node(array_node, 'dictorsetmaker'): kv = [] iterator = iter(array_node.children) for key in iterator: op = next(iterator, None) if op is None or op == ',': kv.append(key) # A set. elif op == ':': # A dict. kv.append((key, [next(iterator)])) next(iterator, None) # Possible comma. else: raise NotImplementedError('dict/set comprehensions') return kv else: return [array_node] def __iter__(self): return iter(self._items()) def __repr__(self): return "<%s of %s>" % (type(self).__name__, self.atom) class _FakeArray(Array): def __init__(self, evaluator, container, type): self.type = type self._evaluator = evaluator self.atom = container class ImplicitTuple(_FakeArray): def __init__(self, evaluator, testlist): super(ImplicitTuple, self).__init__(evaluator, testlist, 'tuple') self._testlist = testlist def _items(self): return self._testlist.children[::2] class FakeSequence(_FakeArray): def __init__(self, evaluator, sequence_values, type): super(FakeSequence, self).__init__(evaluator, sequence_values, type) self._sequence_values = sequence_values def _items(self): return self._sequence_values def get_exact_index_types(self, index): value = self._sequence_values[index] return self._evaluator.eval_element(value) class AlreadyEvaluated(frozenset): """A simple container to add already evaluated objects to an array.""" def get_code(self): # For debugging purposes. return str(self) class MergedNodes(frozenset): pass class FakeDict(_FakeArray): def __init__(self, evaluator, dct): super(FakeDict, self).__init__(evaluator, dct, 'dict') self._dct = dct def get_exact_index_types(self, index): return list(chain.from_iterable(self._evaluator.eval_element(v) for v in self._dct[index])) def _items(self): return self._dct.items() class MergedArray(_FakeArray): def __init__(self, evaluator, arrays): super(MergedArray, self).__init__(evaluator, arrays, arrays[-1].type) self._arrays = arrays def get_exact_index_types(self, mixed_index): raise IndexError def values(self): return list(chain(*(a.values() for a in self._arrays))) def __iter__(self): for array in self._arrays: for a in array: yield a def __len__(self): return sum(len(a) for a in self._arrays) def get_iterator_types(inputs): """Returns the types of any iterator (arrays, yields, __iter__, etc).""" iterators = [] # Take the first statement (for has always only # one, remember `in`). And follow it. 
for it in inputs: if isinstance(it, (Generator, Array, ArrayInstance, Comprehension)): iterators.append(it) else: if not hasattr(it, 'execute_subscope_by_name'): debug.warning('iterator/for loop input wrong: %s', it) continue try: iterators += it.execute_subscope_by_name('__iter__') except KeyError: debug.warning('iterators: No __iter__ method found.') result = [] from jedi.evaluate.representation import Instance for it in iterators: if isinstance(it, Array): # Array is a little bit special, since this is an internal array, # but there's also the list builtin, which is another thing. result += it.values() elif isinstance(it, Instance): # __iter__ returned an instance. name = '__next__' if is_py3 else 'next' try: result += it.execute_subscope_by_name(name) except KeyError: debug.warning('Instance has no __next__ function in %s.', it) else: # TODO this is not correct, __iter__ can return arbitrary input! # Is a generator. result += it.iter_content() return result def check_array_additions(evaluator, array): """ Just a mapper function for the internal _check_array_additions """ if array.type not in ('list', 'set'): # TODO also check for dict updates return [] is_list = array.type == 'list' try: current_module = array.atom.get_parent_until() except AttributeError: # If there's no get_parent_until, it's a FakeSequence or another Fake # type. Those fake types are used inside Jedi's engine. No values may # be added to those after their creation. return [] return _check_array_additions(evaluator, array, current_module, is_list) @memoize_default([], evaluator_is_first_arg=True) def _check_array_additions(evaluator, compare_array, module, is_list): """ Checks if a `Array` has "add" (append, insert, extend) statements: >>> a = [""] >>> a.append(1) """ if not settings.dynamic_array_additions or isinstance(module, compiled.CompiledObject): return [] def check_additions(arglist, add_name): params = list(param.Arguments(evaluator, arglist).unpack()) result = [] if add_name in ['insert']: params = params[1:] if add_name in ['append', 'add', 'insert']: for key, nodes in params: result += unite(evaluator.eval_element(node) for node in nodes) elif add_name in ['extend', 'update']: for key, nodes in params: iterators = unite(evaluator.eval_element(node) for node in nodes) result += get_iterator_types(iterators) return result from jedi.evaluate import representation as er, param def get_execution_parent(element): """ Used to get an Instance/FunctionExecution parent """ if isinstance(element, Array): node = element.atom else: # Is an Instance with an # Arguments([AlreadyEvaluated([ArrayInstance])]) inside # Yeah... I know... It's complicated ;-) node = list(element.var_args.argument_node[0])[0].var_args.trailer if isinstance(node, er.InstanceElement): return node return node.get_parent_until(er.FunctionExecution) temp_param_add, settings.dynamic_params_for_other_modules = \ settings.dynamic_params_for_other_modules, False search_names = ['append', 'extend', 'insert'] if is_list else ['add', 'update'] comp_arr_parent = get_execution_parent(compare_array) added_types = [] for add_name in search_names: try: possible_names = module.used_names[add_name] except KeyError: continue else: for name in possible_names: # Check if the original scope is an execution. If it is, one # can search for the same statement, that is in the module # dict. Executions are somewhat special in jedi, since they # literally copy the contents of a function. 
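# ---------------------------------------------------------------------------
# Editor's illustrative runtime analogue of the argument handling in
# check_additions above (plain Python, not Jedi internals): for 'insert' the
# first argument is the index and is skipped, while 'extend'/'update'
# contribute the values obtained by iterating their argument.
def _added_values(add_name, args):
    if add_name == 'insert':
        args = args[1:]
    if add_name in ('append', 'add', 'insert'):
        return list(args)
    if add_name in ('extend', 'update'):
        return [value for arg in args for value in arg]
    return []

print(_added_values('insert', [0, 'first']))   # ['first']
print(_added_values('extend', [['a', 'b']]))   # ['a', 'b']
print(_added_values('pop', []))                # [] -- removals are ignored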
if isinstance(comp_arr_parent, er.FunctionExecution): if comp_arr_parent.start_pos < name.start_pos < comp_arr_parent.end_pos: name = comp_arr_parent.name_for_position(name.start_pos) else: # Don't check definitions that are not defined in the # same function. This is not "proper" anyway. It also # improves Jedi's speed for array lookups, since we # don't have to check the whole source tree anymore. continue trailer = name.parent power = trailer.parent trailer_pos = power.children.index(trailer) try: execution_trailer = power.children[trailer_pos + 1] except IndexError: continue else: if execution_trailer.type != 'trailer' \ or execution_trailer.children[0] != '(' \ or execution_trailer.children[1] == ')': continue power = helpers.call_of_name(name, cut_own_trailer=True) # InstanceElements are special, because they don't get copied, # but have this wrapper around them. if isinstance(comp_arr_parent, er.InstanceElement): power = er.get_instance_el(evaluator, comp_arr_parent.instance, power) if evaluator.recursion_detector.push_stmt(power): # Check for recursion. Possible by using 'extend' in # combination with function calls. continue if compare_array in evaluator.eval_element(power): # The arrays match. Now add the results added_types += check_additions(execution_trailer.children[1], add_name) evaluator.recursion_detector.pop_stmt() # reset settings settings.dynamic_params_for_other_modules = temp_param_add return added_types def check_array_instances(evaluator, instance): """Used for set() and list() instances.""" if not settings.dynamic_array_additions: return instance.var_args ai = ArrayInstance(evaluator, instance) from jedi.evaluate import param return param.Arguments(evaluator, [AlreadyEvaluated([ai])]) class ArrayInstance(IterableWrapper): """ Used for the usage of set() and list(). This is definitely a hack, but a good one :-) It makes it possible to use set/list conversions. In contrast to Array, ListComprehension and all other iterable types, this is something that is only used inside `evaluate/compiled/fake/builtins.py` and therefore doesn't need `names_dicts`, `py__bool__` and so on, because we don't use these operations in `builtins.py`. """ def __init__(self, evaluator, instance): self._evaluator = evaluator self.instance = instance self.var_args = instance.var_args def iter_content(self): """ The index is here just ignored, because of all the appends, etc. lists/sets are too complicated too handle that. """ items = [] for key, nodes in self.var_args.unpack(): for node in nodes: for typ in self._evaluator.eval_element(node): items += get_iterator_types([typ]) module = self.var_args.get_parent_until() is_list = str(self.instance.name) == 'list' items += _check_array_additions(self._evaluator, self.instance, module, is_list) return items class Slice(object): def __init__(self, evaluator, start, stop, step): self._evaluator = evaluator # all of them are either a Precedence or None. self._start = start self._stop = stop self._step = step @property def obj(self): """ Imitate CompiledObject.obj behavior and return a ``builtin.slice()`` object. """ def get(element): if element is None: return None result = self._evaluator.eval_element(element) if len(result) != 1: # We want slices to be clear defined with just one type. # Otherwise we will return an empty slice object. 
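# ---------------------------------------------------------------------------
# Editor's illustrative note on the Slice wrapper above: its `obj` property is
# meant to behave like a plain builtin slice, so an inferred subscript such as
# ``a[1:10:2]`` can be treated like the object below.
_s = slice(1, 10, 2)
print(list(range(20))[_s])   # [1, 3, 5, 7, 9]
print(_s.indices(20))        # (1, 10, 2) -- start, stop, step clamped to length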
raise IndexError try: return result[0].obj except AttributeError: return None try: return slice(get(self._start), get(self._stop), get(self._step)) except IndexError: return slice(None, None, None) def create_indexes_or_slices(evaluator, index): if tree.is_node(index, 'subscript'): # subscript is a slice operation. start, stop, step = None, None, None result = [] for el in index.children: if el == ':': if not result: result.append(None) elif tree.is_node(el, 'sliceop'): if len(el.children) == 2: result.append(el.children[1]) else: result.append(el) result += [None] * (3 - len(result)) return (Slice(evaluator, *result),) return evaluator.eval_element(index) jedi-0.9.0/jedi/evaluate/stdlib.py0000664000175000017500000002006112517736533017224 0ustar daviddavid00000000000000""" Implementations of standard library functions, because it's not possible to understand them with Jedi. To add a new implementation, create a function and add it to the ``_implemented`` dict at the bottom of this module. """ import collections import re from jedi._compatibility import unicode from jedi.evaluate import compiled from jedi.evaluate import representation as er from jedi.evaluate import iterable from jedi.parser import Parser from jedi.parser import tree from jedi import debug from jedi.evaluate import precedence from jedi.evaluate import param class NotInStdLib(LookupError): pass def execute(evaluator, obj, params): try: obj_name = str(obj.name) except AttributeError: pass else: if obj.parent == compiled.builtin: module_name = 'builtins' elif isinstance(obj.parent, tree.Module): module_name = str(obj.parent.name) else: module_name = '' # for now we just support builtin functions. try: return _implemented[module_name][obj_name](evaluator, obj, params) except KeyError: pass raise NotInStdLib() def _follow_param(evaluator, params, index): try: key, values = list(params.unpack())[index] except IndexError: return [] else: return iterable.unite(evaluator.eval_element(v) for v in values) def argument_clinic(string, want_obj=False, want_scope=False): """ Works like Argument Clinic (PEP 436), to validate function params. """ clinic_args = [] allow_kwargs = False optional = False while string: # Optional arguments have to begin with a bracket. And should always be # at the end of the arguments. This is therefore not a proper argument # clinic implementation. `range()` for exmple allows an optional start # value at the beginning. match = re.match('(?:(?:(\[),? 
?|, ?|)(\w+)|, ?/)\]*', string) string = string[len(match.group(0)):] if not match.group(2): # A slash -> allow named arguments allow_kwargs = True continue optional = optional or bool(match.group(1)) word = match.group(2) clinic_args.append((word, optional, allow_kwargs)) def f(func): def wrapper(evaluator, obj, arguments): try: lst = list(arguments.eval_argument_clinic(clinic_args)) except ValueError: return [] else: kwargs = {} if want_scope: kwargs['scope'] = arguments.scope() if want_obj: kwargs['obj'] = obj return func(evaluator, *lst, **kwargs) return wrapper return f @argument_clinic('object, name[, default], /') def builtins_getattr(evaluator, objects, names, defaults=None): types = [] # follow the first param for obj in objects: if not isinstance(obj, (er.Instance, er.Class, tree.Module, compiled.CompiledObject)): debug.warning('getattr called without instance') continue for name in names: if precedence.is_string(name): return evaluator.find_types(obj, name.obj) else: debug.warning('getattr called without str') continue return types @argument_clinic('object[, bases, dict], /') def builtins_type(evaluator, objects, bases, dicts): if bases or dicts: # metaclass... maybe someday... return [] else: return [o.base for o in objects if isinstance(o, er.Instance)] class SuperInstance(er.Instance): """To be used like the object ``super`` returns.""" def __init__(self, evaluator, cls): su = cls.py_mro()[1] super().__init__(evaluator, su and su[0] or self) @argument_clinic('[type[, obj]], /', want_scope=True) def builtins_super(evaluator, types, objects, scope): # TODO make this able to detect multiple inheritance super accept = (tree.Function, er.FunctionExecution) if scope.isinstance(*accept): wanted = (tree.Class, er.Instance) cls = scope.get_parent_until(accept + wanted, include_current=False) if isinstance(cls, wanted): if isinstance(cls, tree.Class): cls = er.Class(evaluator, cls) elif isinstance(cls, er.Instance): cls = cls.base su = cls.py__bases__(evaluator) if su: return evaluator.execute(su[0]) return [] @argument_clinic('sequence, /', want_obj=True) def builtins_reversed(evaluator, sequences, obj): # Unpack the iterator values objects = tuple(iterable.get_iterator_types(sequences)) rev = [iterable.AlreadyEvaluated([o]) for o in reversed(objects)] # Repack iterator values and then run it the normal way. This is # necessary, because `reversed` is a function and autocompletion # would fail in certain cases like `reversed(x).__iter__` if we # just returned the result directly. rev = iterable.AlreadyEvaluated( [iterable.FakeSequence(evaluator, rev, 'list')] ) return [er.Instance(evaluator, obj, param.Arguments(evaluator, [rev]))] @argument_clinic('obj, type, /') def builtins_isinstance(evaluator, objects, types): bool_results = set([]) for o in objects: try: mro_func = o.py__class__(evaluator).py__mro__ except AttributeError: # This is temporary. Everything should have a class attribute in # Python?! Maybe we'll leave it here, because some numpy objects or # whatever might not. return [compiled.true_obj, compiled.false_obj] mro = mro_func(evaluator) for cls_or_tup in types: if cls_or_tup.is_class(): bool_results.add(cls_or_tup in mro) else: # Check for tuples. classes = iterable.get_iterator_types([cls_or_tup]) bool_results.add(any(cls in mro for cls in classes)) return [compiled.keyword_from_value(x) for x in bool_results] def collections_namedtuple(evaluator, obj, params): """ Implementation of the namedtuple function. 
This has to be done by processing the namedtuple class template and evaluating the result. .. note:: |jedi| only supports namedtuples on Python >2.6. """ # Namedtuples are not supported on Python 2.6 if not hasattr(collections, '_class_template'): return [] # Process arguments name = _follow_param(evaluator, params, 0)[0].obj _fields = _follow_param(evaluator, params, 1)[0] if isinstance(_fields, compiled.CompiledObject): fields = _fields.obj.replace(',', ' ').split() elif isinstance(_fields, iterable.Array): try: fields = [v.obj for v in _fields.values()] except AttributeError: return [] else: return [] # Build source source = collections._class_template.format( typename=name, field_names=fields, num_fields=len(fields), arg_list=', '.join(fields), repr_fmt=', '.join(collections._repr_template.format(name=name) for name in fields), field_defs='\n'.join(collections._field_template.format(index=index, name=name) for index, name in enumerate(fields)) ) # Parse source generated_class = Parser(evaluator.grammar, unicode(source)).module.subscopes[0] return [er.Class(evaluator, generated_class)] @argument_clinic('first, /') def _return_first_param(evaluator, firsts): return firsts _implemented = { 'builtins': { 'getattr': builtins_getattr, 'type': builtins_type, 'super': builtins_super, 'reversed': builtins_reversed, 'isinstance': builtins_isinstance, }, 'copy': { 'copy': _return_first_param, 'deepcopy': _return_first_param, }, 'json': { 'load': lambda *args: [], 'loads': lambda *args: [], }, 'collections': { 'namedtuple': collections_namedtuple, }, } jedi-0.9.0/jedi/evaluate/cache.py0000664000175000017500000000376712517736533017024 0ustar daviddavid00000000000000""" - the popular ``memoize_default`` works like a typical memoize and returns the default otherwise. - ``CachedMetaClass`` uses ``memoize_default`` to do the same with classes. """ import inspect NO_DEFAULT = object() def memoize_default(default=NO_DEFAULT, evaluator_is_first_arg=False, second_arg_is_evaluator=False): """ This is a typical memoization decorator, BUT there is one difference: To prevent recursion it sets defaults. Preventing recursion is in this case the much bigger use than speed. I don't think, that there is a big speed difference, but there are many cases where recursion could happen (think about a = b; b = a). """ def func(function): def wrapper(obj, *args, **kwargs): if evaluator_is_first_arg: cache = obj.memoize_cache elif second_arg_is_evaluator: # needed for meta classes cache = args[0].memoize_cache else: cache = obj._evaluator.memoize_cache try: memo = cache[function] except KeyError: memo = {} cache[function] = memo key = (obj, args, frozenset(kwargs.items())) if key in memo: return memo[key] else: if default is not NO_DEFAULT: memo[key] = default rv = function(obj, *args, **kwargs) if inspect.isgenerator(rv): rv = list(rv) memo[key] = rv return rv return wrapper return func class CachedMetaClass(type): """ This is basically almost the same than the decorator above, it just caches class initializations. Either you do it this way or with decorators, but with decorators you lose class access (isinstance, etc). """ @memoize_default(None, second_arg_is_evaluator=True) def __call__(self, *args, **kwargs): return super(CachedMetaClass, self).__call__(*args, **kwargs) jedi-0.9.0/jedi/__init__.py0000664000175000017500000000315012517736533015674 0ustar daviddavid00000000000000""" Jedi is a static analysis tool for Python that can be used in IDEs/editors. 
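# ---------------------------------------------------------------------------
# Editor's minimal sketch of the recursion-breaking idea behind
# memoize_default in jedi/evaluate/cache.py above (simplified, without the
# evaluator plumbing): the default is stored *before* the wrapped call, so a
# re-entrant call with the same key returns the default instead of recursing
# forever.
_NO_DEFAULT = object()

def _memoize_default(default=_NO_DEFAULT):
    def decorator(function):
        memo = {}
        def wrapper(*args):
            if args in memo:
                return memo[args]
            if default is not _NO_DEFAULT:
                memo[args] = default
            memo[args] = result = function(*args)
            return result
        return wrapper
    return decorator

@_memoize_default(default=[])
def _follow(name):
    # Something like `a = b; b = a` would otherwise recurse without bound.
    return _follow(name)

print(_follow('a'))   # [] -- the pre-stored default stops the recursion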
Its historic focus is autocompletion, but does static analysis for now as well. Jedi is fast and is very well tested. It understands Python on a deeper level than all other static analysis frameworks for Python. Jedi has support for two different goto functions. It's possible to search for related names and to list all names in a Python file and infer them. Jedi understands docstrings and you can use Jedi autocompletion in your REPL as well. Jedi uses a very simple API to connect with IDE's. There's a reference implementation as a `VIM-Plugin `_, which uses Jedi's autocompletion. We encourage you to use Jedi in your IDEs. It's really easy. To give you a simple example how you can use the Jedi library, here is an example for the autocompletion feature: >>> import jedi >>> source = ''' ... import datetime ... datetime.da''' >>> script = jedi.Script(source, 3, len('datetime.da'), 'example.py') >>> script >>> completions = script.completions() >>> completions #doctest: +ELLIPSIS [, , ...] >>> print(completions[0].complete) te >>> print(completions[0].name) date As you see Jedi is pretty simple and allows you to concentrate on writing a good text editor, while still having very good IDE features for Python. """ __version__ = '0.9.0' from jedi.api import Script, Interpreter, NotFoundError, set_debug_function from jedi.api import preload_module, defined_names, names from jedi import settings jedi-0.9.0/jedi/__main__.py0000664000175000017500000000245212363566726015665 0ustar daviddavid00000000000000from sys import argv from os.path import join, dirname, abspath, isdir if len(argv) == 2 and argv[1] == 'repl': # don't want to use __main__ only for repl yet, maybe we want to use it for # something else. So just use the keyword ``repl`` for now. print(join(dirname(abspath(__file__)), 'api', 'replstartup.py')) elif len(argv) > 1 and argv[1] == 'linter': """ This is a pre-alpha API. You're not supposed to use it at all, except for testing. It will very likely change. """ import jedi import sys if '--debug' in sys.argv: jedi.set_debug_function() for path in sys.argv[2:]: if path.startswith('--'): continue if isdir(path): import fnmatch import os paths = [] for root, dirnames, filenames in os.walk(path): for filename in fnmatch.filter(filenames, '*.py'): paths.append(os.path.join(root, filename)) else: paths = [path] try: for path in paths: for error in jedi.Script(path=path)._analysis(): print(error) except Exception: if '--pdb' in sys.argv: import pdb pdb.post_mortem() else: raise jedi-0.9.0/jedi/parser/0000775000175000017500000000000012517736563015063 5ustar daviddavid00000000000000jedi-0.9.0/jedi/parser/__init__.py0000664000175000017500000004064412517736533017201 0ustar daviddavid00000000000000""" The ``Parser`` tries to convert the available Python code in an easy to read format, something like an abstract syntax tree. The classes who represent this tree, are sitting in the :mod:`jedi.parser.tree` module. The Python module ``tokenize`` is a very important part in the ``Parser``, because it splits the code into different words (tokens). Sometimes it looks a bit messy. Sorry for that! You might ask now: "Why didn't you use the ``ast`` module for this? Well, ``ast`` does a very good job understanding proper Python code, but fails to work as soon as there's a single line of broken code. There's one important optimization that needs to be known: Statements are not being parsed completely. ``Statement`` is just a representation of the tokens within the statement. 
This lowers memory usage and cpu time and reduces the complexity of the ``Parser`` (there's another parser sitting inside ``Statement``, which produces ``Array`` and ``Call``). """ import os import re from jedi.parser import tree as pt from jedi.parser import tokenize from jedi.parser import token from jedi.parser.token import (DEDENT, INDENT, ENDMARKER, NEWLINE, NUMBER, STRING, OP, ERRORTOKEN) from jedi.parser.pgen2.pgen import generate_grammar from jedi.parser.pgen2.parse import PgenParser OPERATOR_KEYWORDS = 'and', 'for', 'if', 'else', 'in', 'is', 'lambda', 'not', 'or' # Not used yet. In the future I intend to add something like KeywordStatement STATEMENT_KEYWORDS = 'assert', 'del', 'global', 'nonlocal', 'raise', \ 'return', 'yield', 'pass', 'continue', 'break' _loaded_grammars = {} def load_grammar(file='grammar3.4'): # For now we only support two different Python syntax versions: The latest # Python 3 and Python 2. This may change. if file.startswith('grammar3'): file = 'grammar3.4' else: file = 'grammar2.7' global _loaded_grammars path = os.path.join(os.path.dirname(__file__), file) + '.txt' try: return _loaded_grammars[path] except KeyError: return _loaded_grammars.setdefault(path, generate_grammar(path)) class ErrorStatement(object): def __init__(self, stack, next_token, position_modifier, next_start_pos): self.stack = stack self._position_modifier = position_modifier self.next_token = next_token self._next_start_pos = next_start_pos @property def next_start_pos(self): s = self._next_start_pos return s[0] + self._position_modifier.line, s[1] @property def first_pos(self): first_type, nodes = self.stack[0] return nodes[0].start_pos @property def first_type(self): first_type, nodes = self.stack[0] return first_type class ParserSyntaxError(object): def __init__(self, message, position): self.message = message self.position = position class Parser(object): """ This class is used to parse a Python file, it then divides them into a class structure of different scopes. :param grammar: The grammar object of pgen2. Loaded by load_grammar. :param source: The codebase for the parser. Must be unicode. :param module_path: The path of the module in the file system, may be None. :type module_path: str :param top_module: Use this module as a parent instead of `self.module`. """ def __init__(self, grammar, source, module_path=None, tokenizer=None): self._ast_mapping = { 'expr_stmt': pt.ExprStmt, 'classdef': pt.Class, 'funcdef': pt.Function, 'file_input': pt.Module, 'import_name': pt.ImportName, 'import_from': pt.ImportFrom, 'break_stmt': pt.KeywordStatement, 'continue_stmt': pt.KeywordStatement, 'return_stmt': pt.ReturnStmt, 'raise_stmt': pt.KeywordStatement, 'yield_expr': pt.YieldExpr, 'del_stmt': pt.KeywordStatement, 'pass_stmt': pt.KeywordStatement, 'global_stmt': pt.GlobalStmt, 'nonlocal_stmt': pt.KeywordStatement, 'assert_stmt': pt.AssertStmt, 'if_stmt': pt.IfStmt, 'with_stmt': pt.WithStmt, 'for_stmt': pt.ForStmt, 'while_stmt': pt.WhileStmt, 'try_stmt': pt.TryStmt, 'comp_for': pt.CompFor, 'decorator': pt.Decorator, 'lambdef': pt.Lambda, 'old_lambdef': pt.Lambda, 'lambdef_nocond': pt.Lambda, } self.syntax_errors = [] self._global_names = [] self._omit_dedent_list = [] self._indent_counter = 0 self._last_failed_start_pos = (0, 0) # TODO do print absolute import detection here. #try: # del python_grammar_no_print_statement.keywords["print"] #except KeyError: # pass # Doesn't exist in the Python 3 grammar. 
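# ---------------------------------------------------------------------------
# Editor's usage sketch for the Parser described above (internal 0.9.x API,
# shown only as an illustration): load the bundled grammar once and parse a
# small unicode source string into a module node.
from jedi.parser import Parser, load_grammar

_grammar = load_grammar('grammar3.4')
_module = Parser(_grammar, u'x = 1\n').module
print(_module.type)               # 'file_input'
print(list(_module.used_names))   # includes 'x'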
#if self.options["print_function"]: # python_grammar = pygram.python_grammar_no_print_statement #else: self._used_names = {} self._scope_names_stack = [{}] self._error_statement_stacks = [] added_newline = False # The Python grammar needs a newline at the end of each statement. if not source.endswith('\n'): source += '\n' added_newline = True # For the fast parser. self.position_modifier = pt.PositionModifier() p = PgenParser(grammar, self.convert_node, self.convert_leaf, self.error_recovery) tokenizer = tokenizer or tokenize.source_tokens(source) self.module = p.parse(self._tokenize(tokenizer)) if self.module.type != 'file_input': # If there's only one statement, we get back a non-module. That's # not what we want, we want a module, so we add it here: self.module = self.convert_node(grammar, grammar.symbol2number['file_input'], [self.module]) if added_newline: self.remove_last_newline() self.module.used_names = self._used_names self.module.path = module_path self.module.global_names = self._global_names self.module.error_statement_stacks = self._error_statement_stacks def convert_node(self, grammar, type, children): """ Convert raw node information to a Node instance. This is passed to the parser driver which calls it whenever a reduction of a grammar rule produces a new complete node, so that the tree is build strictly bottom-up. """ symbol = grammar.number2symbol[type] try: new_node = self._ast_mapping[symbol](children) except KeyError: new_node = pt.Node(symbol, children) # We need to check raw_node always, because the same node can be # returned by convert multiple times. if symbol == 'global_stmt': self._global_names += new_node.get_global_names() elif isinstance(new_node, pt.Lambda): new_node.names_dict = self._scope_names_stack.pop() elif isinstance(new_node, (pt.ClassOrFunc, pt.Module)) \ and symbol in ('funcdef', 'classdef', 'file_input'): # scope_name_stack handling scope_names = self._scope_names_stack.pop() if isinstance(new_node, pt.ClassOrFunc): n = new_node.name scope_names[n.value].remove(n) # Set the func name of the current node arr = self._scope_names_stack[-1].setdefault(n.value, []) arr.append(n) new_node.names_dict = scope_names elif isinstance(new_node, pt.CompFor): # The name definitions of comprehenions shouldn't be part of the # current scope. They are part of the comprehension scope. 
for n in new_node.get_defined_names(): self._scope_names_stack[-1][n.value].remove(n) return new_node def convert_leaf(self, grammar, type, value, prefix, start_pos): #print('leaf', value, pytree.type_repr(type)) if type == tokenize.NAME: if value in grammar.keywords: if value in ('def', 'class', 'lambda'): self._scope_names_stack.append({}) return pt.Keyword(self.position_modifier, value, start_pos, prefix) else: name = pt.Name(self.position_modifier, value, start_pos, prefix) # Keep a listing of all used names arr = self._used_names.setdefault(name.value, []) arr.append(name) arr = self._scope_names_stack[-1].setdefault(name.value, []) arr.append(name) return name elif type == STRING: return pt.String(self.position_modifier, value, start_pos, prefix) elif type == NUMBER: return pt.Number(self.position_modifier, value, start_pos, prefix) elif type in (NEWLINE, ENDMARKER): return pt.Whitespace(self.position_modifier, value, start_pos, prefix) else: return pt.Operator(self.position_modifier, value, start_pos, prefix) def error_recovery(self, grammar, stack, typ, value, start_pos, prefix, add_token_callback): """ This parser is written in a dynamic way, meaning that this parser allows using different grammars (even non-Python). However, error recovery is purely written for Python. """ def current_suite(stack): # For now just discard everything that is not a suite or # file_input, if we detect an error. for index, (dfa, state, (typ, nodes)) in reversed(list(enumerate(stack))): # `suite` can sometimes be only simple_stmt, not stmt. symbol = grammar.number2symbol[typ] if symbol == 'file_input': break elif symbol == 'suite' and len(nodes) > 1: # suites without an indent in them get discarded. break elif symbol == 'simple_stmt' and len(nodes) > 1: # simple_stmt can just be turned into a Node, if there are # enough statements. Ignore the rest after that. break return index, symbol, nodes index, symbol, nodes = current_suite(stack) if symbol == 'simple_stmt': index -= 2 (_, _, (typ, suite_nodes)) = stack[index] symbol = grammar.number2symbol[typ] suite_nodes.append(pt.Node(symbol, list(nodes))) # Remove nodes[:] = [] nodes = suite_nodes stack[index] #print('err', token.tok_name[typ], repr(value), start_pos, len(stack), index) self._stack_removal(grammar, stack, index + 1, value, start_pos) if typ == INDENT: # For every deleted INDENT we have to delete a DEDENT as well. # Otherwise the parser will get into trouble and DEDENT too early. self._omit_dedent_list.append(self._indent_counter) if value in ('import', 'from', 'class', 'def', 'try', 'while', 'return'): # Those can always be new statements. add_token_callback(typ, value, prefix, start_pos) elif typ == DEDENT and symbol == 'suite': # Close the current suite, with DEDENT. # Note that this may cause some suites to not contain any # statements at all. This is contrary to valid Python syntax. We # keep incomplete suites in Jedi to be able to complete param names # or `with ... as foo` names. If we want to use this parser for # syntax checks, we have to check in a separate turn if suites # contain statements or not. However, a second check is necessary # anyway (compile.c does that for Python), because Python's grammar # doesn't stop you from defining `continue` in a module, etc. 
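# ---------------------------------------------------------------------------
# Editor's illustration of why the error recovery above exists (assumes jedi
# is importable): the def statement below is syntactically broken, yet
# completions on the earlier line should still work because the parser
# recovers instead of giving up on the whole file.
import jedi

_broken = '''import os
os.pa
def broken(:
'''
print([c.name for c in jedi.Script(_broken, 2, len('os.pa')).completions()][:3])
# 'path' is expected to be among the names printed.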
add_token_callback(typ, value, prefix, start_pos) def _stack_removal(self, grammar, stack, start_index, value, start_pos): def clear_names(children): for c in children: try: clear_names(c.children) except AttributeError: if isinstance(c, pt.Name): try: self._scope_names_stack[-1][c.value].remove(c) self._used_names[c.value].remove(c) except ValueError: pass # This may happen with CompFor. for dfa, state, node in stack[start_index:]: clear_names(children=node[1]) failed_stack = [] found = False for dfa, state, (typ, nodes) in stack[start_index:]: if nodes: found = True if found: symbol = grammar.number2symbol[typ] failed_stack.append((symbol, nodes)) if nodes and nodes[0] in ('def', 'class', 'lambda'): self._scope_names_stack.pop() if failed_stack: err = ErrorStatement(failed_stack, value, self.position_modifier, start_pos) self._error_statement_stacks.append(err) self._last_failed_start_pos = start_pos stack[start_index:] = [] def _tokenize(self, tokenizer): for typ, value, start_pos, prefix in tokenizer: #print(tokenize.tok_name[typ], repr(value), start_pos, repr(prefix)) if typ == DEDENT: # We need to count indents, because if we just omit any DEDENT, # we might omit them in the wrong place. o = self._omit_dedent_list if o and o[-1] == self._indent_counter: o.pop() continue self._indent_counter -= 1 elif typ == INDENT: self._indent_counter += 1 elif typ == ERRORTOKEN: self._add_syntax_error('Strange token', start_pos) continue if typ == OP: typ = token.opmap[value] yield typ, value, prefix, start_pos def _add_syntax_error(self, message, position): self.syntax_errors.append(ParserSyntaxError(message, position)) def __repr__(self): return "<%s: %s>" % (type(self).__name__, self.module) def remove_last_newline(self): """ In all of this we need to work with _start_pos, because if we worked with start_pos, we would need to check the position_modifier as well (which is accounted for in the start_pos property). """ endmarker = self.module.children[-1] # The newline is either in the endmarker as a prefix or the previous # leaf as a newline token. if endmarker.prefix.endswith('\n'): endmarker.prefix = endmarker.prefix[:-1] last_line = re.sub('.*\n', '', endmarker.prefix) endmarker._start_pos = endmarker._start_pos[0] - 1, len(last_line) else: try: newline = endmarker.get_previous() except IndexError: return # This means that the parser is empty. while True: if newline.value == '': # Must be a DEDENT, just continue. try: newline = newline.get_previous() except IndexError: # If there's a statement that fails to be parsed, there # will be no previous leaf. So just ignore it. break elif newline.value != '\n': # This may happen if error correction strikes and removes # a whole statement including '\n'. break else: newline.value = '' if self._last_failed_start_pos > newline._start_pos: # It may be the case that there was a syntax error in a # function. In that case error correction removes the # right newline. So we use the previously assigned # _last_failed_start_pos variable to account for that. endmarker._start_pos = self._last_failed_start_pos else: endmarker._start_pos = newline._start_pos break jedi-0.9.0/jedi/parser/pgen2/0000775000175000017500000000000012517736563016076 5ustar daviddavid00000000000000jedi-0.9.0/jedi/parser/pgen2/pgen.py0000664000175000017500000003321612517736533017403 0ustar daviddavid00000000000000# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. # Modifications: # Copyright 2014 David Halter. 
Integration into Jedi. # Modifications are dual-licensed: MIT and PSF. # Pgen imports from . import grammar from jedi.parser import token from jedi.parser import tokenize class ParserGenerator(object): def __init__(self, filename, stream=None): close_stream = None if stream is None: stream = open(filename) close_stream = stream.close self.filename = filename self.stream = stream self.generator = tokenize.generate_tokens(stream.readline) self.gettoken() # Initialize lookahead self.dfas, self.startsymbol = self.parse() if close_stream is not None: close_stream() self.first = {} # map from symbol name to set of tokens self.addfirstsets() def make_grammar(self): c = grammar.Grammar() names = list(self.dfas.keys()) names.sort() names.remove(self.startsymbol) names.insert(0, self.startsymbol) for name in names: i = 256 + len(c.symbol2number) c.symbol2number[name] = i c.number2symbol[i] = name for name in names: dfa = self.dfas[name] states = [] for state in dfa: arcs = [] for label, next in state.arcs.items(): arcs.append((self.make_label(c, label), dfa.index(next))) if state.isfinal: arcs.append((0, dfa.index(state))) states.append(arcs) c.states.append(states) c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name)) c.start = c.symbol2number[self.startsymbol] return c def make_first(self, c, name): rawfirst = self.first[name] first = {} for label in rawfirst: ilabel = self.make_label(c, label) ##assert ilabel not in first # XXX failed on <> ... != first[ilabel] = 1 return first def make_label(self, c, label): # XXX Maybe this should be a method on a subclass of converter? ilabel = len(c.labels) if label[0].isalpha(): # Either a symbol name or a named token if label in c.symbol2number: # A symbol name (a non-terminal) if label in c.symbol2label: return c.symbol2label[label] else: c.labels.append((c.symbol2number[label], None)) c.symbol2label[label] = ilabel return ilabel else: # A named token (NAME, NUMBER, STRING) itoken = getattr(token, label, None) assert isinstance(itoken, int), label assert itoken in token.tok_name, label if itoken in c.tokens: return c.tokens[itoken] else: c.labels.append((itoken, None)) c.tokens[itoken] = ilabel return ilabel else: # Either a keyword or an operator assert label[0] in ('"', "'"), label value = eval(label) if value[0].isalpha(): # A keyword if value in c.keywords: return c.keywords[value] else: c.labels.append((token.NAME, value)) c.keywords[value] = ilabel return ilabel else: # An operator (any non-numeric token) itoken = token.opmap[value] # Fails if unknown token if itoken in c.tokens: return c.tokens[itoken] else: c.labels.append((itoken, None)) c.tokens[itoken] = ilabel return ilabel def addfirstsets(self): names = list(self.dfas.keys()) names.sort() for name in names: if name not in self.first: self.calcfirst(name) #print name, self.first[name].keys() def calcfirst(self, name): dfa = self.dfas[name] self.first[name] = None # dummy to detect left recursion state = dfa[0] totalset = {} overlapcheck = {} for label, next in state.arcs.items(): if label in self.dfas: if label in self.first: fset = self.first[label] if fset is None: raise ValueError("recursion for rule %r" % name) else: self.calcfirst(label) fset = self.first[label] totalset.update(fset) overlapcheck[label] = fset else: totalset[label] = 1 overlapcheck[label] = {label: 1} inverse = {} for label, itsfirst in overlapcheck.items(): for symbol in itsfirst: if symbol in inverse: raise ValueError("rule %s is ambiguous; %s is in the" " first sets of %s as well as %s" % (name, symbol, 
label, inverse[symbol])) inverse[symbol] = label self.first[name] = totalset def parse(self): dfas = {} startsymbol = None # MSTART: (NEWLINE | RULE)* ENDMARKER while self.type != token.ENDMARKER: while self.type == token.NEWLINE: self.gettoken() # RULE: NAME ':' RHS NEWLINE name = self.expect(token.NAME) self.expect(token.OP, ":") a, z = self.parse_rhs() self.expect(token.NEWLINE) #self.dump_nfa(name, a, z) dfa = self.make_dfa(a, z) #self.dump_dfa(name, dfa) # oldlen = len(dfa) self.simplify_dfa(dfa) # newlen = len(dfa) dfas[name] = dfa #print name, oldlen, newlen if startsymbol is None: startsymbol = name return dfas, startsymbol def make_dfa(self, start, finish): # To turn an NFA into a DFA, we define the states of the DFA # to correspond to *sets* of states of the NFA. Then do some # state reduction. Let's represent sets as dicts with 1 for # values. assert isinstance(start, NFAState) assert isinstance(finish, NFAState) def closure(state): base = {} addclosure(state, base) return base def addclosure(state, base): assert isinstance(state, NFAState) if state in base: return base[state] = 1 for label, next in state.arcs: if label is None: addclosure(next, base) states = [DFAState(closure(start), finish)] for state in states: # NB states grows while we're iterating arcs = {} for nfastate in state.nfaset: for label, next in nfastate.arcs: if label is not None: addclosure(next, arcs.setdefault(label, {})) for label, nfaset in arcs.items(): for st in states: if st.nfaset == nfaset: break else: st = DFAState(nfaset, finish) states.append(st) state.addarc(st, label) return states # List of DFAState instances; first one is start def dump_nfa(self, name, start, finish): print("Dump of NFA for", name) todo = [start] for i, state in enumerate(todo): print(" State", i, state is finish and "(final)" or "") for label, next in state.arcs: if next in todo: j = todo.index(next) else: j = len(todo) todo.append(next) if label is None: print(" -> %d" % j) else: print(" %s -> %d" % (label, j)) def dump_dfa(self, name, dfa): print("Dump of DFA for", name) for i, state in enumerate(dfa): print(" State", i, state.isfinal and "(final)" or "") for label, next in state.arcs.items(): print(" %s -> %d" % (label, dfa.index(next))) def simplify_dfa(self, dfa): # This is not theoretically optimal, but works well enough. # Algorithm: repeatedly look for two states that have the same # set of arcs (same labels pointing to the same nodes) and # unify them, until things stop changing. 
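# ---------------------------------------------------------------------------
# Editor's tiny standalone illustration of the subset construction used by
# make_dfa above: each DFA state corresponds to the epsilon-closure of a set
# of NFA states.  Here states are plain integers and a None label marks an
# epsilon arc, mirroring addclosure().
def _closure(start, arcs):
    seen = {start}
    todo = [start]
    while todo:
        for label, nxt in arcs.get(todo.pop(), []):
            if label is None and nxt not in seen:
                seen.add(nxt)
                todo.append(nxt)
    return frozenset(seen)

# NFA: 0 -eps-> 1, 1 -'a'-> 2, 0 -eps-> 2
_arcs = {0: [(None, 1), (None, 2)], 1: [('a', 2)]}
print(sorted(_closure(0, _arcs)))   # [0, 1, 2]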
# dfa is a list of DFAState instances changes = True while changes: changes = False for i, state_i in enumerate(dfa): for j in range(i + 1, len(dfa)): state_j = dfa[j] if state_i == state_j: #print " unify", i, j del dfa[j] for state in dfa: state.unifystate(state_j, state_i) changes = True break def parse_rhs(self): # RHS: ALT ('|' ALT)* a, z = self.parse_alt() if self.value != "|": return a, z else: aa = NFAState() zz = NFAState() aa.addarc(a) z.addarc(zz) while self.value == "|": self.gettoken() a, z = self.parse_alt() aa.addarc(a) z.addarc(zz) return aa, zz def parse_alt(self): # ALT: ITEM+ a, b = self.parse_item() while (self.value in ("(", "[") or self.type in (token.NAME, token.STRING)): c, d = self.parse_item() b.addarc(c) b = d return a, b def parse_item(self): # ITEM: '[' RHS ']' | ATOM ['+' | '*'] if self.value == "[": self.gettoken() a, z = self.parse_rhs() self.expect(token.OP, "]") a.addarc(z) return a, z else: a, z = self.parse_atom() value = self.value if value not in ("+", "*"): return a, z self.gettoken() z.addarc(a) if value == "+": return a, z else: return a, a def parse_atom(self): # ATOM: '(' RHS ')' | NAME | STRING if self.value == "(": self.gettoken() a, z = self.parse_rhs() self.expect(token.OP, ")") return a, z elif self.type in (token.NAME, token.STRING): a = NFAState() z = NFAState() a.addarc(z, self.value) self.gettoken() return a, z else: self.raise_error("expected (...) or NAME or STRING, got %s/%s", self.type, self.value) def expect(self, type, value=None): if self.type != type or (value is not None and self.value != value): self.raise_error("expected %s/%s, got %s/%s", type, value, self.type, self.value) value = self.value self.gettoken() return value def gettoken(self): tup = next(self.generator) while tup[0] in (token.COMMENT, token.NL): tup = next(self.generator) self.type, self.value, self.begin, prefix = tup #print tokenize.tok_name[self.type], repr(self.value) def raise_error(self, msg, *args): if args: try: msg = msg % args except: msg = " ".join([msg] + list(map(str, args))) line = open(self.filename).readlines()[self.begin[0]] raise SyntaxError(msg, (self.filename, self.begin[0], self.begin[1], line)) class NFAState(object): def __init__(self): self.arcs = [] # list of (label, NFAState) pairs def addarc(self, next, label=None): assert label is None or isinstance(label, str) assert isinstance(next, NFAState) self.arcs.append((label, next)) class DFAState(object): def __init__(self, nfaset, final): assert isinstance(nfaset, dict) assert isinstance(next(iter(nfaset)), NFAState) assert isinstance(final, NFAState) self.nfaset = nfaset self.isfinal = final in nfaset self.arcs = {} # map from label to DFAState def addarc(self, next, label): assert isinstance(label, str) assert label not in self.arcs assert isinstance(next, DFAState) self.arcs[label] = next def unifystate(self, old, new): for label, next in self.arcs.items(): if next is old: self.arcs[label] = new def __eq__(self, other): # Equality test -- ignore the nfaset instance variable assert isinstance(other, DFAState) if self.isfinal != other.isfinal: return False # Can't just return self.arcs == other.arcs, because that # would invoke this method recursively, with cycles... if len(self.arcs) != len(other.arcs): return False for label, next in self.arcs.items(): if next is not other.arcs.get(label): return False return True __hash__ = None # For Py3 compatibility. 
def generate_grammar(filename="Grammar.txt"): p = ParserGenerator(filename) return p.make_grammar() jedi-0.9.0/jedi/parser/pgen2/__init__.py0000664000175000017500000000052112517736533020202 0ustar daviddavid00000000000000# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. # Modifications: # Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. # Copyright 2014 David Halter. Integration into Jedi. # Modifications are dual-licensed: MIT and PSF. jedi-0.9.0/jedi/parser/pgen2/parse.py0000664000175000017500000001767412517736533017576 0ustar daviddavid00000000000000# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. # Modifications: # Copyright 2014 David Halter. Integration into Jedi. # Modifications are dual-licensed: MIT and PSF. """ Parser engine for the grammar tables generated by pgen. The grammar table must be loaded first. See Parser/parser.c in the Python distribution for additional info on how this parsing engine works. """ # Local imports from jedi.parser import tokenize class ParseError(Exception): """Exception to signal the parser is stuck.""" def __init__(self, msg, type, value, start_pos): Exception.__init__(self, "%s: type=%r, value=%r, start_pos=%r" % (msg, tokenize.tok_name[type], value, start_pos)) self.msg = msg self.type = type self.value = value self.start_pos = start_pos class PgenParser(object): """Parser engine. The proper usage sequence is: p = Parser(grammar, [converter]) # create instance p.setup([start]) # prepare for parsing : if p.addtoken(...): # parse a token; may raise ParseError break root = p.rootnode # root of abstract syntax tree A Parser instance may be reused by calling setup() repeatedly. A Parser instance contains state pertaining to the current token sequence, and should not be used concurrently by different threads to parse separate token sequences. See driver.py for how to get input tokens by tokenizing a file or string. Parsing is complete when addtoken() returns True; the root of the abstract syntax tree can then be retrieved from the rootnode instance variable. When a syntax error occurs, addtoken() raises the ParseError exception. There is no error recovery; the parser cannot be used after a syntax error was reported (but it can be reinitialized by calling setup()). """ def __init__(self, grammar, convert_node, convert_leaf, error_recovery): """Constructor. The grammar argument is a grammar.Grammar instance; see the grammar module for more information. The parser is not ready yet for parsing; you must call the setup() method to get it started. The optional convert argument is a function mapping concrete syntax tree nodes to abstract syntax tree nodes. If not given, no conversion is done and the syntax tree produced is the concrete syntax tree. If given, it must be a function of two arguments, the first being the grammar (a grammar.Grammar instance), and the second being the concrete syntax tree node to be converted. The syntax tree is converted from the bottom up. A concrete syntax tree node is a (type, nodes) tuple, where type is the node type (a token or symbol number) and nodes is a list of children for symbols, and None for tokens. An abstract syntax tree node may be anything; this is entirely up to the converter function. """ self.grammar = grammar self.convert_node = convert_node self.convert_leaf = convert_leaf # Prepare for parsing. 
start = self.grammar.start # Each stack entry is a tuple: (dfa, state, node). # A node is a tuple: (type, children), # where children is a list of nodes or None newnode = (start, []) stackentry = (self.grammar.dfas[start], 0, newnode) self.stack = [stackentry] self.rootnode = None self.error_recovery = error_recovery def parse(self, tokenizer): for type, value, prefix, start_pos in tokenizer: if self.addtoken(type, value, prefix, start_pos): break else: # We never broke out -- EOF is too soon -- Unfinished statement. self.error_recovery(self.grammar, self.stack, type, value, start_pos, prefix, self.addtoken) # Add the ENDMARKER again. if not self.addtoken(type, value, prefix, start_pos): raise ParseError("incomplete input", type, value, start_pos) return self.rootnode def addtoken(self, type, value, prefix, start_pos): """Add a token; return True if this is the end of the program.""" # Map from token to label if type == tokenize.NAME: # Check for reserved words (keywords) try: ilabel = self.grammar.keywords[value] except KeyError: ilabel = self.grammar.tokens[type] else: ilabel = self.grammar.tokens[type] # Loop until the token is shifted; may raise exceptions while True: dfa, state, node = self.stack[-1] states, first = dfa arcs = states[state] # Look for a state with this label for i, newstate in arcs: t, v = self.grammar.labels[i] if ilabel == i: # Look it up in the list of labels assert t < 256 # Shift a token; we're done with it self.shift(type, value, newstate, prefix, start_pos) # Pop while we are in an accept-only state state = newstate while states[state] == [(0, state)]: self.pop() if not self.stack: # Done parsing! return True dfa, state, node = self.stack[-1] states, first = dfa # Done with this token return False elif t >= 256: # See if it's a symbol and if we're in its first set itsdfa = self.grammar.dfas[t] itsstates, itsfirst = itsdfa if ilabel in itsfirst: # Push a symbol self.push(t, itsdfa, newstate) break # To continue the outer while loop else: if (0, state) in arcs: # An accepting state, pop it and try something else self.pop() if not self.stack: # Done parsing, but another token is input raise ParseError("too much input", type, value, start_pos) else: self.error_recovery(self.grammar, self.stack, type, value, start_pos, prefix, self.addtoken) break def shift(self, type, value, newstate, prefix, start_pos): """Shift a token. (Internal)""" dfa, state, node = self.stack[-1] newnode = self.convert_leaf(self.grammar, type, value, prefix, start_pos) node[-1].append(newnode) self.stack[-1] = (dfa, newstate, node) def push(self, type, newdfa, newstate): """Push a nonterminal. (Internal)""" dfa, state, node = self.stack[-1] newnode = (type, []) self.stack[-1] = (dfa, newstate, node) self.stack.append((newdfa, 0, newnode)) def pop(self): """Pop a nonterminal. (Internal)""" popdfa, popstate, (type, children) = self.stack.pop() # If there's exactly one child, return that child instead of creating a # new node. We still create expr_stmt and file_input though, because a # lot of Jedi depends on its logic. if len(children) == 1: newnode = children[0] else: newnode = self.convert_node(self.grammar, type, children) try: # Equal to: # dfa, state, node = self.stack[-1] # symbol, children = node self.stack[-1][2][1].append(newnode) except IndexError: # Stack is empty, set the rootnode. self.rootnode = newnode jedi-0.9.0/jedi/parser/pgen2/grammar.py0000664000175000017500000001112312517736533020071 0ustar daviddavid00000000000000# Copyright 2004-2005 Elemental Security, Inc. 
All Rights Reserved. # Licensed to PSF under a Contributor Agreement. # Modifications: # Copyright 2014 David Halter. Integration into Jedi. # Modifications are dual-licensed: MIT and PSF. """This module defines the data structures used to represent a grammar. These are a bit arcane because they are derived from the data structures used by Python's 'pgen' parser generator. There's also a table here mapping operators to their names in the token module; the Python tokenize module reports all operators as the fallback token code OP, but the parser needs the actual token code. """ # Python imports import pickle class Grammar(object): """Pgen parsing tables conversion class. Once initialized, this class supplies the grammar tables for the parsing engine implemented by parse.py. The parsing engine accesses the instance variables directly. The class here does not provide initialization of the tables; several subclasses exist to do this (see the conv and pgen modules). The load() method reads the tables from a pickle file, which is much faster than the other ways offered by subclasses. The pickle file is written by calling dump() (after loading the grammar tables using a subclass). The report() method prints a readable representation of the tables to stdout, for debugging. The instance variables are as follows: symbol2number -- a dict mapping symbol names to numbers. Symbol numbers are always 256 or higher, to distinguish them from token numbers, which are between 0 and 255 (inclusive). number2symbol -- a dict mapping numbers to symbol names; these two are each other's inverse. states -- a list of DFAs, where each DFA is a list of states, each state is a list of arcs, and each arc is a (i, j) pair where i is a label and j is a state number. The DFA number is the index into this list. (This name is slightly confusing.) Final states are represented by a special arc of the form (0, j) where j is its own state number. dfas -- a dict mapping symbol numbers to (DFA, first) pairs, where DFA is an item from the states list above, and first is a set of tokens that can begin this grammar rule (represented by a dict whose values are always 1). labels -- a list of (x, y) pairs where x is either a token number or a symbol number, and y is either None or a string; the strings are keywords. The label number is the index in this list; label numbers are used to mark state transitions (arcs) in the DFAs. start -- the number of the grammar's start symbol. keywords -- a dict mapping keyword strings to arc labels. tokens -- a dict mapping token numbers to arc labels. """ def __init__(self): self.symbol2number = {} self.number2symbol = {} self.states = [] self.dfas = {} self.labels = [(0, "EMPTY")] self.keywords = {} self.tokens = {} self.symbol2label = {} self.start = 256 def dump(self, filename): """Dump the grammar tables to a pickle file.""" with open(filename, "wb") as f: pickle.dump(self.__dict__, f, 2) def load(self, filename): """Load the grammar tables from a pickle file.""" with open(filename, "rb") as f: d = pickle.load(f) self.__dict__.update(d) def copy(self): """ Copy the grammar. 
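        The copy is shallow per table: the dicts are copied with ``.copy()``
        and the two lists are sliced, so the copy can be extended without
        touching the original while the table contents themselves stay
        shared.  A small sketch, assuming ``g`` is an already loaded
        grammar::

            new = g.copy()
            new.keywords['foo'] = 999
            assert 'foo' not in g.keywords               # dicts were copied
            assert new.dfas[g.start] is g.dfas[g.start]  # values are shared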
""" new = self.__class__() for dict_attr in ("symbol2number", "number2symbol", "dfas", "keywords", "tokens", "symbol2label"): setattr(new, dict_attr, getattr(self, dict_attr).copy()) new.labels = self.labels[:] new.states = self.states[:] new.start = self.start return new def report(self): """Dump the grammar tables to standard output, for debugging.""" from pprint import pprint print("s2n") pprint(self.symbol2number) print("n2s") pprint(self.number2symbol) print("states") pprint(self.states) print("dfas") pprint(self.dfas) print("labels") pprint(self.labels) print("start", self.start) jedi-0.9.0/jedi/parser/fast.py0000664000175000017500000005212612517736533016375 0ustar daviddavid00000000000000""" Basically a parser that is faster, because it tries to parse only parts and if anything changes, it only reparses the changed parts. But because it's not finished (and still not working as I want), I won't document it any further. """ import re from itertools import chain from jedi._compatibility import use_metaclass from jedi import settings from jedi.parser import Parser from jedi.parser import tree from jedi import cache from jedi import debug from jedi.parser.tokenize import (source_tokens, NEWLINE, ENDMARKER, INDENT, DEDENT) FLOWS = 'if', 'else', 'elif', 'while', 'with', 'try', 'except', 'finally', 'for' class FastModule(tree.Module): type = 'file_input' def __init__(self, module_path): super(FastModule, self).__init__([]) self.modules = [] self.reset_caches() self.names_dict = {} self.path = module_path def reset_caches(self): self.modules = [] try: del self._used_names # Remove the used names cache. except AttributeError: pass # It was never used. @property @cache.underscore_memoization def used_names(self): return MergedNamesDict([m.used_names for m in self.modules]) @property def global_names(self): return [name for m in self.modules for name in m.global_names] @property def error_statement_stacks(self): return [e for m in self.modules for e in m.error_statement_stacks] def __repr__(self): return "" % (type(self).__name__, self.name, self.start_pos[0], self.end_pos[0]) # To avoid issues with with the `parser.Parser`, we need setters that do # nothing, because if pickle comes along and sets those values. @global_names.setter def global_names(self, value): pass @error_statement_stacks.setter def error_statement_stacks(self, value): pass @used_names.setter def used_names(self, value): pass class MergedNamesDict(object): def __init__(self, dicts): self.dicts = dicts def __iter__(self): return iter(set(key for dct in self.dicts for key in dct)) def __getitem__(self, value): return list(chain.from_iterable(dct.get(value, []) for dct in self.dicts)) def items(self): dct = {} for d in self.dicts: for key, values in d.items(): try: dct_values = dct[key] dct_values += values except KeyError: dct[key] = list(values) return dct.items() def values(self): lst = [] for dct in self.dicts: lst += dct.values() return lst class CachedFastParser(type): """ This is a metaclass for caching `FastParser`. 
""" def __call__(self, grammar, source, module_path=None): if not settings.fast_parser: return Parser(grammar, source, module_path) pi = cache.parser_cache.get(module_path, None) if pi is None or isinstance(pi.parser, Parser): p = super(CachedFastParser, self).__call__(grammar, source, module_path) else: p = pi.parser # pi is a `cache.ParserCacheItem` p.update(source) return p class ParserNode(object): def __init__(self, fast_module, parser, source): self._fast_module = fast_module self.parent = None self._node_children = [] self.source = source self.hash = hash(source) self.parser = parser try: # With fast_parser we have either 1 subscope or only statements. self._content_scope = parser.module.subscopes[0] except IndexError: self._content_scope = parser.module else: self._rewrite_last_newline() # We need to be able to reset the original children of a parser. self._old_children = list(self._content_scope.children) def _rewrite_last_newline(self): """ The ENDMARKER can contain a newline in the prefix. However this prefix really belongs to the function - respectively to the next function or parser node. If we don't rewrite that newline, we end up with a newline in the wrong position, i.d. at the end of the file instead of in the middle. """ c = self._content_scope.children if tree.is_node(c[-1], 'suite'): # In a simple_stmt there's no DEDENT. end_marker = self.parser.module.children[-1] # Set the DEDENT prefix instead of the ENDMARKER. c[-1].children[-1].prefix = end_marker.prefix end_marker.prefix = '' def __repr__(self): module = self.parser.module try: return '<%s: %s-%s>' % (type(self).__name__, module.start_pos, module.end_pos) except IndexError: # There's no module yet. return '<%s: empty>' % type(self).__name__ def reset_node(self): """ Removes changes that were applied in this class. """ self._node_children = [] scope = self._content_scope scope.children = list(self._old_children) try: # This works if it's a MergedNamesDict. # We are correcting it, because the MergedNamesDicts are artificial # and can change after closing a node. scope.names_dict = scope.names_dict.dicts[0] except AttributeError: pass def close(self): """ Closes the current parser node. This means that after this no further nodes should be added anymore. """ # We only need to replace the dict if multiple dictionaries are used: if self._node_children: dcts = [n.parser.module.names_dict for n in self._node_children] # Need to insert the own node as well. dcts.insert(0, self._content_scope.names_dict) self._content_scope.names_dict = MergedNamesDict(dcts) def parent_until_indent(self, indent=None): if (indent is None or self._indent >= indent) and self.parent is not None: self.close() return self.parent.parent_until_indent(indent) return self @property def _indent(self): if not self.parent: return 0 return self.parser.module.children[0].start_pos[1] def add_node(self, node, line_offset): """Adding a node means adding a node that was already added earlier""" # Changing the line offsets is very important, because if they don't # fit, all the start_pos values will be wrong. m = node.parser.module node.parser.position_modifier.line = line_offset self._fast_module.modules.append(m) node.parent = self self._node_children.append(node) # Insert parser objects into current structure. We only need to set the # parents and children in a good way. scope = self._content_scope for child in m.children: child.parent = scope scope.children.append(child) return node def all_sub_nodes(self): """ Returns all nodes including nested ones. 
""" for n in self._node_children: yield n for y in n.all_sub_nodes(): yield y @cache.underscore_memoization # Should only happen once! def remove_last_newline(self): self.parser.remove_last_newline() class FastParser(use_metaclass(CachedFastParser)): _FLOWS_NEED_SPACE = 'if', 'elif', 'while', 'with', 'except', 'for' _FLOWS_NEED_COLON = 'else', 'try', 'except', 'finally' _keyword_re = re.compile('^[ \t]*(def |class |@|(?:%s)|(?:%s)\s*:)' % ('|'.join(_FLOWS_NEED_SPACE), '|'.join(_FLOWS_NEED_COLON))) def __init__(self, grammar, source, module_path=None): # set values like `tree.Module`. self._grammar = grammar self.module_path = module_path self._reset_caches() self.update(source) def _reset_caches(self): self.module = FastModule(self.module_path) self.current_node = ParserNode(self.module, self, '') def update(self, source): # For testing purposes: It is important that the number of parsers used # can be minimized. With these variables we can test against that. self.number_parsers_used = 0 self.number_of_splits = 0 self.number_of_misses = 0 self.module.reset_caches() try: self._parse(source) except: # FastParser is cached, be careful with exceptions. self._reset_caches() raise def _split_parts(self, source): """ Split the source code into different parts. This makes it possible to parse each part seperately and therefore cache parts of the file and not everything. """ def gen_part(): text = ''.join(current_lines) del current_lines[:] self.number_of_splits += 1 return text def just_newlines(current_lines): for line in current_lines: line = line.lstrip('\t \n\r') if line and line[0] != '#': return False return True # Split only new lines. Distinction between \r\n is the tokenizer's # job. # It seems like there's no problem with form feed characters here, # because we're not counting lines. self._lines = source.splitlines(True) current_lines = [] is_decorator = False # Use -1, because that indent is always smaller than any other. indent_list = [-1, 0] new_indent = False parentheses_level = 0 flow_indent = None previous_line = None # All things within flows are simply being ignored. for i, l in enumerate(self._lines): # Handle backslash newline escaping. if l.endswith('\\\n') or l.endswith('\\\r\n'): if previous_line is not None: previous_line += l else: previous_line = l continue if previous_line is not None: l = previous_line + l previous_line = None # check for dedents s = l.lstrip('\t \n\r') indent = len(l) - len(s) if not s or s[0] == '#': current_lines.append(l) # Just ignore comments and blank lines continue if new_indent: if indent > indent_list[-2]: # Set the actual indent, not just the random old indent + 1. indent_list[-1] = indent new_indent = False while indent <= indent_list[-2]: # -> dedent indent_list.pop() # This automatically resets the flow_indent if there was a # dedent or a flow just on one line (with one simple_stmt). new_indent = False if flow_indent is None and current_lines and not parentheses_level: yield gen_part() flow_indent = None # Check lines for functions/classes and split the code there. if flow_indent is None: m = self._keyword_re.match(l) if m: # Strip whitespace and colon from flows as a check. 
if m.group(1).strip(' \t\r\n:') in FLOWS: if not parentheses_level: flow_indent = indent else: if not is_decorator and not just_newlines(current_lines): yield gen_part() is_decorator = '@' == m.group(1) if not is_decorator: parentheses_level = 0 # The new indent needs to be higher indent_list.append(indent + 1) new_indent = True elif is_decorator: is_decorator = False parentheses_level = \ max(0, (l.count('(') + l.count('[') + l.count('{') - l.count(')') - l.count(']') - l.count('}'))) current_lines.append(l) if current_lines: yield gen_part() def _parse(self, source): """ :type source: str """ added_newline = False if not source or source[-1] != '\n': # To be compatible with Pythons grammar, we need a newline at the # end. The parser would handle it, but since the fast parser abuses # the normal parser in various ways, we need to care for this # ourselves. source += '\n' added_newline = True next_line_offset = line_offset = 0 start = 0 nodes = list(self.current_node.all_sub_nodes()) # Now we can reset the node, because we have all the old nodes. self.current_node.reset_node() last_end_line = 1 for code_part in self._split_parts(source): next_line_offset += code_part.count('\n') # If the last code part parsed isn't equal to the current end_pos, # we know that the parser went further (`def` start in a # docstring). So just parse the next part. if line_offset + 1 == last_end_line: self.current_node = self._get_node(code_part, source[start:], line_offset, nodes) else: # Means that some lines where not fully parsed. Parse it now. # This is a very rare case. Should only happens with very # strange code bits. self.number_of_misses += 1 while last_end_line < next_line_offset + 1: line_offset = last_end_line - 1 # We could calculate the src in a more complicated way to # make caching here possible as well. However, this is # complicated and error-prone. Since this is not very often # called - just ignore it. src = ''.join(self._lines[line_offset:]) self.current_node = self._get_node(code_part, src, line_offset, nodes) last_end_line = self.current_node.parser.module.end_pos[0] debug.dbg('While parsing %s, line %s slowed down the fast parser.', self.module_path, line_offset + 1) line_offset = next_line_offset start += len(code_part) last_end_line = self.current_node.parser.module.end_pos[0] if added_newline: self.current_node.remove_last_newline() # Now that the for loop is finished, we still want to close all nodes. self.current_node = self.current_node.parent_until_indent() self.current_node.close() debug.dbg('Parsed %s, with %s parsers in %s splits.' % (self.module_path, self.number_parsers_used, self.number_of_splits)) def _get_node(self, source, parser_code, line_offset, nodes): """ Side effect: Alters the list of nodes. """ indent = len(source) - len(source.lstrip('\t ')) self.current_node = self.current_node.parent_until_indent(indent) h = hash(source) for index, node in enumerate(nodes): if node.hash == h and node.source == source: node.reset_node() nodes.remove(node) break else: tokenizer = FastTokenizer(parser_code) self.number_parsers_used += 1 p = Parser(self._grammar, parser_code, self.module_path, tokenizer=tokenizer) end = line_offset + p.module.end_pos[0] used_lines = self._lines[line_offset:end - 1] code_part_actually_used = ''.join(used_lines) node = ParserNode(self.module, p, code_part_actually_used) self.current_node.add_node(node, line_offset) return node class FastTokenizer(object): """ Breaks when certain conditions are met, i.e. a new function or class opens. 
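    For example (a sketch, not a doctest), tokenizing a module with two
    top-level functions stops at the second ``def``; from that point on only
    dedents and an ENDMARKER are produced::

        source = 'def a():\n    pass\ndef b():\n    pass\n'
        values = [tok[1] for tok in FastTokenizer(source)]
        assert 'b' not in values    # the stream was closed early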
""" def __init__(self, source): self.source = source self._gen = source_tokens(source) self._closed = False # fast parser options self.current = self.previous = NEWLINE, '', (0, 0) self._in_flow = False self._is_decorator = False self._first_stmt = True self._parentheses_level = 0 self._indent_counter = 0 self._flow_indent_counter = 0 self._returned_endmarker = False self._expect_indent = False def __iter__(self): return self def next(self): """ Python 2 Compatibility """ return self.__next__() def __next__(self): if self._closed: return self._finish_dedents() typ, value, start_pos, prefix = current = next(self._gen) if typ == ENDMARKER: self._closed = True self._returned_endmarker = True return current self.previous = self.current self.current = current if typ == INDENT: self._indent_counter += 1 if not self._expect_indent and not self._first_stmt and not self._in_flow: # This does not mean that there is an actual flow, it means # that the INDENT is syntactically wrong. self._flow_indent_counter = self._indent_counter - 1 self._in_flow = True self._expect_indent = False elif typ == DEDENT: self._indent_counter -= 1 if self._in_flow: if self._indent_counter == self._flow_indent_counter: self._in_flow = False else: self._closed = True return current if value in ('def', 'class') and self._parentheses_level \ and re.search(r'\n[ \t]*\Z', prefix): # Account for the fact that an open parentheses before a function # will reset the parentheses counter, but new lines before will # still be ignored. So check the prefix. # TODO what about flow parentheses counter resets in the tokenizer? self._parentheses_level = 0 return self._close() # Parentheses ignore the indentation rules. The other three stand for # new lines. if self.previous[0] in (NEWLINE, INDENT, DEDENT) \ and not self._parentheses_level and typ not in (INDENT, DEDENT): if not self._in_flow: if value in FLOWS: self._flow_indent_counter = self._indent_counter self._first_stmt = False elif value in ('def', 'class', '@'): # The values here are exactly the same check as in # _split_parts, but this time with tokenize and therefore # precise. if not self._first_stmt and not self._is_decorator: return self._close() self._is_decorator = '@' == value if not self._is_decorator: self._first_stmt = False self._expect_indent = True elif self._expect_indent: return self._close() else: self._first_stmt = False if value in '([{' and value: self._parentheses_level += 1 elif value in ')]}' and value: # Ignore closing parentheses, because they are all # irrelevant for the indentation. self._parentheses_level = max(self._parentheses_level - 1, 0) return current def _close(self): if self._first_stmt: # Continue like nothing has happened, because we want to enter # the first class/function. if self.current[1] != '@': self._first_stmt = False return self.current else: self._closed = True return self._finish_dedents() def _finish_dedents(self): if self._indent_counter: self._indent_counter -= 1 return DEDENT, '', self.current[2], '' elif not self._returned_endmarker: self._returned_endmarker = True return ENDMARKER, '', self.current[2], self._get_prefix() else: raise StopIteration def _get_prefix(self): """ We're using the current prefix for the endmarker to not loose any information. However we care about "lost" lines. The prefix of the current line (indent) will always be included in the current line. """ cur = self.current while cur[0] == DEDENT: cur = next(self._gen) prefix = cur[3] # \Z for the end of the string. 
$ is bugged, because it has the # same behavior with or without re.MULTILINE. return re.sub(r'[^\n]+\Z', '', prefix) jedi-0.9.0/jedi/parser/grammar2.7.txt0000664000175000017500000001501412517736533017477 0ustar daviddavid00000000000000# Grammar for 2to3. This grammar supports Python 2.x and 3.x. # Note: Changing the grammar specified in this file will most likely # require corresponding changes in the parser module # (../Modules/parsermodule.c). If you can't make the changes to # that module yourself, please co-ordinate the required changes # with someone who can; ask around on python-dev for help. Fred # Drake will probably be listening there. # NOTE WELL: You should also follow all the steps listed in PEP 306, # "How to Change Python's Grammar" # Start symbols for the grammar: # file_input is a module or sequence of commands read from an input file; # single_input is a single interactive statement; # eval_input is the input for the eval() and input() functions. # NB: compound_stmt in single_input is followed by extra NEWLINE! file_input: (NEWLINE | stmt)* ENDMARKER single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE eval_input: testlist NEWLINE* ENDMARKER decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE decorators: decorator+ decorated: decorators (classdef | funcdef) funcdef: 'def' NAME parameters ['->' test] ':' suite parameters: '(' [typedargslist] ')' typedargslist: ((tfpdef ['=' test] ',')* ('*' [tname] (',' tname ['=' test])* [',' '**' tname] | '**' tname) | tfpdef ['=' test] (',' tfpdef ['=' test])* [',']) tname: NAME [':' test] tfpdef: tname | '(' tfplist ')' tfplist: tfpdef (',' tfpdef)* [','] varargslist: ((vfpdef ['=' test] ',')* ('*' [vname] (',' vname ['=' test])* [',' '**' vname] | '**' vname) | vfpdef ['=' test] (',' vfpdef ['=' test])* [',']) vname: NAME vfpdef: vname | '(' vfplist ')' vfplist: vfpdef (',' vfpdef)* [','] stmt: simple_stmt | compound_stmt simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt | import_stmt | global_stmt | exec_stmt | assert_stmt) expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) | ('=' (yield_expr|testlist_star_expr))*) testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [','] augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' | '<<=' | '>>=' | '**=' | '//=') # For normal assignments, additional restrictions enforced by the interpreter print_stmt: 'print' ( [ test (',' test)* [','] ] | '>>' test [ (',' test)+ [','] ] ) del_stmt: 'del' exprlist pass_stmt: 'pass' flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt break_stmt: 'break' continue_stmt: 'continue' return_stmt: 'return' [testlist] yield_stmt: yield_expr raise_stmt: 'raise' [test ['from' test | ',' test [',' test]]] import_stmt: import_name | import_from import_name: 'import' dotted_as_names # note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+) 'import' ('*' | '(' import_as_names ')' | import_as_names)) import_as_name: NAME ['as' NAME] dotted_as_name: dotted_name ['as' NAME] import_as_names: import_as_name (',' import_as_name)* [','] dotted_as_names: dotted_as_name (',' dotted_as_name)* dotted_name: NAME ('.' 
NAME)* global_stmt: ('global' | 'nonlocal') NAME (',' NAME)* exec_stmt: 'exec' expr ['in' test [',' test]] assert_stmt: 'assert' test [',' test] compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite] while_stmt: 'while' test ':' suite ['else' ':' suite] for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] try_stmt: ('try' ':' suite ((except_clause ':' suite)+ ['else' ':' suite] ['finally' ':' suite] | 'finally' ':' suite)) with_stmt: 'with' with_item (',' with_item)* ':' suite with_item: test ['as' expr] with_var: 'as' expr # NB compile.c makes sure that the default except clause is last except_clause: 'except' [test [(',' | 'as') test]] # Edit by David Halter: The stmt is now optional. This reflects how Jedi allows # classes and functions to be empty, which is beneficial for autocompletion. suite: simple_stmt | NEWLINE INDENT stmt* DEDENT # Backward compatibility cruft to support: # [ x for x in lambda: True, lambda: False if x() ] # even while also allowing: # lambda x: 5 if x else 2 # (But not a mix of the two) testlist_safe: old_test [(',' old_test)+ [',']] old_test: or_test | old_lambdef old_lambdef: 'lambda' [varargslist] ':' old_test test: or_test ['if' or_test 'else' test] | lambdef or_test: and_test ('or' and_test)* and_test: not_test ('and' not_test)* not_test: 'not' not_test | comparison comparison: expr (comp_op expr)* comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' star_expr: '*' expr expr: xor_expr ('|' xor_expr)* xor_expr: and_expr ('^' and_expr)* and_expr: shift_expr ('&' shift_expr)* shift_expr: arith_expr (('<<'|'>>') arith_expr)* arith_expr: term (('+'|'-') term)* term: factor (('*'|'/'|'%'|'//') factor)* factor: ('+'|'-'|'~') factor | power power: atom trailer* ['**' factor] atom: ('(' [yield_expr|testlist_comp] ')' | '[' [testlist_comp] ']' | '{' [dictorsetmaker] '}' | '`' testlist1 '`' | NAME | NUMBER | STRING+ | '.' '.' '.') # Modification by David Halter, remove `testlist_gexp` and `listmaker` testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] ) lambdef: 'lambda' [varargslist] ':' test trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME subscriptlist: subscript (',' subscript)* [','] subscript: test | [test] ':' [test] [sliceop] sliceop: ':' [test] exprlist: (expr|star_expr) (',' (expr|star_expr))* [','] testlist: test (',' test)* [','] # Modification by David Halter, dictsetmaker -> dictorsetmaker (so that it's # the same as in the 3.4 grammar). dictorsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) | (test (comp_for | (',' test)* [','])) ) classdef: 'class' NAME ['(' [arglist] ')'] ':' suite arglist: (argument ',')* (argument [','] |'*' test (',' argument)* [',' '**' test] |'**' test) argument: test [comp_for] | test '=' test # Really [keyword '='] test comp_iter: comp_for | comp_if comp_for: 'for' exprlist 'in' testlist_safe [comp_iter] comp_if: 'if' old_test [comp_iter] testlist1: test (',' test)* # not used in grammar, but may appear in "node" passed from Parser to Compiler encoding_decl: NAME yield_expr: 'yield' [testlist] jedi-0.9.0/jedi/parser/tree.py0000664000175000017500000010565712517736533016407 0ustar daviddavid00000000000000""" If you know what an abstract syntax tree (AST) is, you'll see that this module is pretty much that. The classes represent syntax elements like functions and imports. This is the "business logic" part of the parser. 
There's a lot of logic here that makes it easier for Jedi (and other libraries to deal with a Python syntax tree. By using `get_code` on a module, you can get back the 1-to-1 representation of the input given to the parser. This is important if you are using refactoring. The easiest way to play with this module is to use :class:`parsing.Parser`. :attr:`parsing.Parser.module` holds an instance of :class:`Module`: >>> from jedi._compatibility import u >>> from jedi.parser import Parser, load_grammar >>> parser = Parser(load_grammar(), u('import os'), 'example.py') >>> submodule = parser.module >>> submodule Any subclasses of :class:`Scope`, including :class:`Module` has an attribute :attr:`imports `: >>> submodule.imports [] See also :attr:`Scope.subscopes` and :attr:`Scope.statements`. """ import os import re from inspect import cleandoc from itertools import chain import textwrap from jedi._compatibility import (Python3Method, encoding, is_py3, utf8_repr, literal_eval, use_metaclass, unicode) from jedi import cache def is_node(node, *symbol_names): try: type = node.type except AttributeError: return False else: return type in symbol_names class PositionModifier(object): """A start_pos modifier for the fast parser.""" def __init__(self): self.line = 0 zero_position_modifier = PositionModifier() class DocstringMixin(object): __slots__ = () @property def raw_doc(self): """ Returns a cleaned version of the docstring token. """ if isinstance(self, Module): node = self.children[0] elif isinstance(self, ClassOrFunc): node = self.children[self.children.index(':') + 1] if is_node(node, 'suite'): # Normally a suite node = node.children[2] # -> NEWLINE INDENT stmt else: # ExprStmt simple_stmt = self.parent c = simple_stmt.parent.children index = c.index(simple_stmt) if not index: return '' node = c[index - 1] if is_node(node, 'simple_stmt'): node = node.children[0] if node.type == 'string': # TODO We have to check next leaves until there are no new # leaves anymore that might be part of the docstring. A # docstring can also look like this: ``'foo' 'bar' # Returns a literal cleaned version of the ``Token``. cleaned = cleandoc(literal_eval(node.value)) # Since we want the docstr output to be always unicode, just # force it. if is_py3 or isinstance(cleaned, unicode): return cleaned else: return unicode(cleaned, 'UTF-8', 'replace') return '' class Base(object): """ This is just here to have an isinstance check, which is also used on evaluate classes. But since they have sometimes a special type of delegation, it is important for those classes to override this method. I know that there is a chance to do such things with __instancecheck__, but since Python 2.5 doesn't support it, I decided to do it this way. """ __slots__ = () def isinstance(self, *cls): return isinstance(self, cls) @Python3Method def get_parent_until(self, classes=(), reverse=False, include_current=True): """ Searches the parent "chain" until the object is an instance of classes. If classes is empty return the last parent in the chain (is without a parent). """ if type(classes) not in (tuple, list): classes = (classes,) scope = self if include_current else self.parent while scope.parent is not None: # TODO why if classes? if classes and reverse != scope.isinstance(*classes): break scope = scope.parent return scope def get_parent_scope(self, include_flows=False): """ Returns the underlying scope. 
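        For a source like::

            def f():
                if x:
                    y = 1

        the name ``y`` gets the Function node from ``get_parent_scope()``,
        but the IfStmt from ``get_parent_scope(include_flows=True)``, because
        flow nodes are skipped unless explicitly requested.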
""" scope = self.parent while scope is not None: if include_flows and isinstance(scope, Flow): return scope if scope.is_scope(): break scope = scope.parent return scope def is_scope(self): # Default is not being a scope. Just inherit from Scope. return False class Leaf(Base): __slots__ = ('position_modifier', 'value', 'parent', '_start_pos', 'prefix') def __init__(self, position_modifier, value, start_pos, prefix=''): self.position_modifier = position_modifier self.value = value self._start_pos = start_pos self.prefix = prefix self.parent = None @property def start_pos(self): return self._start_pos[0] + self.position_modifier.line, self._start_pos[1] @start_pos.setter def start_pos(self, value): self._start_pos = value[0] - self.position_modifier.line, value[1] @property def end_pos(self): return (self._start_pos[0] + self.position_modifier.line, self._start_pos[1] + len(self.value)) def move(self, line_offset, column_offset): self._start_pos = (self._start_pos[0] + line_offset, self._start_pos[1] + column_offset) def get_previous(self): """ Returns the previous leaf in the parser tree. """ node = self while True: c = node.parent.children i = c.index(self) if i == 0: node = node.parent if node.parent is None: raise IndexError('Cannot access the previous element of the first one.') else: node = c[i - 1] break while True: try: node = node.children[-1] except AttributeError: # A Leaf doesn't have children. return node def get_code(self): return self.prefix + self.value def next_sibling(self): """ The node immediately following the invocant in their parent's children list. If the invocant does not have a next sibling, it is None """ # Can't use index(); we need to test by identity for i, child in enumerate(self.parent.children): if child is self: try: return self.parent.children[i + 1] except IndexError: return None def prev_sibling(self): """ The node/leaf immediately preceding the invocant in their parent's children list. If the invocant does not have a previous sibling, it is None. """ # Can't use index(); we need to test by identity for i, child in enumerate(self.parent.children): if child is self: if i == 0: return None return self.parent.children[i - 1] @utf8_repr def __repr__(self): return "<%s: %s>" % (type(self).__name__, self.value) class LeafWithNewLines(Leaf): __slots__ = () @property def end_pos(self): """ Literals and whitespace end_pos are more complicated than normal end_pos, because the containing newlines may change the indexes. """ end_pos_line, end_pos_col = self.start_pos lines = self.value.split('\n') end_pos_line += len(lines) - 1 # Check for multiline token if self.start_pos[0] == end_pos_line: end_pos_col += len(lines[-1]) else: end_pos_col = len(lines[-1]) return end_pos_line, end_pos_col class Whitespace(LeafWithNewLines): """Contains NEWLINE and ENDMARKER tokens.""" __slots__ = () type = 'whitespace' class Name(Leaf): """ A string. Sometimes it is important to know if the string belongs to a name or not. 
""" type = 'name' __slots__ = () def __str__(self): return self.value def __unicode__(self): return self.value def __repr__(self): return "<%s: %s@%s,%s>" % (type(self).__name__, self.value, self.start_pos[0], self.start_pos[1]) def get_definition(self): scope = self while scope.parent is not None: parent = scope.parent if scope.isinstance(Node, Name) and parent.type != 'simple_stmt': if scope.type == 'testlist_comp': try: if isinstance(scope.children[1], CompFor): return scope.children[1] except IndexError: pass scope = parent else: break return scope def is_definition(self): stmt = self.get_definition() if stmt.type in ('funcdef', 'classdef', 'file_input', 'param'): return self == stmt.name elif stmt.type == 'for_stmt': return self.start_pos < stmt.children[2].start_pos elif stmt.type == 'try_stmt': return self.prev_sibling() == 'as' else: return stmt.type in ('expr_stmt', 'import_name', 'import_from', 'comp_for', 'with_stmt') \ and self in stmt.get_defined_names() def assignment_indexes(self): """ Returns an array of ints of the indexes that are used in tuple assignments. For example if the name is ``y`` in the following code:: x, (y, z) = 2, '' would result in ``[1, 0]``. """ indexes = [] node = self.parent compare = self while node is not None: if is_node(node, 'testlist_comp', 'testlist_star_expr', 'exprlist'): for i, child in enumerate(node.children): if child == compare: indexes.insert(0, int(i / 2)) break else: raise LookupError("Couldn't find the assignment.") elif isinstance(node, (ExprStmt, CompFor)): break compare = node node = node.parent return indexes class Literal(LeafWithNewLines): __slots__ = () def eval(self): return literal_eval(self.value) class Number(Literal): type = 'number' __slots__ = () class String(Literal): type = 'string' __slots__ = () class Operator(Leaf): type = 'operator' __slots__ = () def __str__(self): return self.value def __eq__(self, other): """ Make comparisons with strings easy. Improves the readability of the parser. """ if isinstance(other, Operator): return self is other else: return self.value == other def __ne__(self, other): """Python 2 compatibility.""" return self.value != other def __hash__(self): return hash(self.value) class Keyword(Leaf): type = 'keyword' __slots__ = () def __eq__(self, other): """ Make comparisons with strings easy. Improves the readability of the parser. """ if isinstance(other, Keyword): return self is other return self.value == other def __ne__(self, other): """Python 2 compatibility.""" return not self.__eq__(other) def __hash__(self): return hash(self.value) class BaseNode(Base): """ The super class for Scope, Import, Name and Statement. Every object in the parser tree inherits from this class. """ __slots__ = ('children', 'parent') type = None def __init__(self, children): """ Initialize :class:`BaseNode`. :param children: The module in which this Python object locates. """ for c in children: c.parent = self self.children = children self.parent = None def move(self, line_offset, column_offset): """ Move the Node's start_pos. 
""" for c in self.children: c.move(line_offset, column_offset) @property def start_pos(self): return self.children[0].start_pos @property def end_pos(self): return self.children[-1].end_pos def get_code(self): return "".join(c.get_code() for c in self.children) @Python3Method def name_for_position(self, position): for c in self.children: if isinstance(c, Leaf): if isinstance(c, Name) and c.start_pos <= position <= c.end_pos: return c else: result = c.name_for_position(position) if result is not None: return result return None @Python3Method def get_statement_for_position(self, pos): for c in self.children: if c.start_pos <= pos <= c.end_pos: if c.type not in ('decorated', 'simple_stmt', 'suite') \ and not isinstance(c, (Flow, ClassOrFunc)): return c else: try: return c.get_statement_for_position(pos) except AttributeError: pass # Must be a non-scope return None def first_leaf(self): try: return self.children[0].first_leaf() except AttributeError: return self.children[0] @utf8_repr def __repr__(self): code = self.get_code().replace('\n', ' ') if not is_py3: code = code.encode(encoding, 'replace') return "<%s: %s@%s,%s>" % \ (type(self).__name__, code, self.start_pos[0], self.start_pos[1]) class Node(BaseNode): """Concrete implementation for interior nodes.""" __slots__ = ('type',) def __init__(self, type, children): """ Initializer. Takes a type constant (a symbol number >= 256), a sequence of child nodes, and an optional context keyword argument. As a side effect, the parent pointers of the children are updated. """ super(Node, self).__init__(children) self.type = type def __repr__(self): return "%s(%s, %r)" % (self.__class__.__name__, self.type, self.children) class IsScopeMeta(type): def __instancecheck__(self, other): return other.is_scope() class IsScope(use_metaclass(IsScopeMeta)): pass class Scope(BaseNode, DocstringMixin): """ Super class for the parser tree, which represents the state of a python text file. A Scope manages and owns its subscopes, which are classes and functions, as well as variables and imports. It is used to access the structure of python files. :param start_pos: The position (line and column) of the scope. :type start_pos: tuple(int, int) """ __slots__ = ('names_dict',) def __init__(self, children): super(Scope, self).__init__(children) @property def returns(self): # Needed here for fast_parser, because the fast_parser splits and # returns will be in "normal" modules. 
return self._search_in_scope(ReturnStmt) @property def subscopes(self): return self._search_in_scope(Scope) @property def flows(self): return self._search_in_scope(Flow) @property def imports(self): return self._search_in_scope(Import) @Python3Method def _search_in_scope(self, typ): def scan(children): elements = [] for element in children: if isinstance(element, typ): elements.append(element) if is_node(element, 'suite', 'simple_stmt', 'decorated') \ or isinstance(element, Flow): elements += scan(element.children) return elements return scan(self.children) @property def statements(self): return self._search_in_scope((ExprStmt, KeywordStatement)) def is_scope(self): return True def __repr__(self): try: name = self.path except AttributeError: try: name = self.name except AttributeError: name = self.command return "<%s: %s@%s-%s>" % (type(self).__name__, name, self.start_pos[0], self.end_pos[0]) def walk(self): yield self for s in self.subscopes: for scope in s.walk(): yield scope for r in self.statements: while isinstance(r, Flow): for scope in r.walk(): yield scope r = r.next class Module(Scope): """ The top scope, which is always a module. Depending on the underlying parser this may be a full module or just a part of a module. """ __slots__ = ('path', 'global_names', 'used_names', '_name', 'error_statement_stacks') type = 'file_input' def __init__(self, children): """ Initialize :class:`Module`. :type path: str :arg path: File path to this module. .. todo:: Document `top_module`. """ super(Module, self).__init__(children) self.path = None # Set later. @property @cache.underscore_memoization def name(self): """ This is used for the goto functions. """ if self.path is None: string = '' # no path -> empty name else: sep = (re.escape(os.path.sep),) * 2 r = re.search(r'([^%s]*?)(%s__init__)?(\.py|\.so)?$' % sep, self.path) # Remove PEP 3149 names string = re.sub('\.[a-z]+-\d{2}[mud]{0,3}$', '', r.group(1)) # Positions are not real, but a module starts at (1, 0) p = (1, 0) name = Name(zero_position_modifier, string, p) name.parent = self return name @property def has_explicit_absolute_import(self): """ Checks if imports in this module are explicitly absolute, i.e. there is a ``__future__`` import. """ # TODO this is a strange scan and not fully correct. I think Python's # parser does it in a different way and scans for the first # statement/import with a tokenizer (to check for syntax changes like # the future print statement). for imp in self.imports: if imp.type == 'import_from' and imp.level == 0: for path in imp.paths(): if [str(name) for name in path] == ['__future__', 'absolute_import']: return True return False class Decorator(BaseNode): type = 'decorator' __slots__ = () class ClassOrFunc(Scope): __slots__ = () @property def name(self): return self.children[1] def get_decorators(self): decorated = self.parent if is_node(decorated, 'decorated'): if is_node(decorated.children[0], 'decorators'): return decorated.children[0].children else: return decorated.children[:1] else: return [] class Class(ClassOrFunc): """ Used to store the parsed contents of a python class. :param name: The Class name. :type name: str :param supers: The super classes of a Class. :type supers: list :param start_pos: The start position (line, column) of the class. 
:type start_pos: tuple(int, int) """ type = 'classdef' __slots__ = () def __init__(self, children): super(Class, self).__init__(children) def get_super_arglist(self): if self.children[2] != '(': # Has no parentheses return None else: if self.children[3] == ')': # Empty parentheses return None else: return self.children[3] @property def doc(self): """ Return a document string including call signature of __init__. """ docstr = self.raw_doc for sub in self.subscopes: if str(sub.name) == '__init__': return '%s\n\n%s' % ( sub.get_call_signature(func_name=self.name), docstr) return docstr def _create_params(parent, argslist_list): """ `argslist_list` is a list that can contain an argslist as a first item, but most not. It's basically the items between the parameter brackets (which is at most one item). This function modifies the parser structure. It generates `Param` objects from the normal ast. Those param objects do not exist in a normal ast, but make the evaluation of the ast tree so much easier. You could also say that this function replaces the argslist node with a list of Param objects. """ def check_python2_nested_param(node): """ Python 2 allows params to look like ``def x(a, (b, c))``, which is basically a way of unpacking tuples in params. Python 3 has ditched this behavior. Jedi currently just ignores those constructs. """ return node.type == 'tfpdef' and node.children[0] == '(' try: first = argslist_list[0] except IndexError: return [] if first.type in ('name', 'tfpdef'): if check_python2_nested_param(first): return [] else: return [Param([first], parent)] else: # argslist is a `typedargslist` or a `varargslist`. children = first.children params = [] start = 0 # Start with offset 1, because the end is higher. for end, child in enumerate(children + [None], 1): if child is None or child == ',': new_children = children[start:end] if new_children: # Could as well be comma and then end. if check_python2_nested_param(new_children[0]): continue params.append(Param(new_children, parent)) start = end return params class Function(ClassOrFunc): """ Used to store the parsed contents of a python function. """ __slots__ = ('listeners',) type = 'funcdef' def __init__(self, children): super(Function, self).__init__(children) self.listeners = set() # not used here, but in evaluation. parameters = self.children[2] # After `def foo` parameters.children[1:-1] = _create_params(parameters, parameters.children[1:-1]) @property def params(self): return self.children[2].children[1:-1] @property def name(self): return self.children[1] # First token after `def` @property def yields(self): # TODO This is incorrect, yields are also possible in a statement. return self._search_in_scope(YieldExpr) def is_generator(self): return bool(self.yields) def annotation(self): try: return self.children[6] # 6th element: def foo(...) -> bar except IndexError: return None def get_call_signature(self, width=72, func_name=None): """ Generate call signature of this function. :param width: Fold lines if a line is longer than this value. :type width: int :arg func_name: Override function name when given. :type func_name: str :rtype: str """ func_name = func_name or self.children[1] code = unicode(func_name) + self.children[2].get_code() return '\n'.join(textwrap.wrap(code, width)) @property def doc(self): """ Return a document string including call signature. 
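        For example (a sketch, not a doctest), for a module defining
        ``def foo(a, b=3):`` with the docstring ``'bar'`` and ``parser``
        built as in the module docstring above::

            func = parser.module.subscopes[0]
            func.doc    # -> 'foo(a, b=3)', a blank line, then 'bar'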
""" docstr = self.raw_doc return '%s\n\n%s' % (self.get_call_signature(), docstr) class Lambda(Function): """ Lambdas are basically trimmed functions, so give it the same interface. """ type = 'lambda' __slots__ = () def __init__(self, children): # We don't want to call the Function constructor, call its parent. super(Function, self).__init__(children) self.listeners = set() # not used here, but in evaluation. lst = self.children[1:-2] # After `def foo` self.children[1:-2] = _create_params(self, lst) @property def params(self): return self.children[1:-2] def is_generator(self): return False def yields(self): return [] def __repr__(self): return "<%s@%s>" % (self.__class__.__name__, self.start_pos) class Flow(BaseNode): __slots__ = () class IfStmt(Flow): type = 'if_stmt' __slots__ = () def check_nodes(self): """ Returns all the `test` nodes that are defined as x, here: if x: pass elif x: pass """ for i, c in enumerate(self.children): if c in ('elif', 'if'): yield self.children[i + 1] def node_in_which_check_node(self, node): for check_node in reversed(list(self.check_nodes())): if check_node.start_pos < node.start_pos: return check_node def node_after_else(self, node): """ Checks if a node is defined after `else`. """ for c in self.children: if c == 'else': if node.start_pos > c.start_pos: return True else: return False class WhileStmt(Flow): type = 'while_stmt' __slots__ = () class ForStmt(Flow): type = 'for_stmt' __slots__ = () class TryStmt(Flow): type = 'try_stmt' __slots__ = () def except_clauses(self): """ Returns the ``test`` nodes found in ``except_clause`` nodes. Returns ``[None]`` for except clauses without an exception given. """ for node in self.children: if node.type == 'except_clause': yield node.children[1] elif node == 'except': yield None class WithStmt(Flow): type = 'with_stmt' __slots__ = () def get_defined_names(self): names = [] for with_item in self.children[1:-2:2]: # Check with items for 'as' names. if is_node(with_item, 'with_item'): names += _defined_names(with_item.children[2]) return names def node_from_name(self, name): node = name while True: node = node.parent if is_node(node, 'with_item'): return node.children[0] class Import(BaseNode): __slots__ = () def path_for_name(self, name): try: # The name may be an alias. If it is, just map it back to the name. name = self.aliases()[name] except KeyError: pass for path in self.paths(): if name in path: return path[:path.index(name) + 1] raise ValueError('Name should be defined in the import itself') def is_nested(self): return False # By default, sub classes may overwrite this behavior def is_star_import(self): return self.children[-1] == '*' class ImportFrom(Import): type = 'import_from' __slots__ = () def get_defined_names(self): return [alias or name for name, alias in self._as_name_tuples()] def aliases(self): """Mapping from alias to its corresponding name.""" return dict((alias, name) for name, alias in self._as_name_tuples() if alias is not None) def get_from_names(self): for n in self.children[1:]: if n not in ('.', '...'): break if is_node(n, 'dotted_name'): # from x.y import return n.children[::2] elif n == 'import': # from . import return [] else: # from x import return [n] @property def level(self): """The level parameter of ``__import__``.""" level = 0 for n in self.children[1:]: if n in ('.', '...'): level += len(n.value) else: break return level def _as_name_tuples(self): last = self.children[-1] if last == ')': last = self.children[-2] elif last == '*': return # No names defined directly. 
if is_node(last, 'import_as_names'): as_names = last.children[::2] else: as_names = [last] for as_name in as_names: if as_name.type == 'name': yield as_name, None else: yield as_name.children[::2] # yields x, y -> ``x as y`` def star_import_name(self): """ The last name defined in a star import. """ return self.paths()[-1][-1] def paths(self): """ The import paths defined in an import statement. Typically an array like this: ``[, ]``. """ dotted = self.get_from_names() if self.children[-1] == '*': return [dotted] return [dotted + [name] for name, alias in self._as_name_tuples()] class ImportName(Import): """For ``import_name`` nodes. Covers normal imports without ``from``.""" type = 'import_name' __slots__ = () def get_defined_names(self): return [alias or path[0] for path, alias in self._dotted_as_names()] @property def level(self): """The level parameter of ``__import__``.""" return 0 # Obviously 0 for imports without from. def paths(self): return [path for path, alias in self._dotted_as_names()] def _dotted_as_names(self): """Generator of (list(path), alias) where alias may be None.""" dotted_as_names = self.children[1] if is_node(dotted_as_names, 'dotted_as_names'): as_names = dotted_as_names.children[::2] else: as_names = [dotted_as_names] for as_name in as_names: if is_node(as_name, 'dotted_as_name'): alias = as_name.children[2] as_name = as_name.children[0] else: alias = None if as_name.type == 'name': yield [as_name], alias else: # dotted_names yield as_name.children[::2], alias def is_nested(self): """ This checks for the special case of nested imports, without aliases and from statement:: import foo.bar """ return [1 for path, alias in self._dotted_as_names() if alias is None and len(path) > 1] def aliases(self): return dict((alias, path[-1]) for path, alias in self._dotted_as_names() if alias is not None) class KeywordStatement(BaseNode): """ For the following statements: `assert`, `del`, `global`, `nonlocal`, `raise`, `return`, `yield`, `pass`, `continue`, `break`, `return`, `yield`. """ __slots__ = () @property def keyword(self): return self.children[0].value class AssertStmt(KeywordStatement): type = 'assert_stmt' __slots__ = () def assertion(self): return self.children[1] class GlobalStmt(KeywordStatement): type = 'global_stmt' __slots__ = () def get_defined_names(self): return [] def get_global_names(self): return self.children[1::2] class ReturnStmt(KeywordStatement): type = 'return_stmt' __slots__ = () class YieldExpr(BaseNode): type = 'yield_expr' __slots__ = () def _defined_names(current): """ A helper function to find the defined names in statements, for loops and list comprehensions. """ names = [] if is_node(current, 'testlist_star_expr', 'testlist_comp', 'exprlist'): for child in current.children[::2]: names += _defined_names(child) elif is_node(current, 'atom'): names += _defined_names(current.children[1]) elif is_node(current, 'power'): if current.children[-2] != '**': # Just if there's no operation trailer = current.children[-1] if trailer.children[0] == '.': names.append(trailer.children[1]) else: names.append(current) return names class ExprStmt(BaseNode, DocstringMixin): type = 'expr_stmt' __slots__ = () def get_defined_names(self): return list(chain.from_iterable(_defined_names(self.children[i]) for i in range(0, len(self.children) - 2, 2) if '=' in self.children[i + 1].value)) def get_rhs(self): """Returns the right-hand-side of the equals.""" return self.children[-1] def first_operation(self): """ Returns `+=`, `=`, etc or None if there is no operation. 
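        For ``x = 3`` this is the ``=`` operator leaf and for ``x += 3`` the
        ``+=`` leaf; a statement without any assignment yields None.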
""" try: return self.children[1] except IndexError: return None class Param(BaseNode): """ It's a helper class that makes business logic with params much easier. The Python grammar defines no ``param`` node. It defines it in a different way that is not really suited to working with parameters. """ type = 'param' def __init__(self, children, parent): super(Param, self).__init__(children) self.parent = parent for child in children: child.parent = self @property def stars(self): first = self.children[0] if first in ('*', '**'): return len(first.value) return 0 @property def default(self): try: return self.children[int(self.children[0] in ('*', '**')) + 2] except IndexError: return None def annotation(self): # Generate from tfpdef. raise NotImplementedError def _tfpdef(self): """ tfpdef: see grammar.txt. """ offset = int(self.children[0] in ('*', '**')) return self.children[offset] @property def name(self): if is_node(self._tfpdef(), 'tfpdef'): return self._tfpdef().children[0] else: return self._tfpdef() @property def position_nr(self): return self.parent.children.index(self) - 1 @property def parent_function(self): return self.get_parent_until(IsScope) def __repr__(self): default = '' if self.default is None else '=%s' % self.default return '<%s: %s>' % (type(self).__name__, str(self._tfpdef()) + default) class CompFor(BaseNode): type = 'comp_for' __slots__ = () def is_scope(self): return True @property def names_dict(self): dct = {} for name in self.get_defined_names(): arr = dct.setdefault(name.value, []) arr.append(name) return dct def names_dicts(self, search_global): yield self.names_dict def get_defined_names(self): return _defined_names(self.children[1]) jedi-0.9.0/jedi/parser/token.py0000664000175000017500000000224412517736533016554 0ustar daviddavid00000000000000from __future__ import absolute_import from jedi._compatibility import is_py3 from token import * COMMENT = N_TOKENS tok_name[COMMENT] = 'COMMENT' N_TOKENS += 1 NL = N_TOKENS tok_name[NL] = 'NL' N_TOKENS += 1 if is_py3: BACKQUOTE = N_TOKENS tok_name[BACKQUOTE] = 'BACKQUOTE' N_TOKENS += 1 else: RARROW = N_TOKENS tok_name[RARROW] = 'RARROW' N_TOKENS += 1 ELLIPSIS = N_TOKENS tok_name[ELLIPSIS] = 'ELLIPSIS' N_TOKENS += 1 # Map from operator to number (since tokenize doesn't do this) opmap_raw = """\ ( LPAR ) RPAR [ LSQB ] RSQB : COLON , COMMA ; SEMI + PLUS - MINUS * STAR / SLASH | VBAR & AMPER < LESS > GREATER = EQUAL . DOT % PERCENT ` BACKQUOTE { LBRACE } RBRACE @ AT == EQEQUAL != NOTEQUAL <> NOTEQUAL <= LESSEQUAL >= GREATEREQUAL ~ TILDE ^ CIRCUMFLEX << LEFTSHIFT >> RIGHTSHIFT ** DOUBLESTAR += PLUSEQUAL -= MINEQUAL *= STAREQUAL /= SLASHEQUAL %= PERCENTEQUAL &= AMPEREQUAL |= VBAREQUAL ^= CIRCUMFLEXEQUAL <<= LEFTSHIFTEQUAL >>= RIGHTSHIFTEQUAL **= DOUBLESTAREQUAL // DOUBLESLASH //= DOUBLESLASHEQUAL -> RARROW ... ELLIPSIS """ opmap = {} for line in opmap_raw.splitlines(): op, name = line.split() opmap[op] = globals()[name] jedi-0.9.0/jedi/parser/user_context.py0000664000175000017500000003106112517736533020155 0ustar daviddavid00000000000000import re import os import keyword from jedi import cache from jedi import common from jedi.parser import tokenize, Parser from jedi._compatibility import u from jedi.parser.fast import FastParser from jedi.parser import tree from jedi import debug from jedi.common import PushBackIterator REPLACE_STR = r"[bBuU]?[rR]?" 
+ (r"(?:(')[^\n'\\]*(?:\\.[^\n'\\]*)*(?:'|$)" + '|' + r'(")[^\n"\\]*(?:\\.[^\n"\\]*)*(?:"|$))') REPLACE_STR = re.compile(REPLACE_STR) class UserContext(object): """ :param source: The source code of the file. :param position: The position, the user is currently in. Only important \ for the main file. """ def __init__(self, source, position): self.source = source self.position = position self._line_cache = None self._relevant_temp = None @cache.underscore_memoization def get_path_until_cursor(self): """ Get the path under the cursor. """ path, self._start_cursor_pos = self._calc_path_until_cursor(self.position) return path def _backwards_line_generator(self, start_pos): self._line_temp, self._column_temp = start_pos first_line = self.get_line(start_pos[0])[:self._column_temp] self._line_length = self._column_temp yield first_line[::-1] + '\n' while True: self._line_temp -= 1 line = self.get_line(self._line_temp) self._line_length = len(line) yield line[::-1] + '\n' def _get_backwards_tokenizer(self, start_pos, line_gen=None): if line_gen is None: line_gen = self._backwards_line_generator(start_pos) token_gen = tokenize.generate_tokens(lambda: next(line_gen)) for typ, tok_str, tok_start_pos, prefix in token_gen: line = self.get_line(self._line_temp) # Calculate the real start_pos of the token. if tok_start_pos[0] == 1: # We are in the first checked line column = start_pos[1] - tok_start_pos[1] else: column = len(line) - tok_start_pos[1] # Multi-line docstrings must be accounted for. first_line = common.splitlines(tok_str)[0] column -= len(first_line) # Reverse the token again, so that it is in normal order again. yield typ, tok_str[::-1], (self._line_temp, column), prefix[::-1] def _calc_path_until_cursor(self, start_pos): """ Something like a reverse tokenizer that tokenizes the reversed strings. """ open_brackets = ['(', '[', '{'] close_brackets = [')', ']', '}'] start_cursor = start_pos gen = PushBackIterator(self._get_backwards_tokenizer(start_pos)) string = u('') level = 0 force_point = False last_type = None is_first = True for tok_type, tok_str, tok_start_pos, prefix in gen: if is_first: if prefix: # whitespace is not a path return u(''), start_cursor is_first = False if last_type == tok_type == tokenize.NAME: string = ' ' + string if level: if tok_str in close_brackets: level += 1 elif tok_str in open_brackets: level -= 1 elif tok_str == '.': force_point = False elif force_point: # Reversed tokenizing, therefore a number is recognized as a # floating point number. # The same is true for string prefixes -> represented as a # combination of string and name. if tok_type == tokenize.NUMBER and tok_str[-1] == '.' \ or tok_type == tokenize.NAME and last_type == tokenize.STRING \ and tok_str.lower() in ('b', 'u', 'r', 'br', 'ur'): force_point = False else: break elif tok_str in close_brackets: level += 1 elif tok_type in [tokenize.NAME, tokenize.STRING]: if keyword.iskeyword(tok_str) and string: # If there's already something in the string, a keyword # never adds any meaning to the current statement. break force_point = True elif tok_type == tokenize.NUMBER: pass else: if tok_str == '-': next_tok = next(gen) if next_tok[1] == 'e': gen.push_back(next_tok) else: break else: break start_cursor = tok_start_pos string = tok_str + prefix + string last_type = tok_type # Don't need whitespace around a statement. return string.strip(), start_cursor def get_path_under_cursor(self): """ Return the path under the cursor. If there is a rest of the path left, it will be added to the stuff before it. 
""" return self.get_path_until_cursor() + self.get_path_after_cursor() def get_path_after_cursor(self): line = self.get_line(self.position[0]) return re.search("[\w\d]*", line[self.position[1]:]).group(0) def get_operator_under_cursor(self): line = self.get_line(self.position[0]) after = re.match("[^\w\s]+", line[self.position[1]:]) before = re.match("[^\w\s]+", line[:self.position[1]][::-1]) return (before.group(0) if before is not None else '') \ + (after.group(0) if after is not None else '') def call_signature(self): """ :return: Tuple of string of the call and the index of the cursor. """ def get_line(pos): def simplify_str(match): """ To avoid having strings without end marks (error tokens) and strings that just screw up all the call signatures, just simplify everything. """ mark = match.group(1) or match.group(2) return mark + ' ' * (len(match.group(0)) - 2) + mark line_gen = self._backwards_line_generator(pos) for line in line_gen: # We have to switch the already backwards lines twice, because # we scan them from start. line = line[::-1] modified = re.sub(REPLACE_STR, simplify_str, line) yield modified[::-1] index = 0 level = 0 next_must_be_name = False next_is_key = False key_name = None generator = self._get_backwards_tokenizer(self.position, get_line(self.position)) for tok_type, tok_str, start_pos, prefix in generator: if tok_str in tokenize.ALWAYS_BREAK_TOKENS: break elif next_must_be_name: if tok_type == tokenize.NUMBER: # If there's a number at the end of the string, it will be # tokenized as a number. So add it to the name. tok_type, t, _, _ = next(generator) if tok_type == tokenize.NAME: end_pos = start_pos[0], start_pos[1] + len(tok_str) call, start_pos = self._calc_path_until_cursor(start_pos=end_pos) return call, index, key_name, start_pos index = 0 next_must_be_name = False elif next_is_key: if tok_type == tokenize.NAME: key_name = tok_str next_is_key = False if tok_str == '(': level += 1 if level == 1: next_must_be_name = True level = 0 elif tok_str == ')': level -= 1 elif tok_str == ',': index += 1 elif tok_str == '=': next_is_key = True return None, 0, None, (0, 0) def get_context(self, yield_positions=False): self.get_path_until_cursor() # In case _start_cursor_pos is undefined. pos = self._start_cursor_pos while True: # remove non important white space line = self.get_line(pos[0]) while True: if pos[1] == 0: line = self.get_line(pos[0] - 1) if line and line[-1] == '\\': pos = pos[0] - 1, len(line) - 1 continue else: break if line[pos[1] - 1].isspace(): pos = pos[0], pos[1] - 1 else: break try: result, pos = self._calc_path_until_cursor(start_pos=pos) if yield_positions: yield pos else: yield result except StopIteration: if yield_positions: yield None else: yield '' def get_line(self, line_nr): if not self._line_cache: self._line_cache = common.splitlines(self.source) if line_nr == 0: # This is a fix for the zeroth line. We need a newline there, for # the backwards parser. 
return u('') if line_nr < 0: raise StopIteration() try: return self._line_cache[line_nr - 1] except IndexError: raise StopIteration() def get_position_line(self): return self.get_line(self.position[0])[:self.position[1]] class UserContextParser(object): def __init__(self, grammar, source, path, position, user_context, parser_done_callback, use_fast_parser=True): self._grammar = grammar self._source = source self._path = path and os.path.abspath(path) self._position = position self._user_context = user_context self._use_fast_parser = use_fast_parser self._parser_done_callback = parser_done_callback @cache.underscore_memoization def _parser(self): cache.invalidate_star_import_cache(self._path) if self._use_fast_parser: parser = FastParser(self._grammar, self._source, self._path) # Don't pickle that module, because the main module is changing quickly cache.save_parser(self._path, parser, pickling=False) else: parser = Parser(self._grammar, self._source, self._path) self._parser_done_callback(parser) return parser @cache.underscore_memoization def user_stmt(self): module = self.module() debug.speed('parsed') return module.get_statement_for_position(self._position) @cache.underscore_memoization def user_stmt_with_whitespace(self): """ Returns the statement under the cursor even if the statement lies before the cursor. """ user_stmt = self.user_stmt() if not user_stmt: # for statements like `from x import ` (cursor not in statement) # or `abs( ` where the cursor is out in the whitespace. if self._user_context.get_path_under_cursor(): # We really should have a user_stmt, but the parser couldn't # process it - probably a Syntax Error (or in a comment). debug.warning('No statement under the cursor.') return pos = next(self._user_context.get_context(yield_positions=True)) user_stmt = self.module().get_statement_for_position(pos) return user_stmt @cache.underscore_memoization def user_scope(self): """ Returns the scope in which the user resides. This includes flows. """ user_stmt = self.user_stmt() if user_stmt is None: def scan(scope): for s in scope.children: if s.start_pos <= self._position <= s.end_pos: if isinstance(s, (tree.Scope, tree.Flow)): if isinstance(s, tree.Flow): return s return scan(s) or s elif s.type in ('suite', 'decorated'): return scan(s) return scan(self.module()) or self.module() else: return user_stmt.get_parent_scope(include_flows=True) def module(self): return self._parser().module jedi-0.9.0/jedi/parser/tokenize.py0000664000175000017500000002521012517736533017262 0ustar daviddavid00000000000000# -*- coding: utf-8 -*- """ This tokenizer has been copied from the ``tokenize.py`` standard library tokenizer. The reason was simple: The standard library tokenizer fails if the indentation is not right. The fast parser of jedi however requires "wrong" indentation. Basically this is a stripped down version of the standard library module, so you can read the documentation there. Additionally we included some speed and memory optimizations here. 
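Tokens are yielded as 4-tuples of ``(type, string, start_pos, prefix)``.
An illustrative use of the module-level helper defined below::

    for typ, string, start_pos, prefix in source_tokens('x = 1'):
        ...  # e.g. (NAME, 'x', (1, 0), '') for the first token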
""" from __future__ import absolute_import import string import re from io import StringIO from jedi.parser.token import (tok_name, N_TOKENS, ENDMARKER, STRING, NUMBER, NAME, OP, ERRORTOKEN, NEWLINE, INDENT, DEDENT) from jedi._compatibility import is_py3 cookie_re = re.compile("coding[:=]\s*([-\w.]+)") if is_py3: # Python 3 has str.isidentifier() to check if a char is a valid identifier is_identifier = str.isidentifier else: namechars = string.ascii_letters + '_' is_identifier = lambda s: s in namechars COMMENT = N_TOKENS tok_name[COMMENT] = 'COMMENT' def group(*choices): return '(' + '|'.join(choices) + ')' def maybe(*choices): return group(*choices) + '?' # Note: we use unicode matching for names ("\w") but ascii matching for # number literals. whitespace = r'[ \f\t]*' comment = r'#[^\r\n]*' name = r'\w+' hex_number = r'0[xX][0-9a-fA-F]+' bin_number = r'0[bB][01]+' oct_number = r'0[oO][0-7]+' dec_number = r'(?:0+|[1-9][0-9]*)' int_number = group(hex_number, bin_number, oct_number, dec_number) exponent = r'[eE][-+]?[0-9]+' point_float = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(exponent) Expfloat = r'[0-9]+' + exponent float_number = group(point_float, Expfloat) imag_number = group(r'[0-9]+[jJ]', float_number + r'[jJ]') number = group(imag_number, float_number, int_number) # Tail end of ' string. single = r"[^'\\]*(?:\\.[^'\\]*)*'" # Tail end of " string. double = r'[^"\\]*(?:\\.[^"\\]*)*"' # Tail end of ''' string. single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" # Tail end of """ string. double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' triple = group("[uUbB]?[rR]?'''", '[uUbB]?[rR]?"""') # Single-line ' or " string. # Because of leftmost-then-longest match semantics, be sure to put the # longest operators first (e.g., if = came before ==, == would get # recognized as two instances of =). operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=", r"//=?", r"->", r"[+\-*/%&|^=<>]=?", r"~") bracket = '[][(){}]' special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]') funny = group(operator, bracket, special) # First (or only) line of ' or " string. 
cont_str = group(r"[bBuU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" + group("'", r'\\\r?\n'), r'[bBuU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' + group('"', r'\\\r?\n')) pseudo_extras = group(r'\\\r?\n', comment, triple) pseudo_token = group(whitespace) + \ group(pseudo_extras, number, funny, cont_str, name) def _compile(expr): return re.compile(expr, re.UNICODE) pseudoprog, single3prog, double3prog = map( _compile, (pseudo_token, single3, double3)) endprogs = {"'": _compile(single), '"': _compile(double), "'''": single3prog, '"""': double3prog, "r'''": single3prog, 'r"""': double3prog, "b'''": single3prog, 'b"""': double3prog, "u'''": single3prog, 'u"""': double3prog, "R'''": single3prog, 'R"""': double3prog, "B'''": single3prog, 'B"""': double3prog, "U'''": single3prog, 'U"""': double3prog, "br'''": single3prog, 'br"""': double3prog, "bR'''": single3prog, 'bR"""': double3prog, "Br'''": single3prog, 'Br"""': double3prog, "BR'''": single3prog, 'BR"""': double3prog, "ur'''": single3prog, 'ur"""': double3prog, "uR'''": single3prog, 'uR"""': double3prog, "Ur'''": single3prog, 'Ur"""': double3prog, "UR'''": single3prog, 'UR"""': double3prog, 'r': None, 'R': None, 'b': None, 'B': None} triple_quoted = {} for t in ("'''", '"""', "r'''", 'r"""', "R'''", 'R"""', "b'''", 'b"""', "B'''", 'B"""', "u'''", 'u"""', "U'''", 'U"""', "br'''", 'br"""', "Br'''", 'Br"""', "bR'''", 'bR"""', "BR'''", 'BR"""', "ur'''", 'ur"""', "Ur'''", 'Ur"""', "uR'''", 'uR"""', "UR'''", 'UR"""'): triple_quoted[t] = t single_quoted = {} for t in ("'", '"', "r'", 'r"', "R'", 'R"', "b'", 'b"', "B'", 'B"', "u'", 'u"', "U'", 'U"', "br'", 'br"', "Br'", 'Br"', "bR'", 'bR"', "BR'", 'BR"', "ur'", 'ur"', "Ur'", 'Ur"', "uR'", 'uR"', "UR'", 'UR"'): single_quoted[t] = t del _compile tabsize = 8 ALWAYS_BREAK_TOKENS = (';', 'import', 'from', 'class', 'def', 'try', 'except', 'finally', 'while', 'return') def source_tokens(source): """Generate tokens from a the source code (string).""" source = source + '\n' # end with \n, because the parser needs it readline = StringIO(source).readline return generate_tokens(readline) def generate_tokens(readline): """ A heavily modified Python standard library tokenizer. Additionally to the default information, yields also the prefix of each token. This idea comes from lib2to3. The prefix contains all information that is irrelevant for the parser like newlines in parentheses or comments. """ paren_level = 0 # count parentheses indents = [0] lnum = 0 numchars = '0123456789' contstr = '' contline = None # We start with a newline. This makes indent at the first position # possible. It's not valid Python, but still better than an INDENT in the # second line (and not in the first). This makes quite a few things in # Jedi's fast parser possible. new_line = True prefix = '' # Should never be required, but here for safety additional_prefix = '' while True: # loop over lines in stream line = readline() # readline returns empty when finished. See StringIO if not line: if contstr: yield ERRORTOKEN, contstr, contstr_start, prefix break lnum += 1 pos, max = 0, len(line) if contstr: # continued string endmatch = endprog.match(line) if endmatch: pos = endmatch.end(0) yield STRING, contstr + line[:pos], contstr_start, prefix contstr = '' contline = None else: contstr = contstr + line contline = contline + line continue while pos < max: pseudomatch = pseudoprog.match(line, pos) if not pseudomatch: # scan for tokens txt = line[pos] if line[pos] in '"\'': # If a literal starts but doesn't end the whole rest of the # line is an error token. 
txt = line[pos:] yield ERRORTOKEN, txt, (lnum, pos), prefix pos += 1 continue prefix = additional_prefix + pseudomatch.group(1) additional_prefix = '' start, pos = pseudomatch.span(2) spos = (lnum, start) token, initial = line[start:pos], line[start] if new_line and initial not in '\r\n#': new_line = False if paren_level == 0: if start > indents[-1]: yield INDENT, '', spos, '' indents.append(start) while start < indents[-1]: yield DEDENT, '', spos, '' indents.pop() if (initial in numchars or # ordinary number (initial == '.' and token != '.' and token != '...')): yield NUMBER, token, spos, prefix elif initial in '\r\n': if not new_line and paren_level == 0: yield NEWLINE, token, spos, prefix else: additional_prefix = prefix + token new_line = True elif initial == '#': # Comments assert not token.endswith("\n") additional_prefix = prefix + token elif token in triple_quoted: endprog = endprogs[token] endmatch = endprog.match(line, pos) if endmatch: # all on one line pos = endmatch.end(0) token = line[start:pos] yield STRING, token, spos, prefix else: contstr_start = (lnum, start) # multiple lines contstr = line[start:] contline = line break elif initial in single_quoted or \ token[:2] in single_quoted or \ token[:3] in single_quoted: if token[-1] == '\n': # continued string contstr_start = lnum, start endprog = (endprogs.get(initial) or endprogs.get(token[1]) or endprogs.get(token[2])) contstr = line[start:] contline = line break else: # ordinary string yield STRING, token, spos, prefix elif is_identifier(initial): # ordinary name if token in ALWAYS_BREAK_TOKENS: paren_level = 0 while True: indent = indents.pop() if indent > start: yield DEDENT, '', spos, '' else: indents.append(indent) break yield NAME, token, spos, prefix elif initial == '\\' and line[start:] in ('\\\n', '\\\r\n'): # continued stmt additional_prefix += prefix + line[start:] break else: if token in '([{': paren_level += 1 elif token in ')]}': paren_level -= 1 yield OP, token, spos, prefix end_pos = (lnum, max - 1) # As the last position we just take the maximally possible position. We # remove -1 for the last new line. for indent in indents[1:]: yield DEDENT, '', end_pos, '' yield ENDMARKER, '', end_pos, prefix jedi-0.9.0/jedi/parser/grammar3.4.txt0000664000175000017500000001413412517736533017477 0ustar daviddavid00000000000000# Grammar for Python # Note: Changing the grammar specified in this file will most likely # require corresponding changes in the parser module # (../Modules/parsermodule.c). If you can't make the changes to # that module yourself, please co-ordinate the required changes # with someone who can; ask around on python-dev for help. Fred # Drake will probably be listening there. # NOTE WELL: You should also follow all the steps listed in PEP 306, # "How to Change Python's Grammar" # Start symbols for the grammar: # single_input is a single interactive statement; # file_input is a module or sequence of commands read from an input file; # eval_input is the input for the eval() functions. # NB: compound_stmt in single_input is followed by extra NEWLINE! 
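# Notation reminder (illustrative summary): quoted strings are literal
# keywords/operators, UPPERCASE names are terminals produced by the
# tokenizer, lowercase names are nonterminals, [x] is optional, x* / x+
# mean repetition and | separates alternatives.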
file_input: (NEWLINE | stmt)* ENDMARKER single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE eval_input: testlist NEWLINE* ENDMARKER decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE decorators: decorator+ decorated: decorators (classdef | funcdef) funcdef: 'def' NAME parameters ['->' test] ':' suite parameters: '(' [typedargslist] ')' typedargslist: (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' ['*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef]] | '*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef) tfpdef: NAME [':' test] varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' ['*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef]] | '*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef) vfpdef: NAME stmt: simple_stmt | compound_stmt simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt | import_stmt | global_stmt | nonlocal_stmt | assert_stmt) expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) | ('=' (yield_expr|testlist_star_expr))*) testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [','] augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' | '<<=' | '>>=' | '**=' | '//=') # For normal assignments, additional restrictions enforced by the interpreter del_stmt: 'del' exprlist pass_stmt: 'pass' flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt break_stmt: 'break' continue_stmt: 'continue' return_stmt: 'return' [testlist] yield_stmt: yield_expr raise_stmt: 'raise' [test ['from' test]] import_stmt: import_name | import_from import_name: 'import' dotted_as_names # note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+) 'import' ('*' | '(' import_as_names ')' | import_as_names)) import_as_name: NAME ['as' NAME] dotted_as_name: dotted_name ['as' NAME] import_as_names: import_as_name (',' import_as_name)* [','] dotted_as_names: dotted_as_name (',' dotted_as_name)* dotted_name: NAME ('.' NAME)* global_stmt: 'global' NAME (',' NAME)* nonlocal_stmt: 'nonlocal' NAME (',' NAME)* assert_stmt: 'assert' test [',' test] compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite] while_stmt: 'while' test ':' suite ['else' ':' suite] for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] try_stmt: ('try' ':' suite ((except_clause ':' suite)+ ['else' ':' suite] ['finally' ':' suite] | 'finally' ':' suite)) with_stmt: 'with' with_item (',' with_item)* ':' suite with_item: test ['as' expr] # NB compile.c makes sure that the default except clause is last except_clause: 'except' [test ['as' NAME]] # Edit by David Halter: The stmt is now optional. This reflects how Jedi allows # classes and functions to be empty, which is beneficial for autocompletion. suite: simple_stmt | NEWLINE INDENT stmt* DEDENT test: or_test ['if' or_test 'else' test] | lambdef test_nocond: or_test | lambdef_nocond lambdef: 'lambda' [varargslist] ':' test lambdef_nocond: 'lambda' [varargslist] ':' test_nocond or_test: and_test ('or' and_test)* and_test: not_test ('and' not_test)* not_test: 'not' not_test | comparison comparison: expr (comp_op expr)* # <> isn't actually a valid comparison operator in Python. 
It's here for the # sake of a __future__ import described in PEP 401 comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' star_expr: '*' expr expr: xor_expr ('|' xor_expr)* xor_expr: and_expr ('^' and_expr)* and_expr: shift_expr ('&' shift_expr)* shift_expr: arith_expr (('<<'|'>>') arith_expr)* arith_expr: term (('+'|'-') term)* term: factor (('*'|'/'|'%'|'//') factor)* factor: ('+'|'-'|'~') factor | power power: atom trailer* ['**' factor] atom: ('(' [yield_expr|testlist_comp] ')' | '[' [testlist_comp] ']' | '{' [dictorsetmaker] '}' | NAME | NUMBER | STRING+ | '...' | 'None' | 'True' | 'False') testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] ) trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME subscriptlist: subscript (',' subscript)* [','] subscript: test | [test] ':' [test] [sliceop] sliceop: ':' [test] exprlist: (expr|star_expr) (',' (expr|star_expr))* [','] testlist: test (',' test)* [','] dictorsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) | (test (comp_for | (',' test)* [','])) ) classdef: 'class' NAME ['(' [arglist] ')'] ':' suite arglist: (argument ',')* (argument [','] |'*' test (',' argument)* [',' '**' test] |'**' test) # The reason that keywords are test nodes instead of NAME is that using NAME # results in an ambiguity. ast.c makes sure it's a NAME. argument: test [comp_for] | test '=' test # Really [keyword '='] test comp_iter: comp_for | comp_if comp_for: 'for' exprlist 'in' or_test [comp_iter] comp_if: 'if' test_nocond [comp_iter] # not used in grammar, but may appear in "node" passed from Parser to Compiler encoding_decl: NAME yield_expr: 'yield' [yield_arg] yield_arg: 'from' test | testlist jedi-0.9.0/jedi/utils.py0000664000175000017500000000773512517736533015312 0ustar daviddavid00000000000000""" Utilities for end-users. """ from __future__ import absolute_import import __main__ from collections import namedtuple import re import os import sys from jedi import Interpreter from jedi.api.helpers import completion_parts from jedi.parser.user_context import UserContext def setup_readline(namespace_module=__main__): """ Install Jedi completer to :mod:`readline`. This function setups :mod:`readline` to use Jedi in Python interactive shell. If you want to use a custom ``PYTHONSTARTUP`` file (typically ``$HOME/.pythonrc.py``), you can add this piece of code:: try: from jedi.utils import setup_readline setup_readline() except ImportError: # Fallback to the stdlib readline completer if it is installed. # Taken from http://docs.python.org/2/library/rlcompleter.html print("Jedi is not installed, falling back to readline") try: import readline import rlcompleter readline.parse_and_bind("tab: complete") except ImportError: print("Readline is not installed either. No tab completion is enabled.") This will fallback to the readline completer if Jedi is not installed. The readline completer will only complete names in the global namespace, so for example:: ran will complete to ``range`` with both Jedi and readline, but:: range(10).cou will show complete to ``range(10).count`` only with Jedi. You'll also need to add ``export PYTHONSTARTUP=$HOME/.pythonrc.py`` to your shell profile (usually ``.bash_profile`` or ``.profile`` if you use bash). """ class JediRL(object): def complete(self, text, state): """ This complete stuff is pretty weird, a generator would make a lot more sense, but probably due to backwards compatibility this is still the way how it works. 
The only important part is stuff in the ``state == 0`` flow, everything else has been copied from the ``rlcompleter`` std. library module. """ if state == 0: sys.path.insert(0, os.getcwd()) # Calling python doesn't have a path, so add to sys.path. try: interpreter = Interpreter(text, [namespace_module.__dict__]) path = UserContext(text, (1, len(text))).get_path_until_cursor() path, dot, like = completion_parts(path) before = text[:len(text) - len(like)] completions = interpreter.completions() finally: sys.path.pop(0) self.matches = [before + c.name_with_symbols for c in completions] try: return self.matches[state] except IndexError: return None try: import readline except ImportError: print("Module readline not available.") else: readline.set_completer(JediRL().complete) readline.parse_and_bind("tab: complete") # jedi itself does the case matching readline.parse_and_bind("set completion-ignore-case on") # because it's easier to hit the tab just once readline.parse_and_bind("set show-all-if-unmodified") readline.parse_and_bind("set show-all-if-ambiguous on") # don't repeat all the things written in the readline all the time readline.parse_and_bind("set completion-prefix-display-length 2") # No delimiters, Jedi handles that. readline.set_completer_delims('') def version_info(): """ Returns a namedtuple of Jedi's version, similar to Python's ``sys.version_info``. """ Version = namedtuple('Version', 'major, minor, micro') from jedi import __version__ tupl = re.findall('[a-z]+|\d+', __version__) return Version(*[x if i == 3 else int(x) for i, x in enumerate(tupl)]) jedi-0.9.0/jedi/refactoring.py0000664000175000017500000001557112517736533016452 0ustar daviddavid00000000000000""" Introduce some basic refactoring functions to |jedi|. This module is still in a very early development stage and needs much testing and improvement. .. warning:: I won't do too much here, but if anyone wants to step in, please do. Refactoring is none of my priorities It uses the |jedi| `API `_ and supports currently the following functions (sometimes bug-prone): - rename - extract variable - inline variable """ import difflib from jedi import common from jedi.evaluate import helpers from jedi.parser import tree as pt class Refactoring(object): def __init__(self, change_dct): """ :param change_dct: dict(old_path=(new_path, old_lines, new_lines)) """ self.change_dct = change_dct def old_files(self): dct = {} for old_path, (new_path, old_l, new_l) in self.change_dct.items(): dct[new_path] = '\n'.join(new_l) return dct def new_files(self): dct = {} for old_path, (new_path, old_l, new_l) in self.change_dct.items(): dct[new_path] = '\n'.join(new_l) return dct def diff(self): texts = [] for old_path, (new_path, old_l, new_l) in self.change_dct.items(): if old_path: udiff = difflib.unified_diff(old_l, new_l) else: udiff = difflib.unified_diff(old_l, new_l, old_path, new_path) texts.append('\n'.join(udiff)) return '\n'.join(texts) def rename(script, new_name): """ The `args` / `kwargs` params are the same as in `api.Script`. :param operation: The refactoring operation to execute. :type operation: str :type source: str :return: list of changed lines/changed files """ return Refactoring(_rename(script.usages(), new_name)) def _rename(names, replace_str): """ For both rename and inline. 
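``names`` is expected to contain definition-like objects offering
``module_path``, ``line``, ``column`` and ``name``; every occurrence is
replaced textually by ``replace_str``, processed file by file.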
""" order = sorted(names, key=lambda x: (x.module_path, x.line, x.column), reverse=True) def process(path, old_lines, new_lines): if new_lines is not None: # goto next file, save last dct[path] = path, old_lines, new_lines dct = {} current_path = object() new_lines = old_lines = None for name in order: if name.in_builtin_module(): continue if current_path != name.module_path: current_path = name.module_path process(current_path, old_lines, new_lines) if current_path is not None: # None means take the source that is a normal param. with open(current_path) as f: source = f.read() new_lines = common.splitlines(common.source_to_unicode(source)) old_lines = new_lines[:] nr, indent = name.line, name.column line = new_lines[nr - 1] new_lines[nr - 1] = line[:indent] + replace_str + \ line[indent + len(name.name):] process(current_path, old_lines, new_lines) return dct def extract(script, new_name): """ The `args` / `kwargs` params are the same as in `api.Script`. :param operation: The refactoring operation to execute. :type operation: str :type source: str :return: list of changed lines/changed files """ new_lines = common.splitlines(common.source_to_unicode(script.source)) old_lines = new_lines[:] user_stmt = script._parser.user_stmt() # TODO care for multiline extracts dct = {} if user_stmt: pos = script._pos line_index = pos[0] - 1 arr, index = helpers.array_for_pos(user_stmt, pos) if arr is not None: start_pos = arr[index].start_pos end_pos = arr[index].end_pos # take full line if the start line is different from end line e = end_pos[1] if end_pos[0] == start_pos[0] else None start_line = new_lines[start_pos[0] - 1] text = start_line[start_pos[1]:e] for l in range(start_pos[0], end_pos[0] - 1): text += '\n' + l if e is None: end_line = new_lines[end_pos[0] - 1] text += '\n' + end_line[:end_pos[1]] # remove code from new lines t = text.lstrip() del_start = start_pos[1] + len(text) - len(t) text = t.rstrip() del_end = len(t) - len(text) if e is None: new_lines[end_pos[0] - 1] = end_line[end_pos[1] - del_end:] e = len(start_line) else: e = e - del_end start_line = start_line[:del_start] + new_name + start_line[e:] new_lines[start_pos[0] - 1] = start_line new_lines[start_pos[0]:end_pos[0] - 1] = [] # add parentheses in multiline case open_brackets = ['(', '[', '{'] close_brackets = [')', ']', '}'] if '\n' in text and not (text[0] in open_brackets and text[-1] == close_brackets[open_brackets.index(text[0])]): text = '(%s)' % text # add new line before statement indent = user_stmt.start_pos[1] new = "%s%s = %s" % (' ' * indent, new_name, text) new_lines.insert(line_index, new) dct[script.path] = script.path, old_lines, new_lines return Refactoring(dct) def inline(script): """ :type script: api.Script """ new_lines = common.splitlines(common.source_to_unicode(script.source)) dct = {} definitions = script.goto_assignments() with common.ignored(AssertionError): assert len(definitions) == 1 stmt = definitions[0]._definition usages = script.usages() inlines = [r for r in usages if not stmt.start_pos <= (r.line, r.column) <= stmt.end_pos] inlines = sorted(inlines, key=lambda x: (x.module_path, x.line, x.column), reverse=True) expression_list = stmt.expression_list() # don't allow multiline refactorings for now. 
assert stmt.start_pos[0] == stmt.end_pos[0] index = stmt.start_pos[0] - 1 line = new_lines[index] replace_str = line[expression_list[0].start_pos[1]:stmt.end_pos[1] + 1] replace_str = replace_str.strip() # tuples need parentheses if expression_list and isinstance(expression_list[0], pr.Array): arr = expression_list[0] if replace_str[0] not in ['(', '[', '{'] and len(arr) > 1: replace_str = '(%s)' % replace_str # if it's the only assignment, remove the statement if len(stmt.get_defined_names()) == 1: line = line[:stmt.start_pos[1]] + line[stmt.end_pos[1]:] dct = _rename(inlines, replace_str) # remove the empty line new_lines = dct[script.path][2] if line.strip(): new_lines[index] = line else: new_lines.pop(index) return Refactoring(dct) jedi-0.9.0/jedi/_compatibility.py0000664000175000017500000001303512517736533017150 0ustar daviddavid00000000000000""" To ensure compatibility from Python ``2.6`` - ``3.3``, a module has been created. Clearly there is huge need to use conforming syntax. """ import sys import imp import os import re try: import importlib except ImportError: pass is_py3 = sys.version_info[0] >= 3 is_py33 = is_py3 and sys.version_info.minor >= 3 is_py26 = not is_py3 and sys.version_info[1] < 7 def find_module_py33(string, path=None): loader = importlib.machinery.PathFinder.find_module(string, path) if loader is None and path is None: # Fallback to find builtins try: loader = importlib.find_loader(string) except ValueError as e: # See #491. Importlib might raise a ValueError, to avoid this, we # just raise an ImportError to fix the issue. raise ImportError("Originally ValueError: " + e.message) if loader is None: raise ImportError("Couldn't find a loader for {0}".format(string)) try: is_package = loader.is_package(string) if is_package: module_path = os.path.dirname(loader.path) module_file = None else: module_path = loader.get_filename(string) module_file = open(module_path, 'rb') except AttributeError: # ExtensionLoader has not attribute get_filename, instead it has a # path attribute that we can use to retrieve the module path try: module_path = loader.path module_file = open(loader.path, 'rb') except AttributeError: module_path = string module_file = None finally: is_package = False return module_file, module_path, is_package def find_module_pre_py33(string, path=None): module_file, module_path, description = imp.find_module(string, path) module_type = description[2] return module_file, module_path, module_type is imp.PKG_DIRECTORY find_module = find_module_py33 if is_py33 else find_module_pre_py33 find_module.__doc__ = """ Provides information about a module. This function isolates the differences in importing libraries introduced with python 3.3 on; it gets a module name and optionally a path. It will return a tuple containin an open file for the module (if not builtin), the filename or the name of the module if it is a builtin one and a boolean indicating if the module is contained in a package. """ # unicode function try: unicode = unicode except NameError: unicode = str if is_py3: u = lambda s: s else: u = lambda s: s.decode('utf-8') u.__doc__ = """ Decode a raw string into unicode object. Do nothing in Python 3. 
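E.g. on Python 2, ``u(b'foo')`` yields ``u'foo'``; on Python 3 the
argument is returned unchanged.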
""" # exec function if is_py3: def exec_function(source, global_map): exec(source, global_map) else: eval(compile("""def exec_function(source, global_map): exec source in global_map """, 'blub', 'exec')) # re-raise function if is_py3: def reraise(exception, traceback): raise exception.with_traceback(traceback) else: eval(compile(""" def reraise(exception, traceback): raise exception, None, traceback """, 'blub', 'exec')) reraise.__doc__ = """ Re-raise `exception` with a `traceback` object. Usage:: reraise(Exception, sys.exc_info()[2]) """ class Python3Method(object): def __init__(self, func): self.func = func def __get__(self, obj, objtype): if obj is None: return lambda *args, **kwargs: self.func(*args, **kwargs) else: return lambda *args, **kwargs: self.func(obj, *args, **kwargs) def use_metaclass(meta, *bases): """ Create a class with a metaclass. """ if not bases: bases = (object,) return meta("HackClass", bases, {}) try: encoding = sys.stdout.encoding if encoding is None: encoding = 'utf-8' except AttributeError: encoding = 'ascii' def u(string): """Cast to unicode DAMMIT! Written because Python2 repr always implicitly casts to a string, so we have to cast back to a unicode (and we now that we always deal with valid unicode, because we check that in the beginning). """ if is_py3: return str(string) elif not isinstance(string, unicode): return unicode(str(string), 'UTF-8') return string try: import builtins # module name in python 3 except ImportError: import __builtin__ as builtins import ast def literal_eval(string): # py3.0, py3.1 and py32 don't support unicode literals. Support those, I # don't want to write two versions of the tokenizer. if is_py3 and sys.version_info.minor < 3: if re.match('[uU][\'"]', string): string = string[1:] return ast.literal_eval(string) try: from itertools import zip_longest except ImportError: from itertools import izip_longest as zip_longest # Python 2 def no_unicode_pprint(dct): """ Python 2/3 dict __repr__ may be different, because of unicode differens (with or without a `u` prefix). Normally in doctests we could use `pprint` to sort dicts and check for equality, but here we have to write a separate function to do that. """ import pprint s = pprint.pformat(dct) print(re.sub("u'", "'", s)) def utf8_repr(func): """ ``__repr__`` methods in Python 2 don't allow unicode objects to be returned. Therefore cast them to utf-8 bytes in this decorator. """ def wrapper(self): result = func(self) if isinstance(result, unicode): return result.encode('utf-8') else: return result if is_py3: return func else: return wrapper jedi-0.9.0/jedi/common.py0000664000175000017500000001150712517736533015432 0ustar daviddavid00000000000000""" A universal module with functions / classes without dependencies. """ import sys import contextlib import functools import re from ast import literal_eval from jedi._compatibility import unicode, reraise from jedi import settings class UncaughtAttributeError(Exception): """ Important, because `__getattr__` and `hasattr` catch AttributeErrors implicitly. This is really evil (mainly because of `__getattr__`). `hasattr` in Python 2 is even more evil, because it catches ALL exceptions. Therefore this class originally had to be derived from `BaseException` instead of `Exception`. But because I removed relevant `hasattr` from the code base, we can now switch back to `Exception`. :param base: return values of sys.exc_info(). 
""" def safe_property(func): return property(reraise_uncaught(func)) def reraise_uncaught(func): """ Re-throw uncaught `AttributeError`. Usage: Put ``@rethrow_uncaught`` in front of the function which does **not** suppose to raise `AttributeError`. AttributeError is easily get caught by `hasattr` and another ``except AttributeError`` clause. This becomes problem when you use a lot of "dynamic" attributes (e.g., using ``@property``) because you can't distinguish if the property does not exist for real or some code inside of the "dynamic" attribute through that error. In a well written code, such error should not exist but getting there is very difficult. This decorator is to help us getting there by changing `AttributeError` to `UncaughtAttributeError` to avoid unexpected catch. This helps us noticing bugs earlier and facilitates debugging. .. note:: Treating StopIteration here is easy. Add that feature when needed. """ @functools.wraps(func) def wrapper(*args, **kwds): try: return func(*args, **kwds) except AttributeError: exc_info = sys.exc_info() reraise(UncaughtAttributeError(exc_info[1]), exc_info[2]) return wrapper class PushBackIterator(object): def __init__(self, iterator): self.pushes = [] self.iterator = iterator self.current = None def push_back(self, value): self.pushes.append(value) def __iter__(self): return self def next(self): """ Python 2 Compatibility """ return self.__next__() def __next__(self): if self.pushes: self.current = self.pushes.pop() else: self.current = next(self.iterator) return self.current @contextlib.contextmanager def scale_speed_settings(factor): a = settings.max_executions b = settings.max_until_execution_unique settings.max_executions *= factor settings.max_until_execution_unique *= factor try: yield finally: settings.max_executions = a settings.max_until_execution_unique = b def indent_block(text, indention=' '): """This function indents a text block with a default of four spaces.""" temp = '' while text and text[-1] == '\n': temp += text[-1] text = text[:-1] lines = text.split('\n') return '\n'.join(map(lambda s: indention + s, lines)) + temp @contextlib.contextmanager def ignored(*exceptions): """ Context manager that ignores all of the specified exceptions. This will be in the standard library starting with Python 3.4. """ try: yield except exceptions: pass def source_to_unicode(source, encoding=None): def detect_encoding(): """ For the implementation of encoding definitions in Python, look at: - http://www.python.org/dev/peps/pep-0263/ - http://docs.python.org/2/reference/lexical_analysis.html#encoding-declarations """ byte_mark = literal_eval(r"b'\xef\xbb\xbf'") if source.startswith(byte_mark): # UTF-8 byte-order mark return 'utf-8' first_two_lines = re.match(r'(?:[^\n]*\n){0,2}', str(source)).group(0) possible_encoding = re.search(r"coding[=:]\s*([-\w.]+)", first_two_lines) if possible_encoding: return possible_encoding.group(1) else: # the default if nothing else has been set -> PEP 263 return encoding if encoding is not None else 'iso-8859-1' if isinstance(source, unicode): # only cast str/bytes return source # cast to unicode by default return unicode(source, detect_encoding(), 'replace') def splitlines(string): """ A splitlines for Python code. In contrast to Python's ``str.splitlines``, looks at form feeds and other special characters as normal text. Just splits ``\n`` and ``\r\n``. Also different: Returns ``['']`` for an empty string input. 
""" return re.split('\n|\r\n', string) jedi-0.9.0/jedi/settings.py0000664000175000017500000001376612517736533016013 0ustar daviddavid00000000000000""" This module contains variables with global |jedi| settings. To change the behavior of |jedi|, change the variables defined in :mod:`jedi.settings`. Plugins should expose an interface so that the user can adjust the configuration. Example usage:: from jedi import settings settings.case_insensitive_completion = True Completion output ~~~~~~~~~~~~~~~~~ .. autodata:: case_insensitive_completion .. autodata:: add_dot_after_module .. autodata:: add_bracket_after_function .. autodata:: no_completion_duplicates Filesystem cache ~~~~~~~~~~~~~~~~ .. autodata:: cache_directory .. autodata:: use_filesystem_cache Parser ~~~~~~ .. autodata:: fast_parser Dynamic stuff ~~~~~~~~~~~~~ .. autodata:: dynamic_array_additions .. autodata:: dynamic_params .. autodata:: dynamic_params_for_other_modules .. autodata:: additional_dynamic_modules .. autodata:: auto_import_modules .. _settings-recursion: Recursions ~~~~~~~~~~ Recursion settings are important if you don't want extremly recursive python code to go absolutely crazy. First of there is a global limit :data:`max_executions`. This limit is important, to set a maximum amount of time, the completion may use. The default values are based on experiments while completing the |jedi| library itself (inception!). But I don't think there's any other Python library that uses recursion in a similarly extreme way. These settings make the completion definitely worse in some cases. But a completion should also be fast. .. autodata:: max_until_execution_unique .. autodata:: max_function_recursion_level .. autodata:: max_executions_without_builtins .. autodata:: max_executions .. autodata:: scale_call_signatures Caching ~~~~~~~ .. autodata:: star_import_cache_validity .. autodata:: call_signatures_validity """ import os import platform # ---------------- # completion output settings # ---------------- case_insensitive_completion = True """ The completion is by default case insensitive. """ add_dot_after_module = False """ Adds a dot after a module, because a module that is not accessed this way is definitely not the normal case. However, in VIM this doesn't work, that's why it isn't used at the moment. """ add_bracket_after_function = False """ Adds an opening bracket after a function, because that's normal behaviour. Removed it again, because in VIM that is not very practical. """ no_completion_duplicates = True """ If set, completions with the same name don't appear in the output anymore, but are in the `same_name_completions` attribute. """ # ---------------- # Filesystem cache # ---------------- use_filesystem_cache = True """ Use filesystem cache to save once parsed files with pickle. """ if platform.system().lower() == 'windows': _cache_directory = os.path.join(os.getenv('APPDATA') or '~', 'Jedi', 'Jedi') elif platform.system().lower() == 'darwin': _cache_directory = os.path.join('~', 'Library', 'Caches', 'Jedi') else: _cache_directory = os.path.join(os.getenv('XDG_CACHE_HOME') or '~/.cache', 'jedi') cache_directory = os.path.expanduser(_cache_directory) """ The path where all the caches can be found. On Linux, this defaults to ``~/.cache/jedi/``, on OS X to ``~/Library/Caches/Jedi/`` and on Windows to ``%APPDATA%\\Jedi\\Jedi\\``. On Linux, if environment variable ``$XDG_CACHE_HOME`` is set, ``$XDG_CACHE_HOME/jedi`` is used instead of the default one. 
""" # ---------------- # parser # ---------------- fast_parser = True """ Use the fast parser. This means that reparsing is only being done if something has been changed e.g. to a function. If this happens, only the function is being reparsed. """ # ---------------- # dynamic stuff # ---------------- dynamic_array_additions = True """ check for `append`, etc. on arrays: [], {}, () as well as list/set calls. """ dynamic_params = True """ A dynamic param completion, finds the callees of the function, which define the params of a function. """ dynamic_params_for_other_modules = True """ Do the same for other modules. """ additional_dynamic_modules = [] """ Additional modules in which |jedi| checks if statements are to be found. This is practical for IDEs, that want to administrate their modules themselves. """ dynamic_flow_information = True """ Check for `isinstance` and other information to infer a type. """ auto_import_modules = [ 'hashlib', # setattr ] """ Modules that are not analyzed but imported, although they contain Python code. This improves autocompletion for libraries that use ``setattr`` or ``globals()`` modifications a lot. """ # ---------------- # recursions # ---------------- max_until_execution_unique = 50 """ This limit is probably the most important one, because if this limit is exceeded, functions can only be one time executed. So new functions will be executed, complex recursions with the same functions again and again, are ignored. """ max_function_recursion_level = 5 """ `max_function_recursion_level` is more about whether the recursions are stopped in deepth or in width. The ratio beetween this and `max_until_execution_unique` is important here. It stops a recursion (after the number of function calls in the recursion), if it was already used earlier. """ max_executions_without_builtins = 200 """ .. todo:: Document this. """ max_executions = 250 """ A maximum amount of time, the completion may use. """ scale_call_signatures = 0.1 """ Because call_signatures is normally used on every single key hit, it has to be faster than a normal completion. This is the factor that is used to scale `max_executions` and `max_until_execution_unique`: """ # ---------------- # caching validity (time) # ---------------- star_import_cache_validity = 60.0 """ In huge packages like numpy, checking all star imports on every completion might be slow, therefore we do a star import caching, that lasts a certain time span (in seconds). """ call_signatures_validity = 3.0 """ Finding function calls might be slow (0.1-0.5s). This is not acceptible for normal writing. Therefore cache it for a short time. """ jedi-0.9.0/jedi/debug.py0000664000175000017500000000473712517736533015237 0ustar daviddavid00000000000000from jedi._compatibility import encoding, is_py3, u import inspect import os import time try: if os.name == 'nt': # does not work on Windows, as pyreadline and colorama interfere raise ImportError else: # Use colorama for nicer console output. from colorama import Fore, init from colorama import initialise # pytest resets the stream at the end - causes troubles. Since after # every output the stream is reset automatically we don't need this. 
initialise.atexit_done = True init() except ImportError: class Fore(object): RED = '' GREEN = '' YELLOW = '' RESET = '' NOTICE = object() WARNING = object() SPEED = object() enable_speed = False enable_warning = False enable_notice = False # callback, interface: level, str debug_function = None ignored_modules = ['jedi.evaluate.builtin', 'jedi.parser'] _debug_indent = -1 _start_time = time.time() def reset_time(): global _start_time, _debug_indent _start_time = time.time() _debug_indent = -1 def increase_indent(func): """Decorator for makin """ def wrapper(*args, **kwargs): global _debug_indent _debug_indent += 1 try: result = func(*args, **kwargs) finally: _debug_indent -= 1 return result return wrapper def dbg(message, *args): """ Looks at the stack, to see if a debug message should be printed. """ if debug_function and enable_notice: frm = inspect.stack()[1] mod = inspect.getmodule(frm[0]) if not (mod.__name__ in ignored_modules): i = ' ' * _debug_indent debug_function(NOTICE, i + 'dbg: ' + message % tuple(u(repr(a)) for a in args)) def warning(message, *args): if debug_function and enable_warning: i = ' ' * _debug_indent debug_function(WARNING, i + 'warning: ' + message % tuple(u(repr(a)) for a in args)) def speed(name): if debug_function and enable_speed: now = time.time() i = ' ' * _debug_indent debug_function(SPEED, i + 'speed: ' + '%s %s' % (name, now - _start_time)) def print_to_stdout(level, str_out): """ The default debug function """ if level == NOTICE: col = Fore.GREEN elif level == WARNING: col = Fore.RED else: col = Fore.YELLOW if not is_py3: str_out = str_out.encode(encoding, 'replace') print(col + str_out + Fore.RESET) # debug_function = print_to_stdout jedi-0.9.0/jedi/api/0000775000175000017500000000000012517736563014340 5ustar daviddavid00000000000000jedi-0.9.0/jedi/api/__init__.py0000664000175000017500000007042612517736533016457 0ustar daviddavid00000000000000""" The API basically only provides one class. You can create a :class:`Script` and use its methods. Additionally you can add a debug function with :func:`set_debug_function`. .. warning:: Please, note that Jedi is **not thread safe**. """ import re import os import warnings import sys from itertools import chain from jedi._compatibility import unicode, builtins from jedi.parser import Parser, load_grammar from jedi.parser.tokenize import source_tokens from jedi.parser import tree from jedi.parser.user_context import UserContext, UserContextParser from jedi import debug from jedi import settings from jedi import common from jedi import cache from jedi.api import keywords from jedi.api import classes from jedi.api import interpreter from jedi.api import usages from jedi.api import helpers from jedi.evaluate import Evaluator from jedi.evaluate import representation as er from jedi.evaluate import compiled from jedi.evaluate import imports from jedi.evaluate.cache import memoize_default from jedi.evaluate.helpers import FakeName, get_module_names from jedi.evaluate.finder import global_names_dict_generator, filter_definition_names from jedi.evaluate import analysis # Jedi uses lots and lots of recursion. By setting this a little bit higher, we # can remove some "maximum recursion depth" errors. sys.setrecursionlimit(2000) class NotFoundError(Exception): """A custom error to avoid catching the wrong exceptions. .. deprecated:: 0.9.0 Not in use anymore, Jedi just returns no goto result if you're not on a valid name. .. todo:: Remove! 
""" class Script(object): """ A Script is the base for completions, goto or whatever you want to do with |jedi|. You can either use the ``source`` parameter or ``path`` to read a file. Usually you're going to want to use both of them (in an editor). :param source: The source code of the current file, separated by newlines. :type source: str :param line: The line to perform actions on (starting with 1). :type line: int :param col: The column of the cursor (starting with 0). :type col: int :param path: The path of the file in the file system, or ``''`` if it hasn't been saved yet. :type path: str or None :param encoding: The encoding of ``source``, if it is not a ``unicode`` object (default ``'utf-8'``). :type encoding: str :param source_encoding: The encoding of ``source``, if it is not a ``unicode`` object (default ``'utf-8'``). :type encoding: str """ def __init__(self, source=None, line=None, column=None, path=None, encoding='utf-8', source_path=None, source_encoding=None): if source_path is not None: warnings.warn("Use path instead of source_path.", DeprecationWarning) path = source_path if source_encoding is not None: warnings.warn("Use encoding instead of source_encoding.", DeprecationWarning) encoding = source_encoding self._orig_path = path self.path = None if path is None else os.path.abspath(path) if source is None: with open(path) as f: source = f.read() self.source = common.source_to_unicode(source, encoding) lines = common.splitlines(self.source) line = max(len(lines), 1) if line is None else line if not (0 < line <= len(lines)): raise ValueError('`line` parameter is not in a valid range.') line_len = len(lines[line - 1]) column = line_len if column is None else column if not (0 <= column <= line_len): raise ValueError('`column` parameter is not in a valid range.') self._pos = line, column cache.clear_time_caches() debug.reset_time() self._grammar = load_grammar('grammar%s.%s' % sys.version_info[:2]) self._user_context = UserContext(self.source, self._pos) self._parser = UserContextParser(self._grammar, self.source, path, self._pos, self._user_context, self._parsed_callback) self._evaluator = Evaluator(self._grammar) debug.speed('init') def _parsed_callback(self, parser): module = self._evaluator.wrap(parser.module) imports.add_module(self._evaluator, unicode(module.name), module) @property def source_path(self): """ .. deprecated:: 0.7.0 Use :attr:`.path` instead. .. todo:: Remove! """ warnings.warn("Use path instead of source_path.", DeprecationWarning) return self.path def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, repr(self._orig_path)) def completions(self): """ Return :class:`classes.Completion` objects. Those objects contain information about the completions, more than just names. :return: Completion objects, sorted by name and __ comes last. :rtype: list of :class:`classes.Completion` """ def get_completions(user_stmt, bs): # TODO this closure is ugly. it also doesn't work with # simple_complete (used for Interpreter), somehow redo. module = self._evaluator.wrap(self._parser.module()) names, level, only_modules, unfinished_dotted = \ helpers.check_error_statements(module, self._pos) completion_names = [] if names is not None: imp_names = tuple(str(n) for n in names if n.end_pos < self._pos) i = imports.Importer(self._evaluator, imp_names, module, level) completion_names = i.completion_names(self._evaluator, only_modules) # TODO this paragraph is necessary, but not sure it works. 
context = self._user_context.get_context() if not next(context).startswith('.'): # skip the path if next(context) == 'from': # completion is just "import" if before stands from .. if unfinished_dotted: return completion_names else: return keywords.keyword_names('import') if isinstance(user_stmt, tree.Import): module = self._parser.module() completion_names += imports.completion_names(self._evaluator, user_stmt, self._pos) return completion_names if names is None and not isinstance(user_stmt, tree.Import): if not path and not dot: # add keywords completion_names += keywords.keyword_names(all=True) # TODO delete? We should search for valid parser # transformations. completion_names += self._simple_complete(path, dot, like) return completion_names debug.speed('completions start') path = self._user_context.get_path_until_cursor() # Dots following an int are not the start of a completion but a float # literal. if re.search(r'^\d\.$', path): return [] path, dot, like = helpers.completion_parts(path) user_stmt = self._parser.user_stmt_with_whitespace() b = compiled.builtin completion_names = get_completions(user_stmt, b) if not dot: # add named params for call_sig in self.call_signatures(): # Allow protected access, because it's a public API. module = call_sig._name.get_parent_until() # Compiled modules typically don't allow keyword arguments. if not isinstance(module, compiled.CompiledObject): for p in call_sig.params: # Allow access on _definition here, because it's a # public API and we don't want to make the internal # Name object public. if p._definition.stars == 0: # no *args/**kwargs completion_names.append(p._name) needs_dot = not dot and path comps = [] comp_dct = {} for c in set(completion_names): n = str(c) if settings.case_insensitive_completion \ and n.lower().startswith(like.lower()) \ or n.startswith(like): if isinstance(c.parent, (tree.Function, tree.Class)): # TODO I think this is a hack. It should be an # er.Function/er.Class before that. c = self._evaluator.wrap(c.parent).name new = classes.Completion(self._evaluator, c, needs_dot, len(like)) k = (new.name, new.complete) # key if k in comp_dct and settings.no_completion_duplicates: comp_dct[k]._same_name_completions.append(new) else: comp_dct[k] = new comps.append(new) debug.speed('completions end') return sorted(comps, key=lambda x: (x.name.startswith('__'), x.name.startswith('_'), x.name.lower())) def _simple_complete(self, path, dot, like): if not path and not dot: scope = self._parser.user_scope() if not scope.is_scope(): # Might be a flow (if/while/etc). scope = scope.get_parent_scope() names_dicts = global_names_dict_generator( self._evaluator, self._evaluator.wrap(scope), self._pos ) completion_names = [] for names_dict, pos in names_dicts: names = list(chain.from_iterable(names_dict.values())) if not names: continue completion_names += filter_definition_names(names, self._parser.user_stmt(), pos) elif self._get_under_cursor_stmt(path) is None: return [] else: scopes = list(self._prepare_goto(path, True)) completion_names = [] debug.dbg('possible completion scopes: %s', scopes) for s in scopes: names = [] for names_dict in s.names_dicts(search_global=False): names += chain.from_iterable(names_dict.values()) completion_names += filter_definition_names(names, self._parser.user_stmt()) return completion_names def _prepare_goto(self, goto_path, is_completion=False): """ Base for completions/goto. Basically it returns the resolved scopes under cursor. 
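Import statements are resolved through the importer; any other path is
parsed as a single statement and handed to the evaluator.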
""" debug.dbg('start: %s in %s', goto_path, self._parser.user_scope()) user_stmt = self._parser.user_stmt_with_whitespace() if not user_stmt and len(goto_path.split('\n')) > 1: # If the user_stmt is not defined and the goto_path is multi line, # something's strange. Most probably the backwards tokenizer # matched to much. return [] if isinstance(user_stmt, tree.Import): i, _ = helpers.get_on_import_stmt(self._evaluator, self._user_context, user_stmt, is_completion) if i is None: return [] scopes = [i] else: # just parse one statement, take it and evaluate it eval_stmt = self._get_under_cursor_stmt(goto_path) if eval_stmt is None: return [] module = self._evaluator.wrap(self._parser.module()) names, level, _, _ = helpers.check_error_statements(module, self._pos) if names: names = [str(n) for n in names] i = imports.Importer(self._evaluator, names, module, level) return i.follow() scopes = self._evaluator.eval_element(eval_stmt) return scopes @memoize_default() def _get_under_cursor_stmt(self, cursor_txt, start_pos=None): tokenizer = source_tokens(cursor_txt) r = Parser(self._grammar, cursor_txt, tokenizer=tokenizer) try: # Take the last statement available that is not an endmarker. # And because it's a simple_stmt, we need to get the first child. stmt = r.module.children[-2].children[0] except (AttributeError, IndexError): return None user_stmt = self._parser.user_stmt() if user_stmt is None: # Set the start_pos to a pseudo position, that doesn't exist but # works perfectly well (for both completions in docstrings and # statements). pos = start_pos or self._pos else: pos = user_stmt.start_pos stmt.move(pos[0] - 1, pos[1]) # Moving the offset. stmt.parent = self._parser.user_scope() return stmt def goto_definitions(self): """ Return the definitions of a the path under the cursor. goto function! This follows complicated paths and returns the end, not the first definition. The big difference between :meth:`goto_assignments` and :meth:`goto_definitions` is that :meth:`goto_assignments` doesn't follow imports and statements. Multiple objects may be returned, because Python itself is a dynamic language, which means depending on an option you can have two different versions of a function. :rtype: list of :class:`classes.Definition` """ def resolve_import_paths(scopes): for s in scopes.copy(): if isinstance(s, imports.ImportWrapper): scopes.remove(s) scopes.update(resolve_import_paths(set(s.follow()))) return scopes goto_path = self._user_context.get_path_under_cursor() context = self._user_context.get_context() definitions = set() if next(context) in ('class', 'def'): definitions = set([self._evaluator.wrap(self._parser.user_scope())]) else: # Fetch definition of callee, if there's no path otherwise. if not goto_path: definitions = set(signature._definition for signature in self.call_signatures()) if re.match('\w[\w\d_]*$', goto_path) and not definitions: user_stmt = self._parser.user_stmt() if user_stmt is not None and user_stmt.type == 'expr_stmt': for name in user_stmt.get_defined_names(): if name.start_pos <= self._pos <= name.end_pos: # TODO scaning for a name and then using it should be # the default. 
definitions = set(self._evaluator.goto_definition(name)) if not definitions and goto_path: definitions = set(self._prepare_goto(goto_path)) definitions = resolve_import_paths(definitions) names = [s.name for s in definitions] defs = [classes.Definition(self._evaluator, name) for name in names] return helpers.sorted_definitions(set(defs)) def goto_assignments(self): """ Return the first definition found. Imports and statements aren't followed. Multiple objects may be returned, because Python itself is a dynamic language, which means depending on an option you can have two different versions of a function. :rtype: list of :class:`classes.Definition` """ results = self._goto() d = [classes.Definition(self._evaluator, d) for d in set(results)] return helpers.sorted_definitions(d) def _goto(self, add_import_name=False): """ Used for goto_assignments and usages. :param add_import_name: Add the the name (if import) to the result. """ def follow_inexistent_imports(defs): """ Imports can be generated, e.g. following `multiprocessing.dummy` generates an import dummy in the multiprocessing module. The Import doesn't exist -> follow. """ definitions = set(defs) for d in defs: if isinstance(d.parent, tree.Import) \ and d.start_pos == (0, 0): i = imports.ImportWrapper(self._evaluator, d.parent).follow(is_goto=True) definitions.remove(d) definitions |= follow_inexistent_imports(i) return definitions goto_path = self._user_context.get_path_under_cursor() context = self._user_context.get_context() user_stmt = self._parser.user_stmt() user_scope = self._parser.user_scope() stmt = self._get_under_cursor_stmt(goto_path) if stmt is None: return [] if user_scope is None: last_name = None else: # Try to use the parser if possible. last_name = user_scope.name_for_position(self._pos) if last_name is None: last_name = stmt while not isinstance(last_name, tree.Name): try: last_name = last_name.children[-1] except AttributeError: # Doesn't have a name in it. return [] if next(context) in ('class', 'def'): # The cursor is on a class/function name. user_scope = self._parser.user_scope() definitions = set([user_scope.name]) elif isinstance(user_stmt, tree.Import): s, name = helpers.get_on_import_stmt(self._evaluator, self._user_context, user_stmt) definitions = self._evaluator.goto(name) else: # The Evaluator.goto function checks for definitions, but since we # use a reverse tokenizer, we have new name_part objects, so we # have to check the user_stmt here for positions. if isinstance(user_stmt, tree.ExprStmt) \ and isinstance(last_name.parent, tree.ExprStmt): for name in user_stmt.get_defined_names(): if name.start_pos <= self._pos <= name.end_pos: return [name] defs = self._evaluator.goto(last_name) definitions = follow_inexistent_imports(defs) return definitions def usages(self, additional_module_paths=()): """ Return :class:`classes.Definition` objects, which contain all names that point to the definition of the name under the cursor. This is very useful for refactoring (renaming), or to show all usages of a variable. .. todo:: Implement additional_module_paths :rtype: list of :class:`classes.Definition` """ temp, settings.dynamic_flow_information = \ settings.dynamic_flow_information, False try: user_stmt = self._parser.user_stmt() definitions = self._goto(add_import_name=True) if not definitions and isinstance(user_stmt, tree.Import): # For not defined imports (goto doesn't find something, we take # the name as a definition. This is enough, because every name # points to it. 
name = user_stmt.name_for_position(self._pos) if name is None: # Must be syntax return [] definitions = [name] if not definitions: # Without a definition for a name we cannot find references. return [] if not isinstance(user_stmt, tree.Import): # import case is looked at with add_import_name option definitions = usages.usages_add_import_modules(self._evaluator, definitions) module = set([d.get_parent_until() for d in definitions]) module.add(self._parser.module()) names = usages.usages(self._evaluator, definitions, module) for d in set(definitions): names.append(classes.Definition(self._evaluator, d)) finally: settings.dynamic_flow_information = temp return helpers.sorted_definitions(set(names)) def call_signatures(self): """ Return the function object of the call you're currently in. E.g. if the cursor is here:: abs(# <-- cursor is here This would return the ``abs`` function. On the other hand:: abs()# <-- cursor is here This would return ``None``. :rtype: list of :class:`classes.CallSignature` """ call_txt, call_index, key_name, start_pos = self._user_context.call_signature() if call_txt is None: return [] stmt = self._get_under_cursor_stmt(call_txt, start_pos) if stmt is None: return [] with common.scale_speed_settings(settings.scale_call_signatures): origins = cache.cache_call_signatures(self._evaluator, stmt, self.source, self._pos) debug.speed('func_call followed') return [classes.CallSignature(self._evaluator, o.name, stmt, call_index, key_name) for o in origins if hasattr(o, 'py__call__')] def _analysis(self): def check_types(types): for typ in types: try: f = typ.iter_content except AttributeError: pass else: check_types(f()) #statements = set(chain(*self._parser.module().used_names.values())) nodes, imp_names, decorated_funcs = \ analysis.get_module_statements(self._parser.module()) # Sort the statements so that the results are reproducible. for n in imp_names: imports.ImportWrapper(self._evaluator, n).follow() for node in sorted(nodes, key=lambda obj: obj.start_pos): check_types(self._evaluator.eval_element(node)) for dec_func in decorated_funcs: er.Function(self._evaluator, dec_func).get_decorated_func() ana = [a for a in self._evaluator.analysis if self.path == a.path] return sorted(set(ana), key=lambda x: x.line) class Interpreter(Script): """ Jedi API for Python REPLs. In addition to completion of simple attribute access, Jedi supports code completion based on static code analysis. Jedi can complete attributes of object which is not initialized yet. >>> from os.path import join >>> namespace = locals() >>> script = Interpreter('join().up', [namespace]) >>> print(script.completions()[0].name) upper """ def __init__(self, source, namespaces, **kwds): """ Parse `source` and mixin interpreted Python objects from `namespaces`. :type source: str :arg source: Code to parse. :type namespaces: list of dict :arg namespaces: a list of namespace dictionaries such as the one returned by :func:`locals`. Other optional arguments are same as the ones for :class:`Script`. If `line` and `column` are None, they are assumed be at the end of `source`. """ if type(namespaces) is not list or len(namespaces) == 0 or \ any([type(x) is not dict for x in namespaces]): raise TypeError("namespaces must be a non-empty list of dict") super(Interpreter, self).__init__(source, **kwds) self.namespaces = namespaces # Don't use the fast parser, because it does crazy stuff that we don't # need in our very simple and small code here (that is always # changing). 
self._parser = UserContextParser(self._grammar, self.source, self._orig_path, self._pos, self._user_context, self._parsed_callback, use_fast_parser=False) interpreter.add_namespaces_to_parser(self._evaluator, namespaces, self._parser.module()) def _simple_complete(self, path, dot, like): user_stmt = self._parser.user_stmt_with_whitespace() is_simple_path = not path or re.search('^[\w][\w\d.]*$', path) if isinstance(user_stmt, tree.Import) or not is_simple_path: return super(Interpreter, self)._simple_complete(path, dot, like) else: class NamespaceModule(object): def __getattr__(_, name): for n in self.namespaces: try: return n[name] except KeyError: pass raise AttributeError() def __dir__(_): gen = (n.keys() for n in self.namespaces) return list(set(chain.from_iterable(gen))) paths = path.split('.') if path else [] namespaces = (NamespaceModule(), builtins) for p in paths: old, namespaces = namespaces, [] for n in old: try: namespaces.append(getattr(n, p)) except Exception: pass completion_names = [] for namespace in namespaces: for name in dir(namespace): if name.lower().startswith(like.lower()): scope = self._parser.module() n = FakeName(name, scope) completion_names.append(n) return completion_names def defined_names(source, path=None, encoding='utf-8'): """ Get all definitions in `source` sorted by its position. This functions can be used for listing functions, classes and data defined in a file. This can be useful if you want to list them in "sidebar". Each element in the returned list also has `defined_names` method which can be used to get sub-definitions (e.g., methods in class). :rtype: list of classes.Definition .. deprecated:: 0.9.0 Use :func:`names` instead. .. todo:: Remove! """ warnings.warn("Use call_signatures instead.", DeprecationWarning) return names(source, path, encoding) def names(source=None, path=None, encoding='utf-8', all_scopes=False, definitions=True, references=False): """ Returns a list of `Definition` objects, containing name parts. This means you can call ``Definition.goto_assignments()`` and get the reference of a name. The parameters are the same as in :py:class:`Script`, except or the following ones: :param all_scopes: If True lists the names of all scopes instead of only the module namespace. :param definitions: If True lists the names that have been defined by a class, function or a statement (``a = b`` returns ``a``). :param references: If True lists all the names that are not listed by ``definitions=True``. E.g. ``a = b`` returns ``b``. """ def def_ref_filter(_def): is_def = _def.is_definition() return definitions and is_def or references and not is_def # Set line/column to a random position, because they don't matter. script = Script(source, line=1, column=0, path=path, encoding=encoding) defs = [classes.Definition(script._evaluator, name_part) for name_part in get_module_names(script._parser.module(), all_scopes)] return sorted(filter(def_ref_filter, defs), key=lambda x: (x.line, x.column)) def preload_module(*modules): """ Preloading modules tells Jedi to load a module now, instead of lazy parsing of modules. Usful for IDEs, to control which modules to load on startup. :param modules: different module names, list of string. """ for m in modules: s = "import %s as x; x." % m Script(s, 1, len(s), None).completions() def set_debug_function(func_cb=debug.print_to_stdout, warnings=True, notices=True, speed=True): """ Define a callback debug function to get all the debug messages. :param func_cb: The callback function for debug messages, with n params. 
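:param warnings: Enable warning messages. :param notices: Enable notice messages. :param speed: Enable speed/timing messages.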
""" debug.debug_function = func_cb debug.enable_warning = warnings debug.enable_notice = notices debug.enable_speed = speed jedi-0.9.0/jedi/api/classes.py0000664000175000017500000005663412517736533016362 0ustar daviddavid00000000000000""" The :mod:`jedi.api.classes` module contains the return classes of the API. These classes are the much bigger part of the whole API, because they contain the interesting information about completion and goto operations. """ import warnings from itertools import chain import re from jedi._compatibility import unicode, use_metaclass from jedi import settings from jedi import common from jedi.parser import tree from jedi.evaluate.cache import memoize_default, CachedMetaClass from jedi.evaluate import representation as er from jedi.evaluate import iterable from jedi.evaluate import imports from jedi.evaluate import compiled from jedi.api import keywords from jedi.evaluate.finder import filter_definition_names def defined_names(evaluator, scope): """ List sub-definitions (e.g., methods in class). :type scope: Scope :rtype: list of Definition """ dct = scope.names_dict names = list(chain.from_iterable(dct.values())) names = filter_definition_names(names, scope) return [Definition(evaluator, d) for d in sorted(names, key=lambda s: s.start_pos)] class BaseDefinition(object): _mapping = { 'posixpath': 'os.path', 'riscospath': 'os.path', 'ntpath': 'os.path', 'os2emxpath': 'os.path', 'macpath': 'os.path', 'genericpath': 'os.path', 'posix': 'os', '_io': 'io', '_functools': 'functools', '_sqlite3': 'sqlite3', '__builtin__': '', 'builtins': '', } _tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in { 'argparse._ActionsContainer': 'argparse.ArgumentParser', '_sre.SRE_Match': 're.MatchObject', '_sre.SRE_Pattern': 're.RegexObject', }.items()) def __init__(self, evaluator, name): self._evaluator = evaluator self._name = name """ An instance of :class:`jedi.parser.reprsentation.Name` subclass. """ self._definition = evaluator.wrap(self._name.get_definition()) self.is_keyword = isinstance(self._definition, keywords.Keyword) # generate a path to the definition self._module = name.get_parent_until() if self.in_builtin_module(): self.module_path = None else: self.module_path = self._module.path """Shows the file path of a module. e.g. ``/usr/lib/python2.7/os.py``""" @property def name(self): """ Name of variable/function/class/module. For example, for ``x = None`` it returns ``'x'``. :rtype: str or None """ return unicode(self._name) @property def start_pos(self): """ .. deprecated:: 0.7.0 Use :attr:`.line` and :attr:`.column` instead. .. todo:: Remove! """ warnings.warn("Use line/column instead.", DeprecationWarning) return self._name.start_pos @property def type(self): """ The type of the definition. Here is an example of the value of this attribute. Let's consider the following source. As what is in ``variable`` is unambiguous to Jedi, :meth:`jedi.Script.goto_definitions` should return a list of definition for ``sys``, ``f``, ``C`` and ``x``. >>> from jedi import Script >>> source = ''' ... import keyword ... ... class C: ... pass ... ... class D: ... pass ... ... x = D() ... ... def f(): ... pass ... ... for variable in [keyword, f, C, x]: ... variable''' >>> script = Script(source) >>> defs = script.goto_definitions() Before showing what is in ``defs``, let's sort it by :attr:`line` so that it is easy to relate the result to the source code. 
>>> defs = sorted(defs, key=lambda d: d.line) >>> defs # doctest: +NORMALIZE_WHITESPACE [, , , ] Finally, here is what you can get from :attr:`type`: >>> defs[0].type 'module' >>> defs[1].type 'class' >>> defs[2].type 'instance' >>> defs[3].type 'function' """ stripped = self._definition if isinstance(stripped, er.InstanceElement): stripped = stripped.var if isinstance(stripped, compiled.CompiledObject): return stripped.api_type() elif isinstance(stripped, iterable.Array): return 'instance' elif isinstance(stripped, tree.Import): return 'import' string = type(stripped).__name__.lower().replace('wrapper', '') if string == 'exprstmt': return 'statement' else: return string def _path(self): """The path to a module/class/function definition.""" path = [] par = self._definition while par is not None: if isinstance(par, tree.Import): path += imports.ImportWrapper(self._evaluator, self._name).import_path break try: name = par.name except AttributeError: pass else: if isinstance(par, er.ModuleWrapper): # TODO just make the path dotted from the beginning, we # shouldn't really split here. path[0:0] = par.py__name__().split('.') break else: path.insert(0, unicode(name)) par = par.parent return path @property def module_name(self): """ The module name. >>> from jedi import Script >>> source = 'import json' >>> script = Script(source, path='example.py') >>> d = script.goto_definitions()[0] >>> print(d.module_name) # doctest: +ELLIPSIS json """ return str(self._module.name) def in_builtin_module(self): """Whether this is a builtin module.""" return isinstance(self._module, compiled.CompiledObject) @property def line(self): """The line where the definition occurs (starting with 1).""" if self.in_builtin_module(): return None return self._name.start_pos[0] @property def column(self): """The column where the definition occurs (starting with 0).""" if self.in_builtin_module(): return None return self._name.start_pos[1] def docstring(self, raw=False): r""" Return a document string for this completion object. Example: >>> from jedi import Script >>> source = '''\ ... def f(a, b=1): ... "Document for function f." ... ''' >>> script = Script(source, 1, len('def f'), 'example.py') >>> doc = script.goto_definitions()[0].docstring() >>> print(doc) f(a, b=1) Document for function f. Notice that useful extra information is added to the actual docstring. For function, it is call signature. If you need actual docstring, use ``raw=True`` instead. >>> print(script.goto_definitions()[0].docstring(raw=True)) Document for function f. """ if raw: return _Help(self._definition).raw() else: return _Help(self._definition).full() @property def doc(self): """ .. deprecated:: 0.8.0 Use :meth:`.docstring` instead. .. todo:: Remove! """ warnings.warn("Use docstring() instead.", DeprecationWarning) return self.docstring() @property def raw_doc(self): """ .. deprecated:: 0.8.0 Use :meth:`.docstring` instead. .. todo:: Remove! """ warnings.warn("Use docstring() instead.", DeprecationWarning) return self.docstring(raw=True) @property def description(self): """A textual description of the object.""" return unicode(self._name) @property def full_name(self): """ Dot-separated path of this object. It is in the form of ``[.[...]][.]``. It is useful when you want to look up Python manual of the object at hand. Example: >>> from jedi import Script >>> source = ''' ... import os ... 
os.path.join''' >>> script = Script(source, 3, len('os.path.join'), 'example.py') >>> print(script.goto_definitions()[0].full_name) os.path.join Notice that it correctly returns ``'os.path.join'`` instead of (for example) ``'posixpath.join'``. """ path = [unicode(p) for p in self._path()] # TODO add further checks, the mapping should only occur on stdlib. if not path: return None # for keywords the path is empty with common.ignored(KeyError): path[0] = self._mapping[path[0]] for key, repl in self._tuple_mapping.items(): if tuple(path[:len(key)]) == key: path = [repl] + path[len(key):] return '.'.join(path if path[0] else path[1:]) def goto_assignments(self): defs = self._evaluator.goto(self._name) return [Definition(self._evaluator, d) for d in defs] @memoize_default() def _follow_statements_imports(self): """ Follow both statements and imports, as far as possible. """ if self._definition.isinstance(tree.ExprStmt): return self._evaluator.eval_statement(self._definition) elif self._definition.isinstance(tree.Import): return imports.ImportWrapper(self._evaluator, self._name).follow() else: return [self._definition] @property @memoize_default() def params(self): """ Raises an ``AttributeError``if the definition is not callable. Otherwise returns a list of `Definition` that represents the params. """ followed = self._follow_statements_imports() if not followed or not hasattr(followed[0], 'py__call__'): raise AttributeError() followed = followed[0] # only check the first one. if followed.type == 'funcdef': if isinstance(followed, er.InstanceElement): params = followed.params[1:] else: params = followed.params elif followed.isinstance(er.compiled.CompiledObject): params = followed.params else: try: sub = followed.get_subscope_by_name('__init__') params = sub.params[1:] # ignore self except KeyError: return [] return [_Param(self._evaluator, p.name) for p in params] def parent(self): scope = self._definition.get_parent_scope() scope = self._evaluator.wrap(scope) return Definition(self._evaluator, scope.name) def __repr__(self): return "<%s %s>" % (type(self).__name__, self.description) class Completion(BaseDefinition): """ `Completion` objects are returned from :meth:`api.Script.completions`. They provide additional information about a completion. """ def __init__(self, evaluator, name, needs_dot, like_name_length): super(Completion, self).__init__(evaluator, name) self._needs_dot = needs_dot self._like_name_length = like_name_length # Completion objects with the same Completion name (which means # duplicate items in the completion) self._same_name_completions = [] def _complete(self, like_name): dot = '.' if self._needs_dot else '' append = '' if settings.add_bracket_after_function \ and self.type == 'Function': append = '(' if settings.add_dot_after_module: if isinstance(self._definition, tree.Module): append += '.' if isinstance(self._definition, tree.Param): append += '=' name = str(self._name) if like_name: name = name[self._like_name_length:] return dot + name + append @property def complete(self): """ Return the rest of the word, e.g. completing ``isinstance``:: isinstan# <-- Cursor is here would return the string 'ce'. It also adds additional stuff, depending on your `settings.py`. """ return self._complete(True) @property def name_with_symbols(self): """ Similar to :attr:`name`, but like :attr:`name` returns also the symbols, for example:: list() would return ``.append`` and others (which means it adds a dot). 
""" return self._complete(False) @property def description(self): """Provide a description of the completion object.""" if self._definition is None: return '' t = self.type if t == 'statement' or t == 'import': desc = self._definition.get_code() else: desc = '.'.join(unicode(p) for p in self._path()) line = '' if self.in_builtin_module else '@%s' % self.line return '%s: %s%s' % (t, desc, line) def __repr__(self): return '<%s: %s>' % (type(self).__name__, self._name) def docstring(self, raw=False, fast=True): """ :param fast: Don't follow imports that are only one level deep like ``import foo``, but follow ``from foo import bar``. This makes sense for speed reasons. Completing `import a` is slow if you use the ``foo.docstring(fast=False)`` on every object, because it parses all libraries starting with ``a``. """ definition = self._definition if isinstance(definition, tree.Import): i = imports.ImportWrapper(self._evaluator, self._name) if len(i.import_path) > 1 or not fast: followed = self._follow_statements_imports() if followed: # TODO: Use all of the followed objects as input to Documentation. definition = followed[0] if raw: return _Help(definition).raw() else: return _Help(definition).full() @property def type(self): """ The type of the completion objects. Follows imports. For a further description, look at :attr:`jedi.api.classes.BaseDefinition.type`. """ if isinstance(self._definition, tree.Import): i = imports.ImportWrapper(self._evaluator, self._name) if len(i.import_path) <= 1: return 'module' followed = self.follow_definition() if followed: # Caveat: Only follows the first one, ignore the other ones. # This is ok, since people are almost never interested in # variations. return followed[0].type return super(Completion, self).type @memoize_default() def _follow_statements_imports(self): # imports completion is very complicated and needs to be treated # separately in Completion. definition = self._definition if definition.isinstance(tree.Import): i = imports.ImportWrapper(self._evaluator, self._name) return i.follow() return super(Completion, self)._follow_statements_imports() @memoize_default() def follow_definition(self): """ Return the original definitions. I strongly recommend not using it for your completions, because it might slow down |jedi|. If you want to read only a few objects (<=20), it might be useful, especially to get the original docstrings. The basic problem of this function is that it follows all results. This means with 1000 completions (e.g. numpy), it's just PITA-slow. """ defs = self._follow_statements_imports() return [Definition(self._evaluator, d.name) for d in defs] class Definition(use_metaclass(CachedMetaClass, BaseDefinition)): """ *Definition* objects are returned from :meth:`api.Script.goto_assignments` or :meth:`api.Script.goto_definitions`. """ def __init__(self, evaluator, definition): super(Definition, self).__init__(evaluator, definition) @property def description(self): """ A description of the :class:`.Definition` object, which is heavily used in testing. e.g. for ``isinstance`` it returns ``def isinstance``. Example: >>> from jedi import Script >>> source = ''' ... def f(): ... pass ... ... class C: ... pass ... ... 
variable = f if random.choice([0,1]) else C''' >>> script = Script(source, column=3) # line is maximum by default >>> defs = script.goto_definitions() >>> defs = sorted(defs, key=lambda d: d.line) >>> defs [, ] >>> str(defs[0].description) # strip literals in python2 'def f' >>> str(defs[1].description) 'class C' """ d = self._definition if isinstance(d, er.InstanceElement): d = d.var if isinstance(d, compiled.CompiledObject): typ = d.api_type() if typ == 'instance': typ = 'class' # The description should be similar to Py objects. d = typ + ' ' + d.name.get_code() elif isinstance(d, iterable.Array): d = 'class ' + d.type elif isinstance(d, (tree.Class, er.Class, er.Instance)): d = 'class ' + unicode(d.name) elif isinstance(d, (er.Function, tree.Function)): d = 'def ' + unicode(d.name) elif isinstance(d, tree.Module): # only show module name d = 'module %s' % self.module_name elif isinstance(d, tree.Param): d = d.get_code().strip() if d.endswith(','): d = d[:-1] # Remove the comma. else: # ExprStmt try: first_leaf = d.first_leaf() except AttributeError: # `d` is already a Leaf (Name). first_leaf = d # Remove the prefix, because that's not what we want for get_code # here. old, first_leaf.prefix = first_leaf.prefix, '' try: d = d.get_code() finally: first_leaf.prefix = old # Delete comments: d = re.sub('#[^\n]+\n', ' ', d) # Delete multi spaces/newlines return re.sub('\s+', ' ', d).strip() @property def desc_with_module(self): """ In addition to the definition, also return the module. .. warning:: Don't use this function yet, its behaviour may change. If you really need it, talk to me. .. todo:: Add full path. This function is should return a `module.class.function` path. """ position = '' if self.in_builtin_module else '@%s' % (self.line) return "%s:%s%s" % (self.module_name, self.description, position) @memoize_default() def defined_names(self): """ List sub-definitions (e.g., methods in class). :rtype: list of Definition """ defs = self._follow_statements_imports() # For now we don't want base classes or evaluate decorators. defs = [d.base if isinstance(d, (er.Class, er.Function)) else d for d in defs] iterable = (defined_names(self._evaluator, d) for d in defs) iterable = list(iterable) return list(chain.from_iterable(iterable)) def is_definition(self): """ Returns True, if defined as a name in a statement, function or class. Returns False, if it's a reference to such a definition. """ return self._name.is_definition() def __eq__(self, other): return self._name.start_pos == other._name.start_pos \ and self.module_path == other.module_path \ and self.name == other.name \ and self._evaluator == other._evaluator def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash((self._name.start_pos, self.module_path, self.name, self._evaluator)) class CallSignature(Definition): """ `CallSignature` objects is the return value of `Script.function_definition`. It knows what functions you are currently in. e.g. `isinstance(` would return the `isinstance` function. without `(` it would return nothing. """ def __init__(self, evaluator, executable_name, call_stmt, index, key_name): super(CallSignature, self).__init__(evaluator, executable_name) self._index = index self._key_name = key_name self._call_stmt = call_stmt @property def index(self): """ The Param index of the current call. Returns None if the index cannot be found in the curent call. 
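If the call uses a keyword argument, the index of the parameter with that name is returned (or of a ``**kwargs`` parameter, if present); an index past the positional parameters falls back to a ``*args`` parameter.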
""" if self._key_name is not None: for i, param in enumerate(self.params): if self._key_name == param.name: return i if self.params and self.params[-1]._name.get_definition().stars == 2: return i else: return None if self._index >= len(self.params): for i, param in enumerate(self.params): # *args case if param._name.get_definition().stars == 1: return i return None return self._index @property def bracket_start(self): """ The indent of the bracket that is responsible for the last function call. """ return self._call_stmt.end_pos @property def call_name(self): """ .. deprecated:: 0.8.0 Use :attr:`.name` instead. .. todo:: Remove! The name (e.g. 'isinstance') as a string. """ warnings.warn("Use name instead.", DeprecationWarning) return unicode(self.name) @property def module(self): """ .. deprecated:: 0.8.0 Use :attr:`.module_name` for the module name. .. todo:: Remove! """ return self._executable.get_parent_until() def __repr__(self): return '<%s: %s index %s>' % (type(self).__name__, self._name, self.index) class _Param(Definition): """ Just here for backwards compatibility. """ def get_code(self): """ .. deprecated:: 0.8.0 Use :attr:`.description` and :attr:`.name` instead. .. todo:: Remove! A function to get the whole code of the param. """ warnings.warn("Use description instead.", DeprecationWarning) return self.description class _Help(object): """ Temporary implementation, will be used as `Script.help() or something in the future. """ def __init__(self, definition): self._name = definition def full(self): try: return self._name.doc except AttributeError: return self.raw() def raw(self): """ The raw docstring ``__doc__`` for any object. See :attr:`doc` for example. """ try: return self._name.raw_doc except AttributeError: return '' jedi-0.9.0/jedi/api/keywords.py0000664000175000017500000000377712517736533016574 0ustar daviddavid00000000000000import pydoc import keyword from jedi._compatibility import is_py3 from jedi import common from jedi.evaluate import compiled from jedi.evaluate.helpers import FakeName try: from pydoc_data import topics as pydoc_topics except ImportError: # Python 2.6 import pydoc_topics if is_py3: keys = keyword.kwlist else: keys = keyword.kwlist + ['None', 'False', 'True'] def keywords(string='', pos=(0, 0), all=False): if all: return set([Keyword(k, pos) for k in keys]) if string in keys: return set([Keyword(string, pos)]) return set() def keyword_names(*args, **kwargs): return [k.name for k in keywords(*args, **kwargs)] def get_operator(string, pos): return Keyword(string, pos) class Keyword(object): def __init__(self, name, pos): self.name = FakeName(name, self, pos) self.start_pos = pos self.parent = compiled.builtin def get_parent_until(self): return self.parent @property def names(self): """ For a `parsing.Name` like comparision """ return [self.name] @property def docstr(self): return imitate_pydoc(self.name) def __repr__(self): return '<%s: %s>' % (type(self).__name__, self.name) def imitate_pydoc(string): """ It's not possible to get the pydoc's without starting the annoying pager stuff. 
""" # str needed because of possible unicode stuff in py2k (pydoc doesn't work # with unicode strings) string = str(string) h = pydoc.help with common.ignored(KeyError): # try to access symbols string = h.symbols[string] string, _, related = string.partition(' ') get_target = lambda s: h.topics.get(s, h.keywords.get(s)) while isinstance(string, str): string = get_target(string) try: # is a tuple now label, related = string except TypeError: return '' try: return pydoc_topics.topics[label] if pydoc_topics else '' except KeyError: return '' jedi-0.9.0/jedi/api/usages.py0000664000175000017500000000337712517736533016210 0ustar daviddavid00000000000000from jedi._compatibility import unicode from jedi.api import classes from jedi.parser import tree from jedi.evaluate import imports def usages(evaluator, definition_names, mods): """ :param definitions: list of Name """ def compare_array(definitions): """ `definitions` are being compared by module/start_pos, because sometimes the id's of the objects change (e.g. executions). """ result = [] for d in definitions: module = d.get_parent_until() result.append((module, d.start_pos)) return result search_name = unicode(list(definition_names)[0]) compare_definitions = compare_array(definition_names) mods |= set([d.get_parent_until() for d in definition_names]) definitions = [] for m in imports.get_modules_containing_name(evaluator, mods, search_name): try: check_names = m.used_names[search_name] except KeyError: continue for name in check_names: result = evaluator.goto(name) if [c for c in compare_array(result) if c in compare_definitions]: definitions.append(classes.Definition(evaluator, name)) # Previous definitions might be imports, so include them # (because goto might return that import name). compare_definitions += compare_array([name]) return definitions def usages_add_import_modules(evaluator, definitions): """ Adds the modules of the imports """ new = set() for d in definitions: imp_or_stmt = d.get_definition() if isinstance(imp_or_stmt, tree.Import): s = imports.ImportWrapper(evaluator, d) new |= set(s.follow(is_goto=True)) return set(definitions) | new jedi-0.9.0/jedi/api/replstartup.py0000664000175000017500000000150012363566726017274 0ustar daviddavid00000000000000""" To use Jedi completion in Python interpreter, add the following in your shell setup (e.g., ``.bashrc``):: export PYTHONSTARTUP="$(python -m jedi repl)" Then you will be able to use Jedi completer in your Python interpreter:: $ python Python 2.7.2+ (default, Jul 20 2012, 22:15:08) [GCC 4.6.1] on linux2 Type "help", "copyright", "credits" or "license" for more information. >>> import os >>> os.path.join().split().in # doctest: +SKIP os.path.join().split().index os.path.join().split().insert """ import jedi.utils from jedi import __version__ as __jedi_version__ print('REPL completion using Jedi %s' % __jedi_version__) jedi.utils.setup_readline() del jedi # Note: try not to do many things here, as it will contaminate global # namespace of the interpreter. 
jedi-0.9.0/jedi/api/helpers.py0000664000175000017500000000500712517736533016353 0ustar daviddavid00000000000000""" Helpers for the API """ import re from jedi.parser import tree as pt from jedi.evaluate import imports def completion_parts(path_until_cursor): """ Returns the parts for the completion :return: tuple - (path, dot, like) """ match = re.match(r'^(.*?)(\.|)(\w?[\w\d]*)$', path_until_cursor, flags=re.S) return match.groups() def sorted_definitions(defs): # Note: `or ''` below is required because `module_path` could be return sorted(defs, key=lambda x: (x.module_path or '', x.line or 0, x.column or 0)) def get_on_import_stmt(evaluator, user_context, user_stmt, is_like_search=False): """ Resolve the user statement, if it is an import. Only resolve the parts until the user position. """ name = user_stmt.name_for_position(user_context.position) if name is None: return None, None i = imports.ImportWrapper(evaluator, name) return i, name def check_error_statements(module, pos): for error_statement in module.error_statement_stacks: if error_statement.first_type in ('import_from', 'import_name') \ and error_statement.first_pos < pos <= error_statement.next_start_pos: return importer_from_error_statement(error_statement, pos) return None, 0, False, False def importer_from_error_statement(error_statement, pos): def check_dotted(children): for name in children[::2]: if name.start_pos <= pos: yield name names = [] level = 0 only_modules = True unfinished_dotted = False for typ, nodes in error_statement.stack: if typ == 'dotted_name': names += check_dotted(nodes) if nodes[-1] == '.': # An unfinished dotted_name unfinished_dotted = True elif typ == 'import_name': if nodes[0].start_pos <= pos <= nodes[0].end_pos: # We are on the import. return None, 0, False, False elif typ == 'import_from': for node in nodes: if node.start_pos >= pos: break elif isinstance(node, pt.Node) and node.type == 'dotted_name': names += check_dotted(node.children) elif node in ('.', '...'): level += len(node.value) elif isinstance(node, pt.Name): names.append(node) elif node == 'import': only_modules = False return names, level, only_modules, unfinished_dotted jedi-0.9.0/jedi/api/interpreter.py0000664000175000017500000000720212517736533017253 0ustar daviddavid00000000000000""" TODO Some parts of this module are still not well documented. """ import inspect import re from jedi._compatibility import builtins from jedi import debug from jedi.common import source_to_unicode from jedi.cache import underscore_memoization from jedi.evaluate import compiled from jedi.evaluate.compiled.fake import get_module from jedi.parser import tree as pt from jedi.parser import load_grammar from jedi.parser.fast import FastParser from jedi.evaluate import helpers from jedi.evaluate import iterable from jedi.evaluate import representation as er def add_namespaces_to_parser(evaluator, namespaces, parser_module): for namespace in namespaces: for key, value in namespace.items(): # Name lookups in an ast tree work by checking names_dict. # Therefore we just add fake names to that and we're done. 
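# names_dict maps each name to a list of name nodes; append a LazyName that lazily wraps the interpreter value.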
arr = parser_module.names_dict.setdefault(key, []) arr.append(LazyName(evaluator, parser_module, key, value)) class LazyName(helpers.FakeName): def __init__(self, evaluator, module, name, value): super(LazyName, self).__init__(name) self._module = module self._evaluator = evaluator self._value = value self._name = name def is_definition(self): return True @property @underscore_memoization def parent(self): """ Creating fake statements for the interpreter. """ obj = self._value parser_path = [] if inspect.ismodule(obj): module = obj else: names = [] try: o = obj.__objclass__ names.append(obj.__name__) obj = o except AttributeError: pass try: module_name = obj.__module__ names.insert(0, obj.__name__) except AttributeError: # Unfortunately in some cases like `int` there's no __module__ module = builtins else: # TODO this import is wrong. Yields x for x.y.z instead of z module = __import__(module_name) parser_path = names raw_module = get_module(self._value) found = [] try: path = module.__file__ except AttributeError: pass else: path = re.sub('c$', '', path) if path.endswith('.py'): # cut the `c` from `.pyc` with open(path) as f: source = source_to_unicode(f.read()) mod = FastParser(load_grammar(), source, path[:-1]).module if parser_path: assert len(parser_path) == 1 found = self._evaluator.find_types(mod, parser_path[0], search_global=True) else: found = [self._evaluator.wrap(mod)] if not found: debug.warning('Possibly an interpreter lookup for Python code failed %s', parser_path) if not found: evaluated = compiled.CompiledObject(obj) if evaluated == builtins: # The builtins module is special and always cached. evaluated = compiled.builtin found = [evaluated] content = iterable.AlreadyEvaluated(found) stmt = pt.ExprStmt([self, pt.Operator(pt.zero_position_modifier, '=', (0, 0), ''), content]) stmt.parent = self._module return stmt @parent.setter def parent(self, value): """Needed because the super class tries to set parent.""" jedi-0.9.0/jedi/cache.py0000664000175000017500000002350712517736533015210 0ustar daviddavid00000000000000""" This caching is very important for speed and memory optimizations. There's nothing really spectacular, just some decorators. The following cache types are available: - module caching (`load_parser` and `save_parser`), which uses pickle and is really important to assure low load times of modules like ``numpy``. - ``time_cache`` can be used to cache something for just a limited time span, which can be useful if there's user interaction and the user cannot react faster than a certain time. This module is one of the reasons why |jedi| is not thread-safe. As you can see there are global variables, which are holding the cache information. Some of these variables are being cleaned after every API usage. """ import time import os import sys import json import hashlib import gc import inspect import shutil import re try: import cPickle as pickle except ImportError: import pickle from jedi import settings from jedi import common from jedi import debug _time_caches = {} # for fast_parser, should not be deleted parser_cache = {} class ParserCacheItem(object): def __init__(self, parser, change_time=None): self.parser = parser if change_time is None: change_time = time.time() self.change_time = change_time def clear_time_caches(delete_all=False): """ Jedi caches many things, that should be completed after each completion finishes. :param delete_all: Deletes also the cache that is normally not deleted, like parser cache, which is important for faster parsing. 
""" global _time_caches if delete_all: for cache in _time_caches.values(): cache.clear() parser_cache.clear() else: # normally just kill the expired entries, not all for tc in _time_caches.values(): # check time_cache for expired entries for key, (t, value) in list(tc.items()): if t < time.time(): # delete expired entries del tc[key] def time_cache(time_add_setting): """ s This decorator works as follows: Call it with a setting and after that use the function with a callable that returns the key. But: This function is only called if the key is not available. After a certain amount of time (`time_add_setting`) the cache is invalid. """ def _temp(key_func): dct = {} _time_caches[time_add_setting] = dct def wrapper(*args, **kwargs): generator = key_func(*args, **kwargs) key = next(generator) try: expiry, value = dct[key] if expiry > time.time(): return value except KeyError: pass value = next(generator) time_add = getattr(settings, time_add_setting) if key is not None: dct[key] = time.time() + time_add, value return value return wrapper return _temp @time_cache("call_signatures_validity") def cache_call_signatures(evaluator, call, source, user_pos): """This function calculates the cache key.""" index = user_pos[0] - 1 lines = common.splitlines(source) before_cursor = lines[index][:user_pos[1]] other_lines = lines[call.start_pos[0]:index] whole = '\n'.join(other_lines + [before_cursor]) before_bracket = re.match(r'.*\(', whole, re.DOTALL) module_path = call.get_parent_until().path yield None if module_path is None else (module_path, before_bracket, call.start_pos) yield evaluator.eval_element(call) def underscore_memoization(func): """ Decorator for methods:: class A(object): def x(self): if self._x: self._x = 10 return self._x Becomes:: class A(object): @underscore_memoization def x(self): return 10 A now has an attribute ``_x`` written by this decorator. """ name = '_' + func.__name__ def wrapper(self): try: return getattr(self, name) except AttributeError: result = func(self) if inspect.isgenerator(result): result = list(result) setattr(self, name, result) return result return wrapper def memoize_method(method): """A normal memoize function.""" def wrapper(self, *args, **kwargs): dct = self.__dict__.setdefault('_memoize_method_dct', {}) key = (args, frozenset(kwargs.items())) try: return dct[key] except KeyError: result = method(self, *args, **kwargs) dct[key] = result return result return wrapper def cache_star_import(func): @time_cache("star_import_cache_validity") def wrapper(self): yield self.base # The cache key yield func(self) return wrapper def _invalidate_star_import_cache_module(module, only_main=False): """ Important if some new modules are being reparsed """ try: t, modules = _time_caches['star_import_cache_validity'][module] except KeyError: pass else: del _time_caches['star_import_cache_validity'][module] def invalidate_star_import_cache(path): """On success returns True.""" try: parser_cache_item = parser_cache[path] except KeyError: pass else: _invalidate_star_import_cache_module(parser_cache_item.parser.module) def load_parser(path): """ Returns the module or None, if it fails. """ p_time = os.path.getmtime(path) if path else None try: parser_cache_item = parser_cache[path] if not path or p_time <= parser_cache_item.change_time: return parser_cache_item.parser else: # In case there is already a module cached and this module # has to be reparsed, we also need to invalidate the import # caches. 
_invalidate_star_import_cache_module(parser_cache_item.parser.module) except KeyError: if settings.use_filesystem_cache: return ParserPickling.load_parser(path, p_time) def save_parser(path, parser, pickling=True): try: p_time = None if path is None else os.path.getmtime(path) except OSError: p_time = None pickling = False item = ParserCacheItem(parser, p_time) parser_cache[path] = item if settings.use_filesystem_cache and pickling: ParserPickling.save_parser(path, item) class ParserPickling(object): version = 24 """ Version number (integer) for file system cache. Increment this number when there are any incompatible changes in parser representation classes. For example, the following changes are regarded as incompatible. - Class name is changed. - Class is moved to another module. - Defined slot of the class is changed. """ def __init__(self): self.__index = None self.py_tag = 'cpython-%s%s' % sys.version_info[:2] """ Short name for distinguish Python implementations and versions. It's like `sys.implementation.cache_tag` but for Python < 3.3 we generate something similar. See: http://docs.python.org/3/library/sys.html#sys.implementation .. todo:: Detect interpreter (e.g., PyPy). """ def load_parser(self, path, original_changed_time): try: pickle_changed_time = self._index[path] except KeyError: return None if original_changed_time is not None \ and pickle_changed_time < original_changed_time: # the pickle file is outdated return None with open(self._get_hashed_path(path), 'rb') as f: try: gc.disable() parser_cache_item = pickle.load(f) finally: gc.enable() debug.dbg('pickle loaded: %s', path) parser_cache[path] = parser_cache_item return parser_cache_item.parser def save_parser(self, path, parser_cache_item): self.__index = None try: files = self._index except KeyError: files = {} self._index = files with open(self._get_hashed_path(path), 'wb') as f: pickle.dump(parser_cache_item, f, pickle.HIGHEST_PROTOCOL) files[path] = parser_cache_item.change_time self._flush_index() @property def _index(self): if self.__index is None: try: with open(self._get_path('index.json')) as f: data = json.load(f) except (IOError, ValueError): self.__index = {} else: # 0 means version is not defined (= always delete cache): if data.get('version', 0) != self.version: self.clear_cache() self.__index = {} else: self.__index = data['index'] return self.__index def _remove_old_modules(self): # TODO use change = False if change: self._flush_index(self) self._index # reload index def _flush_index(self): data = {'version': self.version, 'index': self._index} with open(self._get_path('index.json'), 'w') as f: json.dump(data, f) self.__index = None def clear_cache(self): shutil.rmtree(self._cache_directory()) def _get_hashed_path(self, path): return self._get_path('%s.pkl' % hashlib.md5(path.encode("utf-8")).hexdigest()) def _get_path(self, file): dir = self._cache_directory() if not os.path.exists(dir): os.makedirs(dir) return os.path.join(dir, file) def _cache_directory(self): return os.path.join(settings.cache_directory, self.py_tag) # is a singleton ParserPickling = ParserPickling() jedi-0.9.0/setup.cfg0000664000175000017500000000007312517736563014475 0ustar daviddavid00000000000000[egg_info] tag_build = tag_svn_revision = 0 tag_date = 0 jedi-0.9.0/.coveragerc0000664000175000017500000000064512143361722014764 0ustar daviddavid00000000000000[run] omit = jedi/_compatibility.py [report] # Regexes for lines to exclude from consideration exclude_lines = # Don't complain about missing debug-only code: def __repr__ if 
self\.debug # Don't complain if tests don't hit defensive assertion code: raise AssertionError raise NotImplementedError # Don't complain if non-runnable code isn't run: if 0: if __name__ == .__main__.: jedi-0.9.0/conftest.py0000664000175000017500000000276112331540214015035 0ustar daviddavid00000000000000import tempfile import shutil import jedi collect_ignore = ["setup.py"] # The following hooks (pytest_configure, pytest_unconfigure) are used # to modify `jedi.settings.cache_directory` because `clean_jedi_cache` # has no effect during doctests. Without these hooks, doctests uses # user's cache (e.g., ~/.cache/jedi/). We should remove this # workaround once the problem is fixed in py.test. # # See: # - https://github.com/davidhalter/jedi/pull/168 # - https://bitbucket.org/hpk42/pytest/issue/275/ jedi_cache_directory_orig = None jedi_cache_directory_temp = None def pytest_addoption(parser): parser.addoption("--jedi-debug", "-D", action='store_true', help="Enables Jedi's debug output.") parser.addoption("--warning-is-error", action='store_true', help="Warnings are treated as errors.") def pytest_configure(config): global jedi_cache_directory_orig, jedi_cache_directory_temp jedi_cache_directory_orig = jedi.settings.cache_directory jedi_cache_directory_temp = tempfile.mkdtemp(prefix='jedi-test-') jedi.settings.cache_directory = jedi_cache_directory_temp if config.option.jedi_debug: jedi.set_debug_function() if config.option.warning_is_error: import warnings warnings.simplefilter("error") def pytest_unconfigure(config): global jedi_cache_directory_orig, jedi_cache_directory_temp jedi.settings.cache_directory = jedi_cache_directory_orig shutil.rmtree(jedi_cache_directory_temp) jedi-0.9.0/setup.py0000775000175000017500000000350312517736533014367 0ustar daviddavid00000000000000#!/usr/bin/env python from __future__ import with_statement try: from setuptools import setup except ImportError: # Distribute is not actually required to install from distutils.core import setup __AUTHOR__ = 'David Halter' __AUTHOR_EMAIL__ = 'davidhalter88@gmail.com' readme = open('README.rst').read() + '\n\n' + open('CHANGELOG.rst').read() packages = ['jedi', 'jedi.parser', 'jedi.parser.pgen2', 'jedi.evaluate', 'jedi.evaluate.compiled', 'jedi.api'] import jedi setup(name='jedi', version=jedi.__version__, description='An autocompletion tool for Python that can be used for text editors.', author=__AUTHOR__, author_email=__AUTHOR_EMAIL__, maintainer=__AUTHOR__, maintainer_email=__AUTHOR_EMAIL__, url='https://github.com/davidhalter/jedi', license='MIT', keywords='python completion refactoring vim', long_description=readme, packages=packages, package_data={'jedi': ['evaluate/compiled/fake/*.pym', 'parser/grammar*.txt']}, platforms=['any'], classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Plugins', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Text Editors :: Integrated Development Environments (IDE)', 'Topic :: Utilities', ], ) jedi-0.9.0/README.rst0000664000175000017500000001452612517736533014350 0ustar daviddavid00000000000000################################################################### 
Jedi - an awesome autocompletion/static analysis library for Python ################################################################### .. image:: https://secure.travis-ci.org/davidhalter/jedi.png?branch=master :target: http://travis-ci.org/davidhalter/jedi :alt: Travis-CI build status .. image:: https://coveralls.io/repos/davidhalter/jedi/badge.png?branch=master :target: https://coveralls.io/r/davidhalter/jedi :alt: Coverage Status *If you have specific questions, please add an issue or ask on* `stackoverflow `_ *with the label* ``python-jedi``. Jedi is a static analysis tool for Python that can be used in IDEs/editors. Its historic focus is autocompletion, but does static analysis for now as well. Jedi is fast and is very well tested. It understands Python on a deeper level than all other static analysis frameworks for Python. Jedi has support for two different goto functions. It's possible to search for related names and to list all names in a Python file and infer them. Jedi understands docstrings and you can use Jedi autocompletion in your REPL as well. Jedi uses a very simple API to connect with IDE's. There's a reference implementation as a `VIM-Plugin `_, which uses Jedi's autocompletion. We encourage you to use Jedi in your IDEs. It's really easy. Jedi can currently be used with the following editors: - Vim (jedi-vim_, YouCompleteMe_) - Emacs (Jedi.el_, elpy_, anaconda-mode_, ycmd_) - Sublime Text (SublimeJEDI_ [ST2 + ST3], anaconda_ [only ST3]) - SynWrite_ - TextMate_ (Not sure if it's actually working) - Kate_ version 4.13+ supports it natively, you have to enable it, though. [`proof `_] And it powers the following projects: - wdb_ - Web Debugger Here are some pictures taken from jedi-vim_: .. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_complete.png Completion for almost anything (Ctrl+Space). .. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_function.png Display of function/class bodies, docstrings. .. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_pydoc.png Pydoc support (Shift+k). There is also support for goto and renaming. Get the latest version from `github `_ (master branch should always be kind of stable/working). Docs are available at `https://jedi.readthedocs.org/en/latest/ `_. Pull requests with documentation enhancements and/or fixes are awesome and most welcome. Jedi uses `semantic versioning `_. Installation ============ pip install jedi Note: This just installs the Jedi library, not the editor plugins. For information about how to make it work with your editor, refer to the corresponding documentation. You don't want to use ``pip``? Please refer to the `manual `_. Feature Support and Caveats =========================== Jedi really understands your Python code. For a comprehensive list what Jedi understands, see: `Features `_. A list of caveats can be found on the same page. You can run Jedi on cPython 2.6, 2.7, 3.2, 3.3 or 3.4, but it should also understand/parse code older than those versions. Tips on how to use Jedi efficiently can be found `here `_. API --- You can find the documentation for the `API here `_. Autocompletion / Goto / Pydoc ----------------------------- Please check the API for a good explanation. There are the following commands: - ``jedi.Script.goto_assignments`` - ``jedi.Script.completions`` - ``jedi.Script.usages`` The returned objects are very powerful and really all you might need. Autocompletion in your REPL (IPython, etc.) 
------------------------------------------- It's possible to have Jedi autocompletion in REPL modes - `example video `_. This means that IPython and others are `supported `_. Static Analysis / Linter ------------------------ To do all forms of static analysis, please try to use ``jedi.names``. It will return a list of names that you can use to infer types and so on. Linting is another thing that is going to be part of Jedi. For now you can try an alpha version ``python -m jedi linter``. The API might change though and it's still buggy. It's Jedi's goal to be smarter than classic linter and understand ``AttributeError`` and other code issues. Refactoring ----------- Jedi would in theory support refactoring, but we have never publicized it, because it's not production ready. If you're interested in helping out here, let me know. With the latest parser changes, it should be very easy to actually make it work. Development =========== There's a pretty good and extensive `development documentation `_. Testing ======= The test suite depends on ``tox`` and ``pytest``:: pip install tox pytest To run the tests for all supported Python versions:: tox If you want to test only a specific Python version (e.g. Python 2.7), it's as easy as :: tox -e py27 Tests are also run automatically on `Travis CI `_. For more detailed information visit the `testing documentation `_ .. _jedi-vim: https://github.com/davidhalter/jedi-vim .. _youcompleteme: http://valloric.github.io/YouCompleteMe/ .. _Jedi.el: https://github.com/tkf/emacs-jedi .. _elpy: https://github.com/jorgenschaefer/elpy .. _anaconda-mode: https://github.com/proofit404/anaconda-mode .. _ycmd: https://github.com/abingham/emacs-ycmd .. _sublimejedi: https://github.com/srusskih/SublimeJEDI .. _anaconda: https://github.com/DamnWidget/anaconda .. _SynWrite: http://uvviewsoft.com/synjedi/ .. _wdb: https://github.com/Kozea/wdb .. _TextMate: https://github.com/lawrenceakka/python-jedi.tmbundle .. _Kate: http://kate-editor.org jedi-0.9.0/CHANGELOG.rst0000664000175000017500000000313512517736533014674 0ustar daviddavid00000000000000.. :changelog: Changelog --------- 0.9.0 (2015-04-10) ++++++++++++++++++ - Integrated the parser of 2to3. This will make refactoring possible. It will also be possible to check for error messages (like compiling an AST would give) in the future. - With the new parser, the evaluation also completely changed. It's now simpler and more readable. - Completely rewritten REPL completion. - Added ``jedi.names``, a command to do static analysis. Thanks to that sourcegraph guys for sponsoring this! - Alpha version of the linter. 0.8.1 (2014-07-23) +++++++++++++++++++ - Bugfix release, the last release forgot to include files that improve autocompletion for builtin libraries. Fixed. 0.8.0 (2014-05-05) +++++++++++++++++++ - Memory Consumption for compiled modules (e.g. builtins, sys) has been reduced drastically. Loading times are down as well (it takes basically as long as an import). - REPL completion is starting to become usable. - Various small API changes. Generally this release focuses on stability and refactoring of internal APIs. - Introducing operator precedence, which makes calculating correct Array indices and ``__getattr__`` strings possible. 0.7.0 (2013-08-09) ++++++++++++++++++ - Switched from LGPL to MIT license. - Added an Interpreter class to the API to make autocompletion in REPL possible. - Added autocompletion support for namespace packages. - Add sith.py, a new random testing method. 
0.6.0 (2013-05-14) ++++++++++++++++++ - Much faster parser with builtin part caching. - A test suite, thanks @tkf. 0.5 versions (2012) +++++++++++++++++++ - Initial development. jedi-0.9.0/LICENSE.txt0000664000175000017500000000212712204171716014463 0ustar daviddavid00000000000000The MIT License (MIT) Copyright (c) <2013> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. jedi-0.9.0/jedi.egg-info/0000775000175000017500000000000012517736563015261 5ustar daviddavid00000000000000jedi-0.9.0/jedi.egg-info/PKG-INFO0000664000175000017500000002561712517736561016367 0ustar daviddavid00000000000000Metadata-Version: 1.1 Name: jedi Version: 0.9.0 Summary: An autocompletion tool for Python that can be used for text editors. Home-page: https://github.com/davidhalter/jedi Author: David Halter Author-email: davidhalter88@gmail.com License: MIT Description: ################################################################### Jedi - an awesome autocompletion/static analysis library for Python ################################################################### .. image:: https://secure.travis-ci.org/davidhalter/jedi.png?branch=master :target: http://travis-ci.org/davidhalter/jedi :alt: Travis-CI build status .. image:: https://coveralls.io/repos/davidhalter/jedi/badge.png?branch=master :target: https://coveralls.io/r/davidhalter/jedi :alt: Coverage Status *If you have specific questions, please add an issue or ask on* `stackoverflow `_ *with the label* ``python-jedi``. Jedi is a static analysis tool for Python that can be used in IDEs/editors. Its historic focus is autocompletion, but does static analysis for now as well. Jedi is fast and is very well tested. It understands Python on a deeper level than all other static analysis frameworks for Python. Jedi has support for two different goto functions. It's possible to search for related names and to list all names in a Python file and infer them. Jedi understands docstrings and you can use Jedi autocompletion in your REPL as well. Jedi uses a very simple API to connect with IDE's. There's a reference implementation as a `VIM-Plugin `_, which uses Jedi's autocompletion. We encourage you to use Jedi in your IDEs. It's really easy. Jedi can currently be used with the following editors: - Vim (jedi-vim_, YouCompleteMe_) - Emacs (Jedi.el_, elpy_, anaconda-mode_, ycmd_) - Sublime Text (SublimeJEDI_ [ST2 + ST3], anaconda_ [only ST3]) - SynWrite_ - TextMate_ (Not sure if it's actually working) - Kate_ version 4.13+ supports it natively, you have to enable it, though. 
[`proof `_] And it powers the following projects: - wdb_ - Web Debugger Here are some pictures taken from jedi-vim_: .. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_complete.png Completion for almost anything (Ctrl+Space). .. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_function.png Display of function/class bodies, docstrings. .. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_pydoc.png Pydoc support (Shift+k). There is also support for goto and renaming. Get the latest version from `github `_ (master branch should always be kind of stable/working). Docs are available at `https://jedi.readthedocs.org/en/latest/ `_. Pull requests with documentation enhancements and/or fixes are awesome and most welcome. Jedi uses `semantic versioning `_. Installation ============ pip install jedi Note: This just installs the Jedi library, not the editor plugins. For information about how to make it work with your editor, refer to the corresponding documentation. You don't want to use ``pip``? Please refer to the `manual `_. Feature Support and Caveats =========================== Jedi really understands your Python code. For a comprehensive list what Jedi understands, see: `Features `_. A list of caveats can be found on the same page. You can run Jedi on cPython 2.6, 2.7, 3.2, 3.3 or 3.4, but it should also understand/parse code older than those versions. Tips on how to use Jedi efficiently can be found `here `_. API --- You can find the documentation for the `API here `_. Autocompletion / Goto / Pydoc ----------------------------- Please check the API for a good explanation. There are the following commands: - ``jedi.Script.goto_assignments`` - ``jedi.Script.completions`` - ``jedi.Script.usages`` The returned objects are very powerful and really all you might need. Autocompletion in your REPL (IPython, etc.) ------------------------------------------- It's possible to have Jedi autocompletion in REPL modes - `example video `_. This means that IPython and others are `supported `_. Static Analysis / Linter ------------------------ To do all forms of static analysis, please try to use ``jedi.names``. It will return a list of names that you can use to infer types and so on. Linting is another thing that is going to be part of Jedi. For now you can try an alpha version ``python -m jedi linter``. The API might change though and it's still buggy. It's Jedi's goal to be smarter than classic linter and understand ``AttributeError`` and other code issues. Refactoring ----------- Jedi would in theory support refactoring, but we have never publicized it, because it's not production ready. If you're interested in helping out here, let me know. With the latest parser changes, it should be very easy to actually make it work. Development =========== There's a pretty good and extensive `development documentation `_. Testing ======= The test suite depends on ``tox`` and ``pytest``:: pip install tox pytest To run the tests for all supported Python versions:: tox If you want to test only a specific Python version (e.g. Python 2.7), it's as easy as :: tox -e py27 Tests are also run automatically on `Travis CI `_. For more detailed information visit the `testing documentation `_ .. _jedi-vim: https://github.com/davidhalter/jedi-vim .. _youcompleteme: http://valloric.github.io/YouCompleteMe/ .. _Jedi.el: https://github.com/tkf/emacs-jedi .. _elpy: https://github.com/jorgenschaefer/elpy .. 
_anaconda-mode: https://github.com/proofit404/anaconda-mode .. _ycmd: https://github.com/abingham/emacs-ycmd .. _sublimejedi: https://github.com/srusskih/SublimeJEDI .. _anaconda: https://github.com/DamnWidget/anaconda .. _SynWrite: http://uvviewsoft.com/synjedi/ .. _wdb: https://github.com/Kozea/wdb .. _TextMate: https://github.com/lawrenceakka/python-jedi.tmbundle .. _Kate: http://kate-editor.org .. :changelog: Changelog --------- 0.9.0 (2015-04-10) ++++++++++++++++++ - Integrated the parser of 2to3. This will make refactoring possible. It will also be possible to check for error messages (like compiling an AST would give) in the future. - With the new parser, the evaluation also completely changed. It's now simpler and more readable. - Completely rewritten REPL completion. - Added ``jedi.names``, a command to do static analysis. Thanks to that sourcegraph guys for sponsoring this! - Alpha version of the linter. 0.8.1 (2014-07-23) +++++++++++++++++++ - Bugfix release, the last release forgot to include files that improve autocompletion for builtin libraries. Fixed. 0.8.0 (2014-05-05) +++++++++++++++++++ - Memory Consumption for compiled modules (e.g. builtins, sys) has been reduced drastically. Loading times are down as well (it takes basically as long as an import). - REPL completion is starting to become usable. - Various small API changes. Generally this release focuses on stability and refactoring of internal APIs. - Introducing operator precedence, which makes calculating correct Array indices and ``__getattr__`` strings possible. 0.7.0 (2013-08-09) ++++++++++++++++++ - Switched from LGPL to MIT license. - Added an Interpreter class to the API to make autocompletion in REPL possible. - Added autocompletion support for namespace packages. - Add sith.py, a new random testing method. 0.6.0 (2013-05-14) ++++++++++++++++++ - Much faster parser with builtin part caching. - A test suite, thanks @tkf. 0.5 versions (2012) +++++++++++++++++++ - Initial development. 
Keywords: python completion refactoring vim Platform: any Classifier: Development Status :: 4 - Beta Classifier: Environment :: Plugins Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: MIT License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.2 Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: 3.4 Classifier: Topic :: Software Development :: Libraries :: Python Modules Classifier: Topic :: Text Editors :: Integrated Development Environments (IDE) Classifier: Topic :: Utilities jedi-0.9.0/jedi.egg-info/dependency_links.txt0000664000175000017500000000000112517736561021325 0ustar daviddavid00000000000000 jedi-0.9.0/jedi.egg-info/top_level.txt0000664000175000017500000000000512517736561020004 0ustar daviddavid00000000000000jedi jedi-0.9.0/jedi.egg-info/SOURCES.txt0000664000175000017500000001642612517736563017156 0ustar daviddavid00000000000000.coveragerc AUTHORS.txt CHANGELOG.rst LICENSE.txt MANIFEST.in README.rst conftest.py pytest.ini setup.py sith.py tox.ini docs/Makefile docs/conf.py docs/global.rst docs/index.rst docs/_screenshots/screenshot_complete.png docs/_screenshots/screenshot_function.png docs/_screenshots/screenshot_pydoc.png docs/_static/logo-src.txt docs/_static/logo.png docs/_templates/ghbuttons.html docs/_templates/sidebarlogo.html docs/_themes/flask_theme_support.py docs/_themes/flask/LICENSE docs/_themes/flask/layout.html docs/_themes/flask/relations.html docs/_themes/flask/theme.conf docs/_themes/flask/static/flasky.css_t docs/_themes/flask/static/small_flask.css docs/docs/development.rst docs/docs/features.rst docs/docs/installation.rst docs/docs/plugin-api-classes.rst docs/docs/plugin-api.rst docs/docs/settings.rst docs/docs/testing.rst docs/docs/usage.rst jedi/__init__.py jedi/__main__.py jedi/_compatibility.py jedi/cache.py jedi/common.py jedi/debug.py jedi/refactoring.py jedi/settings.py jedi/utils.py jedi.egg-info/PKG-INFO jedi.egg-info/SOURCES.txt jedi.egg-info/dependency_links.txt jedi.egg-info/top_level.txt jedi/api/__init__.py jedi/api/classes.py jedi/api/helpers.py jedi/api/interpreter.py jedi/api/keywords.py jedi/api/replstartup.py jedi/api/usages.py jedi/evaluate/__init__.py jedi/evaluate/analysis.py jedi/evaluate/cache.py jedi/evaluate/docstrings.py jedi/evaluate/dynamic.py jedi/evaluate/finder.py jedi/evaluate/flow_analysis.py jedi/evaluate/helpers.py jedi/evaluate/imports.py jedi/evaluate/iterable.py jedi/evaluate/param.py jedi/evaluate/precedence.py jedi/evaluate/recursion.py jedi/evaluate/representation.py jedi/evaluate/stdlib.py jedi/evaluate/sys_path.py jedi/evaluate/compiled/__init__.py jedi/evaluate/compiled/fake.py jedi/evaluate/compiled/fake/_functools.pym jedi/evaluate/compiled/fake/_sqlite3.pym jedi/evaluate/compiled/fake/_sre.pym jedi/evaluate/compiled/fake/_weakref.pym jedi/evaluate/compiled/fake/builtins.pym jedi/evaluate/compiled/fake/datetime.pym jedi/evaluate/compiled/fake/io.pym jedi/evaluate/compiled/fake/posix.pym jedi/parser/__init__.py jedi/parser/fast.py jedi/parser/grammar2.7.txt jedi/parser/grammar3.4.txt jedi/parser/token.py jedi/parser/tokenize.py jedi/parser/tree.py jedi/parser/user_context.py jedi/parser/pgen2/__init__.py jedi/parser/pgen2/grammar.py jedi/parser/pgen2/parse.py jedi/parser/pgen2/pgen.py 
test/__init__.py test/conftest.py test/helpers.py test/refactor.py test/run.py test/test_cache.py test/test_debug.py test/test_integration.py test/test_integration_analysis.py test/test_integration_import.py test/test_integration_keyword.py test/test_integration_stdlib.py test/test_jedi_system.py test/test_new_parser.py test/test_regression.py test/test_speed.py test/test_utils.py test/completion/__init__.py test/completion/arrays.py test/completion/basic.py test/completion/classes.py test/completion/complex.py test/completion/comprehensions.py test/completion/decorators.py test/completion/definition.py test/completion/descriptors.py test/completion/docstring.py test/completion/dynamic_arrays.py test/completion/dynamic_params.py test/completion/flow_analysis.py test/completion/functions.py test/completion/generators.py test/completion/goto.py test/completion/imports.py test/completion/invalid.py test/completion/isinstance.py test/completion/keywords.py test/completion/lambdas.py test/completion/named_param.py test/completion/on_import.py test/completion/ordering.py test/completion/parser.py test/completion/precedence.py test/completion/stdlib.py test/completion/sys_path.py test/completion/types.py test/completion/usages.py test/completion/import_tree/__init__.py test/completion/import_tree/invisible_pkg.py test/completion/import_tree/mod1.py test/completion/import_tree/mod2.py test/completion/import_tree/random.py test/completion/import_tree/recurse_class1.py test/completion/import_tree/recurse_class2.py test/completion/import_tree/rename1.py test/completion/import_tree/rename2.py test/completion/import_tree/pkg/__init__.py test/completion/import_tree/pkg/mod1.py test/completion/thirdparty/PyQt4_.py test/completion/thirdparty/django_.py test/completion/thirdparty/jedi_.py test/completion/thirdparty/psycopg2_.py test/completion/thirdparty/pylab_.py test/refactor/extract.py test/refactor/inline.py test/refactor/rename.py test/speed/precedence.py test/static_analysis/arguments.py test/static_analysis/attribute_error.py test/static_analysis/attribute_warnings.py test/static_analysis/descriptors.py test/static_analysis/generators.py test/static_analysis/imports.py test/static_analysis/operations.py test/static_analysis/star_arguments.py test/static_analysis/try_except.py test/static_analysis/import_tree/__init__.py test/static_analysis/import_tree/a.py test/static_analysis/import_tree/b.py test/test_api/__init__.py test/test_api/test_api.py test/test_api/test_api_classes_follow_definition.py test/test_api/test_call_signatures.py test/test_api/test_classes.py test/test_api/test_defined_names.py test/test_api/test_full_name.py test/test_api/test_interpreter.py test/test_api/test_unicode.py test/test_evaluate/__init__.py test/test_evaluate/test_absolute_import.py test/test_evaluate/test_annotations.py test/test_evaluate/test_buildout_detection.py test/test_evaluate/test_compiled.py test/test_evaluate/test_docstring.py test/test_evaluate/test_extension.py test/test_evaluate/test_imports.py test/test_evaluate/test_namespace_package.py test/test_evaluate/test_pyc.py test/test_evaluate/test_representation.py test/test_evaluate/test_sys_path.py test/test_evaluate/absolute_import/local_module.py test/test_evaluate/absolute_import/unittest.py test/test_evaluate/buildout_project/buildout.cfg test/test_evaluate/buildout_project/bin/app test/test_evaluate/buildout_project/bin/empty_file test/test_evaluate/buildout_project/src/proj_name/module_name.py 
test/test_evaluate/egg-link/venv/lib/python3.4/site-packages/egg_link.egg-link test/test_evaluate/flask-site-packages/flask_foo.py test/test_evaluate/flask-site-packages/flask/__init__.py test/test_evaluate/flask-site-packages/flask/ext/__init__.py test/test_evaluate/flask-site-packages/flask_baz/__init__.py test/test_evaluate/flask-site-packages/flaskext/__init__.py test/test_evaluate/flask-site-packages/flaskext/bar.py test/test_evaluate/flask-site-packages/flaskext/moo/__init__.py test/test_evaluate/init_extension_module/__init__.cpython-34m.so test/test_evaluate/init_extension_module/module.c test/test_evaluate/init_extension_module/setup.py test/test_evaluate/namespace_package/ns1/pkg/__init__.py test/test_evaluate/namespace_package/ns1/pkg/ns1_file.py test/test_evaluate/namespace_package/ns1/pkg/ns1_folder/__init__.py test/test_evaluate/namespace_package/ns2/pkg/ns2_file.py test/test_evaluate/namespace_package/ns2/pkg/ns2_folder/__init__.py test/test_evaluate/namespace_package/ns2/pkg/ns2_folder/nested/__init__.py test/test_evaluate/not_in_sys_path/__init__.py test/test_evaluate/not_in_sys_path/not_in_sys_path.py test/test_evaluate/not_in_sys_path/not_in_sys_path_package/__init__.py test/test_evaluate/not_in_sys_path/not_in_sys_path_package/module.py test/test_evaluate/not_in_sys_path/pkg/__init__.py test/test_evaluate/not_in_sys_path/pkg/module.py test/test_parser/__init__.py test/test_parser/test_fast_parser.py test/test_parser/test_get_code.py test/test_parser/test_parser.py test/test_parser/test_tokenize.py test/test_parser/test_user_context.py