pgcli-1.6.0/0000755000076500000240000000000013112353401013032 5ustar irinastaff00000000000000pgcli-1.6.0/pgcli/0000755000076500000240000000000013112353401014130 5ustar irinastaff00000000000000pgcli-1.6.0/pgcli/__init__.py0000644000076500000240000000002613112353323016242 0ustar irinastaff00000000000000__version__ = '1.6.0' pgcli-1.6.0/pgcli/completion_refresher.py0000644000076500000240000001203513112353104020721 0ustar irinastaff00000000000000import threading import os try: from collections import OrderedDict except ImportError: from .packages.ordereddict import OrderedDict from .pgcompleter import PGCompleter from .pgexecute import PGExecute class CompletionRefresher(object): refreshers = OrderedDict() def __init__(self): self._completer_thread = None self._restart_refresh = threading.Event() def refresh(self, executor, special, callbacks, history=None, settings=None): """ Creates a PGCompleter object and populates it with the relevant completion suggestions in a background thread. executor - PGExecute object, used to extract the credentials to connect to the database. special - PGSpecial object used for creating a new completion object. settings - dict of settings for completer object callbacks - A function or a list of functions to call after the thread has completed the refresh. The newly created completion object will be passed in as an argument to each callback. """ if self.is_refreshing(): self._restart_refresh.set() return [(None, None, None, 'Auto-completion refresh restarted.')] else: self._completer_thread = threading.Thread( target=self._bg_refresh, args=(executor, special, callbacks, history, settings), name='completion_refresh') self._completer_thread.setDaemon(True) self._completer_thread.start() return [(None, None, None, 'Auto-completion refresh started in the background.')] def is_refreshing(self): return self._completer_thread and self._completer_thread.is_alive() def _bg_refresh(self, pgexecute, special, callbacks, history=None, settings=None): settings = settings or {} completer = PGCompleter(smart_completion=True, pgspecial=special, settings=settings) if settings.get('single_connection'): executor = pgexecute else: # Create a new pgexecute method to popoulate the completions. e = pgexecute executor = PGExecute( e.dbname, e.user, e.password, e.host, e.port, e.dsn, **e.extra_args) # If callbacks is a single function then push it into a list. if callable(callbacks): callbacks = [callbacks] while 1: for refresher in self.refreshers.values(): refresher(completer, executor) if self._restart_refresh.is_set(): self._restart_refresh.clear() break else: # Break out of while loop if the for loop finishes natually # without hitting the break statement. break # Start over the refresh from the beginning if the for loop hit the # break statement. continue # Load history into pgcompleter so it can learn user preferences n_recent = 100 if history: for recent in history[-n_recent:]: completer.extend_query_history(recent, is_init=True) for callback in callbacks: callback(completer) def refresher(name, refreshers=CompletionRefresher.refreshers): """Decorator to populate the dictionary of refreshers with the current function. 
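# A minimal, standalone sketch of the restartable background-refresh
# pattern that CompletionRefresher uses above: a daemon thread walks a
# list of steps, and a threading.Event lets a second refresh request
# restart the pass from the first step instead of spawning another
# thread. The step callables here are illustrative placeholders, not
# pgcli code.
import threading
import time

class RestartableWorker(object):
    def __init__(self, steps):
        self._steps = steps
        self._thread = None
        self._restart = threading.Event()

    def start(self):
        if self._thread and self._thread.is_alive():
            self._restart.set()  # ask the running pass to start over
            return
        self._thread = threading.Thread(target=self._run)
        self._thread.daemon = True
        self._thread.start()

    def _run(self):
        while True:
            for step in self._steps:
                step()
                if self._restart.is_set():
                    self._restart.clear()
                    break  # abandon this pass and restart the for-loop
            else:
                break  # all steps finished without a restart request

worker = RestartableWorker([lambda: time.sleep(0.05)] * 3)
worker.start()
worker.start()  # second call restarts the pass rather than re-spawning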
""" def wrapper(wrapped): refreshers[name] = wrapped return wrapped return wrapper @refresher('schemata') def refresh_schemata(completer, executor): completer.set_search_path(executor.search_path()) completer.extend_schemata(executor.schemata()) @refresher('tables') def refresh_tables(completer, executor): completer.extend_relations(executor.tables(), kind='tables') completer.extend_columns(executor.table_columns(), kind='tables') completer.extend_foreignkeys(executor.foreignkeys()) @refresher('views') def refresh_views(completer, executor): completer.extend_relations(executor.views(), kind='views') completer.extend_columns(executor.view_columns(), kind='views') @refresher('functions') def refresh_functions(completer, executor): completer.extend_functions(executor.functions()) @refresher('types') def refresh_types(completer, executor): completer.extend_datatypes(executor.datatypes()) @refresher('databases') def refresh_databases(completer, executor): completer.extend_database_names(executor.databases()) @refresher('casing') def refresh_casing(completer, executor): casing_file = completer.casing_file if not casing_file: return generate_casing_file = completer.generate_casing_file if generate_casing_file and not os.path.isfile(casing_file): casing_prefs = '\n'.join(executor.casing()) with open(casing_file, 'w') as f: f.write(casing_prefs) if os.path.isfile(casing_file): with open(casing_file, 'r') as f: completer.extend_casing([line.strip() for line in f]) pgcli-1.6.0/pgcli/config.py0000644000076500000240000000352312766703104015767 0ustar irinastaff00000000000000import errno import shutil import os import platform from os.path import expanduser, exists, dirname from configobj import ConfigObj def config_location(): if platform.system() == 'Windows': return os.getenv('USERPROFILE') + '\\AppData\\Local\\dbcli\\pgcli\\' elif 'XDG_CONFIG_HOME' in os.environ: return '%s/pgcli/' % expanduser(os.environ['XDG_CONFIG_HOME']) else: return expanduser('~/.config/pgcli/') def load_config(usr_cfg, def_cfg=None): cfg = ConfigObj() cfg.merge(ConfigObj(def_cfg, interpolation=False)) cfg.merge(ConfigObj(expanduser(usr_cfg), interpolation=False, encoding='utf-8')) cfg.filename = expanduser(usr_cfg) return cfg def ensure_dir_exists(path): parent_dir = expanduser(dirname(path)) try: os.makedirs(parent_dir) except OSError as exc: # ignore existing destination (py2 has no exist_ok arg to makedirs) if exc.errno != errno.EEXIST: raise def write_default_config(source, destination, overwrite=False): destination = expanduser(destination) if not overwrite and exists(destination): return ensure_dir_exists(destination) shutil.copyfile(source, destination) def upgrade_config(config, def_config): cfg = load_config(config, def_config) cfg.write() def get_config(pgclirc_file=None): from pgcli import __file__ as package_root package_root = os.path.dirname(package_root) pgclirc_file = pgclirc_file or '%sconfig' % config_location() default_config = os.path.join(package_root, 'pgclirc') write_default_config(default_config, pgclirc_file) return load_config(pgclirc_file, default_config) def get_casing_file(config): casing_file = config['main']['casing_file'] if casing_file == 'default': casing_file = config_location() + 'casing' return casing_file pgcli-1.6.0/pgcli/encodingutils.py0000644000076500000240000000107612572656503017377 0ustar irinastaff00000000000000import sys PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 def unicode2utf8(arg): """ Only in Python 2. Psycopg2 expects the args as bytes not unicode. 
In Python 3 the args are expected as unicode. """ if PY2 and isinstance(arg, unicode): return arg.encode('utf-8') return arg def utf8tounicode(arg): """ Only in Python 2. Psycopg2 returns the error message as utf-8. In Python 3 the errors are returned as unicode. """ if PY2 and isinstance(arg, str): return arg.decode('utf-8') return arg pgcli-1.6.0/pgcli/filters.py0000644000076500000240000000063512606361441016170 0ustar irinastaff00000000000000from prompt_toolkit.filters import Filter class HasSelectedCompletion(Filter): """Enable when the current buffer has a selected completion.""" def __call__(self, cli): complete_state = cli.current_buffer.complete_state return (complete_state is not None and complete_state.current_completion is not None) def __repr__(self): return "HasSelectedCompletion()" pgcli-1.6.0/pgcli/key_bindings.py0000644000076500000240000000554212714463343017173 0ustar irinastaff00000000000000import logging from prompt_toolkit.enums import EditingMode from prompt_toolkit.keys import Keys from prompt_toolkit.key_binding.manager import KeyBindingManager from prompt_toolkit.filters import Condition from .filters import HasSelectedCompletion _logger = logging.getLogger(__name__) def pgcli_bindings(get_vi_mode_enabled, set_vi_mode_enabled): """ Custom key bindings for pgcli. """ assert callable(get_vi_mode_enabled) assert callable(set_vi_mode_enabled) key_binding_manager = KeyBindingManager( enable_open_in_editor=True, enable_system_bindings=True, enable_search=True, enable_abort_and_exit_bindings=True) @key_binding_manager.registry.add_binding(Keys.F2) def _(event): """ Enable/Disable SmartCompletion Mode. """ _logger.debug('Detected F2 key.') buf = event.cli.current_buffer buf.completer.smart_completion = not buf.completer.smart_completion @key_binding_manager.registry.add_binding(Keys.F3) def _(event): """ Enable/Disable Multiline Mode. """ _logger.debug('Detected F3 key.') buf = event.cli.current_buffer buf.always_multiline = not buf.always_multiline @key_binding_manager.registry.add_binding(Keys.F4) def _(event): """ Toggle between Vi and Emacs mode. """ _logger.debug('Detected F4 key.') vi_mode = not get_vi_mode_enabled() set_vi_mode_enabled(vi_mode) event.cli.editing_mode = EditingMode.VI if vi_mode else EditingMode.EMACS @key_binding_manager.registry.add_binding(Keys.Tab) def _(event): """ Force autocompletion at cursor. """ _logger.debug('Detected key.') b = event.cli.current_buffer if b.complete_state: b.complete_next() else: event.cli.start_completion(select_first=True) @key_binding_manager.registry.add_binding(Keys.ControlSpace) def _(event): """ Initialize autocompletion at cursor. If the autocompletion menu is not showing, display it with the appropriate completions for the context. If the menu is showing, select the next completion. """ _logger.debug('Detected key.') b = event.cli.current_buffer if b.complete_state: b.complete_next() else: event.cli.start_completion(select_first=False) @key_binding_manager.registry.add_binding(Keys.ControlJ, filter=HasSelectedCompletion()) def _(event): """ Makes the enter key work as the tab key only when showing the menu. 
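# A hedged sketch of registering one more toggle binding with the same
# prompt_toolkit 1.x API used above (KeyBindingManager plus
# registry.add_binding); the F5 key and the 'my_flag' attribute are
# hypothetical, not bindings pgcli actually defines.
from prompt_toolkit.keys import Keys
from prompt_toolkit.key_binding.manager import KeyBindingManager

manager = KeyBindingManager(enable_system_bindings=True)

@manager.registry.add_binding(Keys.F5)
def _(event):
    """Toggle a hypothetical boolean flag on the current buffer."""
    buf = event.cli.current_buffer
    buf.my_flag = not getattr(buf, 'my_flag', False)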
""" _logger.debug('Detected key.') event.current_buffer.complete_state = None b = event.cli.current_buffer b.complete_state = None return key_binding_manager pgcli-1.6.0/pgcli/magic.py0000644000076500000240000000337312615222066015601 0ustar irinastaff00000000000000from .main import PGCli import sql.parse import sql.connection import logging _logger = logging.getLogger(__name__) def load_ipython_extension(ipython): """This is called via the ipython command '%load_ext pgcli.magic'""" # first, load the sql magic if it isn't already loaded if not ipython.find_line_magic('sql'): ipython.run_line_magic('load_ext', 'sql') # register our own magic ipython.register_magic_function(pgcli_line_magic, 'line', 'pgcli') def pgcli_line_magic(line): _logger.debug('pgcli magic called: %r', line) parsed = sql.parse.parse(line, {}) conn = sql.connection.Connection.get(parsed['connection']) try: # A corresponding pgcli object already exists pgcli = conn._pgcli _logger.debug('Reusing existing pgcli') except AttributeError: # I can't figure out how to get the underylying psycopg2 connection # from the sqlalchemy connection, so just grab the url and make a # new connection pgcli = PGCli() u = conn.session.engine.url _logger.debug('New pgcli: %r', str(u)) pgcli.connect(u.database, u.host, u.username, u.port, u.password) conn._pgcli = pgcli # For convenience, print the connection alias print('Connected: {}'.format(conn.name)) try: pgcli.run_cli() except SystemExit: pass if not pgcli.query_history: return q = pgcli.query_history[-1] if not q.successful: _logger.debug('Unsuccessful query - ignoring') return if q.meta_changed or q.db_changed or q.path_changed: _logger.debug('Dangerous query detected -- ignoring') return ipython = get_ipython() return ipython.run_cell_magic('sql', line, q.query) pgcli-1.6.0/pgcli/main.py0000755000076500000240000011337013112353104015436 0ustar irinastaff00000000000000#!/usr/bin/env python from __future__ import unicode_literals from __future__ import print_function import os import re import sys import traceback import logging import threading import shutil import functools import humanize import datetime as dt from time import time, sleep from codecs import open import click try: import setproctitle except ImportError: setproctitle = None from prompt_toolkit import CommandLineInterface, Application, AbortAction from prompt_toolkit.enums import DEFAULT_BUFFER, EditingMode from prompt_toolkit.shortcuts import create_prompt_layout, create_eventloop from prompt_toolkit.buffer import AcceptAction from prompt_toolkit.document import Document from prompt_toolkit.filters import Always, HasFocus, IsDone from prompt_toolkit.layout.lexers import PygmentsLexer from prompt_toolkit.layout.processors import (ConditionalProcessor, HighlightMatchingBracketProcessor) from prompt_toolkit.history import FileHistory from pygments.lexers.sql import PostgresLexer from pygments.token import Token from .packages.tabulate import tabulate from .packages.expanded import expanded_table from pgspecial.main import (PGSpecial, NO_QUERY, content_exceeds_width) import pgspecial as special from .pgcompleter import PGCompleter from .pgtoolbar import create_toolbar_tokens_func from .pgstyle import style_factory from .pgexecute import PGExecute from .pgbuffer import PGBuffer from .completion_refresher import CompletionRefresher from .config import (get_casing_file, load_config, config_location, ensure_dir_exists, get_config) from .key_bindings import pgcli_bindings from .encodingutils import utf8tounicode from .__init__ 
import __version__ click.disable_unicode_literals_warning = True try: from urlparse import urlparse, unquote, parse_qs except ImportError: from urllib.parse import urlparse, unquote, parse_qs from getpass import getuser from psycopg2 import OperationalError from collections import namedtuple # Query tuples are used for maintaining history MetaQuery = namedtuple( 'Query', [ 'query', # The entire text of the command 'successful', # True If all subqueries were successful 'total_time', # Time elapsed executing the query 'meta_changed', # True if any subquery executed create/alter/drop 'db_changed', # True if any subquery changed the database 'path_changed', # True if any subquery changed the search path 'mutated', # True if any subquery executed insert/update/delete ]) MetaQuery.__new__.__defaults__ = ('', False, 0, False, False, False, False) OutputSettings = namedtuple( 'OutputSettings', 'table_format dcmlfmt floatfmt missingval expanded max_width case_function' ) OutputSettings.__new__.__defaults__ = ( None, None, None, '', False, None, lambda x: x ) # no-op logging handler class NullHandler(logging.Handler): def emit(self, record): pass class PGCli(object): default_prompt = '\\u@\\h:\\d> ' max_len_prompt = 30 def set_default_pager(self, config): configured_pager = config['main'].get('pager') os_environ_pager = os.environ.get('PAGER') if configured_pager: self.logger.info('Default pager found in config file: ' + '\'' + configured_pager + '\'') os.environ['PAGER'] = configured_pager elif os_environ_pager: self.logger.info('Default pager found in PAGER environment variable: ' + '\'' + os_environ_pager + '\'') os.environ['PAGER'] = os_environ_pager else: self.logger.info('No default pager found in environment. Using os default pager') # Always set default set of less recommended options, they are ignored if pager is # different than less or is already parameterized with their own arguments os.environ['LESS'] = '-SRXF' def __init__(self, force_passwd_prompt=False, never_passwd_prompt=False, pgexecute=None, pgclirc_file=None, row_limit=None, single_connection=False, less_chatty=None, prompt=None): self.force_passwd_prompt = force_passwd_prompt self.never_passwd_prompt = never_passwd_prompt self.pgexecute = pgexecute # Load config. 
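# The namedtuple-with-defaults idiom used for MetaQuery and OutputSettings
# above, shown standalone: assigning to __new__.__defaults__ makes every
# field optional (this predates the defaults= keyword added in Python 3.7).
from collections import namedtuple

Sample = namedtuple('Sample', ['query', 'successful', 'total_time'])
Sample.__new__.__defaults__ = ('', False, 0)

assert Sample() == Sample('', False, 0)
assert Sample('select 1').successful is False
assert Sample('select 1', True, 0.5).total_time == 0.5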
c = self.config = get_config(pgclirc_file) self.logger = logging.getLogger(__name__) self.initialize_logging() self.set_default_pager(c) self.output_file = None self.pgspecial = PGSpecial() self.multi_line = c['main'].as_bool('multi_line') self.multiline_mode = c['main'].get('multi_line_mode', 'psql') self.vi_mode = c['main'].as_bool('vi') self.auto_expand = c['main'].as_bool('auto_expand') self.expanded_output = c['main'].as_bool('expand') self.pgspecial.timing_enabled = c['main'].as_bool('timing') if row_limit is not None: self.row_limit = row_limit else: self.row_limit = c['main'].as_int('row_limit') self.min_num_menu_lines = c['main'].as_int('min_num_menu_lines') self.multiline_continuation_char = c['main']['multiline_continuation_char'] self.table_format = c['main']['table_format'] self.syntax_style = c['main']['syntax_style'] self.cli_style = c['colors'] self.wider_completion_menu = c['main'].as_bool('wider_completion_menu') self.less_chatty = bool(less_chatty) or c['main'].as_bool('less_chatty') self.null_string = c['main'].get('null_string', '') self.prompt_format = prompt if prompt is not None else c['main'].get('prompt', self.default_prompt) self.on_error = c['main']['on_error'].upper() self.decimal_format = c['data_formats']['decimal'] self.float_format = c['data_formats']['float'] self.now = dt.datetime.today() self.completion_refresher = CompletionRefresher() self.query_history = [] # Initialize completer smart_completion = c['main'].as_bool('smart_completion') keyword_casing = c['main']['keyword_casing'] self.settings = { 'casing_file': get_casing_file(c), 'generate_casing_file': c['main'].as_bool('generate_casing_file'), 'generate_aliases': c['main'].as_bool('generate_aliases'), 'asterisk_column_order': c['main']['asterisk_column_order'], 'qualify_columns': c['main']['qualify_columns'], 'case_column_headers': c['main'].as_bool('case_column_headers'), 'search_path_filter': c['main'].as_bool('search_path_filter'), 'single_connection': single_connection, 'less_chatty': less_chatty, 'keyword_casing': keyword_casing, } completer = PGCompleter(smart_completion, pgspecial=self.pgspecial, settings=self.settings) self.completer = completer self._completer_lock = threading.Lock() self.register_special_commands() self.eventloop = create_eventloop() self.cli = None def register_special_commands(self): self.pgspecial.register( self.change_db, '\\c', '\\c[onnect] database_name', 'Change to a new database.', aliases=('use', '\\connect', 'USE')) refresh_callback = lambda: self.refresh_completions( persist_priorities='all') self.pgspecial.register(refresh_callback, '\\#', '\\#', 'Refresh auto-completions.', arg_type=NO_QUERY) self.pgspecial.register(refresh_callback, '\\refresh', '\\refresh', 'Refresh auto-completions.', arg_type=NO_QUERY) self.pgspecial.register(self.execute_from_file, '\\i', '\\i filename', 'Execute commands from file.') self.pgspecial.register(self.write_to_file, '\\o', '\\o [filename]', 'Send all query results to file.') def change_db(self, pattern, **_): if pattern: db = pattern[1:-1] if pattern[0] == pattern[-1] == '"' else pattern self.pgexecute.connect(database=db) else: self.pgexecute.connect() yield (None, None, None, 'You are now connected to database "%s" as ' 'user "%s"' % (self.pgexecute.dbname, self.pgexecute.user)) def execute_from_file(self, pattern, **_): if not pattern: message = '\\i: missing required argument' return [(None, None, None, message, '', False)] try: with open(os.path.expanduser(pattern), encoding='utf-8') as f: query = f.read() except 
IOError as e: return [(None, None, None, str(e), '', False)] on_error_resume = (self.on_error == 'RESUME') return self.pgexecute.run( query, self.pgspecial, on_error_resume=on_error_resume ) def write_to_file(self, pattern, **_): if not pattern: self.output_file = None message = 'File output disabled' return [(None, None, None, message, '', True)] filename = os.path.abspath(os.path.expanduser(pattern)) if not os.path.isfile(filename): try: open(filename, 'w').close() except IOError as e: self.output_file = None message = str(e) + '\nFile output disabled' return [(None, None, None, message, '', False)] self.output_file = filename message = 'Writing to file "%s"' % self.output_file return [(None, None, None, message, '', True)] def initialize_logging(self): log_file = self.config['main']['log_file'] if log_file == 'default': log_file = config_location() + 'log' ensure_dir_exists(log_file) log_level = self.config['main']['log_level'] # Disable logging if value is NONE by switching to a no-op handler. # Set log level to a high value so it doesn't even waste cycles getting called. if log_level.upper() == 'NONE': handler = NullHandler() else: handler = logging.FileHandler(os.path.expanduser(log_file)) level_map = {'CRITICAL': logging.CRITICAL, 'ERROR': logging.ERROR, 'WARNING': logging.WARNING, 'INFO': logging.INFO, 'DEBUG': logging.DEBUG, 'NONE': logging.CRITICAL } log_level = level_map[log_level.upper()] formatter = logging.Formatter( '%(asctime)s (%(process)d/%(threadName)s) ' '%(name)s %(levelname)s - %(message)s') handler.setFormatter(formatter) root_logger = logging.getLogger('pgcli') root_logger.addHandler(handler) root_logger.setLevel(log_level) root_logger.debug('Initializing pgcli logging.') root_logger.debug('Log file %r.', log_file) pgspecial_logger = logging.getLogger('pgspecial') pgspecial_logger.addHandler(handler) pgspecial_logger.setLevel(log_level) def connect_dsn(self, dsn): self.connect(dsn=dsn) def connect_uri(self, uri): uri = urlparse(uri) database = uri.path[1:] # ignore the leading fwd slash def fixup_possible_percent_encoding(s): return unquote(str(s)) if s else s arguments = dict(database=fixup_possible_percent_encoding(database), host=fixup_possible_percent_encoding(uri.hostname), user=fixup_possible_percent_encoding(uri.username), port=fixup_possible_percent_encoding(uri.port), passwd=fixup_possible_percent_encoding(uri.password)) # Deal with extra params e.g. ?sslmode=verify-ca&ssl-cert=/mycert if uri.query: arguments = dict( {k: v for k, (v,) in parse_qs(uri.query).items()}, **arguments) # unquote str(each URI part (they may be percent encoded) self.connect(**arguments) def connect(self, database='', host='', user='', port='', passwd='', dsn='', **kwargs): # Connect to the database. if not user: user = getuser() if not database: database = user # If password prompt is not forced but no password is provided, try # getting it from environment variable. if not self.force_passwd_prompt and not passwd: passwd = os.environ.get('PGPASSWORD', '') # Prompt for a password immediately if requested via the -W flag. This # avoids wasting time trying to connect to the database and catching a # no-password exception. # If we successfully parsed a password from a URI, there's no need to # prompt for it, even with the -W flag if self.force_passwd_prompt and not passwd: passwd = click.prompt('Password', hide_input=True, show_default=False, type=str) # Prompt for a password after 1st attempt to connect without a password # fails. 
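# A standalone sketch of the URI handling in connect_uri() above:
# percent-encoded parts are unquoted, and any extra query parameters
# (e.g. sslmode) are flattened out of parse_qs's one-element lists. The
# example URI is illustrative.
try:
    from urlparse import urlparse, unquote, parse_qs  # Python 2
except ImportError:
    from urllib.parse import urlparse, unquote, parse_qs  # Python 3

uri = urlparse('postgres://bob:s%40cret@db.example.com:5433/sales?sslmode=require')
args = dict(database=unquote(uri.path[1:]),  # drop the leading '/'
            host=uri.hostname,
            user=unquote(uri.username),
            port=uri.port,
            passwd=unquote(uri.password))
if uri.query:
    args = dict({k: v for k, (v,) in parse_qs(uri.query).items()}, **args)

assert args['passwd'] == 's@cret'
assert args['sslmode'] == 'require'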
Don't prompt if the -w flag is supplied auto_passwd_prompt = not passwd and not self.never_passwd_prompt # Attempt to connect to the database. # Note that passwd may be empty on the first attempt. If connection # fails because of a missing password, but we're allowed to prompt for # a password (no -w flag), prompt for a passwd and try again. try: try: pgexecute = PGExecute(database, user, passwd, host, port, dsn, **kwargs) except OperationalError as e: if ('no password supplied' in utf8tounicode(e.args[0]) and auto_passwd_prompt): passwd = click.prompt('Password', hide_input=True, show_default=False, type=str) pgexecute = PGExecute(database, user, passwd, host, port, dsn, **kwargs) else: raise e except Exception as e: # Connecting to a database could fail. self.logger.debug('Database connection failed: %r.', e) self.logger.error("traceback: %r", traceback.format_exc()) click.secho(str(e), err=True, fg='red') exit(1) self.pgexecute = pgexecute def handle_editor_command(self, cli, document): """ Editor command is any query that is prefixed or suffixed by a '\e'. The reason for a while loop is because a user might edit a query multiple times. For eg: "select * from \e" to edit it in vim, then come back to the prompt with the edited query "select * from blah where q = 'abc'\e" to edit it again. :param cli: CommandLineInterface :param document: Document :return: Document """ # FIXME: using application.pre_run_callables like this here is not the best solution. # It's internal api of prompt_toolkit that may change. This was added to fix #668. # We may find a better way to do it in the future. saved_callables = cli.application.pre_run_callables while special.editor_command(document.text): filename = special.get_filename(document.text) query = (special.get_editor_query(document.text) or self.get_last_query()) sql, message = special.open_external_editor(filename, sql=query) if message: # Something went wrong. Raise an exception and bail. raise RuntimeError(message) cli.current_buffer.document = Document(sql, cursor_position=len(sql)) cli.application.pre_run_callables = [] document = cli.run() continue cli.application.pre_run_callables = saved_callables return document def execute_command(self, text, query): logger = self.logger try: output, query = self._evaluate_command(text) except KeyboardInterrupt: # Restart connection to the database self.pgexecute.connect() logger.debug("cancelled query, sql: %r", text) click.secho("cancelled query", err=True, fg='red') except NotImplementedError: click.secho('Not Yet Implemented.', fg="yellow") except OperationalError as e: logger.error("sql: %r, error: %r", text, e) logger.error("traceback: %r", traceback.format_exc()) self._handle_server_closed_connection() except Exception as e: logger.error("sql: %r, error: %r", text, e) logger.error("traceback: %r", traceback.format_exc()) click.secho(str(e), err=True, fg='red') else: try: if self.output_file and not text.startswith(('\\o ', '\\? 
')): try: with open(self.output_file, 'a', encoding='utf-8') as f: click.echo(text, file=f) click.echo('\n'.join(output), file=f) click.echo('', file=f) # extra newline except IOError as e: click.secho(str(e), err=True, fg='red') else: click.echo_via_pager('\n'.join(output)) except KeyboardInterrupt: pass if self.pgspecial.timing_enabled: # Only add humanized time display if > 1 second if query.total_time > 1: print('Time: %0.03fs (%s)' % (query.total_time, humanize.time.naturaldelta(query.total_time))) else: print('Time: %0.03fs' % query.total_time) # Check if we need to update completions, in order of most # to least drastic changes if query.db_changed: with self._completer_lock: self.completer.reset_completions() self.refresh_completions(persist_priorities='keywords') elif query.meta_changed: self.refresh_completions(persist_priorities='all') elif query.path_changed: logger.debug('Refreshing search path') with self._completer_lock: self.completer.set_search_path( self.pgexecute.search_path()) logger.debug('Search path: %r', self.completer.search_path) return query def run_cli(self): logger = self.logger history_file = self.config['main']['history_file'] if history_file == 'default': history_file = config_location() + 'history' history = FileHistory(os.path.expanduser(history_file)) self.refresh_completions(history=history, persist_priorities='none') self.cli = self._build_cli(history) if not self.less_chatty: print('Version:', __version__) print('Chat: https://gitter.im/dbcli/pgcli') print('Mail: https://groups.google.com/forum/#!forum/pgcli') print('Home: http://pgcli.com') try: while True: document = self.cli.run() # The reason we check here instead of inside the pgexecute is # because we want to raise the Exit exception which will be # caught by the try/except block that wraps the pgexecute.run() # statement. if quit_command(document.text): raise EOFError try: document = self.handle_editor_command(self.cli, document) except RuntimeError as e: logger.error("sql: %r, error: %r", document.text, e) logger.error("traceback: %r", traceback.format_exc()) click.secho(str(e), err=True, fg='red') continue # Initialize default metaquery in case execution fails query = MetaQuery(query=document.text, successful=False) watch_command, timing = special.get_watch_command(document.text) if watch_command: while watch_command: try: query = self.execute_command(watch_command, query) click.echo('Waiting for {0} seconds before repeating'.format(timing)) sleep(timing) except KeyboardInterrupt: watch_command = None else: query = self.execute_command(document.text, query) self.now = dt.datetime.today() # Allow PGCompleter to learn user's preferred keywords, etc. 
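# A standalone sketch of the \watch-style loop in run_cli() above: re-run
# a command every few seconds until the user interrupts with Ctrl-C.
# run_once() is an illustrative stand-in for execute_command().
from time import sleep

def watch(run_once, seconds):
    try:
        while True:
            run_once()
            print('Waiting for {0} seconds before repeating'.format(seconds))
            sleep(seconds)
    except KeyboardInterrupt:
        pass  # as in pgcli, Ctrl-C simply ends the watch loop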
with self._completer_lock: self.completer.extend_query_history(document.text) self.query_history.append(query) except EOFError: if not self.less_chatty: print ('Goodbye!') def _build_cli(self, history): def set_vi_mode(value): self.vi_mode = value key_binding_manager = pgcli_bindings( get_vi_mode_enabled=lambda: self.vi_mode, set_vi_mode_enabled=set_vi_mode) def prompt_tokens(_): prompt = self.get_prompt(self.prompt_format) if (self.prompt_format == self.default_prompt and len(prompt) > self.max_len_prompt): prompt = self.get_prompt('\\d> ') return [(Token.Prompt, prompt)] def get_continuation_tokens(cli, width): continuation=self.multiline_continuation_char * (width - 1) + ' ' return [(Token.Continuation, continuation)] get_toolbar_tokens = create_toolbar_tokens_func( lambda: self.vi_mode, self.completion_refresher.is_refreshing, self.pgexecute.failed_transaction, self.pgexecute.valid_transaction) layout = create_prompt_layout( lexer=PygmentsLexer(PostgresLexer), reserve_space_for_menu=self.min_num_menu_lines, get_prompt_tokens=prompt_tokens, get_continuation_tokens=get_continuation_tokens, get_bottom_toolbar_tokens=get_toolbar_tokens, display_completions_in_columns=self.wider_completion_menu, multiline=True, extra_input_processors=[ # Highlight matching brackets while editing. ConditionalProcessor( processor=HighlightMatchingBracketProcessor(chars='[](){}'), filter=HasFocus(DEFAULT_BUFFER) & ~IsDone()), ]) with self._completer_lock: buf = PGBuffer( always_multiline=self.multi_line, multiline_mode=self.multiline_mode, completer=self.completer, history=history, complete_while_typing=Always(), accept_action=AcceptAction.RETURN_DOCUMENT) editing_mode = EditingMode.VI if self.vi_mode else EditingMode.EMACS application = Application( style=style_factory(self.syntax_style, self.cli_style), layout=layout, buffer=buf, key_bindings_registry=key_binding_manager.registry, on_exit=AbortAction.RAISE_EXCEPTION, on_abort=AbortAction.RETRY, ignore_case=True, editing_mode=editing_mode) cli = CommandLineInterface(application=application, eventloop=self.eventloop) return cli def _should_show_limit_prompt(self, status, cur): """returns True if limit prompt should be shown, False otherwise.""" if not is_select(status): return False return self.row_limit > 0 and cur and cur.rowcount > self.row_limit def _evaluate_command(self, text): """Used to run a command entered by the user during CLI operation (Puts the E in REPL) returns (results, MetaQuery) """ logger = self.logger logger.debug('sql: %r', text) all_success = True meta_changed = False # CREATE, ALTER, DROP, etc mutated = False # INSERT, DELETE, etc db_changed = False path_changed = False output = [] total = 0 # Run the query. start = time() on_error_resume = self.on_error == 'RESUME' res = self.pgexecute.run(text, self.pgspecial, exception_formatter, on_error_resume) for title, cur, headers, status, sql, success in res: logger.debug("headers: %r", headers) logger.debug("rows: %r", cur) logger.debug("status: %r", status) threshold = self.row_limit if self._should_show_limit_prompt(status, cur): click.secho('The result set has more than %s rows.' 
% threshold, fg='red') if not click.confirm('Do you want to continue?'): click.secho("Aborted!", err=True, fg='red') break if self.pgspecial.auto_expand or self.auto_expand: max_width = self.cli.output.get_size().columns else: max_width = None expanded = self.pgspecial.expanded_output or self.expanded_output settings = OutputSettings( table_format=self.table_format, dcmlfmt=self.decimal_format, floatfmt=self.float_format, missingval=self.null_string, expanded=expanded, max_width=max_width, case_function=( self.completer.case if self.settings['case_column_headers'] else lambda x: x ) ) formatted = format_output(title, cur, headers, status, settings) output.extend(formatted) total = time() - start # Keep track of whether any of the queries are mutating or changing # the database if success: mutated = mutated or is_mutating(status) db_changed = db_changed or has_change_db_cmd(sql) meta_changed = meta_changed or has_meta_cmd(sql) path_changed = path_changed or has_change_path_cmd(sql) else: all_success = False meta_query = MetaQuery(text, all_success, total, meta_changed, db_changed, path_changed, mutated) return output, meta_query def _handle_server_closed_connection(self): """Used during CLI execution""" reconnect = click.prompt( 'Connection reset. Reconnect (Y/n)', show_default=False, type=bool, default=True) if reconnect: try: self.pgexecute.connect() click.secho('Reconnected!\nTry the command again.', fg='green') except OperationalError as e: click.secho(str(e), err=True, fg='red') def refresh_completions(self, history=None, persist_priorities='all'): """ Refresh outdated completions :param history: A prompt_toolkit.history.FileHistory object. Used to load keyword and identifier preferences :param persist_priorities: 'all' or 'keywords' """ callback = functools.partial(self._on_completions_refreshed, persist_priorities=persist_priorities) self.completion_refresher.refresh(self.pgexecute, self.pgspecial, callback, history=history, settings=self.settings) return [(None, None, None, 'Auto-completion refresh started in the background.')] def _on_completions_refreshed(self, new_completer, persist_priorities): self._swap_completer_objects(new_completer, persist_priorities) if self.cli: # After refreshing, redraw the CLI to clear the statusbar # "Refreshing completions..." indicator self.cli.request_redraw() def _swap_completer_objects(self, new_completer, persist_priorities): """Swap the completer object in cli with the newly created completer. persist_priorities is a string specifying how the old completer's learned prioritizer should be transferred to the new completer. 'none' - The new prioritizer is left in a new/clean state 'all' - The new prioritizer is updated to exactly reflect the old one 'keywords' - The new prioritizer is updated with old keyword priorities, but not any other. """ with self._completer_lock: old_completer = self.completer self.completer = new_completer if persist_priorities == 'all': # Just swap over the entire prioritizer new_completer.prioritizer = old_completer.prioritizer elif persist_priorities == 'keywords': # Swap over the entire prioritizer, but clear name priorities, # leaving learned keyword priorities alone new_completer.prioritizer = old_completer.prioritizer new_completer.prioritizer.clear_names() elif persist_priorities == 'none': # Leave the new prioritizer as is pass # When pgcli is first launched we call refresh_completions before # instantiating the cli object. 
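# A standalone sketch of the locked swap in _swap_completer_objects()
# above: the new completer replaces the old one under a lock, optionally
# inheriting the old completer's learned priorities. The classes are
# illustrative stand-ins.
import threading

class CompleterHolder(object):
    def __init__(self, completer):
        self.completer = completer
        self._lock = threading.Lock()

    def swap(self, new_completer, persist_priorities='all'):
        with self._lock:
            old = self.completer
            if persist_priorities == 'all':
                new_completer.prioritizer = old.prioritizer
            elif persist_priorities == 'keywords':
                new_completer.prioritizer = old.prioritizer
                new_completer.prioritizer.clear_names()
            # 'none': leave the new prioritizer in its clean state
            self.completer = new_completer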
So it is necessary to check if cli # exists before trying the replace the completer object in cli. if self.cli: self.cli.current_buffer.completer = new_completer def get_completions(self, text, cursor_positition): with self._completer_lock: return self.completer.get_completions( Document(text=text, cursor_position=cursor_positition), None) def get_prompt(self, string): string = string.replace('\\t', self.now.strftime('%x %X')) string = string.replace('\\u', self.pgexecute.user or '(none)') string = string.replace('\\h', self.pgexecute.host or '(none)') string = string.replace('\\d', self.pgexecute.dbname or '(none)') string = string.replace('\\p', str(self.pgexecute.port) or '(none)') string = string.replace('\\i', str(self.pgexecute.pid) or '(none)') string = string.replace('\\#', "#" if (self.pgexecute.superuser) else ">") string = string.replace('\\n', "\n") return string def get_last_query(self): """Get the last query executed or None.""" return self.query_history[-1][0] if self.query_history else None @click.command() # Default host is '' so psycopg2 can default to either localhost or unix socket @click.option('-h', '--host', default='', envvar='PGHOST', help='Host address of the postgres database.') @click.option('-p', '--port', default=5432, help='Port number at which the ' 'postgres instance is listening.', envvar='PGPORT') @click.option('-U', '--username', 'username_opt', envvar='PGUSER', help='Username to connect to the postgres database.') @click.option('-W', '--password', 'prompt_passwd', is_flag=True, default=False, help='Force password prompt.') @click.option('-w', '--no-password', 'never_prompt', is_flag=True, default=False, help='Never prompt for password.') @click.option('--single-connection', 'single_connection', is_flag=True, default=False, help='Do not use a separate connection for completions.') @click.option('-v', '--version', is_flag=True, help='Version of pgcli.') @click.option('-d', '--dbname', default='', envvar='PGDATABASE', help='database name to connect to.') @click.option('--pgclirc', default=config_location() + 'config', envvar='PGCLIRC', help='Location of pgclirc file.') @click.option('-D', '--dsn', default='', envvar='DSN', help='Use DSN configured into the [alias_dsn] section of pgclirc file.') @click.option('--row-limit', default=None, envvar='PGROWLIMIT', type=click.INT, help='Set threshold for row limit prompt. Use 0 to disable prompt.') @click.option('--less-chatty', 'less_chatty', is_flag=True, default=False, help='Skip intro on startup and goodbye on exit.') @click.option('--prompt', help='Prompt format (Default: "\\u@\\h:\\d> ").') @click.argument('database', default=lambda: None, envvar='PGDATABASE', nargs=1) @click.argument('username', default=lambda: None, envvar='PGUSER', nargs=1) def cli(database, username_opt, host, port, prompt_passwd, never_prompt, single_connection, dbname, username, version, pgclirc, dsn, row_limit, less_chatty, prompt): if version: print('Version:', __version__) sys.exit(0) config_dir = os.path.dirname(config_location()) if not os.path.exists(config_dir): os.makedirs(config_dir) # Migrate the config file from old location. 
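# A standalone sketch of the token substitution that get_prompt() above
# performs on prompt format strings like the default '\u@\h:\d> '; the
# sample connection values are illustrative.
def expand_prompt(fmt, user=None, host=None, dbname=None):
    fmt = fmt.replace('\\u', user or '(none)')
    fmt = fmt.replace('\\h', host or '(none)')
    fmt = fmt.replace('\\d', dbname or '(none)')
    return fmt

assert expand_prompt('\\u@\\h:\\d> ', 'alice', 'pghost', 'sales') == 'alice@pghost:sales> '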
config_full_path = config_location() + 'config' if os.path.exists(os.path.expanduser('~/.pgclirc')): if not os.path.exists(config_full_path): shutil.move(os.path.expanduser('~/.pgclirc'), config_full_path) print ('Config file (~/.pgclirc) moved to new location', config_full_path) else: print ('Config file is now located at', config_full_path) print ('Please move the existing config file ~/.pgclirc to', config_full_path) pgcli = PGCli(prompt_passwd, never_prompt, pgclirc_file=pgclirc, row_limit=row_limit, single_connection=single_connection, less_chatty=less_chatty, prompt=prompt) # Choose which ever one has a valid value. database = database or dbname user = username_opt or username if dsn is not '': try: cfg = load_config(config_full_path) dsn_config = cfg['alias_dsn'][dsn] except: click.secho('Invalid DSNs found in the config file. '\ 'Please check the "[alias_dsn]" section in pgclirc.', err=True, fg='red') exit(1) pgcli.connect_uri(dsn_config) elif '://' in database: pgcli.connect_uri(database) elif "=" in database: pgcli.connect_dsn(database) elif os.environ.get('PGSERVICE', None): pgcli.connect_dsn('service={0}'.format(os.environ['PGSERVICE'])) else: pgcli.connect(database, host, user, port) pgcli.logger.debug('Launch Params: \n' '\tdatabase: %r' '\tuser: %r' '\thost: %r' '\tport: %r', database, user, host, port) if setproctitle: obfuscate_process_password() pgcli.run_cli() def obfuscate_process_password(): process_title = setproctitle.getproctitle() if '://' in process_title: process_title = re.sub(r":(.*):(.*)@", r":\1:xxxx@", process_title) elif "=" in process_title: process_title = re.sub(r"password=(.+?)((\s[a-zA-Z]+=)|$)", r"password=xxxx\2", process_title) setproctitle.setproctitle(process_title) def format_output(title, cur, headers, status, settings): output = [] missingval = settings.missingval table_format = settings.table_format dcmlfmt = settings.dcmlfmt floatfmt = settings.floatfmt expanded = settings.expanded max_width = settings.max_width case_function = settings.case_function if title: # Only print the title if it's not None. output.append(title) if cur: headers = [case_function(utf8tounicode(x)) for x in headers] if expanded and headers: output.append(expanded_table(cur, headers, missingval)) else: tabulated, rows = tabulate(cur, headers, tablefmt=table_format, missingval=missingval, dcmlfmt=dcmlfmt, floatfmt=floatfmt) if (max_width and rows and content_exceeds_width(rows[0], max_width) and headers): output.append(expanded_table(rows, headers, missingval)) else: output.append(tabulated) if status: # Only print the status if it's not None. 
output.append(status) return output def has_meta_cmd(query): """Determines if the completion needs a refresh by checking if the sql statement is an alter, create, or drop""" try: first_token = query.split()[0] if first_token.lower() in ('alter', 'create', 'drop'): return True except Exception: return False return False def has_change_db_cmd(query): """Determines if the statement is a database switch such as 'use' or '\\c'""" try: first_token = query.split()[0] if first_token.lower() in ('use', '\\c', '\\connect'): return True except Exception: return False return False def has_change_path_cmd(sql): """Determines if the search_path should be refreshed by checking if the sql has 'set search_path'.""" return 'set search_path' in sql.lower() def is_mutating(status): """Determines if the statement is mutating based on the status.""" if not status: return False mutating = set(['insert', 'update', 'delete']) return status.split(None, 1)[0].lower() in mutating def is_select(status): """Returns true if the first word in status is 'select'.""" if not status: return False return status.split(None, 1)[0].lower() == 'select' def quit_command(sql): return (sql.strip().lower() == 'exit' or sql.strip().lower() == 'quit' or sql.strip() == '\q' or sql.strip() == ':q') def exception_formatter(e): return click.style(utf8tounicode(str(e)), fg='red') if __name__ == "__main__": cli() pgcli-1.6.0/pgcli/packages/0000755000076500000240000000000013112353401015706 5ustar irinastaff00000000000000pgcli-1.6.0/pgcli/packages/__init__.py0000644000076500000240000000000012572656503020027 0ustar irinastaff00000000000000pgcli-1.6.0/pgcli/packages/expanded.py0000644000076500000240000000204713057724327020074 0ustar irinastaff00000000000000from .tabulate import _text_type from ..encodingutils import utf8tounicode def pad(field, total, char=u" "): return field + (char * (total - len(field))) def expanded_table(rows, headers, missingval=""): header_len = max([len(x) for x in headers]) max_row_len = 0 results = [] sep = u"-[ RECORD {0} ]-------------------------\n" padded_headers = [pad(x, header_len) + u" |" for x in headers] header_len += 2 for row in rows: row_len = max([len(_text_type(utf8tounicode(x))) for x in row]) row_result = [] if row_len > max_row_len: max_row_len = row_len for header, value in zip(padded_headers, row): value = missingval if value is None else value row_result.append((u"%s" % header) + " " + (u"%s" % utf8tounicode(value)).strip()) results.append('\n'.join(row_result)) output = [] for i, result in enumerate(results): output.append(sep.format(i)) output.append(result) output.append('\n') return ''.join(output) pgcli-1.6.0/pgcli/packages/ordereddict.py0000644000076500000240000001017512575163646020602 0ustar irinastaff00000000000000# Copyright (c) 2009 Raymond Hettinger # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. 
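# A usage sketch of expanded_table() above (the \x-style vertical output),
# assuming the pgcli package is importable; the rows are illustrative.
from pgcli.packages.expanded import expanded_table

rows = [(1, 'apple'), (2, None)]
print(expanded_table(rows, ['id', 'name'], missingval='<null>'))
# Emits one '-[ RECORD n ]-...' separator per row, followed by a
# 'header | value' line per column; None is rendered as '<null>'.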
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. from UserDict import DictMixin class OrderedDict(dict, DictMixin): def __init__(self, *args, **kwds): if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) try: self.__end except AttributeError: self.clear() self.update(*args, **kwds) def clear(self): self.__end = end = [] end += [None, end, end] # sentinel node for doubly linked list self.__map = {} # key --> [key, prev, next] dict.clear(self) def __setitem__(self, key, value): if key not in self: end = self.__end curr = end[1] curr[2] = end[1] = self.__map[key] = [key, curr, end] dict.__setitem__(self, key, value) def __delitem__(self, key): dict.__delitem__(self, key) key, prev, next = self.__map.pop(key) prev[2] = next next[1] = prev def __iter__(self): end = self.__end curr = end[2] while curr is not end: yield curr[0] curr = curr[2] def __reversed__(self): end = self.__end curr = end[1] while curr is not end: yield curr[0] curr = curr[1] def popitem(self, last=True): if not self: raise KeyError('dictionary is empty') if last: key = reversed(self).next() else: key = iter(self).next() value = self.pop(key) return key, value def __reduce__(self): items = [[k, self[k]] for k in self] tmp = self.__map, self.__end del self.__map, self.__end inst_dict = vars(self).copy() self.__map, self.__end = tmp if inst_dict: return (self.__class__, (items,), inst_dict) return self.__class__, (items,) def keys(self): return list(self) setdefault = DictMixin.setdefault update = DictMixin.update pop = DictMixin.pop values = DictMixin.values items = DictMixin.items iterkeys = DictMixin.iterkeys itervalues = DictMixin.itervalues iteritems = DictMixin.iteritems def __repr__(self): if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, self.items()) def copy(self): return self.__class__(self) @classmethod def fromkeys(cls, iterable, value=None): d = cls() for key in iterable: d[key] = value return d def __eq__(self, other): if isinstance(other, OrderedDict): if len(self) != len(other): return False for p, q in zip(self.items(), other.items()): if p != q: return False return True return dict.__eq__(self, other) def __ne__(self, other): return not self == other pgcli-1.6.0/pgcli/packages/parseutils/0000755000076500000240000000000013112353401020101 5ustar irinastaff00000000000000pgcli-1.6.0/pgcli/packages/parseutils/__init__.py0000644000076500000240000000000012756663177022233 0ustar irinastaff00000000000000pgcli-1.6.0/pgcli/packages/parseutils/ctes.py0000644000076500000240000001124513015116244021420 0ustar irinastaff00000000000000from sqlparse import parse from sqlparse.tokens import Keyword, CTE, DML from sqlparse.sql import Identifier, IdentifierList, Parenthesis from collections import namedtuple from .meta import TableMetadata, ColumnMetadata # TableExpression is a namedtuple representing a CTE, used internally # name: cte alias assigned in the query # columns: list of column names # start: index into the original string of the left parens starting the CTE # stop: index into the original string of the right 
parens ending the CTE TableExpression = namedtuple('TableExpression', 'name columns start stop') def isolate_query_ctes(full_text, text_before_cursor): """Simplify a query by converting CTEs into table metadata objects """ if not full_text: return full_text, text_before_cursor, tuple() ctes, remainder = extract_ctes(full_text) if not ctes: return full_text, text_before_cursor, () current_position = len(text_before_cursor) meta = [] for cte in ctes: if cte.start < current_position < cte.stop: # Currently editing a cte - treat its body as the current full_text text_before_cursor = full_text[cte.start:current_position] full_text = full_text[cte.start:cte.stop] return full_text, text_before_cursor, meta # Append this cte to the list of available table metadata cols = (ColumnMetadata(name, None, ()) for name in cte.columns) meta.append(TableMetadata(cte.name, cols)) # Editing past the last cte (ie the main body of the query) full_text = full_text[ctes[-1].stop:] text_before_cursor = text_before_cursor[ctes[-1].stop:current_position] return full_text, text_before_cursor, tuple(meta) def extract_ctes(sql): """ Extract constant table expresseions from a query Returns tuple (ctes, remainder_sql) ctes is a list of TableExpression namedtuples remainder_sql is the text from the original query after the CTEs have been stripped. """ p = parse(sql)[0] # Make sure the first meaningful token is "WITH" which is necessary to # define CTEs idx, tok = p.token_next(-1, skip_ws=True, skip_cm=True) if not (tok and tok.ttype == CTE): return [], sql # Get the next (meaningful) token, which should be the first CTE idx, tok = p.token_next(idx) if not tok: return ([], '') start_pos = token_start_pos(p.tokens, idx) ctes = [] if isinstance(tok, IdentifierList): # Multiple ctes for t in tok.get_identifiers(): cte_start_offset = token_start_pos(tok.tokens, tok.token_index(t)) cte = get_cte_from_token(t, start_pos + cte_start_offset) if not cte: continue ctes.append(cte) elif isinstance(tok, Identifier): # A single CTE cte = get_cte_from_token(tok, start_pos) if cte: ctes.append(cte) idx = p.token_index(tok) + 1 # Collapse everything after the ctes into a remainder query remainder = u''.join(str(tok) for tok in p.tokens[idx:]) return ctes, remainder def get_cte_from_token(tok, pos0): cte_name = tok.get_real_name() if not cte_name: return None # Find the start position of the opening parens enclosing the cte body idx, parens = tok.token_next_by(Parenthesis) if not parens: return None start_pos = pos0 + token_start_pos(tok.tokens, idx) cte_len = len(str(parens)) # includes parens stop_pos = start_pos + cte_len column_names = extract_column_names(parens) return TableExpression(cte_name, column_names, start_pos, stop_pos) def extract_column_names(parsed): # Find the first DML token to check if it's a SELECT or INSERT/UPDATE/DELETE idx, tok = parsed.token_next_by(t=DML) tok_val = tok and tok.value.lower() if tok_val in ('insert', 'update', 'delete'): # Jump ahead to the RETURNING clause where the list of column names is idx, tok = parsed.token_next_by(idx, (Keyword, 'returning')) elif not tok_val == 'select': # Must be invalid CTE return () # The next token should be either a column name, or a list of column names idx, tok = parsed.token_next(idx, skip_ws=True, skip_cm=True) return tuple(t.get_name() for t in _identifiers(tok)) def token_start_pos(tokens, idx): return sum(len(str(t)) for t in tokens[:idx]) def _identifiers(tok): if isinstance(tok, IdentifierList): for t in tok.get_identifiers(): # NB: 
IdentifierList.get_identifiers() can return non-identifiers! if isinstance(t, Identifier): yield t elif isinstance(tok, Identifier): yield tok pgcli-1.6.0/pgcli/packages/parseutils/meta.py0000644000076500000240000000565513101470176021423 0ustar irinastaff00000000000000from collections import namedtuple ColumnMetadata = namedtuple('ColumnMetadata', ['name', 'datatype', 'foreignkeys']) ForeignKey = namedtuple('ForeignKey', ['parentschema', 'parenttable', 'parentcolumn', 'childschema', 'childtable', 'childcolumn']) TableMetadata = namedtuple('TableMetadata', 'name columns') class FunctionMetadata(object): def __init__(self, schema_name, func_name, arg_names, arg_types, arg_modes, return_type, is_aggregate, is_window, is_set_returning): """Class for describing a postgresql function""" self.schema_name = schema_name self.func_name = func_name self.arg_modes = tuple(arg_modes) if arg_modes else None self.arg_names = tuple(arg_names) if arg_names else None # Be flexible in not requiring arg_types -- use None as a placeholder # for each arg. (Used for compatibility with old versions of postgresql # where such info is hard to get. if arg_types: self.arg_types = tuple(arg_types) elif arg_modes: self.arg_types = tuple([None] * len(arg_modes)) elif arg_names: self.arg_types = tuple([None] * len(arg_names)) else: self.arg_types = None self.return_type = return_type.strip() self.is_aggregate = is_aggregate self.is_window = is_window self.is_set_returning = is_set_returning def __eq__(self, other): return (isinstance(other, self.__class__) and self.__dict__ == other.__dict__) def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash((self.schema_name, self.func_name, self.arg_names, self.arg_types, self.arg_modes, self.return_type, self.is_aggregate, self.is_window, self.is_set_returning)) def __repr__(self): return (('%s(schema_name=%r, func_name=%r, arg_names=%r, ' 'arg_types=%r, arg_modes=%r, return_type=%r, is_aggregate=%r, ' 'is_window=%r, is_set_returning=%r)') % (self.__class__.__name__, self.schema_name, self.func_name, self.arg_names, self.arg_types, self.arg_modes, self.return_type, self.is_aggregate, self.is_window, self.is_set_returning)) def fields(self): """Returns a list of output-field ColumnMetadata namedtuples""" if self.return_type.lower() == 'void': return [] elif not self.arg_modes: # For functions without output parameters, the function name # is used as the name of the output column. # E.g. 'SELECT unnest FROM unnest(...);' return [ColumnMetadata(self.func_name, self.return_type, [])] return [ColumnMetadata(name, typ, []) for name, typ, mode in zip( self.arg_names, self.arg_types, self.arg_modes) if mode in ('o', 'b', 't')] # OUT, INOUT, TABLE pgcli-1.6.0/pgcli/packages/parseutils/tables.py0000644000076500000240000001446413005465503021746 0ustar irinastaff00000000000000from __future__ import print_function import sqlparse from collections import namedtuple from sqlparse.sql import IdentifierList, Identifier, Function from sqlparse.tokens import Keyword, DML, Punctuation TableReference = namedtuple('TableReference', ['schema', 'name', 'alias', 'is_function']) TableReference.ref = property(lambda self: self.alias or ( self.name if self.name.islower() or self.name[0] == '"' else '"' + self.name + '"')) # This code is borrowed from sqlparse example script. 
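# A usage sketch of extract_ctes() above, assuming the pgcli package is
# importable; the exact token grouping (and therefore the start/stop
# offsets) depends on the sqlparse version pgcli pins.
from pgcli.packages.parseutils.ctes import extract_ctes

sql = 'WITH x AS (SELECT 1 AS a, 2 AS b) SELECT a FROM x'
ctes, remainder = extract_ctes(sql)
print(ctes)       # e.g. [TableExpression(name='x', columns=('a', 'b'), start=10, stop=33)]
print(remainder)  # the trailing ' SELECT a FROM x'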
# def is_subselect(parsed): if not parsed.is_group: return False for item in parsed.tokens: if item.ttype is DML and item.value.upper() in ('SELECT', 'INSERT', 'UPDATE', 'CREATE', 'DELETE'): return True return False def _identifier_is_function(identifier): return any(isinstance(t, Function) for t in identifier.tokens) def extract_from_part(parsed, stop_at_punctuation=True): tbl_prefix_seen = False for item in parsed.tokens: if tbl_prefix_seen: if is_subselect(item): for x in extract_from_part(item, stop_at_punctuation): yield x elif stop_at_punctuation and item.ttype is Punctuation: raise StopIteration # An incomplete nested select won't be recognized correctly as a # sub-select. eg: 'SELECT * FROM (SELECT id FROM user'. This causes # the second FROM to trigger this elif condition resulting in a # StopIteration. So we need to ignore the keyword if the keyword # FROM. # Also 'SELECT * FROM abc JOIN def' will trigger this elif # condition. So we need to ignore the keyword JOIN and its variants # INNER JOIN, FULL OUTER JOIN, etc. elif item.ttype is Keyword and ( not item.value.upper() == 'FROM') and ( not item.value.upper().endswith('JOIN')): tbl_prefix_seen = False else: yield item elif item.ttype is Keyword or item.ttype is Keyword.DML: item_val = item.value.upper() if (item_val in ('COPY', 'FROM', 'INTO', 'UPDATE', 'TABLE') or item_val.endswith('JOIN')): tbl_prefix_seen = True # 'SELECT a, FROM abc' will detect FROM as part of the column list. # So this check here is necessary. elif isinstance(item, IdentifierList): for identifier in item.get_identifiers(): if (identifier.ttype is Keyword and identifier.value.upper() == 'FROM'): tbl_prefix_seen = True break def extract_table_identifiers(token_stream, allow_functions=True): """yields tuples of TableReference namedtuples""" # We need to do some massaging of the names because postgres is case- # insensitive and '"Foo"' is not the same table as 'Foo' (while 'foo' is) def parse_identifier(item): name = item.get_real_name() schema_name = item.get_parent_name() alias = item.get_alias() if not name: schema_name = None name = item.get_name() alias = alias or name schema_quoted = schema_name and item.value[0] == '"' if schema_name and not schema_quoted: schema_name = schema_name.lower() quote_count = item.value.count('"') name_quoted = quote_count > 2 or (quote_count and not schema_quoted) alias_quoted = alias and item.value[-1] == '"' if alias_quoted or name_quoted and not alias and name.islower(): alias = '"' + (alias or name) + '"' if name and not name_quoted and not name.islower(): if not alias: alias = name name = name.lower() return schema_name, name, alias for item in token_stream: if isinstance(item, IdentifierList): for identifier in item.get_identifiers(): # Sometimes Keywords (such as FROM ) are classified as # identifiers which don't have the get_real_name() method. 
try: schema_name = identifier.get_parent_name() real_name = identifier.get_real_name() is_function = (allow_functions and _identifier_is_function(identifier)) except AttributeError: continue if real_name: yield TableReference(schema_name, real_name, identifier.get_alias(), is_function) elif isinstance(item, Identifier): schema_name, real_name, alias = parse_identifier(item) is_function = allow_functions and _identifier_is_function(item) yield TableReference(schema_name, real_name, alias, is_function) elif isinstance(item, Function): schema_name, real_name, alias = parse_identifier(item) yield TableReference(None, real_name, alias, allow_functions) # extract_tables is inspired from examples in the sqlparse lib. def extract_tables(sql): """Extract the table names from an SQL statment. Returns a list of TableReference namedtuples """ parsed = sqlparse.parse(sql) if not parsed: return () # INSERT statements must stop looking for tables at the sign of first # Punctuation. eg: INSERT INTO abc (col1, col2) VALUES (1, 2) # abc is the table name, but if we don't stop at the first lparen, then # we'll identify abc, col1 and col2 as table names. insert_stmt = parsed[0].token_first().value.lower() == 'insert' stream = extract_from_part(parsed[0], stop_at_punctuation=insert_stmt) # Kludge: sqlparse mistakenly identifies insert statements as # function calls due to the parenthesized column list, e.g. interprets # "insert into foo (bar, baz)" as a function call to foo with arguments # (bar, baz). So don't allow any identifiers in insert statements # to have is_function=True identifiers = extract_table_identifiers(stream, allow_functions=not insert_stmt) # In the case 'sche.', we get an empty TableReference; remove that return tuple(i for i in identifiers if i.name) pgcli-1.6.0/pgcli/packages/parseutils/utils.py0000644000076500000240000001066113005465503021627 0ustar irinastaff00000000000000from __future__ import print_function import re import sqlparse from sqlparse.sql import Identifier from sqlparse.tokens import Token, Error cleanup_regex = { # This matches only alphanumerics and underscores. 'alphanum_underscore': re.compile(r'(\w+)$'), # This matches everything except spaces, parens, colon, and comma 'many_punctuations': re.compile(r'([^():,\s]+)$'), # This matches everything except spaces, parens, colon, comma, and period 'most_punctuations': re.compile(r'([^\.():,\s]+)$'), # This matches everything except a space. 'all_punctuations': re.compile('([^\s]+)$'), } def last_word(text, include='alphanum_underscore'): """ Find the last word in a sentence. 
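    The optional `include` argument names one of the cleanup_regex patterns
    above, and so controls which punctuation characters may count as part of
    the word.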
>>> last_word('abc') 'abc' >>> last_word(' abc') 'abc' >>> last_word('') '' >>> last_word(' ') '' >>> last_word('abc ') '' >>> last_word('abc def') 'def' >>> last_word('abc def ') '' >>> last_word('abc def;') '' >>> last_word('bac $def') 'def' >>> last_word('bac $def', include='most_punctuations') '$def' >>> last_word('bac \def', include='most_punctuations') '\\\\def' >>> last_word('bac \def;', include='most_punctuations') '\\\\def;' >>> last_word('bac::def', include='most_punctuations') 'def' >>> last_word('"foo*bar', include='most_punctuations') '"foo*bar' """ if not text: # Empty string return '' if text[-1].isspace(): return '' else: regex = cleanup_regex[include] matches = regex.search(text) if matches: return matches.group(0) else: return '' def find_prev_keyword(sql, n_skip=0): """ Find the last sql keyword in an SQL statement Returns the value of the last keyword, and the text of the query with everything after the last keyword stripped """ if not sql.strip(): return None, '' parsed = sqlparse.parse(sql)[0] flattened = list(parsed.flatten()) flattened = flattened[:len(flattened)-n_skip] logical_operators = ('AND', 'OR', 'NOT', 'BETWEEN') for t in reversed(flattened): if t.value == '(' or (t.is_keyword and ( t.value.upper() not in logical_operators)): # Find the location of token t in the original parsed statement # We can't use parsed.token_index(t) because t may be a child token # inside a TokenList, in which case token_index thows an error # Minimal example: # p = sqlparse.parse('select * from foo where bar') # t = list(p.flatten())[-3] # The "Where" token # p.token_index(t) # Throws ValueError: not in list idx = flattened.index(t) # Combine the string values of all tokens in the original list # up to and including the target keyword token t, to produce a # query string with everything after the keyword token removed text = ''.join(tok.value for tok in flattened[:idx+1]) return t, text return None, '' # Postgresql dollar quote signs look like `$$` or `$tag$` dollar_quote_regex = re.compile(r'^\$[^$]*\$$') def is_open_quote(sql): """Returns true if the query contains an unclosed quote""" # parsed can contain one or more semi-colon separated commands parsed = sqlparse.parse(sql) return any(_parsed_is_open_quote(p) for p in parsed) def _parsed_is_open_quote(parsed): # Look for unmatched single quotes, or unmatched dollar sign quotes return any(tok.match(Token.Error, ("'", "$")) for tok in parsed.flatten()) def parse_partial_identifier(word): """Attempt to parse a (partially typed) word as an identifier word may include a schema qualification, like `schema_name.partial_name` or `schema_name.` There may also be unclosed quotation marks, like `"schema`, or `schema."partial_name` :param word: string representing a (partially complete) identifier :return: sqlparse.sql.Identifier, or None """ p = sqlparse.parse(word)[0] n_tok = len(p.tokens) if n_tok == 1 and isinstance(p.tokens[0], Identifier): return p.tokens[0] elif p.token_next_by(m=(Error, '"'))[1]: # An unmatched double quote, e.g. 
'"foo', 'foo."', or 'foo."bar' # Close the double quote, then reparse return parse_partial_identifier(word + '"') else: return None pgcli-1.6.0/pgcli/packages/pgliterals/0000755000076500000240000000000013112353401020054 5ustar irinastaff00000000000000pgcli-1.6.0/pgcli/packages/pgliterals/__init__.py0000644000076500000240000000000212606361441022167 0ustar irinastaff00000000000000 pgcli-1.6.0/pgcli/packages/pgliterals/main.py0000644000076500000240000000060213101210552021345 0ustar irinastaff00000000000000import os import json root = os.path.dirname(__file__) literal_file = os.path.join(root, 'pgliterals.json') with open(literal_file) as f: literals = json.load(f) def get_literals(literal_type, type_=tuple): # Where `literal_type` is one of 'keywords', 'functions', 'datatypes', # returns a tuple of literal values of that type. return type_(literals[literal_type]) pgcli-1.6.0/pgcli/packages/pgliterals/pgliterals.json0000644000076500000240000001426113101210552023116 0ustar irinastaff00000000000000{ "keywords": { "ACCESS": [], "ADD": [], "ALL": [], "ALTER": [ "AGGREGATE", "COLLATION", "COLUMN", "CONVERSION", "DATABASE", "DEFAULT", "DOMAIN", "EVENT TRIGGER", "EXTENSION", "FOREIGN", "FUNCTION", "GROUP", "INDEX", "LANGUAGE", "LARGE OBJECT", "MATERIALIZED VIEW", "OPERATOR", "POLICY", "ROLE", "RULE", "SCHEMA", "SEQUENCE", "SERVER", "SYSTEM", "TABLE", "TABLESPACE", "TEXT SEARCH", "TRIGGER", "TYPE", "USER", "VIEW" ], "AND": [], "ANY": [], "AS": [], "ASC": [], "AUDIT": [], "BEGIN": [], "BETWEEN": [], "BY": [], "CASE": [], "CHAR": [], "CHECK": [], "CLUSTER": [], "COLUMN": [], "COMMENT": [], "COMPRESS": [], "CONCURRENTLY": [], "CONNECT": [], "COPY": [], "CREATE": [ "ACCESS METHOD", "AGGREGATE", "CAST", "COLLATION", "CONVERSION", "DATABASE", "DOMAIN", "EVENT TRIGGER", "EXTENSION", "FOREIGN DATA WRAPPER", "FOREIGN EXTENSION", "FUNCTION", "GLOBAL", "GROUP", "IF NOT EXISTS", "INDEX", "LANGUAGE", "LOCAL", "MATERIALIZED VIEW", "OPERATOR", "OR REPLACE", "POLICY", "ROLE", "RULE", "SCHEMA", "SEQUENCE", "SERVER", "TABLE", "TABLESPACE", "TEMPORARY", "TEXT SEARCH", "TRIGGER", "TYPE", "UNIQUE", "UNLOGGED", "USER", "USER MAPPING", "VIEW" ], "CURRENT": [], "DATABASE": [], "DATE": [], "DECIMAL": [], "DEFAULT": [], "DELETE FROM": [], "DELIMITER": [], "DESC": [], "DESCRIBE": [], "DISTINCT": [], "DROP": [ "ACCESS METHOD", "AGGREGATE", "CAST", "COLLATION", "CONVERSION", "DATABASE", "DOMAIN", "EVENT TRIGGER", "EXTENSION", "FOREIGN DATA WRAPPER", "FOREIGN TABLE", "FUNCTION", "GROUP", "INDEX", "LANGUAGE", "MATERIALIZED VIEW", "OPERATOR", "OWNED", "POLICY", "ROLE", "RULE", "SCHEMA", "SEQUENCE", "SERVER", "TABLE", "TABLESPACE", "TEXT SEARCH", "TRANSFORM", "TRIGGER", "TYPE", "USER", "USER MAPPING", "VIEW" ], "EXPLAIN": [], "ELSE": [], "ENCODING": [], "ESCAPE": [], "EXCLUSIVE": [], "EXISTS": [], "EXTENSION": [], "FILE": [], "FLOAT": [], "FOR": [], "FORMAT": [], "FORCE_QUOTE": [], "FORCE_NOT_NULL": [], "FREEZE": [], "FROM": [], "FULL": [], "FUNCTION": [], "GRANT": [], "GROUP BY": [], "HAVING": [], "HEADER": [], "IDENTIFIED": [], "IMMEDIATE": [], "IN": [], "INCREMENT": [], "INDEX": [], "INITIAL": [], "INSERT INTO": [], "INTEGER": [], "INTERSECT": [], "INTERVAL": [], "INTO": [], "IS": [], "JOIN": [], "LANGUAGE": [], "LEFT": [], "LEVEL": [], "LIKE": [], "LIMIT": [], "LOCK": [], "LONG": [], "MATERIALIZED VIEW": [], "MAXEXTENTS": [], "MINUS": [], "MLSLABEL": [], "MODE": [], "MODIFY": [], "NOT": [], "NOAUDIT": [], "NOTICE": [], "NOCOMPRESS": [], "NOWAIT": [], "NULL": [], "NUMBER": [], "OIDS": [], "OF": [], "OFFLINE": [], "ON": [], 
"ONLINE": [], "OPTION": [], "OR": [], "ORDER BY": [], "OUTER": [], "OWNER": [], "PCTFREE": [], "PRIMARY": [], "PRIOR": [], "PRIVILEGES": [], "QUOTE": [], "RAISE": [], "RENAME": [], "REPLACE": [], "RESET": ["ALL"], "RAW": [], "REFRESH MATERIALIZED VIEW": [], "RESOURCE": [], "RETURNS": [], "REVOKE": [], "RIGHT": [], "ROW": [], "ROWID": [], "ROWNUM": [], "ROWS": [], "SELECT": [], "SESSION": [], "SET": [], "SHARE": [], "SHOW": [], "SIZE": [], "SMALLINT": [], "START": [], "SUCCESSFUL": [], "SYNONYM": [], "SYSDATE": [], "TABLE": [], "TEMPLATE": [], "THEN": [], "TO": [], "TRIGGER": [], "TRUNCATE": [], "UID": [], "UNION": [], "UNIQUE": [], "UPDATE": [], "USE": [], "USER": [], "USING": [], "VALIDATE": [], "VALUES": [], "VARCHAR": [], "VARCHAR2": [], "VIEW": [], "WHEN": [], "WHENEVER": [], "WHERE": [], "WITH": [] }, "functions": [ "AVG", "COUNT", "FIRST", "FORMAT", "LAST", "LCASE", "LEN", "MAX", "MIN", "MID", "NOW", "ROUND", "SUM", "TOP", "UCASE" ], "datatypes": [ "BIGINT", "BOOLEAN", "CHAR", "DATE", "DOUBLE PRECISION", "INT", "INTEGER", "NUMERIC", "REAL", "TEXT", "VARCHAR", "VOID" ] } pgcli-1.6.0/pgcli/packages/prioritization.py0000644000076500000240000000301413002760441021350 0ustar irinastaff00000000000000import re import sqlparse from sqlparse.tokens import Name from collections import defaultdict from .pgliterals.main import get_literals white_space_regex = re.compile('\\s+', re.MULTILINE) def _compile_regex(keyword): # Surround the keyword with word boundaries and replace interior whitespace # with whitespace wildcards pattern = '\\b' + white_space_regex.sub(r'\\s+', keyword) + '\\b' return re.compile(pattern, re.MULTILINE | re.IGNORECASE) keywords = get_literals('keywords') keyword_regexs = dict((kw, _compile_regex(kw)) for kw in keywords) class PrevalenceCounter(object): def __init__(self): self.keyword_counts = defaultdict(int) self.name_counts = defaultdict(int) def update(self, text): self.update_keywords(text) self.update_names(text) def update_names(self, text): for parsed in sqlparse.parse(text): for token in parsed.flatten(): if token.ttype in Name: self.name_counts[token.value] += 1 def clear_names(self): self.name_counts = defaultdict(int) def update_keywords(self, text): # Count keywords. Can't rely for sqlparse for this, because it's # database agnostic for keyword, regex in keyword_regexs.items(): for _ in regex.finditer(text): self.keyword_counts[keyword] += 1 def keyword_count(self, keyword): return self.keyword_counts[keyword] def name_count(self, name): return self.name_counts[name] pgcli-1.6.0/pgcli/packages/sqlcompletion.py0000644000076500000240000005164713101210552021163 0ustar irinastaff00000000000000from __future__ import print_function import sys import re import sqlparse from collections import namedtuple from sqlparse.sql import Comparison, Identifier, Where from .parseutils.utils import ( last_word, find_prev_keyword, parse_partial_identifier) from .parseutils.tables import extract_tables from .parseutils.ctes import isolate_query_ctes from pgspecial.main import parse_special_command PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 if PY3: string_types = str else: string_types = basestring Special = namedtuple('Special', []) Database = namedtuple('Database', []) Schema = namedtuple('Schema', []) # FromClauseItem is a table/view/function used in the FROM clause # `table_refs` contains the list of tables/... 
already in the statement, # used to ensure that the alias we suggest is unique FromClauseItem = namedtuple('FromClauseItem', 'schema table_refs local_tables') Table = namedtuple('Table', ['schema', 'table_refs', 'local_tables']) View = namedtuple('View', ['schema', 'table_refs']) # JoinConditions are suggested after ON, e.g. 'foo.barid = bar.barid' JoinCondition = namedtuple('JoinCondition', ['table_refs', 'parent']) # Joins are suggested after JOIN, e.g. 'foo ON foo.barid = bar.barid' Join = namedtuple('Join', ['table_refs', 'schema']) Function = namedtuple('Function', ['schema', 'table_refs', 'filter']) # For convenience, don't require the `filter` argument in Function constructor Function.__new__.__defaults__ = (None, tuple(), None) Table.__new__.__defaults__ = (None, tuple(), tuple()) View.__new__.__defaults__ = (None, tuple()) FromClauseItem.__new__.__defaults__ = (None, tuple(), tuple()) Column = namedtuple( 'Column', ['table_refs', 'require_last_table', 'local_tables', 'qualifiable'] ) Column.__new__.__defaults__ = (None, None, tuple(), False) Keyword = namedtuple('Keyword', ['last_token']) Keyword.__new__.__defaults__ = (None,) NamedQuery = namedtuple('NamedQuery', []) Datatype = namedtuple('Datatype', ['schema']) Alias = namedtuple('Alias', ['aliases']) Path = namedtuple('Path', []) class SqlStatement(object): def __init__(self, full_text, text_before_cursor): self.identifier = None self.word_before_cursor = word_before_cursor = last_word( text_before_cursor, include='many_punctuations') full_text = _strip_named_query(full_text) text_before_cursor = _strip_named_query(text_before_cursor) full_text, text_before_cursor, self.local_tables = \ isolate_query_ctes(full_text, text_before_cursor) self.text_before_cursor_including_last_word = text_before_cursor # If we've partially typed a word then word_before_cursor won't be an # empty string. In that case we want to remove the partially typed # string before sending it to the sqlparser. Otherwise the last token # will always be the partially typed string which renders the smart # completion useless because it will always return the list of # keywords as completion. if self.word_before_cursor: if word_before_cursor[-1] == '(' or word_before_cursor[0] == '\\': parsed = sqlparse.parse(text_before_cursor) else: text_before_cursor = text_before_cursor[:-len(word_before_cursor)] parsed = sqlparse.parse(text_before_cursor) self.identifier = parse_partial_identifier(word_before_cursor) else: parsed = sqlparse.parse(text_before_cursor) full_text, text_before_cursor, parsed = \ _split_multiple_statements(full_text, text_before_cursor, parsed) self.full_text = full_text self.text_before_cursor = text_before_cursor self.parsed = parsed self.last_token = parsed and parsed.token_prev(len(parsed.tokens))[1] or '' def is_insert(self): return self.parsed.token_first().value.lower() == 'insert' def get_tables(self, scope='full'): """ Gets the tables available in the statement. param `scope:` possible values: 'full', 'insert', 'before' If 'insert', only the first table is returned. If 'before', only tables before the cursor are returned. If not 'insert' and the stmt is an insert, the first table is skipped. 
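        For example, given 'INSERT INTO foo SELECT * FROM bar', scope='insert'
        yields only the `foo` reference, while the default 'full' scope skips
        the insert target and yields `bar`.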
""" tables = extract_tables( self.full_text if scope == 'full' else self.text_before_cursor) if scope == 'insert': tables = tables[:1] elif self.is_insert(): tables = tables[1:] return tables def get_identifier_schema(self): schema = (self.identifier and self.identifier.get_parent_name()) or None # If schema name is unquoted, lower-case it if schema and self.identifier.value[0] != '"': schema = schema.lower() return schema def reduce_to_prev_keyword(self, n_skip=0): prev_keyword, self.text_before_cursor = \ find_prev_keyword(self.text_before_cursor, n_skip=n_skip) return prev_keyword def suggest_type(full_text, text_before_cursor): """Takes the full_text that is typed so far and also the text before the cursor to suggest completion type and scope. Returns a tuple with a type of entity ('table', 'column' etc) and a scope. A scope for a column category will be a list of tables. """ if full_text.startswith('\\i '): return (Path(),) # This is a temporary hack; the exception handling # here should be removed once sqlparse has been fixed try: stmt = SqlStatement(full_text, text_before_cursor) except (TypeError, AttributeError): return [] # Check for special commands and handle those separately if stmt.parsed: # Be careful here because trivial whitespace is parsed as a # statement, but the statement won't have a first token tok1 = stmt.parsed.token_first() if tok1 and tok1.value == '\\': text = stmt.text_before_cursor + stmt.word_before_cursor return suggest_special(text) return suggest_based_on_last_token(stmt.last_token, stmt) named_query_regex = re.compile(r'^\s*\\ns\s+[A-z0-9\-_]+\s+') def _strip_named_query(txt): """ This will strip "save named query" command in the beginning of the line: '\ns zzz SELECT * FROM abc' -> 'SELECT * FROM abc' ' \ns zzz SELECT * FROM abc' -> 'SELECT * FROM abc' """ if named_query_regex.match(txt): txt = named_query_regex.sub('', txt) return txt function_body_pattern = re.compile('(\\$.*?\\$)([\s\S]*?)\\1', re.M) def _find_function_body(text): split = function_body_pattern.search(text) return (split.start(2), split.end(2)) if split else (None, None) def _statement_from_function(full_text, text_before_cursor, statement): current_pos = len(text_before_cursor) body_start, body_end = _find_function_body(full_text) if body_start is None: return full_text, text_before_cursor, statement if not body_start <= current_pos < body_end: return full_text, text_before_cursor, statement full_text = full_text[body_start:body_end] text_before_cursor = text_before_cursor[body_start:] parsed = sqlparse.parse(text_before_cursor) return _split_multiple_statements(full_text, text_before_cursor, parsed) def _split_multiple_statements(full_text, text_before_cursor, parsed): if len(parsed) > 1: # Multiple statements being edited -- isolate the current one by # cumulatively summing statement lengths to find the one that bounds # the current position current_pos = len(text_before_cursor) stmt_start, stmt_end = 0, 0 for statement in parsed: stmt_len = len(str(statement)) stmt_start, stmt_end = stmt_end, stmt_end + stmt_len if stmt_end >= current_pos: text_before_cursor = full_text[stmt_start:current_pos] full_text = full_text[stmt_start:] break elif parsed: # A single statement statement = parsed[0] else: # The empty string return full_text, text_before_cursor, None token2 = None if statement.get_type() in ('CREATE', 'CREATE OR REPLACE'): token1 = statement.token_first() if token1: token1_idx = statement.token_index(token1) token2 = statement.token_next(token1_idx)[1] if token2 and 
token2.value.upper() == 'FUNCTION': full_text, text_before_cursor, statement = _statement_from_function( full_text, text_before_cursor, statement ) return full_text, text_before_cursor, statement SPECIALS_SUGGESTION = { 'dT': Datatype, 'df': Function, 'dt': Table, 'dv': View, 'sf': Function, } def suggest_special(text): text = text.lstrip() cmd, _, arg = parse_special_command(text) if cmd == text: # Trying to complete the special command itself return (Special(),) if cmd in ('\\c', '\\connect'): return (Database(),) if cmd == '\\dn': return (Schema(),) if arg: # Try to distinguish "\d name" from "\d schema.name" # Note that this will fail to obtain a schema name if wildcards are # used, e.g. "\d schema???.name" parsed = sqlparse.parse(arg)[0].tokens[0] try: schema = parsed.get_parent_name() except AttributeError: schema = None else: schema = None if cmd[1:] == 'd': # \d can describe tables or views if schema: return (Table(schema=schema), View(schema=schema),) else: return (Schema(), Table(schema=None), View(schema=None),) elif cmd[1:] in SPECIALS_SUGGESTION: rel_type = SPECIALS_SUGGESTION[cmd[1:]] if schema: return (rel_type(schema=schema),) else: return (Schema(), rel_type(schema=None)) if cmd in ['\\n', '\\ns', '\\nd']: return (NamedQuery(),) return (Keyword(), Special()) def suggest_based_on_last_token(token, stmt): if isinstance(token, string_types): token_v = token.lower() elif isinstance(token, Comparison): # If 'token' is a Comparison type such as # 'select * FROM abc a JOIN def d ON a.id = d.'. Then calling # token.value on the comparison type will only return the lhs of the # comparison. In this case a.id. So we need to do token.tokens to get # both sides of the comparison and pick the last token out of that # list. token_v = token.tokens[-1].value.lower() elif isinstance(token, Where): # sqlparse groups all tokens from the where clause into a single token # list. This means that token.value may be something like # 'where foo > 5 and '. We need to look "inside" token.tokens to handle # suggestions in complicated where clauses correctly prev_keyword = stmt.reduce_to_prev_keyword() return suggest_based_on_last_token(prev_keyword, stmt) elif isinstance(token, Identifier): # If the previous token is an identifier, we can suggest datatypes if # we're in a parenthesized column/field list, e.g.: # CREATE TABLE foo (Identifier # CREATE FUNCTION foo (Identifier # If we're not in a parenthesized list, the most likely scenario is the # user is about to specify an alias, e.g.: # SELECT Identifier # SELECT foo FROM Identifier prev_keyword, _ = find_prev_keyword(stmt.text_before_cursor) if prev_keyword and prev_keyword.value == '(': # Suggest datatypes return suggest_based_on_last_token('type', stmt) else: return (Keyword(),) else: token_v = token.value.lower() if not token: return (Keyword(), Special()) elif token_v.endswith('('): p = sqlparse.parse(stmt.text_before_cursor)[0] if p.tokens and isinstance(p.tokens[-1], Where): # Four possibilities: # 1 - Parenthesized clause like "WHERE foo AND (" # Suggest columns/functions # 2 - Function call like "WHERE foo(" # Suggest columns/functions # 3 - Subquery expression like "WHERE EXISTS (" # Suggest keywords, in order to do a subquery # 4 - Subquery OR array comparison like "WHERE foo = ANY(" # Suggest columns/functions AND keywords. 
(If we wanted to be # really fancy, we could suggest only array-typed columns) column_suggestions = suggest_based_on_last_token('where', stmt) # Check for a subquery expression (cases 3 & 4) where = p.tokens[-1] prev_tok = where.token_prev(len(where.tokens) - 1)[1] if isinstance(prev_tok, Comparison): # e.g. "SELECT foo FROM bar WHERE foo = ANY(" prev_tok = prev_tok.tokens[-1] prev_tok = prev_tok.value.lower() if prev_tok == 'exists': return (Keyword(),) else: return column_suggestions # Get the token before the parens prev_tok = p.token_prev(len(p.tokens) - 1)[1] if (prev_tok and prev_tok.value and prev_tok.value.lower().split(' ')[-1] == 'using'): # tbl1 INNER JOIN tbl2 USING (col1, col2) tables = stmt.get_tables('before') # suggest columns that are present in more than one table return (Column(table_refs=tables, require_last_table=True, local_tables=stmt.local_tables),) elif p.token_first().value.lower() == 'select': # If the lparen is preceeded by a space chances are we're about to # do a sub-select. if last_word(stmt.text_before_cursor, 'all_punctuations').startswith('('): return (Keyword(),) prev_prev_tok = prev_tok and p.token_prev(p.token_index(prev_tok))[1] if prev_prev_tok and prev_prev_tok.normalized == 'INTO': return (Column(table_refs=stmt.get_tables('insert')),) # We're probably in a function argument list return (Column(table_refs=extract_tables(stmt.full_text), local_tables=stmt.local_tables, qualifiable=True),) elif token_v == 'set': return (Column(table_refs=stmt.get_tables(), local_tables=stmt.local_tables),) elif token_v in ('select', 'where', 'having', 'by', 'distinct'): # Check for a table alias or schema qualification parent = (stmt.identifier and stmt.identifier.get_parent_name()) or [] tables = stmt.get_tables() if parent: tables = tuple(t for t in tables if identifies(parent, t)) return (Column(table_refs=tables, local_tables=stmt.local_tables), Table(schema=parent), View(schema=parent), Function(schema=parent),) else: return (Column(table_refs=tables, local_tables=stmt.local_tables, qualifiable=True), Function(schema=None), Keyword(token_v.upper()),) elif token_v == 'as': # Don't suggest anything for aliases return () elif (token_v.endswith('join') and token.is_keyword) or (token_v in ('copy', 'from', 'update', 'into', 'describe', 'truncate')): schema = stmt.get_identifier_schema() tables = extract_tables(stmt.text_before_cursor) is_join = token_v.endswith('join') and token.is_keyword # Suggest tables from either the currently-selected schema or the # public schema if no schema has been specified suggest = [] if not schema: # Suggest schemas suggest.insert(0, Schema()) if token_v == 'from' or is_join: suggest.append(FromClauseItem(schema=schema, table_refs=tables, local_tables=stmt.local_tables)) elif token_v == 'truncate': suggest.append(Table(schema)) else: suggest.extend((Table(schema), View(schema))) if is_join and _allow_join(stmt.parsed): tables = stmt.get_tables('before') suggest.append(Join(table_refs=tables, schema=schema)) return tuple(suggest) elif token_v in ('table', 'view', 'function'): # E.g. 'DROP FUNCTION ', 'ALTER TABLE ' rel_type = {'table': Table, 'view': View, 'function': Function}[token_v] schema = stmt.get_identifier_schema() if schema: return (rel_type(schema=schema),) else: return (Schema(), rel_type(schema=schema)) elif token_v == 'column': # E.g. 
'ALTER TABLE foo ALTER COLUMN bar return (Column(table_refs=stmt.get_tables()),) elif token_v == 'on': tables = stmt.get_tables('before') parent = (stmt.identifier and stmt.identifier.get_parent_name()) or None if parent: # "ON parent." # parent can be either a schema name or table alias filteredtables = tuple(t for t in tables if identifies(parent, t)) sugs = [Column(table_refs=filteredtables, local_tables=stmt.local_tables), Table(schema=parent), View(schema=parent), Function(schema=parent)] if filteredtables and _allow_join_condition(stmt.parsed): sugs.append(JoinCondition(table_refs=tables, parent=filteredtables[-1])) return tuple(sugs) else: # ON # Use table alias if there is one, otherwise the table name aliases = tuple(t.ref for t in tables) if _allow_join_condition(stmt.parsed): return (Alias(aliases=aliases), JoinCondition( table_refs=tables, parent=None)) else: return (Alias(aliases=aliases),) elif token_v in ('c', 'use', 'database', 'template'): # "\c ", "DROP DATABASE ", # "CREATE DATABASE WITH TEMPLATE " return (Database(),) elif token_v == 'schema': # DROP SCHEMA schema_name return (Schema(),) elif token_v.endswith(',') or token_v in ('=', 'and', 'or'): prev_keyword = stmt.reduce_to_prev_keyword() if prev_keyword: return suggest_based_on_last_token(prev_keyword, stmt) else: return () elif token_v in ('type', '::'): # ALTER TABLE foo SET DATA TYPE bar # SELECT foo::bar # Note that tables are a form of composite type in postgresql, so # they're suggested here as well schema = stmt.get_identifier_schema() suggestions = [Datatype(schema=schema), Table(schema=schema)] if not schema: suggestions.append(Schema()) return tuple(suggestions) elif token_v in {'alter', 'create', 'drop'}: return (Keyword(token_v.upper()),) elif token.is_keyword: # token is a keyword we haven't implemented any special handling for # go backwards in the query until we find one we do recognize prev_keyword = stmt.reduce_to_prev_keyword(n_skip=1) if prev_keyword: return suggest_based_on_last_token(prev_keyword, stmt) else: return (Keyword(token_v.upper()),) else: return (Keyword(),) def identifies(id, ref): """Returns true if string `id` matches TableReference `ref`""" return id == ref.alias or id == ref.name or ( ref.schema and (id == ref.schema + '.' + ref.name)) def _allow_join_condition(statement): """ Tests if a join condition should be suggested We need this to avoid bad suggestions when entering e.g. select * from tbl1 a join tbl2 b on a.id = So check that the preceding token is a ON, AND, or OR keyword, instead of e.g. an equals sign. :param statement: an sqlparse.sql.Statement :return: boolean """ if not statement or not statement.tokens: return False last_tok = statement.token_prev(len(statement.tokens))[1] return last_tok.value.lower() in ('on', 'and', 'or') def _allow_join(statement): """ Tests if a join should be suggested We need this to avoid bad suggestions when entering e.g. 
select * from tbl1 a join tbl2 b So check that the preceding token is a JOIN keyword :param statement: an sqlparse.sql.Statement :return: boolean """ if not statement or not statement.tokens: return False last_tok = statement.token_prev(len(statement.tokens))[1] return (last_tok.value.lower().endswith('join') and last_tok.value.lower() not in('cross join', 'natural join')) pgcli-1.6.0/pgcli/packages/tabulate.py0000644000076500000240000011243613002760441020074 0ustar irinastaff00000000000000# -*- coding: utf-8 -*- """Pretty-print tabular data.""" from __future__ import print_function from __future__ import unicode_literals from collections import namedtuple from platform import python_version_tuple from wcwidth import wcswidth from ..encodingutils import utf8tounicode from decimal import Decimal import re if python_version_tuple()[0] < "3": from itertools import izip_longest from functools import partial _none_type = type(None) _int_type = int _long_type = long _float_type = float _text_type = unicode _binary_type = str def _is_file(f): return isinstance(f, file) else: from itertools import zip_longest as izip_longest from functools import reduce, partial _none_type = type(None) _int_type = int _long_type = int _float_type = float _text_type = str _binary_type = bytes import io def _is_file(f): return isinstance(f, io.IOBase) __all__ = ["tabulate", "tabulate_formats", "simple_separated_format"] __version__ = "0.7.4" MIN_PADDING = 2 Line = namedtuple("Line", ["begin", "hline", "sep", "end"]) DataRow = namedtuple("DataRow", ["begin", "sep", "end"]) # A table structure is suppposed to be: # # --- lineabove --------- # headerrow # --- linebelowheader --- # datarow # --- linebewteenrows --- # ... (more datarows) ... # --- linebewteenrows --- # last datarow # --- linebelow --------- # # TableFormat's line* elements can be # # - either None, if the element is not used, # - or a Line tuple, # - or a function: [col_widths], [col_alignments] -> string. # # TableFormat's *row elements can be # # - either None, if the element is not used, # - or a DataRow tuple, # - or a function: [cell_values], [col_widths], [col_alignments] -> string. # # padding (an integer) is the amount of white space around data values. # # with_header_hide: # # - either None, to display all table elements unconditionally, # - or a list of elements not to be displayed if the table has column headers. 
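# A hypothetical illustration (not part of the original module): once
# TableFormat and its helpers are defined below, a custom format can be built
# from the same pieces and passed directly as `tablefmt` (the name `dotted`
# is invented here):
#
#   dotted = TableFormat(lineabove=Line("", ".", "  ", ""),
#                        linebelowheader=Line("", ".", "  ", ""),
#                        linebetweenrows=None,
#                        linebelow=Line("", ".", "  ", ""),
#                        headerrow=DataRow("", "  ", ""),
#                        datarow=DataRow("", "  ", ""),
#                        padding=0, with_header_hide=None)
#   table, rows = tabulate([["spam", 41.9999]], headers=["item", "qty"],
#                          tablefmt=dotted)
#
# (Note that this vendored tabulate returns a (text, rows) tuple, per its
# docstring below.)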
# TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader", "linebetweenrows", "linebelow", "headerrow", "datarow", "padding", "with_header_hide"]) def _pipe_segment_with_colons(align, colwidth): """Return a segment of a horizontal line with optional colons which indicate column's alignment (as in `pipe` output format).""" w = colwidth if align in ["right", "decimal"]: return ('-' * (w - 1)) + ":" elif align == "center": return ":" + ('-' * (w - 2)) + ":" elif align == "left": return ":" + ('-' * (w - 1)) else: return '-' * w def _pipe_line_with_colons(colwidths, colaligns): """Return a horizontal line with optional colons to indicate column's alignment (as in `pipe` output format).""" segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)] return "|" + "|".join(segments) + "|" def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns): alignment = { "left": '', "right": 'align="right"| ', "center": 'align="center"| ', "decimal": 'align="right"| ' } # hard-coded padding _around_ align attribute and value together # rather than padding parameter which affects only the value values_with_attrs = [' ' + alignment.get(a, '') + c + ' ' for c, a in zip(cell_values, colaligns)] colsep = separator*2 return (separator + colsep.join(values_with_attrs)).rstrip() def _html_row_with_attrs(celltag, cell_values, colwidths, colaligns): alignment = { "left": '', "right": ' style="text-align: right;"', "center": ' style="text-align: center;"', "decimal": ' style="text-align: right;"' } values_with_attrs = ["<{0}{1}>{2}".format(celltag, alignment.get(a, ''), c) for c, a in zip(cell_values, colaligns)] return "" + "".join(values_with_attrs).rstrip() + "" def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False): alignment = { "left": "l", "right": "r", "center": "c", "decimal": "r" } tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns]) return "\n".join(["\\begin{tabular}{" + tabular_columns_fmt + "}", "\\toprule" if booktabs else "\hline"]) LATEX_ESCAPE_RULES = {r"&": r"\&", r"%": r"\%", r"$": r"\$", r"#": r"\#", r"_": r"\_", r"^": r"\^{}", r"{": r"\{", r"}": r"\}", r"~": r"\textasciitilde{}", "\\": r"\textbackslash{}", r"<": r"\ensuremath{<}", r">": r"\ensuremath{>}"} def _latex_row(cell_values, colwidths, colaligns): def escape_char(c): return LATEX_ESCAPE_RULES.get(c, c) escaped_values = ["".join(map(escape_char, cell)) for cell in cell_values] rowfmt = DataRow("", "&", "\\\\") return _build_simple_row(escaped_values, rowfmt) _table_formats = {"simple": TableFormat(lineabove=Line("", "-", " ", ""), linebelowheader=Line("", "-", " ", ""), linebetweenrows=None, linebelow=Line("", "-", " ", ""), headerrow=DataRow("", " ", ""), datarow=DataRow("", " ", ""), padding=0, with_header_hide=["lineabove", "linebelow"]), "plain": TableFormat(lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow("", " ", ""), datarow=DataRow("", " ", ""), padding=0, with_header_hide=None), "grid": TableFormat(lineabove=Line("+", "-", "+", "+"), linebelowheader=Line("+", "=", "+", "+"), linebetweenrows=Line("+", "-", "+", "+"), linebelow=Line("+", "-", "+", "+"), headerrow=DataRow("|", "|", "|"), datarow=DataRow("|", "|", "|"), padding=1, with_header_hide=None), "fancy_grid": TableFormat(lineabove=Line("╒", "═", "╤", "╕"), linebelowheader=Line("╞", "═", "╪", "╡"), linebetweenrows=Line("├", "─", "┼", "┤"), linebelow=Line("╘", "═", "╧", "╛"), headerrow=DataRow("│", "│", "│"), datarow=DataRow("│", "│", "│"), 
padding=1, with_header_hide=None), "pipe": TableFormat(lineabove=_pipe_line_with_colons, linebelowheader=_pipe_line_with_colons, linebetweenrows=None, linebelow=None, headerrow=DataRow("|", "|", "|"), datarow=DataRow("|", "|", "|"), padding=1, with_header_hide=["lineabove"]), "orgtbl": TableFormat(lineabove=None, linebelowheader=Line("|", "-", "+", "|"), linebetweenrows=None, linebelow=None, headerrow=DataRow("|", "|", "|"), datarow=DataRow("|", "|", "|"), padding=1, with_header_hide=None), "psql": TableFormat(lineabove=Line("+", "-", "+", "+"), linebelowheader=Line("|", "-", "+", "|"), linebetweenrows=None, linebelow=Line("+", "-", "+", "+"), headerrow=DataRow("|", "|", "|"), datarow=DataRow("|", "|", "|"), padding=1, with_header_hide=None), "rst": TableFormat(lineabove=Line("", "=", " ", ""), linebelowheader=Line("", "=", " ", ""), linebetweenrows=None, linebelow=Line("", "=", " ", ""), headerrow=DataRow("", " ", ""), datarow=DataRow("", " ", ""), padding=0, with_header_hide=None), "mediawiki": TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"", "", "", "\n|+ \n|-"), linebelowheader=Line("|-", "", "", ""), linebetweenrows=Line("|-", "", "", ""), linebelow=Line("|}", "", "", ""), headerrow=partial(_mediawiki_row_with_attrs, "!"), datarow=partial(_mediawiki_row_with_attrs, "|"), padding=0, with_header_hide=None), "html": TableFormat(lineabove=Line("", "", "", ""), linebelowheader=None, linebetweenrows=None, linebelow=Line("
", "", "", ""), headerrow=partial(_html_row_with_attrs, "th"), datarow=partial(_html_row_with_attrs, "td"), padding=0, with_header_hide=None), "latex": TableFormat(lineabove=_latex_line_begin_tabular, linebelowheader=Line("\\hline", "", "", ""), linebetweenrows=None, linebelow=Line("\\hline\n\\end{tabular}", "", "", ""), headerrow=_latex_row, datarow=_latex_row, padding=1, with_header_hide=None), "latex_booktabs": TableFormat(lineabove=partial(_latex_line_begin_tabular, booktabs=True), linebelowheader=Line("\\midrule", "", "", ""), linebetweenrows=None, linebelow=Line("\\bottomrule\n\\end{tabular}", "", "", ""), headerrow=_latex_row, datarow=_latex_row, padding=1, with_header_hide=None), "tsv": TableFormat(lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow("", "\t", ""), datarow=DataRow("", "\t", ""), padding=0, with_header_hide=None)} tabulate_formats = list(sorted(_table_formats.keys())) _invisible_codes = re.compile(r"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes _invisible_codes_bytes = re.compile(b"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes def simple_separated_format(separator): """Construct a simple TableFormat with columns separated by a separator. >>> tsv = simple_separated_format("\\t") ; \ tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23' True """ return TableFormat(None, None, None, None, headerrow=DataRow('', separator, ''), datarow=DataRow('', separator, ''), padding=0, with_header_hide=None) def _isconvertible(conv, string): try: n = conv(string) return True except (ValueError, TypeError): return False def _isnumber(string): """ >>> _isnumber("123.45") True >>> _isnumber("123") True >>> _isnumber("spam") False """ return _isconvertible(float, string) def _isint(string): """ >>> _isint(123) True >>> _isint("123") False >>> _isint("123.45") False """ return type(string) is _int_type or type(string) is _long_type def _type(string, has_invisible=True): """The least generic type (type(None), int, float, str, unicode). >>> _type(None) is type(None) True >>> _type("foo") is type("") True >>> _type("1") is type(1) False >>> _type('\x1b[31m42\x1b[0m') is type(42) True >>> _type('\x1b[31m42\x1b[0m') is type(42) True """ if has_invisible and \ (isinstance(string, _text_type) or isinstance(string, _binary_type)): string = _strip_invisible(string) if string is None: return _none_type if isinstance(string, bool): return _text_type elif hasattr(string, "isoformat"): # datetime.datetime, date, and time return _text_type elif _isint(string): return int elif isinstance(string, (float, Decimal)): return float elif isinstance(string, _binary_type): return _binary_type else: return _text_type def _afterpoint(string): """Symbols after a decimal point, -1 if the string lacks the decimal point. >>> _afterpoint("123.45") 2 >>> _afterpoint("1001") -1 >>> _afterpoint("eggs") -1 >>> _afterpoint("123e45") 2 """ if _isnumber(string): if _isint(string): return -1 else: pos = string.rfind(".") pos = string.lower().rfind("e") if pos < 0 else pos if pos >= 0: return len(string) - pos - 1 else: return -1 # no point else: return -1 # not a number def _padleft(width, s, has_invisible=True): """Flush right. >>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430' True """ lwidth = width - wcswidth(_strip_invisible(s) if has_invisible else s) return ' ' * lwidth + s def _padright(width, s, has_invisible=True): """Flush left. 
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 ' True """ rwidth = width - wcswidth(_strip_invisible(s) if has_invisible else s) return s + ' ' * rwidth def _padboth(width, s, has_invisible=True): """Center string. >>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 ' True """ xwidth = width - wcswidth(_strip_invisible(s) if has_invisible else s) lwidth = xwidth // 2 rwidth = 0 if xwidth <= 0 else lwidth + xwidth % 2 return ' ' * lwidth + s + ' ' * rwidth def _strip_invisible(s): "Remove invisible ANSI color codes." if isinstance(s, _text_type): return re.sub(_invisible_codes, "", s) else: # a bytestring return re.sub(_invisible_codes_bytes, "", s) def _visible_width(s): """Visible width of a printed string. ANSI color codes are removed. >>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world") (5, 5) """ if isinstance(s, _text_type) or isinstance(s, _binary_type): return wcswidth(_strip_invisible(s)) else: return wcswidth(_text_type(s)) def _align_column(strings, alignment, minwidth=0, has_invisible=True): """[string] -> [padded_string] >>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal"))) [' 12.345 ', '-1234.5 ', ' 1.23 ', ' 1234.5 ', ' 1e+234 ', ' 1.0e234'] >>> list(map(str,_align_column(['123.4', '56.7890'], None))) ['123.4', '56.7890'] """ if alignment == "right": padfn = _padleft elif alignment == "center": padfn = _padboth elif alignment == "decimal": decimals = [_afterpoint(s) for s in strings] maxdecimals = max(decimals) strings = [s + (maxdecimals - decs) * " " for s, decs in zip(strings, decimals)] padfn = _padleft elif not alignment: return strings else: padfn = _padright if has_invisible: width_fn = _visible_width else: width_fn = wcswidth maxwidth = max(max(map(width_fn, strings)), minwidth) padded_strings = [padfn(maxwidth, s, has_invisible) for s in strings] return padded_strings def _more_generic(type1, type2): types = { _none_type: 0, int: 1, float: 2, _binary_type: 3, _text_type: 4 } invtypes = { 4: _text_type, 3: _binary_type, 2: float, 1: int, 0: _none_type } moregeneric = max(types.get(type1, 4), types.get(type2, 4)) return invtypes[moregeneric] def _column_type(strings, has_invisible=True): """The least generic type all column values are convertible to. >>> _column_type(["1", "2"]) is _int_type True >>> _column_type(["1", "2.3"]) is _float_type True >>> _column_type(["1", "2.3", "four"]) is _text_type True >>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type True >>> _column_type([None, "brux"]) is _text_type True >>> _column_type([1, 2, None]) is _int_type True >>> import datetime as dt >>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type True """ types = [_type(s, has_invisible) for s in strings ] return reduce(_more_generic, types, int) def _format(val, valtype, dcmlfmt, floatfmt, missingval=""): """Format a value accoding to its type. 
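    `valtype` is one of the types produced by _column_type above; `dcmlfmt`
    and `floatfmt` are format() specs applied to int and float values
    respectively, and None is rendered as `missingval`.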
Unicode is supported: >>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \ tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \ good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \ tabulate(tbl, headers=hrow) == good_result True """ if val is None: return missingval if valtype is int: return format(int(val), dcmlfmt) elif valtype is _text_type: return "{0}".format(val) elif valtype is _binary_type: try: return _text_type(val, "utf-8") except TypeError: return _text_type(val) elif valtype is float: return format(float(val), floatfmt) else: return "{0}".format(val) def _align_header(header, alignment, width): if alignment == "left": return _padright(width, header) elif alignment == "center": return _padboth(width, header) elif not alignment: return "{0}".format(header) else: return _padleft(width, header) def _normalize_tabular_data(tabular_data, headers): """Transform a supported data type to a list of lists, and a list of headers. Supported tabular data types: * list-of-lists or another iterable of iterables * list of named tuples (usually used with headers="keys") * list of dicts (usually used with headers="keys") * list of OrderedDicts (usually used with headers="keys") * 2D NumPy arrays * NumPy record arrays (usually used with headers="keys") * dict of iterables (usually used with headers="keys") * pandas.DataFrame (usually used with headers="keys") The first row can be used as headers if headers="firstrow", column indices can be used as headers if headers="keys". """ if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"): # dict-like and pandas.DataFrame? if hasattr(tabular_data.values, "__call__"): # likely a conventional dict keys = tabular_data.keys() rows = list(izip_longest(*tabular_data.values())) # columns have to be transposed elif hasattr(tabular_data, "index"): # values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0) keys = tabular_data.keys() vals = tabular_data.values # values matrix doesn't need to be transposed names = tabular_data.index rows = [[v]+list(row) for v,row in zip(names, vals)] else: raise ValueError("tabular data doesn't appear to be a dict or a DataFrame") if headers == "keys": headers = list(map(_text_type,keys)) # headers should be strings else: # it's a usual an iterable of iterables, or a NumPy array rows = list(tabular_data) if (headers == "keys" and hasattr(tabular_data, "dtype") and getattr(tabular_data.dtype, "names")): # numpy record array headers = tabular_data.dtype.names elif (headers == "keys" and len(rows) > 0 and isinstance(rows[0], tuple) and hasattr(rows[0], "_fields")): # namedtuple headers = list(map(_text_type, rows[0]._fields)) elif (len(rows) > 0 and isinstance(rows[0], dict)): # dict or OrderedDict uniq_keys = set() # implements hashed lookup keys = [] # storage for set if headers == "firstrow": firstdict = rows[0] if len(rows) > 0 else {} keys.extend(firstdict.keys()) uniq_keys.update(keys) rows = rows[1:] for row in rows: for k in row.keys(): #Save unique items in input order if k not in uniq_keys: keys.append(k) uniq_keys.add(k) if headers == 'keys': headers = keys elif isinstance(headers, dict): # a dict of headers for a list of dicts headers = [headers.get(k, k) for k in keys] headers = list(map(_text_type, headers)) elif headers == "firstrow": if len(rows) > 0: headers = [firstdict.get(k, k) for k in keys] headers = list(map(_text_type, 
headers)) else: headers = [] elif headers: raise ValueError('headers for a list of dicts is not a dict or a keyword') rows = [[row.get(k) for k in keys] for row in rows] elif headers == "keys" and len(rows) > 0: # keys are column indices headers = list(map(_text_type, range(len(rows[0])))) # take headers from the first row if necessary if headers == "firstrow" and len(rows) > 0: headers = list(map(_text_type, rows[0])) # headers should be strings rows = rows[1:] headers = list(map(_text_type,headers)) rows = list(map(list,rows)) # pad with empty headers for initial columns if necessary if headers and len(rows) > 0: nhs = len(headers) ncols = len(rows[0]) if nhs < ncols: headers = [""]*(ncols - nhs) + headers return rows, headers def tabulate(tabular_data, headers=[], tablefmt="simple", dcmlfmt="d", floatfmt="g", numalign="decimal", stralign="left", missingval=""): """Format a fixed width table for pretty printing. >>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]])) --- --------- 1 2.34 -56 8.999 2 10001 --- --------- The first required argument (`tabular_data`) can be a list-of-lists (or another iterable of iterables), a list of named tuples, a dictionary of iterables, an iterable of dictionaries, a two-dimensional NumPy array, NumPy record array, or a Pandas' dataframe. Table headers ------------- To print nice column headers, supply the second argument (`headers`): - `headers` can be an explicit list of column headers - if `headers="firstrow"`, then the first row of data is used - if `headers="keys"`, then dictionary keys or column indices are used Otherwise a headerless table is produced. If the number of headers is less than the number of columns, they are supposed to be names of the last columns. This is consistent with the plain-text format of R and Pandas' dataframes. >>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]], ... headers="firstrow")) sex age ----- ----- ----- Alice F 24 Bob M 19 Column alignment ---------------- `tabulate` tries to detect column types automatically, and aligns the values properly. By default it aligns decimal points of the numbers (or flushes integer numbers to the right), and flushes everything else to the left. Possible column alignments (`numalign`, `stralign`) are: "right", "center", "left", "decimal" (only for `numalign`), and None (to disable alignment). Table formats ------------- `floatfmt` is a format specification used for columns which contain numeric data with a decimal point. `None` values are replaced with a `missingval` string: >>> print(tabulate([["spam", 1, None], ... ["eggs", 42, 3.14], ... ["other", None, 2.7]], missingval="?")) ----- -- ---- spam 1 ? eggs 42 3.14 other ? 2.7 ----- -- ---- Various plain-text table formats (`tablefmt`) are supported: 'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki', 'latex', and 'latex_booktabs'. Variable `tabulate_formats` contains the list of currently supported formats. "plain" format doesn't use any pseudographics to draw tables, it separates columns with a double space: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "plain")) strings numbers spam 41.9999 eggs 451 >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain")) spam 41.9999 eggs 451 "simple" format is like Pandoc simple_tables: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... 
["strings", "numbers"], "simple")) strings numbers --------- --------- spam 41.9999 eggs 451 >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple")) ---- -------- spam 41.9999 eggs 451 ---- -------- "grid" is similar to tables produced by Emacs table.el package or Pandoc grid_tables: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "grid")) +-----------+-----------+ | strings | numbers | +===========+===========+ | spam | 41.9999 | +-----------+-----------+ | eggs | 451 | +-----------+-----------+ >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid")) +------+----------+ | spam | 41.9999 | +------+----------+ | eggs | 451 | +------+----------+ "fancy_grid" draws a grid using box-drawing characters: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "fancy_grid")) ╒═══════════╤═══════════╕ │ strings │ numbers │ ╞═══════════╪═══════════╡ │ spam │ 41.9999 │ ├───────────┼───────────┤ │ eggs │ 451 │ ╘═══════════╧═══════════╛ "pipe" is like tables in PHP Markdown Extra extension or Pandoc pipe_tables: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "pipe")) | strings | numbers | |:----------|----------:| | spam | 41.9999 | | eggs | 451 | >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe")) |:-----|---------:| | spam | 41.9999 | | eggs | 451 | "orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They are slightly different from "pipe" format by not using colons to define column alignment, and using a "+" sign to indicate line intersections: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "orgtbl")) | strings | numbers | |-----------+-----------| | spam | 41.9999 | | eggs | 451 | >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl")) | spam | 41.9999 | | eggs | 451 | "rst" is like a simple table format from reStructuredText; please note that reStructuredText accepts also "grid" tables: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "rst")) ========= ========= strings numbers ========= ========= spam 41.9999 eggs 451 ========= ========= >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst")) ==== ======== spam 41.9999 eggs 451 ==== ======== "mediawiki" produces a table markup used in Wikipedia and on other MediaWiki-based sites: >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]], ... headers="firstrow", tablefmt="mediawiki")) {| class="wikitable" style="text-align: left;" |+ |- ! strings !! align="right"| numbers |- | spam || align="right"| 41.9999 |- | eggs || align="right"| 451 |} "html" produces HTML markup: >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]], ... headers="firstrow", tablefmt="html"))
<table>
<tr><th>strings  </th><th style="text-align: right;">  numbers</th></tr>
<tr><td>spam     </td><td style="text-align: right;">  41.9999</td></tr>
<tr><td>eggs     </td><td style="text-align: right;"> 451     </td></tr>
</table>
"latex" produces a tabular environment of LaTeX document markup: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex")) \\begin{tabular}{lr} \\hline spam & 41.9999 \\\\ eggs & 451 \\\\ \\hline \\end{tabular} "latex_booktabs" produces a tabular environment of LaTeX document markup using the booktabs.sty package: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs")) \\begin{tabular}{lr} \\toprule spam & 41.9999 \\\\ eggs & 451 \\\\ \\bottomrule \end{tabular} Also returns a tuple of the raw rows pulled from tabular_data """ if tabular_data is None: tabular_data = [] list_of_lists, headers = _normalize_tabular_data(tabular_data, headers) # optimization: look for ANSI control codes once, # enable smart width functions only if a control code is found _text_type_encode = lambda x: _text_type(utf8tounicode(x)) plain_text = '\n'.join(['\t'.join(map(_text_type_encode, headers))] + \ ['\t'.join(map(_text_type_encode, row)) for row in list_of_lists]) has_invisible = (re.search(_invisible_codes, plain_text) or re.search(_invisible_codes, missingval)) if has_invisible: width_fn = _visible_width else: width_fn = wcswidth # format rows and columns, convert numeric values to strings cols = list(zip(*list_of_lists)) coltypes = list(map(_column_type, cols)) cols = [[_format(v, ct, dcmlfmt, floatfmt, missingval) for v in c] for c,ct in zip(cols, coltypes)] # align columns aligns = [numalign if ct in [int,float] else stralign for ct in coltypes] minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0]*len(cols) cols = [_align_column(c, a, minw, has_invisible) for c, a, minw in zip(cols, aligns, minwidths)] if headers: # align headers and add headers t_cols = cols or [['']] * len(headers) t_aligns = aligns or [stralign] * len(headers) minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, t_cols)] headers = [_align_header(h, a, minw) for h, a, minw in zip(headers, t_aligns, minwidths)] rows = list(zip(*cols)) else: minwidths = [width_fn(c[0]) for c in cols] rows = list(zip(*cols)) if not isinstance(tablefmt, TableFormat): tablefmt = _table_formats.get(tablefmt, _table_formats["simple"]) return _format_table(tablefmt, headers, rows, minwidths, aligns), rows def _build_simple_row(padded_cells, rowfmt): "Format row according to DataRow format without padding." begin, sep, end = rowfmt return (begin + sep.join(padded_cells) + end).rstrip() def _build_row(padded_cells, colwidths, colaligns, rowfmt): "Return a string which represents a row of data cells." if not rowfmt: return None if hasattr(rowfmt, "__call__"): return rowfmt(padded_cells, colwidths, colaligns) else: return _build_simple_row(padded_cells, rowfmt) def _build_line(colwidths, colaligns, linefmt): "Return a string which represents a horizontal line." 
if not linefmt: return None if hasattr(linefmt, "__call__"): return linefmt(colwidths, colaligns) else: begin, fill, sep, end = linefmt cells = [fill*w for w in colwidths] return _build_simple_row(cells, (begin, sep, end)) def _pad_row(cells, padding): if cells: pad = " "*padding padded_cells = [pad + cell + pad for cell in cells] return padded_cells else: return cells def _format_table(fmt, headers, rows, colwidths, colaligns): """Produce a plain-text representation of the table.""" lines = [] hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else [] pad = fmt.padding headerrow = fmt.headerrow padded_widths = [(w + 2*pad) for w in colwidths] padded_headers = _pad_row(headers, pad) padded_rows = [_pad_row(row, pad) for row in rows] if fmt.lineabove and "lineabove" not in hidden: lines.append(_build_line(padded_widths, colaligns, fmt.lineabove)) if padded_headers: lines.append(_build_row(padded_headers, padded_widths, colaligns, headerrow)) if fmt.linebelowheader and "linebelowheader" not in hidden: lines.append(_build_line(padded_widths, colaligns, fmt.linebelowheader)) if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden: # initial rows with a line below for row in padded_rows[:-1]: lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow)) lines.append(_build_line(padded_widths, colaligns, fmt.linebetweenrows)) # the last row without a line below lines.append(_build_row(padded_rows[-1], padded_widths, colaligns, fmt.datarow)) else: for row in padded_rows: lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow)) if fmt.linebelow and "linebelow" not in hidden: lines.append(_build_line(padded_widths, colaligns, fmt.linebelow)) return "\n".join(lines) def _main(): """\ Usage: tabulate [options] [FILE ...] Pretty-print tabular data. See also https://bitbucket.org/astanin/python-tabulate FILE a filename of the file with tabular data; if "-" or missing, read data from stdin. 
Options: -h, --help show this message -1, --header use the first row of data as a table header -s REGEXP, --sep REGEXP use a custom column separator (default: whitespace) -f FMT, --format FMT set output table format; supported formats: plain, simple, grid, fancy_grid, pipe, orgtbl, rst, mediawiki, html, latex, latex_booktabs, tsv (default: simple) """ import getopt import sys import textwrap usage = textwrap.dedent(_main.__doc__) try: opts, args = getopt.getopt(sys.argv[1:], "h1f:s:", ["help", "header", "format=", "sep="]) except getopt.GetoptError as e: print(e) print(usage) sys.exit(2) headers = [] tablefmt = "simple" sep = r"\s+" for opt, value in opts: if opt in ["-1", "--header"]: headers = "firstrow" elif opt in ["-f", "--format"]: if value not in tabulate_formats: print("%s is not a supported table format" % value) print(usage) sys.exit(3) tablefmt = value elif opt in ["-s", "--sep"]: sep = value elif opt in ["-h", "--help"]: print(usage) sys.exit(0) files = [sys.stdin] if not args else args for f in files: if f == "-": f = sys.stdin if _is_file(f): _pprint_file(f, headers=headers, tablefmt=tablefmt, sep=sep) else: with open(f) as fobj: _pprint_file(fobj, headers=headers, tablefmt=tablefmt, sep=sep) def _pprint_file(fobject, headers, tablefmt, sep): rows = fobject.readlines() table = [re.split(sep, r.rstrip()) for r in rows] print(tabulate(table, headers, tablefmt)) if __name__ == "__main__": _main() pgcli-1.6.0/pgcli/pgbuffer.py0000644000076500000240000000313212766703104016316 0ustar irinastaff00000000000000from prompt_toolkit.buffer import Buffer from prompt_toolkit.filters import Condition from .packages.parseutils.utils import is_open_quote class PGBuffer(Buffer): def __init__(self, always_multiline, multiline_mode, *args, **kwargs): self.always_multiline = always_multiline self.multiline_mode = multiline_mode @Condition def is_multiline(): doc = self.document if not self.always_multiline: return False if self.multiline_mode == 'safe': return True else: return not _multiline_exception(doc.text) super(self.__class__, self).__init__(*args, is_multiline=is_multiline, tempfile_suffix='.sql', **kwargs) def _is_complete(sql): # A complete command is an sql statement that ends with a semicolon, unless # there's an open quote surrounding it, as is common when writing a # CREATE FUNCTION command return sql.endswith(';') and not is_open_quote(sql) def _multiline_exception(text): text = text.strip() return (text.startswith('\\') or # Special Command text.endswith('\e') or # Ended with \e which should launch the editor. _is_complete(text) or # A complete SQL command (text == 'exit') or # Exit doesn't need semi-colon (text == 'quit') or # Quit doesn't need semi-colon (text == ':q') or # To all the vim fans out there (text == '') # Just a plain enter without any text ) pgcli-1.6.0/pgcli/pgclirc0000644000076500000240000001262513076754403015514 0ustar irinastaff00000000000000# vi: ft=dosini [main] # Enables context sensitive auto-completion. If this is disabled then all # possible completions will be listed. smart_completion = True # Display the completions in several columns. (More completions will be # visible.) wider_completion_menu = False # Multi-line mode allows breaking up the sql statements into multiple lines. If # this is set to True, then the end of the statements must have a semi-colon. # If this is set to False then sql statements can't be split into multiple # lines. End of line (return) is considered as the end of the statement.
multi_line = False # If multi_line_mode is set to "psql", in multi-line mode, [Enter] will execute # the current input if the input ends in a semicolon. # If multi_line_mode is set to "safe", in multi-line mode, [Enter] will always # insert a newline, and [Esc] [Enter] or [Alt]-[Enter] must be used to execute # a command. multi_line_mode = psql # Enables expand mode, which is similar to `\x` in psql. expand = False # Enables auto expand mode, which is similar to `\x auto` in psql. auto_expand = False # If set to True, table suggestions will include a table alias generate_aliases = False # log_file location. # In Unix/Linux: ~/.config/pgcli/log # In Windows: %USERPROFILE%\AppData\Local\dbcli\pgcli\log # %USERPROFILE% is typically C:\Users\{username} log_file = default # Keyword casing preference. Possible values "lower", "upper", "auto" keyword_casing = upper # casing_file location. # In Unix/Linux: ~/.config/pgcli/casing # In Windows: %USERPROFILE%\AppData\Local\dbcli\pgcli\casing # %USERPROFILE% is typically C:\Users\{username} casing_file = default # If generate_casing_file is set to True and there is no file in the above # location, one will be generated based on usage in SQL/PLPGSQL functions. generate_casing_file = False # Casing of column headers based on the casing_file described above case_column_headers = True # history_file location. # In Unix/Linux: ~/.config/pgcli/history # In Windows: %USERPROFILE%\AppData\Local\dbcli\pgcli\history # %USERPROFILE% is typically C:\Users\{username} history_file = default # Default log level. Possible values: "CRITICAL", "ERROR", "WARNING", "INFO" # and "DEBUG". "NONE" disables logging. log_level = INFO # Order of columns when expanding * to column list # Possible values: "table_order" and "alphabetic" asterisk_column_order = table_order # Whether to qualify with table alias/name when suggesting columns # Possible values: "always", "never" and "if_more_than_one_table" qualify_columns = if_more_than_one_table # When no schema is entered, only suggest objects in search_path search_path_filter = False # Default pager. # By default the 'PAGER' environment variable is used # pager = less -SRXF # Timing of sql statements and table rendering. timing = True # Table format. Possible values: psql, plain, simple, grid, fancy_grid, pipe, # orgtbl, rst, mediawiki, html, latex, latex_booktabs. # Recommended: psql, fancy_grid and grid. table_format = psql # Syntax Style. Possible values: manni, igor, xcode, vim, autumn, vs, rrt, # native, perldoc, borland, tango, emacs, friendly, monokai, paraiso-dark, # colorful, murphy, bw, pastie, paraiso-light, trac, default, fruity syntax_style = default # Keybindings: # When Vi mode is enabled you can use modal editing features offered by Vi in the REPL. # When Vi mode is disabled emacs keybindings such as Ctrl-A for home and Ctrl-E # for end are available in the REPL. vi = False # Error handling # When one of multiple SQL statements causes an error, choose to either # continue executing the remaining statements, or stop # Possible values "STOP" or "RESUME" on_error = STOP # Set threshold for row limit prompt. Use 0 to disable prompt. row_limit = 1000 # Skip intro on startup and goodbye on exit less_chatty = False # Postgres prompt # \u - Username # \h - Hostname of the server # \d - Database name # \n - Newline prompt = '\u@\h:\d> ' # Number of lines to reserve for the suggestion menu min_num_menu_lines = 4 # Character used to left pad multi-line queries to match the prompt size. multiline_continuation_char = '.'
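# Illustrative example (not a setting): with the prompt string above,
# '\u@\h:\d> ', a session for user "postgres" on host "localhost"
# connected to database "appdb" renders the prompt as:
#   postgres@localhost:appdb>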
# Custom colors for the completion menu, toolbar, etc. [colors] Token.Menu.Completions.Completion.Current = 'bg:#ffffff #000000' Token.Menu.Completions.Completion = 'bg:#008888 #ffffff' Token.Menu.Completions.Meta.Current = 'bg:#44aaaa #000000' Token.Menu.Completions.Meta = 'bg:#448888 #ffffff' Token.Menu.Completions.MultiColumnMeta = 'bg:#aaffff #000000' Token.Menu.Completions.ProgressButton = 'bg:#003333' Token.Menu.Completions.ProgressBar = 'bg:#00aaaa' Token.SelectedText = '#ffffff bg:#6666aa' Token.SearchMatch = '#ffffff bg:#4444aa' Token.SearchMatch.Current = '#ffffff bg:#44aa44' Token.Toolbar = 'bg:#222222 #aaaaaa' Token.Toolbar.Off = 'bg:#222222 #888888' Token.Toolbar.On = 'bg:#222222 #ffffff' Token.Toolbar.Search = 'noinherit bold' Token.Toolbar.Search.Text = 'nobold' Token.Toolbar.System = 'noinherit bold' Token.Toolbar.Arg = 'noinherit bold' Token.Toolbar.Arg.Text = 'nobold' Token.Toolbar.Transaction.Valid = 'bg:#222222 #00ff5f bold' Token.Toolbar.Transaction.Failed = 'bg:#222222 #ff005f bold' # Named queries are queries you can execute by name. [named queries] # DSNs to call via the -D option [alias_dsn] # example_dsn = postgresql://[user[:password]@][netloc][:port][/dbname] # Format for number representation # for decimal "d" - 12345678, ",d" - 12,345,678 # for float "g" - 123456.78, ",g" - 123,456.78 [data_formats] decimal = "" float = "" pgcli-1.6.0/pgcli/pgcompleter.py0000644000076500000240000010623513101210552017027 0ustar irinastaff00000000000000from __future__ import print_function, unicode_literals import logging import re from itertools import count, repeat, chain import operator from collections import namedtuple, defaultdict from pgspecial.namedqueries import NamedQueries from prompt_toolkit.completion import Completer, Completion from prompt_toolkit.contrib.completers import PathCompleter from prompt_toolkit.document import Document from .packages.sqlcompletion import (FromClauseItem, suggest_type, Special, Database, Schema, Table, Function, Column, View, Keyword, NamedQuery, Datatype, Alias, Path, JoinCondition, Join) from .packages.parseutils.meta import ColumnMetadata, ForeignKey from .packages.parseutils.utils import last_word from .packages.parseutils.tables import TableReference from .packages.pgliterals.main import get_literals from .packages.prioritization import PrevalenceCounter from .config import load_config, config_location try: from collections import OrderedDict except ImportError: from .packages.ordereddict import OrderedDict _logger = logging.getLogger(__name__) NamedQueries.instance = NamedQueries.from_config( load_config(config_location() + 'config')) Match = namedtuple('Match', ['completion', 'priority']) _SchemaObject = namedtuple('SchemaObject', ['name', 'schema', 'function']) def SchemaObject(name, schema=None, function=False): return _SchemaObject(name, schema, function) _Candidate = namedtuple( 'Candidate', ['completion', 'prio', 'meta', 'synonyms', 'prio2'] ) def Candidate(completion, prio=None, meta=None, synonyms=None, prio2=None): return _Candidate(completion, prio, meta, synonyms or [completion], prio2) normalize_ref = lambda ref: ref if ref[0] == '"' else '"' + ref.lower() + '"' def generate_alias(tbl): """ Generate a table alias, consisting of all upper-case letters in the table name, or, if there are no upper-case letters, the first letter + all letters preceded by _ param tbl - unescaped name of the table to alias """ return ''.join([l for l in tbl if l.isupper()] or [l for l, prev in zip(tbl, '_' + tbl) if prev == '_' and l != '_'])
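# Doctest-style examples for the two helpers above (the table names are
# made up for illustration):
#
#   >>> normalize_ref('FooBar')        # unquoted refs compare case-folded
#   '"foobar"'
#   >>> normalize_ref('"FooBar"')      # already-quoted refs are kept as-is
#   '"FooBar"'
#   >>> generate_alias('FooBar')       # upper-case letters win
#   'FB'
#   >>> generate_alias('order_items')  # first letter + letters after '_'
#   'oi'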
class PGCompleter(Completer): # keywords_tree: A dict mapping keywords to well known following keywords. # e.g. 'CREATE': ['TABLE', 'USER', ...], keywords_tree = get_literals('keywords', type_=dict) keywords = tuple(set(chain(keywords_tree.keys(), *keywords_tree.values()))) functions = get_literals('functions') datatypes = get_literals('datatypes') def __init__(self, smart_completion=True, pgspecial=None, settings=None): super(PGCompleter, self).__init__() self.smart_completion = smart_completion self.pgspecial = pgspecial self.prioritizer = PrevalenceCounter() settings = settings or {} self.search_path_filter = settings.get('search_path_filter') self.generate_aliases = settings.get('generate_aliases') self.casing_file = settings.get('casing_file') self.generate_casing_file = settings.get('generate_casing_file') self.qualify_columns = settings.get( 'qualify_columns', 'if_more_than_one_table') self.asterisk_column_order = settings.get( 'asterisk_column_order', 'table_order') keyword_casing = settings.get('keyword_casing', 'upper').lower() if keyword_casing not in ('upper', 'lower', 'auto'): keyword_casing = 'upper' self.keyword_casing = keyword_casing self.reserved_words = set() for x in self.keywords: self.reserved_words.update(x.split()) self.name_pattern = re.compile(r"^[_a-z][_a-z0-9\$]*$") self.databases = [] self.dbmetadata = {'tables': {}, 'views': {}, 'functions': {}, 'datatypes': {}} self.search_path = [] self.casing = {} self.all_completions = set(self.keywords + self.functions) def escape_name(self, name): if name and ((not self.name_pattern.match(name)) or (name.upper() in self.reserved_words) or (name.upper() in self.functions)): name = '"%s"' % name return name def unescape_name(self, name): """ Unquote a string.""" if name and name[0] == '"' and name[-1] == '"': name = name[1:-1] return name def escaped_names(self, names): return [self.escape_name(name) for name in names] def extend_database_names(self, databases): databases = self.escaped_names(databases) self.databases.extend(databases) def extend_keywords(self, additional_keywords): # self.keywords is a tuple, so rebind rather than mutating in place self.keywords = self.keywords + tuple(additional_keywords) self.all_completions.update(additional_keywords) def extend_schemata(self, schemata): # schemata is a list of schema names schemata = self.escaped_names(schemata) # dbmetadata.values() are the 'tables', 'views', 'functions' and # 'datatypes' dicts; add an empty entry for each schema to all of them for metadata in self.dbmetadata.values(): for schema in schemata: metadata[schema] = {} self.all_completions.update(schemata) def extend_casing(self, words): """Extend casing data.""" # casing should be a dict {lowercasename:PreferredCasingName} self.casing = dict((word.lower(), word) for word in words) def extend_relations(self, data, kind): """ extend metadata for tables or views :param data: list of (schema_name, rel_name) tuples :param kind: either 'tables' or 'views' :return: """ data = [self.escaped_names(d) for d in data] # dbmetadata['tables']['schema_name']['table_name'] should be an # OrderedDict {column_name:ColumnMetaData}.
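# Illustrative shape of self.dbmetadata (names are made up), after a
# refresh against a database with a single public.users table:
#
#   {'tables':    {'public': {'users': OrderedDict([('id', ColumnMetadata(...)),
#                                                   ('email', ColumnMetadata(...))])}},
#    'views':     {'public': {}},
#    'functions': {'public': {}},
#    'datatypes': {'public': {}}}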
metadata = self.dbmetadata[kind] for schema, relname in data: try: metadata[schema][relname] = OrderedDict() except KeyError: _logger.error('%r %r listed in unrecognized schema %r', kind, relname, schema) self.all_completions.add(relname) def extend_columns(self, column_data, kind): """ extend column metadata :param column_data: list of (schema_name, rel_name, column_name, column_type) tuples :param kind: either 'tables' or 'views' :return: """ metadata = self.dbmetadata[kind] for schema, relname, colname, datatype in column_data: (schema, relname, colname) = self.escaped_names( [schema, relname, colname]) column = ColumnMetadata(name=colname, datatype=datatype, foreignkeys=[]) metadata[schema][relname][colname] = column self.all_completions.add(colname) def extend_functions(self, func_data): # func_data is a list of function metadata namedtuples # with fields schema_name, func_name, arg_list, result, # is_aggregate, is_window, is_set_returning # dbmetadata['functions']['schema_name']['function_name'] should return # the function metadata namedtuple for the corresponding function metadata = self.dbmetadata['functions'] for f in func_data: schema, func = self.escaped_names([f.schema_name, f.func_name]) if func in metadata[schema]: metadata[schema][func].append(f) else: metadata[schema][func] = [f] self.all_completions.add(func) def extend_foreignkeys(self, fk_data): # fk_data is a list of ForeignKey namedtuples, with fields # parentschema, childschema, parenttable, childtable, # parentcolumn, childcolumn # These are added as a list of ForeignKey namedtuples to the # ColumnMetadata namedtuple for both the child and parent meta = self.dbmetadata['tables'] for fk in fk_data: e = self.escaped_names parentschema, childschema = e([fk.parentschema, fk.childschema]) parenttable, childtable = e([fk.parenttable, fk.childtable]) childcol, parcol = e([fk.childcolumn, fk.parentcolumn]) childcolmeta = meta[childschema][childtable][childcol] parcolmeta = meta[parentschema][parenttable][parcol] fk = ForeignKey(parentschema, parenttable, parcol, childschema, childtable, childcol) childcolmeta.foreignkeys.append(fk) parcolmeta.foreignkeys.append(fk) def extend_datatypes(self, type_data): # dbmetadata['datatypes'][schema_name][type_name] should store type # metadata, such as composite type field names. Currently, we're not # storing any metadata beyond typename, so just store None meta = self.dbmetadata['datatypes'] for t in type_data: schema, type_name = self.escaped_names(t) meta[schema][type_name] = None self.all_completions.add(type_name) def extend_query_history(self, text, is_init=False): if is_init: # During completer initialization, only load keyword preferences, # not names self.prioritizer.update_keywords(text) else: self.prioritizer.update(text) def set_search_path(self, search_path): self.search_path = self.escaped_names(search_path) def reset_completions(self): self.databases = [] self.special_commands = [] self.search_path = [] self.dbmetadata = {'tables': {}, 'views': {}, 'functions': {}, 'datatypes': {}} self.all_completions = set(self.keywords + self.functions) def find_matches(self, text, collection, mode='fuzzy', meta=None): """Find completion matches for the given text. Given the user's input text and a collection of available completions, find completions matching the last word of the text. `collection` can be either a list of strings or a list of Candidate namedtuples.
`mode` can be either 'fuzzy' or 'strict'. 'fuzzy': fuzzy matching, ties broken by name prevalence 'strict': start-only matching, ties broken by keyword prevalence Returns a list of Match namedtuples, each wrapping a prompt_toolkit Completion instance, for the matches found in the collection of available completions. """ if not collection: return [] prio_order = [ 'keyword', 'function', 'view', 'table', 'datatype', 'database', 'schema', 'column', 'table alias', 'join', 'name join', 'fk join' ] type_priority = prio_order.index(meta) if meta in prio_order else -1 text = last_word(text, include='most_punctuations').lower() text_len = len(text) if text and text[0] == '"': # text starts with double quote; user is manually escaping a name # Match on everything that follows the double-quote. Note that # text_len is calculated before removing the quote, so the # Completion.position value is correct text = text[1:] if mode == 'fuzzy': fuzzy = True priority_func = self.prioritizer.name_count else: fuzzy = False priority_func = self.prioritizer.keyword_count # Construct a `_match` function for either fuzzy or non-fuzzy matching # The match function returns a 2-tuple used for sorting the matches, # or None if the item doesn't match # Note: higher priority values mean more important, so use negative # signs to flip the direction of the tuple if fuzzy: regex = '.*?'.join(map(re.escape, text)) pat = re.compile('(%s)' % regex) def _match(item): if item.lower()[:len(text) + 1] in (text, text + ' '): # Exact match of first word in suggestion # This is to get exact alias matches to the top # E.g. for input `e`, 'Entries E' should be on top # (before e.g. `EndUsers EU`) return float('Infinity'), -1 r = pat.search(self.unescape_name(item.lower())) if r: return -len(r.group()), -r.start() else: match_end_limit = len(text) def _match(item): match_point = item.lower().find(text, 0, match_end_limit) if match_point >= 0: # Use negative infinity to force keywords to sort after all # fuzzy matches return -float('Infinity'), -match_point matches = [] for cand in collection: if isinstance(cand, _Candidate): item, prio, display_meta, synonyms, prio2 = cand if display_meta is None: display_meta = meta syn_matches = (_match(x) for x in synonyms) # Nones need to be removed to avoid max() crashing in Python 3 syn_matches = [m for m in syn_matches if m] sort_key = max(syn_matches) if syn_matches else None else: item, display_meta, prio, prio2 = cand, meta, 0, 0 sort_key = _match(cand) if sort_key: if display_meta and len(display_meta) > 50: # Truncate meta-text to 50 characters, if necessary display_meta = display_meta[:47] + u'...' # Lexical order of items in the collection, used for # tiebreaking items with the same match group length and start # position. Since we use *higher* priority to mean "more # important," we use -ord(c) to prioritize "aa" > "ab" and end # with 1 to prioritize shorter strings (ie "user" > "users"). # We first do a case-insensitive sort and then a # case-sensitive one as a tie breaker. # We also use the unescape_name to make sure quoted names have # the same priority as unquoted names.
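# Illustrative worked example (item values made up): for item 'users'
# the tuple below is
#   (-ord('u'), -ord('s'), -ord('e'), -ord('r'), -ord('s'), 1,
#    'u', 's', 'e', 'r', 's')
# whereas 'user' has one fewer -ord() term before the trailing 1, so
# 'user' compares greater than (i.e. outranks) 'users', and 'aa'
# likewise outranks 'ab'.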
lexical_priority = (tuple(0 if c in ' _' else -ord(c) for c in self.unescape_name(item.lower())) + (1,) + tuple(c for c in item)) item = self.case(item) priority = ( sort_key, type_priority, prio, priority_func(item), prio2, lexical_priority ) matches.append(Match( completion=Completion(item, -text_len, display_meta=display_meta), priority=priority)) return matches def case(self, word): return self.casing.get(word, word) def get_completions(self, document, complete_event, smart_completion=None): word_before_cursor = document.get_word_before_cursor(WORD=True) if smart_completion is None: smart_completion = self.smart_completion # If smart_completion is off then match any word that starts with # 'word_before_cursor'. if not smart_completion: matches = self.find_matches(word_before_cursor, self.all_completions, mode='strict') completions = [m.completion for m in matches] return sorted(completions, key=operator.attrgetter('text')) matches = [] suggestions = suggest_type(document.text, document.text_before_cursor) for suggestion in suggestions: suggestion_type = type(suggestion) _logger.debug('Suggestion type: %r', suggestion_type) # Map suggestion type to method # e.g. 'table' -> self.get_table_matches matcher = self.suggestion_matchers[suggestion_type] matches.extend(matcher(self, suggestion, word_before_cursor)) # Sort matches so highest priorities are first matches = sorted(matches, key=operator.attrgetter('priority'), reverse=True) return [m.completion for m in matches] def get_column_matches(self, suggestion, word_before_cursor): tables = suggestion.table_refs do_qualify = suggestion.qualifiable and {'always': True, 'never': False, 'if_more_than_one_table': len(tables) > 1}[self.qualify_columns] qualify = lambda col, tbl: ( (tbl + '.' + self.case(col)) if do_qualify else self.case(col)) _logger.debug("Completion column scope: %r", tables) scoped_cols = self.populate_scoped_cols(tables, suggestion.local_tables) colit = scoped_cols.items def make_cand(name, ref): synonyms = (name, generate_alias(self.case(name))) return Candidate(qualify(name, ref), 0, 'column', synonyms) flat_cols = [] for t, cols in colit(): for c in cols: flat_cols.append(make_cand(c.name, t.ref)) if suggestion.require_last_table: # require_last_table is used for 'tbl1 JOIN tbl2 USING (...' which should # suggest only columns that appear in both the last table and at # least one other table ltbl = tables[-1].ref flat_cols = list( set(c.name for t, cs in colit() if t.ref == ltbl for c in cs) & set(c.name for t, cs in colit() if t.ref != ltbl for c in cs)) lastword = last_word(word_before_cursor, include='most_punctuations') if lastword == '*': if self.asterisk_column_order == 'alphabetic': flat_cols.sort() for cols in scoped_cols.values(): cols.sort(key=operator.attrgetter('name')) if (lastword != word_before_cursor and len(tables) == 1 and word_before_cursor[-len(lastword) - 1] == '.'): # User typed x.*; replicate "x."
for all columns except the # first, which gets the original (as we only replace the "*") sep = ', ' + word_before_cursor[:-1] collist = sep.join(self.case(c.completion) for c in flat_cols) else: collist = ', '.join(qualify(c.name, t.ref) for t, cs in colit() for c in cs) return [Match(completion=Completion(collist, -1, display_meta='columns', display='*'), priority=(1, 1, 1))] return self.find_matches(word_before_cursor, flat_cols, meta='column') def alias(self, tbl, tbls): """ Generate a unique table alias tbl - name of the table to alias, quoted if it needs to be tbls - TableReference iterable of tables already in query """ tbl = self.case(tbl) tbls = set(normalize_ref(t.ref) for t in tbls) if self.generate_aliases: tbl = generate_alias(self.unescape_name(tbl)) if normalize_ref(tbl) not in tbls: return tbl elif tbl[0] == '"': aliases = ('"' + tbl[1:-1] + str(i) + '"' for i in count(2)) else: aliases = (tbl + str(i) for i in count(2)) return next(a for a in aliases if normalize_ref(a) not in tbls) def get_join_matches(self, suggestion, word_before_cursor): tbls = suggestion.table_refs cols = self.populate_scoped_cols(tbls) # Set up some data structures for efficient access qualified = dict((normalize_ref(t.ref), t.schema) for t in tbls) ref_prio = dict((normalize_ref(t.ref), n) for n, t in enumerate(tbls)) refs = set(normalize_ref(t.ref) for t in tbls) other_tbls = set((t.schema, t.name) for t in list(cols)[:-1]) joins = [] # Iterate over FKs in existing tables to find potential joins fks = ((fk, rtbl, rcol) for rtbl, rcols in cols.items() for rcol in rcols for fk in rcol.foreignkeys) col = namedtuple('col', 'schema tbl col') for fk, rtbl, rcol in fks: right = col(rtbl.schema, rtbl.name, rcol.name) child = col(fk.childschema, fk.childtable, fk.childcolumn) parent = col(fk.parentschema, fk.parenttable, fk.parentcolumn) left = child if parent == right else parent if suggestion.schema and left.schema != suggestion.schema: continue c = self.case if self.generate_aliases or normalize_ref(left.tbl) in refs: lref = self.alias(left.tbl, suggestion.table_refs) join = '{0} {4} ON {4}.{1} = {2}.{3}'.format( c(left.tbl), c(left.col), rtbl.ref, c(right.col), lref) else: join = '{0} ON {0}.{1} = {2}.{3}'.format( c(left.tbl), c(left.col), rtbl.ref, c(right.col)) alias = generate_alias(self.case(left.tbl)) synonyms = [join, '{0} ON {0}.{1} = {2}.{3}'.format( alias, c(left.col), rtbl.ref, c(right.col))] # Schema-qualify if (1) new table in same schema as old, and old # is schema-qualified, or (2) new in other schema, except public if not suggestion.schema and (qualified[normalize_ref(rtbl.ref)] and left.schema == right.schema or left.schema not in (right.schema, 'public')): join = left.schema + '.' + join prio = ref_prio[normalize_ref(rtbl.ref)] * 2 + ( 0 if (left.schema, left.tbl) in other_tbls else 1) joins.append(Candidate(join, prio, 'join', synonyms=synonyms)) return self.find_matches(word_before_cursor, joins, meta='join') def get_join_condition_matches(self, suggestion, word_before_cursor): col = namedtuple('col', 'schema tbl col') tbls = self.populate_scoped_cols(suggestion.table_refs).items cols = [(t, c) for t, cs in tbls() for c in cs] try: lref = (suggestion.parent or suggestion.table_refs[-1]).ref ltbl, lcols = [(t, cs) for (t, cs) in tbls() if t.ref == lref][-1] except IndexError: # The user typed an incorrect table qualifier return [] conds, found_conds = [], set() def add_cond(lcol, rcol, rref, prio, meta): prefix = '' if suggestion.parent else ltbl.ref + '.'
case = self.case cond = prefix + case(lcol) + ' = ' + rref + '.' + case(rcol) if cond not in found_conds: found_conds.add(cond) conds.append(Candidate(cond, prio + ref_prio[rref], meta)) def list_dict(pairs): # Turns [(a, b), (a, c)] into {a: [b, c]} d = defaultdict(list) for pair in pairs: d[pair[0]].append(pair[1]) return d # Tables that are closer to the cursor get higher prio ref_prio = dict((tbl.ref, num) for num, tbl in enumerate(suggestion.table_refs)) # Map (schema, table, col) to tables coldict = list_dict(((t.schema, t.name, c.name), t) for t, c in cols if t.ref != lref) # For each fk from the left table, generate a join condition if # the other table is also in the scope fks = ((fk, lcol.name) for lcol in lcols for fk in lcol.foreignkeys) for fk, lcol in fks: left = col(ltbl.schema, ltbl.name, lcol) child = col(fk.childschema, fk.childtable, fk.childcolumn) par = col(fk.parentschema, fk.parenttable, fk.parentcolumn) left, right = (child, par) if left == child else (par, child) for rtbl in coldict[right]: add_cond(left.col, right.col, rtbl.ref, 2000, 'fk join') # For name matching, use a {(colname, coltype): TableReference} dict coltyp = namedtuple('coltyp', 'name datatype') col_table = list_dict((coltyp(c.name, c.datatype), t) for t, c in cols) # Find all name-match join conditions for c in (coltyp(c.name, c.datatype) for c in lcols): for rtbl in (t for t in col_table[c] if t.ref != ltbl.ref): prio = 1000 if c.datatype in ( 'integer', 'bigint', 'smallint') else 0 add_cond(c.name, c.name, rtbl.ref, prio, 'name join') return self.find_matches(word_before_cursor, conds, meta='join') def get_function_matches(self, suggestion, word_before_cursor, alias=False): def _cand(func, alias): return self._make_cand(func, alias, suggestion) if suggestion.filter == 'for_from_clause': # Only suggest functions allowed in FROM clause filt = lambda f: not f.is_aggregate and not f.is_window funcs = [_cand(f, alias) for f in self.populate_functions(suggestion.schema, filt)] else: fs = self.populate_schema_objects(suggestion.schema, 'functions') funcs = [_cand(f, alias=False) for f in fs] # Function overloading means we may have multiple functions of the same # name at this point, so keep unique names only funcs = set(funcs) funcs = self.find_matches(word_before_cursor, funcs, meta='function') if not suggestion.schema and not suggestion.filter: # also suggest hardcoded functions using startswith matching predefined_funcs = self.find_matches( word_before_cursor, self.functions, mode='strict', meta='function') funcs.extend(predefined_funcs) return funcs def get_schema_matches(self, _, word_before_cursor): schema_names = self.dbmetadata['tables'].keys() # Unless we're sure the user really wants them, hide schema names # starting with pg_, which are mostly temporary schemas if not word_before_cursor.startswith('pg_'): schema_names = [s for s in schema_names if not s.startswith('pg_')] return self.find_matches(word_before_cursor, schema_names, meta='schema') def get_from_clause_item_matches(self, suggestion, word_before_cursor): alias = self.generate_aliases s = suggestion t_sug = Table(s.schema, s.table_refs, s.local_tables) v_sug = View(s.schema, s.table_refs) f_sug = Function(s.schema, s.table_refs, filter='for_from_clause') return (self.get_table_matches(t_sug, word_before_cursor, alias) + self.get_view_matches(v_sug, word_before_cursor, alias) + self.get_function_matches(f_sug, word_before_cursor, alias)) # Note: tbl is a SchemaObject def _make_cand(self, tbl, do_alias, suggestion): cased_tbl =
self.case(tbl.name) if do_alias: alias = self.alias(cased_tbl, suggestion.table_refs) synonyms = (cased_tbl, generate_alias(cased_tbl)) maybe_parens = '()' if tbl.function else '' maybe_alias = (' ' + alias) if do_alias else '' maybe_schema = (self.case(tbl.schema) + '.') if tbl.schema else '' item = maybe_schema + cased_tbl + maybe_parens + maybe_alias prio2 = 0 if tbl.schema else 1 return Candidate(item, synonyms=synonyms, prio2=prio2) def get_table_matches(self, suggestion, word_before_cursor, alias=False): tables = self.populate_schema_objects(suggestion.schema, 'tables') tables.extend(SchemaObject(tbl.name) for tbl in suggestion.local_tables) # Unless we're sure the user really wants them, don't suggest the # pg_catalog tables that are implicitly on the search path if not suggestion.schema and ( not word_before_cursor.startswith('pg_')): tables = [t for t in tables if not t.name.startswith('pg_')] tables = [self._make_cand(t, alias, suggestion) for t in tables] return self.find_matches(word_before_cursor, tables, meta='table') def get_view_matches(self, suggestion, word_before_cursor, alias=False): views = self.populate_schema_objects(suggestion.schema, 'views') if not suggestion.schema and ( not word_before_cursor.startswith('pg_')): views = [v for v in views if not v.name.startswith('pg_')] views = [self._make_cand(v, alias, suggestion) for v in views] return self.find_matches(word_before_cursor, views, meta='view') def get_alias_matches(self, suggestion, word_before_cursor): aliases = suggestion.aliases return self.find_matches(word_before_cursor, aliases, meta='table alias') def get_database_matches(self, _, word_before_cursor): return self.find_matches(word_before_cursor, self.databases, meta='database') def get_keyword_matches(self, suggestion, word_before_cursor): keywords = self.keywords_tree.keys() # Get well known following keywords for the last token. If any, narrow # candidates to this list. 
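# Illustrative example (tree contents are abbreviated): with
#   keywords_tree = {'CREATE': ['TABLE', 'USER', ...], ...}
# a last_token of 'CREATE' narrows the candidates to TABLE, USER, etc.,
# while an unrecognized last_token leaves the full keyword list intact.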
next_keywords = self.keywords_tree.get(suggestion.last_token, []) if next_keywords: keywords = next_keywords casing = self.keyword_casing if casing == 'auto': if word_before_cursor and word_before_cursor[-1].islower(): casing = 'lower' else: casing = 'upper' if casing == 'upper': keywords = [k.upper() for k in keywords] else: keywords = [k.lower() for k in keywords] return self.find_matches(word_before_cursor, keywords, mode='strict', meta='keyword') def get_path_matches(self, _, word_before_cursor): completer = PathCompleter(expanduser=True) document = Document(text=word_before_cursor, cursor_position=len(word_before_cursor)) for c in completer.get_completions(document, None): yield Match(completion=c, priority=(0,)) def get_special_matches(self, _, word_before_cursor): if not self.pgspecial: return [] commands = self.pgspecial.commands cmds = commands.keys() cmds = [Candidate(cmd, 0, commands[cmd].description) for cmd in cmds] return self.find_matches(word_before_cursor, cmds, mode='strict') def get_datatype_matches(self, suggestion, word_before_cursor): # suggest custom datatypes types = self.populate_schema_objects(suggestion.schema, 'datatypes') types = [self._make_cand(t, False, suggestion) for t in types] matches = self.find_matches(word_before_cursor, types, meta='datatype') if not suggestion.schema: # Also suggest hardcoded types matches.extend(self.find_matches(word_before_cursor, self.datatypes, mode='strict', meta='datatype')) return matches def get_namedquery_matches(self, _, word_before_cursor): return self.find_matches( word_before_cursor, NamedQueries.instance.list(), meta='named query') suggestion_matchers = { FromClauseItem: get_from_clause_item_matches, JoinCondition: get_join_condition_matches, Join: get_join_matches, Column: get_column_matches, Function: get_function_matches, Schema: get_schema_matches, Table: get_table_matches, View: get_view_matches, Alias: get_alias_matches, Database: get_database_matches, Keyword: get_keyword_matches, Special: get_special_matches, Datatype: get_datatype_matches, NamedQuery: get_namedquery_matches, Path: get_path_matches, } def populate_scoped_cols(self, scoped_tbls, local_tbls=()): """ Find all columns in a set of scoped_tables :param scoped_tbls: list of TableReference namedtuples :param local_tbls: tuple(TableMetadata) :return: {TableReference: list of ColumnMetadata} """ ctes = dict((normalize_ref(t.name), t.columns) for t in local_tbls) columns = OrderedDict() meta = self.dbmetadata def addcols(schema, rel, alias, reltype, cols): tbl = TableReference(schema, rel, alias, reltype == 'functions') if tbl not in columns: columns[tbl] = [] columns[tbl].extend(cols) for tbl in scoped_tbls: # Local tables should shadow database tables if tbl.schema is None and normalize_ref(tbl.name) in ctes: cols = ctes[normalize_ref(tbl.name)] # addcols expects (schema, rel, alias, reltype, cols) addcols(None, tbl.name, tbl.alias, 'CTE', cols) continue schemas = [tbl.schema] if tbl.schema else self.search_path for schema in schemas: relname = self.escape_name(tbl.name) schema = self.escape_name(schema) if tbl.is_function: # Return column names from a set-returning function # Get an array of FunctionMetadata objects functions = meta['functions'].get(schema, {}).get(relname) for func in (functions or []): # func is a FunctionMetadata object cols = func.fields() addcols(schema, relname, tbl.alias, 'functions', cols) else: for reltype in ('tables', 'views'): cols = meta[reltype].get(schema, {}).get(relname) if cols: cols = cols.values() addcols(schema, relname, tbl.alias, reltype, cols) break return
columns def _get_schemas(self, obj_typ, schema): """ Returns a list of schemas from which to suggest objects schema is the schema qualification input by the user (if any) """ metadata = self.dbmetadata[obj_typ] if schema: schema = self.escape_name(schema) return [schema] if schema in metadata else [] return self.search_path if self.search_path_filter else metadata.keys() def _maybe_schema(self, schema, parent): return None if parent or schema in self.search_path else schema def populate_schema_objects(self, schema, obj_type): """Returns a list of SchemaObjects representing tables, views, funcs schema is the schema qualification input by the user (if any) """ return [ SchemaObject( name=obj, schema=(self._maybe_schema(schema=sch, parent=schema)), function=(obj_type == 'functions') ) for sch in self._get_schemas(obj_type, schema) for obj in self.dbmetadata[obj_type][sch].keys() ] def populate_functions(self, schema, filter_func): """Returns a list of function names filter_func is a function that accepts a FunctionMetadata namedtuple and returns a boolean indicating whether that function should be kept or discarded """ # Because of multiple dispatch, we can have multiple functions # with the same name, which is why `for meta in metas` is necessary # in the comprehensions below return [ SchemaObject( name=func, schema=(self._maybe_schema(schema=sch, parent=schema)), function=True ) for sch in self._get_schemas('functions', schema) for (func, metas) in self.dbmetadata['functions'][sch].items() for meta in metas if filter_func(meta) ] pgcli-1.6.0/pgcli/pgexecute.py0000644000076500000240000006165413112353104016507 0ustar irinastaff00000000000000import traceback import logging import psycopg2 import psycopg2.extras import psycopg2.errorcodes import psycopg2.extensions as ext import sqlparse import pgspecial as special import select from psycopg2.extensions import POLL_OK, POLL_READ, POLL_WRITE from .packages.parseutils.meta import FunctionMetadata, ForeignKey from .encodingutils import unicode2utf8, PY2, utf8tounicode _logger = logging.getLogger(__name__) # Cast all database input to unicode automatically. # See http://initd.org/psycopg/docs/usage.html#unicode-handling for more info. ext.register_type(ext.UNICODE) ext.register_type(ext.UNICODEARRAY) ext.register_type(ext.new_type((705,), "UNKNOWN", ext.UNICODE)) # See https://github.com/dbcli/pgcli/issues/426 for more details. # This registers a unicode type caster for datatype 'RECORD'. ext.register_type(ext.new_type((2249,), "RECORD", ext.UNICODE)) # Cast bytea fields to text. By default, this will render as hex strings with # Postgres 9+ and as escaped binary in earlier versions. ext.register_type(ext.new_type((17,), 'BYTEA_TEXT', psycopg2.STRING)) # TODO: Get default timeout from pgclirc? 
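# The timeout below is in seconds. Keeping it short matters: _wait_select
# re-enters select() at most one second later, which helps a Ctrl+C
# (KeyboardInterrupt) get noticed promptly on platforms where signals do
# not reliably interrupt a blocking select() call.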
_WAIT_SELECT_TIMEOUT = 1 def _wait_select(conn): """ copy-pasted from psycopg2.extras.wait_select the default implementation doesn't define a timeout in the select calls """ while 1: try: state = conn.poll() if state == POLL_OK: break elif state == POLL_READ: select.select([conn.fileno()], [], [], _WAIT_SELECT_TIMEOUT) elif state == POLL_WRITE: select.select([], [conn.fileno()], [], _WAIT_SELECT_TIMEOUT) else: raise conn.OperationalError("bad state from poll: %s" % state) except KeyboardInterrupt: conn.cancel() # the loop will be broken by a server error continue # When running a query, make pressing CTRL+C raise a KeyboardInterrupt # See http://initd.org/psycopg/articles/2014/07/20/cancelling-postgresql-statements-python/ # See also https://github.com/psycopg/psycopg2/issues/468 ext.set_wait_callback(_wait_select) def register_date_typecasters(connection): """ Casts date and timestamp values to string, resolves issues with out of range dates (e.g. BC) which psycopg2 can't handle """ def cast_date(value, cursor): return value cursor = connection.cursor() cursor.execute('SELECT NULL::date') date_oid = cursor.description[0][1] cursor.execute('SELECT NULL::timestamp') timestamp_oid = cursor.description[0][1] cursor.execute('SELECT NULL::timestamp with time zone') timestamptz_oid = cursor.description[0][1] oids = (date_oid, timestamp_oid, timestamptz_oid) new_type = psycopg2.extensions.new_type(oids, 'DATE', cast_date) psycopg2.extensions.register_type(new_type) def register_json_typecasters(conn, loads_fn): """Set the function for converting JSON data for a connection. Use the supplied function to decode JSON data returned from the database via the given connection. The function should accept a single argument of the data as a string encoded in the database's character encoding. psycopg2's default handler for JSON data is json.loads. http://initd.org/psycopg/docs/extras.html#json-adaptation This function attempts to register the typecaster for both JSON and JSONB types. Returns a set that is a subset of {'json', 'jsonb'} indicating which types (if any) were successfully registered. """ available = set() for name in ['json', 'jsonb']: try: psycopg2.extras.register_json(conn, loads=loads_fn, name=name) available.add(name) except psycopg2.ProgrammingError: pass return available def register_hstore_typecaster(conn): """ Instead of using register_hstore() which converts hstore into a python dict, we query the 'oid' of hstore which will be different for each database and register a type caster that converts it to unicode. http://initd.org/psycopg/docs/extras.html#psycopg2.extras.register_hstore """ with conn.cursor() as cur: try: cur.execute("SELECT 'hstore'::regtype::oid") oid = cur.fetchone()[0] ext.register_type(ext.new_type((oid,), "HSTORE", ext.UNICODE)) except Exception: pass class PGExecute(object): # The boolean argument to the current_schemas function indicates whether # implicit schemas, e.g. 
pg_catalog search_path_query = ''' SELECT * FROM unnest(current_schemas(true))''' schemata_query = ''' SELECT nspname FROM pg_catalog.pg_namespace ORDER BY 1 ''' tables_query = ''' SELECT n.nspname schema_name, c.relname table_name FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE c.relkind = ANY(%s) ORDER BY 1,2;''' databases_query = ''' SELECT d.datname FROM pg_catalog.pg_database d ORDER BY 1''' def __init__(self, database, user, password, host, port, dsn, **kwargs): self.dbname = database self.user = user self.password = password self.host = host self.port = port self.dsn = dsn self.extra_args = {k: unicode2utf8(v) for k, v in kwargs.items()} self.connect() def connect(self, database=None, user=None, password=None, host=None, port=None, dsn=None, **kwargs): db = (database or self.dbname) user = (user or self.user) password = (password or self.password) host = (host or self.host) port = (port or self.port) dsn = (dsn or self.dsn) kwargs = (kwargs or self.extra_args) pid = -1 if dsn: if password: dsn = "{0} password={1}".format(dsn, password) conn = psycopg2.connect(dsn=unicode2utf8(dsn)) cursor = conn.cursor() else: conn = psycopg2.connect( database=unicode2utf8(db), user=unicode2utf8(user), password=unicode2utf8(password), host=unicode2utf8(host), port=unicode2utf8(port), **kwargs) cursor = conn.cursor() conn.set_client_encoding('utf8') if hasattr(self, 'conn'): self.conn.close() self.conn = conn self.conn.autocommit = True if dsn: # When we connect using a DSN, we don't really know what db, # user, etc. we connected to. Let's read it. # Note: moved this after setting autocommit because of #664. db, user, host, port = self._select_one( cursor, 'select current_database(), current_user, inet_server_addr(), inet_server_port()') self.dbname = db self.user = user self.password = password self.host = host self.port = port cursor.execute("SHOW ALL") db_parameters = dict(name_val_desc[:2] for name_val_desc in cursor.fetchall()) pid = self._select_one(cursor, 'select pg_backend_pid()')[0] self.pid = pid self.superuser = db_parameters.get('is_superuser') == '1' register_date_typecasters(conn) register_json_typecasters(self.conn, self._json_typecaster) register_hstore_typecaster(self.conn) def _select_one(self, cur, sql): """ Helper method to run a select and retrieve a single field value :param cur: cursor :param sql: string :return: string """ cur.execute(sql) return cur.fetchone() def _json_typecaster(self, json_data): """Interpret incoming JSON data as a string. The raw data is decoded using the connection's encoding, which defaults to the database's encoding. See http://initd.org/psycopg/docs/connection.html#connection.encoding """ if PY2: return json_data.decode(self.conn.encoding) else: return json_data def failed_transaction(self): status = self.conn.get_transaction_status() return status == ext.TRANSACTION_STATUS_INERROR def valid_transaction(self): status = self.conn.get_transaction_status() return (status == ext.TRANSACTION_STATUS_ACTIVE or status == ext.TRANSACTION_STATUS_INTRANS) def run(self, statement, pgspecial=None, exception_formatter=None, on_error_resume=False): """Execute the sql in the database and return the results. :param statement: A string containing one or more sql statements :param pgspecial: PGSpecial object :param exception_formatter: A callable that accepts an Exception and returns a formatted (title, rows, headers, status) tuple that can act as a query result. 
If an exception_formatter is not supplied, psycopg2 exceptions are always raised. :param on_error_resume: Bool. If true, queries following an exception (assuming exception_formatter has been supplied) continue to execute. :return: Generator yielding tuples containing (title, rows, headers, status, query, success) """ # Remove spaces and EOL statement = statement.strip() if not statement: # Empty string yield (None, None, None, None, statement, False) # Split the sql into separate queries and run each one. for sql in sqlparse.split(statement): # Remove spaces, eol and semi-colons. sql = sql.rstrip(';') try: if pgspecial: # First try to run each query as special _logger.debug('Trying a pgspecial command. sql: %r', sql) cur = self.conn.cursor() try: for result in pgspecial.execute(cur, sql): # e.g. execute_from_file already appends these if len(result) < 6: yield result + (sql, True) else: yield result continue except special.CommandNotFound: pass # Not a special command, so execute as normal sql yield self.execute_normal_sql(sql) + (sql, True) except psycopg2.DatabaseError as e: _logger.error("sql: %r, error: %r", sql, e) _logger.error("traceback: %r", traceback.format_exc()) if (self._must_raise(e) or not exception_formatter): raise yield None, None, None, exception_formatter(e), sql, False if not on_error_resume: break def _must_raise(self, e): """Return true if e is an error that should not be caught in ``run``. ``OperationalError``s are raised for errors that are not under the control of the programmer. Usually that means unexpected disconnects, which we shouldn't catch; we handle uncaught errors by prompting the user to reconnect. We *do* want to catch OperationalErrors caused by a lock being unavailable, as reconnecting won't solve that problem. :param e: DatabaseError. An exception raised while executing a query. :return: Bool. True if ``run`` must raise this exception. """ return (isinstance(e, psycopg2.OperationalError) and psycopg2.errorcodes.lookup(e.pgcode) != 'LOCK_NOT_AVAILABLE') def execute_normal_sql(self, split_sql): """Returns tuple (title, rows, headers, status)""" _logger.debug('Regular sql statement. sql: %r', split_sql) cur = self.conn.cursor() cur.execute(split_sql) # conn.notices persist between queries; we use pop to clear out the list title = '' while len(self.conn.notices) > 0: title = utf8tounicode(self.conn.notices.pop()) + title # cur.description will be None for operations that do not return # rows. if cur.description: headers = [x[0] for x in cur.description] return title, cur, headers, cur.statusmessage else: _logger.debug('No rows in result.') return title, None, None, cur.statusmessage def search_path(self): """Returns the current search path as a list of schema names""" try: with self.conn.cursor() as cur: _logger.debug('Search path query. sql: %r', self.search_path_query) cur.execute(self.search_path_query) return [x[0] for x in cur.fetchall()] except psycopg2.ProgrammingError: fallback = 'SELECT * FROM current_schemas(true)' with self.conn.cursor() as cur: _logger.debug('Search path query. sql: %r', fallback) cur.execute(fallback) return cur.fetchone()[0] def schemata(self): """Returns a list of schema names in the database""" with self.conn.cursor() as cur: _logger.debug('Schemata Query.
sql: %r', self.schemata_query) cur.execute(self.schemata_query) return [x[0] for x in cur.fetchall()] def _relations(self, kinds=('r', 'v', 'm')): """Get table or view name metadata :param kinds: list of postgres relkind filters: 'r' - table 'v' - view 'm' - materialized view :return: (schema_name, rel_name) tuples """ with self.conn.cursor() as cur: sql = cur.mogrify(self.tables_query, [kinds]) _logger.debug('Tables Query. sql: %r', sql) cur.execute(sql) for row in cur: yield row def tables(self): """Yields (schema_name, table_name) tuples""" for row in self._relations(kinds=['r']): yield row def views(self): """Yields (schema_name, view_name) tuples. Includes both views and and materialized views """ for row in self._relations(kinds=['v', 'm']): yield row def _columns(self, kinds=('r', 'v', 'm')): """Get column metadata for tables and views :param kinds: kinds: list of postgres relkind filters: 'r' - table 'v' - view 'm' - materialized view :return: list of (schema_name, relation_name, column_name, column_type) tuples """ if self.conn.server_version >= 80400: columns_query = ''' SELECT nsp.nspname schema_name, cls.relname table_name, att.attname column_name, att.atttypid::regtype::text type_name FROM pg_catalog.pg_attribute att INNER JOIN pg_catalog.pg_class cls ON att.attrelid = cls.oid INNER JOIN pg_catalog.pg_namespace nsp ON cls.relnamespace = nsp.oid WHERE cls.relkind = ANY(%s) AND NOT att.attisdropped AND att.attnum > 0 ORDER BY 1, 2, att.attnum''' else: columns_query = ''' SELECT nsp.nspname schema_name, cls.relname table_name, att.attname column_name, typ.typname type_name FROM pg_catalog.pg_attribute att INNER JOIN pg_catalog.pg_class cls ON att.attrelid = cls.oid INNER JOIN pg_catalog.pg_namespace nsp ON cls.relnamespace = nsp.oid INNER JOIN pg_catalog.pg_type typ ON typ.oid = att.atttypid WHERE cls.relkind = ANY(%s) AND NOT att.attisdropped AND att.attnum > 0 ORDER BY 1, 2, att.attnum''' with self.conn.cursor() as cur: sql = cur.mogrify(columns_query, [kinds]) _logger.debug('Columns Query. sql: %r', sql) cur.execute(sql) for row in cur: yield row def table_columns(self): for row in self._columns(kinds=['r']): yield row def view_columns(self): for row in self._columns(kinds=['v', 'm']): yield row def databases(self): with self.conn.cursor() as cur: _logger.debug('Databases Query. sql: %r', self.databases_query) cur.execute(self.databases_query) return [x[0] for x in cur.fetchall()] def foreignkeys(self): """Yields ForeignKey named tuples""" if self.conn.server_version < 90000: return with self.conn.cursor() as cur: query = ''' SELECT s_p.nspname AS parentschema, t_p.relname AS parenttable, unnest(( select array_agg(attname ORDER BY i) from (select unnest(confkey) as attnum, generate_subscripts(confkey, 1) as i) x JOIN pg_catalog.pg_attribute c USING(attnum) WHERE c.attrelid = fk.confrelid )) AS parentcolumn, s_c.nspname AS childschema, t_c.relname AS childtable, unnest(( select array_agg(attname ORDER BY i) from (select unnest(conkey) as attnum, generate_subscripts(conkey, 1) as i) x JOIN pg_catalog.pg_attribute c USING(attnum) WHERE c.attrelid = fk.conrelid )) AS childcolumn FROM pg_catalog.pg_constraint fk JOIN pg_catalog.pg_class t_p ON t_p.oid = fk.confrelid JOIN pg_catalog.pg_namespace s_p ON s_p.oid = t_p.relnamespace JOIN pg_catalog.pg_class t_c ON t_c.oid = fk.conrelid JOIN pg_catalog.pg_namespace s_c ON s_c.oid = t_c.relnamespace WHERE fk.contype = 'f'; ''' _logger.debug('Functions Query. 
sql: %r', query) cur.execute(query) for row in cur: yield ForeignKey(*row) def functions(self): """Yields FunctionMetadata named tuples""" if self.conn.server_version > 90000: query = ''' SELECT n.nspname schema_name, p.proname func_name, p.proargnames, COALESCE(proallargtypes::regtype[], proargtypes::regtype[])::text[], p.proargmodes, prorettype::regtype::text return_type, p.proisagg is_aggregate, p.proiswindow is_window, p.proretset is_set_returning FROM pg_catalog.pg_proc p INNER JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace WHERE p.prorettype::regtype != 'trigger'::regtype ORDER BY 1, 2 ''' elif self.conn.server_version >= 80400: query = ''' SELECT n.nspname schema_name, p.proname func_name, p.proargnames, COALESCE(proallargtypes::regtype[], proargtypes::regtype[])::text[], p.proargmodes, prorettype::regtype::text, p.proisagg is_aggregate, false is_window, p.proretset is_set_returning FROM pg_catalog.pg_proc p INNER JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace WHERE p.prorettype::regtype != 'trigger'::regtype ORDER BY 1, 2 ''' else: query = ''' SELECT n.nspname schema_name, p.proname func_name, p.proargnames, NULL arg_types, NULL arg_modes, '' ret_type, p.proisagg is_aggregate, false is_window, p.proretset is_set_returning FROM pg_catalog.pg_proc p INNER JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace WHERE p.prorettype::regtype != 'trigger'::regtype ORDER BY 1, 2 ''' with self.conn.cursor() as cur: _logger.debug('Functions Query. sql: %r', query) cur.execute(query) for row in cur: yield FunctionMetadata(*row) def datatypes(self): """Yields tuples of (schema_name, type_name)""" with self.conn.cursor() as cur: if self.conn.server_version > 90000: query = ''' SELECT n.nspname schema_name, t.typname type_name FROM pg_catalog.pg_type t INNER JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace WHERE ( t.typrelid = 0 -- non-composite types OR ( -- composite type, but not a table SELECT c.relkind = 'c' FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid ) ) AND NOT EXISTS( -- ignore array types SELECT 1 FROM pg_catalog.pg_type el WHERE el.oid = t.typelem AND el.typarray = t.oid ) AND n.nspname <> 'pg_catalog' AND n.nspname <> 'information_schema' ORDER BY 1, 2; ''' else: query = ''' SELECT n.nspname schema_name, pg_catalog.format_type(t.oid, NULL) type_name FROM pg_catalog.pg_type t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace WHERE (t.typrelid = 0 OR (SELECT c.relkind = 'c' FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid)) AND t.typname !~ '^_' AND n.nspname <> 'pg_catalog' AND n.nspname <> 'information_schema' AND pg_catalog.pg_type_is_visible(t.oid) ORDER BY 1, 2; ''' _logger.debug('Datatypes Query. 
sql: %r', query) cur.execute(query) for row in cur: yield row def casing(self): """Yields the most common casing for names used in db functions""" with self.conn.cursor() as cur: query = ''' WITH Words AS ( SELECT regexp_split_to_table(prosrc, '\W+') AS Word, COUNT(1) FROM pg_catalog.pg_proc P JOIN pg_catalog.pg_namespace N ON N.oid = P.pronamespace JOIN pg_catalog.pg_language L ON L.oid = P.prolang WHERE L.lanname IN ('sql', 'plpgsql') AND N.nspname NOT IN ('pg_catalog', 'information_schema') GROUP BY Word ), OrderWords AS ( SELECT Word, ROW_NUMBER() OVER(PARTITION BY LOWER(Word) ORDER BY Count DESC) FROM Words WHERE Word ~* '.*[a-z].*' ), Names AS ( --Column names SELECT attname AS Name FROM pg_catalog.pg_attribute UNION -- Table/view names SELECT relname FROM pg_catalog.pg_class UNION -- Function names SELECT proname FROM pg_catalog.pg_proc UNION -- Type names SELECT typname FROM pg_catalog.pg_type UNION -- Schema names SELECT nspname FROM pg_catalog.pg_namespace ) SELECT Word FROM OrderWords WHERE LOWER(Word) IN (SELECT Name FROM Names) AND Row_Number = 1; ''' _logger.debug('Casing Query. sql: %r', query) cur.execute(query) for row in cur: yield row[0] pgcli-1.6.0/pgcli/pgstyle.py0000644000076500000240000000112112714463343016202 0ustar irinastaff00000000000000from pygments.token import string_to_tokentype from pygments.util import ClassNotFound from prompt_toolkit.styles import PygmentsStyle import pygments.styles def style_factory(name, cli_style): try: style = pygments.styles.get_style_by_name(name) except ClassNotFound: style = pygments.styles.get_style_by_name('native') custom_styles = dict([(string_to_tokentype(x), y) for x, y in cli_style.items()]) return PygmentsStyle.from_defaults(style_dict=custom_styles, pygments_style_cls=style) pgcli-1.6.0/pgcli/pgtoolbar.py0000644000076500000240000000371513026402102016476 0ustar irinastaff00000000000000from pygments.token import Token from prompt_toolkit.enums import DEFAULT_BUFFER from prompt_toolkit.key_binding.vi_state import InputMode def _get_vi_mode(cli): return { InputMode.INSERT: 'I', InputMode.NAVIGATION: 'N', InputMode.REPLACE: 'R', InputMode.INSERT_MULTIPLE: 'M', }[cli.vi_state.input_mode] def create_toolbar_tokens_func(get_vi_mode_enabled, get_is_refreshing, failed_transaction, valid_transaction): """ Return a function that generates the toolbar tokens. 
""" assert callable(get_vi_mode_enabled) token = Token.Toolbar def get_toolbar_tokens(cli): result = [] result.append((token, ' ')) if cli.buffers[DEFAULT_BUFFER].completer.smart_completion: result.append((token.On, '[F2] Smart Completion: ON ')) else: result.append((token.Off, '[F2] Smart Completion: OFF ')) if cli.buffers[DEFAULT_BUFFER].always_multiline: result.append((token.On, '[F3] Multiline: ON ')) else: result.append((token.Off, '[F3] Multiline: OFF ')) if cli.buffers[DEFAULT_BUFFER].always_multiline: if cli.buffers[DEFAULT_BUFFER].multiline_mode == 'safe': result.append((token,' ([Esc] [Enter] to execute]) ')) else: result.append((token,' (Semi-colon [;] will end the line) ')) if get_vi_mode_enabled(): result.append((token.On, '[F4] Vi-mode (' + _get_vi_mode(cli) + ')')) else: result.append((token.On, '[F4] Emacs-mode')) if failed_transaction(): result.append((token.Transaction.Failed, ' Failed transaction')) if valid_transaction(): result.append((token.Transaction.Valid, ' Transaction')) if get_is_refreshing(): result.append((token, ' Refreshing completions...')) return result return get_toolbar_tokens pgcli-1.6.0/pgcli.egg-info/0000755000076500000240000000000013112353401015622 5ustar irinastaff00000000000000pgcli-1.6.0/pgcli.egg-info/dependency_links.txt0000644000076500000240000000000113112353401021670 0ustar irinastaff00000000000000 pgcli-1.6.0/pgcli.egg-info/entry_points.txt0000644000076500000240000000007413112353401021121 0ustar irinastaff00000000000000 [console_scripts] pgcli=pgcli.main:cli pgcli-1.6.0/pgcli.egg-info/pbr.json0000644000076500000240000000005712643600227017313 0ustar irinastaff00000000000000{"is_release": false, "git_version": "18d19fe"}pgcli-1.6.0/pgcli.egg-info/PKG-INFO0000644000076500000240000002222413112353401016721 0ustar irinastaff00000000000000Metadata-Version: 1.1 Name: pgcli Version: 1.6.0 Summary: CLI for Postgres Database. With auto-completion and syntax highlighting. Home-page: http://pgcli.com Author: Pgcli Core Team Author-email: pgcli-dev@googlegroups.com License: LICENSE.txt Description: A REPL for Postgres ------------------- |Build Status| |PyPI| |Gitter| This is a postgres client that does auto-completion and syntax highlighting. Home Page: http://pgcli.com MySQL Equivalent: http://mycli.net .. image:: screenshots/pgcli.gif .. image:: screenshots/image01.png Quick Start ----------- If you already know how to install python packages, then you can simply do: :: $ pip install -U pgcli or $ brew install pgcli # Only on OS X If you don't know how to install python packages, please check the `detailed instructions`__. __ https://github.com/dbcli/pgcli#detailed-installation-instructions Usage ----- :: $ pgcli [database_name] or $ pgcli postgresql://[user[:password]@][netloc][:port][/dbname] Examples: :: $ pgcli local_database $ pgcli postgres://amjith:pa$$w0rd@example.com:5432/app_db Features -------- The `pgcli` is written using prompt_toolkit_. * Auto-completes as you type for SQL keywords as well as tables and columns in the database. * Syntax highlighting using Pygments. * Smart-completion (enabled by default) will suggest context-sensitive completion. - ``SELECT * FROM `` will only show table names. - ``SELECT * FROM users WHERE `` will only show column names. * Primitive support for ``psql`` back-slash commands. * Pretty prints tabular data. Note: `pgcli` uses [tabulate](https://github.com/dbcli/pgcli/blob/master/pgcli/packages/tabulate.py) package to pretty-print tables. 
pgcli-1.6.0/pgcli.egg-info/

pgcli-1.6.0/pgcli.egg-info/dependency_links.txt

pgcli-1.6.0/pgcli.egg-info/entry_points.txt

[console_scripts]
pgcli=pgcli.main:cli

pgcli-1.6.0/pgcli.egg-info/pbr.json

{"is_release": false, "git_version": "18d19fe"}
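The entry point declared in entry_points.txt above is what turns ``pgcli`` into
a console command: at install time setuptools generates a wrapper script that
is essentially equivalent to this minimal sketch::

    import sys
    from pkg_resources import load_entry_point

    if __name__ == '__main__':
        sys.exit(load_entry_point('pgcli', 'console_scripts', 'pgcli')())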
pgcli-1.6.0/pgcli.egg-info/PKG-INFO

Metadata-Version: 1.1
Name: pgcli
Version: 1.6.0
Summary: CLI for Postgres Database. With auto-completion and syntax highlighting.
Home-page: http://pgcli.com
Author: Pgcli Core Team
Author-email: pgcli-dev@googlegroups.com
License: LICENSE.txt
Description: A REPL for Postgres
        -------------------

        |Build Status| |PyPI| |Gitter|

        This is a postgres client that does auto-completion and syntax highlighting.

        Home Page: http://pgcli.com
        MySQL Equivalent: http://mycli.net

        .. image:: screenshots/pgcli.gif
        .. image:: screenshots/image01.png

        Quick Start
        -----------

        If you already know how to install python packages, then you can simply do::

            $ pip install -U pgcli

            or

            $ brew install pgcli  # Only on OS X

        If you don't know how to install python packages, please check the `detailed instructions`__.

        __ https://github.com/dbcli/pgcli#detailed-installation-instructions

        Usage
        -----

        ::

            $ pgcli [database_name]

            or

            $ pgcli postgresql://[user[:password]@][netloc][:port][/dbname]

        Examples::

            $ pgcli local_database

            $ pgcli postgres://amjith:pa$$w0rd@example.com:5432/app_db

        Features
        --------

        ``pgcli`` is written using prompt_toolkit_.

        * Auto-completes as you type for SQL keywords as well as tables and columns in the database.
        * Syntax highlighting using Pygments.
        * Smart-completion (enabled by default) will suggest context-sensitive completion.

            - ``SELECT * FROM <tab>`` will only show table names.
            - ``SELECT * FROM users WHERE <tab>`` will only show column names.

        * Primitive support for ``psql`` back-slash commands.
        * Pretty prints tabular data.

        Note: ``pgcli`` uses the `tabulate <https://github.com/dbcli/pgcli/blob/master/pgcli/packages/tabulate.py>`_ package to pretty-print tables. This library does smart formatting of numbers, which can sometimes lead to unexpected output. See `this issue <https://github.com/dbcli/pgcli/issues/617>`_ for more details.

        .. _prompt_toolkit: https://github.com/jonathanslenders/python-prompt-toolkit

        Config
        ------

        A config file is automatically created at ``~/.config/pgcli/config`` at first launch. See the file itself for a description of all available options.

        Contributions:
        --------------

        If you're interested in contributing to this project, first of all I would like to extend my heartfelt gratitude. I've written a small doc to describe how to get this running in a development setup: https://github.com/dbcli/pgcli/blob/master/DEVELOP.rst

        Please feel free to reach out to me if you need help. My email: amjith.r@gmail.com, Twitter: `@amjithr <http://twitter.com/amjithr>`_

        Detailed Installation Instructions:
        -----------------------------------

        OS X:
        =====

        The easiest way to install pgcli is using brew. Please be aware that this will install postgres via brew if it wasn't already installed via brew::

            $ brew install pgcli

        Done!

        If you have postgres installed via a different means (such as PostgresApp), you can ``brew install --build-from-source pgcli``, which will skip installing postgres via brew if postgres is available in the path.

        Alternatively, you can install ``pgcli`` as a python package using a package manager called ``pip``. You will need postgres installed on your system for this to work. In-depth getting started guide for ``pip``: https://pip.pypa.io/en/latest/installing.html.

        ::

            $ which pip

        If it is installed then you can do::

            $ pip install pgcli

        If that fails due to permission issues, you might need to run the command with sudo permissions::

            $ sudo pip install pgcli

        If pip is not installed, check if easy_install is available on the system::

            $ which easy_install

            $ sudo easy_install pgcli

        Linux:
        ======

        In-depth getting started guide for ``pip``: https://pip.pypa.io/en/latest/installing.html.

        Check if pip is already available in your system::

            $ which pip

        If it doesn't exist, use your Linux package manager to install ``pip``. This might look something like::

            $ sudo apt-get install python-pip  # Debian, Ubuntu, Mint etc

            or

            $ sudo yum install python-pip  # RHEL, CentOS, Fedora etc

        ``pgcli`` requires the python-dev, libpq-dev and libevent-dev packages. You can install these via your operating system package manager::

            $ sudo apt-get install python-dev libpq-dev libevent-dev

            or

            $ sudo yum install python-devel postgresql-devel

        Then you can install pgcli::

            $ sudo pip install pgcli

        Docker
        ======

        Pgcli can be run from within Docker. This can be useful to try pgcli without installing it, or any dependencies, system-wide.

        To build the image::

            $ docker build -t pgcli .

        To create a container from the image::

            $ docker run --rm -ti pgcli pgcli

        To access postgresql databases listening on localhost, make sure to run docker in "host net mode". E.g. to access a database called "foo" on the postgresql server running on localhost:5432 (the standard port)::

            $ docker run --rm -ti --net host pgcli pgcli -h localhost foo

        To connect to a locally running instance over a unix socket, bind the socket to the docker container::

            $ docker run --rm -ti -v /var/run/postgres:/var/run/postgres pgcli pgcli foo

        Thanks:
        -------

        A special thanks to `Jonathan Slenders <https://twitter.com/jonathan_s>`_ for creating `Python Prompt Toolkit <http://github.com/jonathanslenders/python-prompt-toolkit>`_, which is quite literally the backbone library that made this app possible. Jonathan has also provided valuable feedback and support during the development of this app.

        This app includes the awesome `tabulate <https://pypi.python.org/pypi/tabulate>`_ library for pretty printing the output of tables. The reason for vendoring this library rather than listing it as a dependency in setup.py is that I had to make a change to the table format, which has been merged back into the original repo but not yet released on PyPI.

        `Click <http://click.pocoo.org/>`_ is used for command line option parsing and printing error messages.

        Thanks to `psycopg <http://initd.org/psycopg/>`_ for providing a rock-solid interface to the Postgres database.

        Thanks to all the beta testers and contributors for your time and patience. :)

        .. |Build Status| image:: https://api.travis-ci.org/dbcli/pgcli.svg?branch=master
           :target: https://travis-ci.org/dbcli/pgcli

        .. |PyPI| image:: https://img.shields.io/pypi/v/pgcli.svg
           :target: https://pypi.python.org/pypi/pgcli/
           :alt: Latest Version

        .. |Gitter| image:: https://badges.gitter.im/Join%20Chat.svg
           :target: https://gitter.im/dbcli/pgcli?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
           :alt: Gitter Chat

Platform: UNKNOWN
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: Unix
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: SQL
Classifier: Topic :: Database
Classifier: Topic :: Database :: Front-Ends
Classifier: Topic :: Software Development
Classifier: Topic :: Software Development :: Libraries :: Python Modules

pgcli-1.6.0/pgcli.egg-info/requires.txt

pgspecial>=1.8.0
click >= 4.1
Pygments >= 2.0
prompt_toolkit>=1.0.10,<1.1.0
psycopg2 >= 2.5.4
sqlparse >=0.2.2,<0.3.0
configobj >= 5.0.6
humanize >= 0.5.1
wcwidth >= 0.1.6
setproctitle >= 1.1.9

pgcli-1.6.0/pgcli.egg-info/SOURCES.txt

README.rst
setup.py
pgcli/__init__.py
pgcli/completion_refresher.py
pgcli/config.py
pgcli/encodingutils.py
pgcli/filters.py
pgcli/key_bindings.py
pgcli/magic.py
pgcli/main.py
pgcli/pgbuffer.py
pgcli/pgclirc
pgcli/pgcompleter.py
pgcli/pgexecute.py
pgcli/pgstyle.py
pgcli/pgtoolbar.py
pgcli.egg-info/PKG-INFO
pgcli.egg-info/SOURCES.txt
pgcli.egg-info/dependency_links.txt
pgcli.egg-info/entry_points.txt
pgcli.egg-info/pbr.json
pgcli.egg-info/requires.txt
pgcli.egg-info/top_level.txt
pgcli/packages/__init__.py
pgcli/packages/expanded.py
pgcli/packages/ordereddict.py
pgcli/packages/prioritization.py
pgcli/packages/sqlcompletion.py
pgcli/packages/tabulate.py
pgcli/packages/parseutils/__init__.py
pgcli/packages/parseutils/ctes.py
pgcli/packages/parseutils/meta.py
pgcli/packages/parseutils/tables.py
pgcli/packages/parseutils/utils.py
pgcli/packages/pgliterals/__init__.py
pgcli/packages/pgliterals/main.py
pgcli/packages/pgliterals/pgliterals.json

pgcli-1.6.0/pgcli.egg-info/top_level.txt

pgcli
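The pins in requires.txt above are generated from ``install_requires`` in
setup.py. As an aside, a runtime check that an installed environment satisfies
one of these pins could look like this (illustrative only)::

    import pkg_resources

    # Raises VersionConflict or DistributionNotFound when the pin is broken.
    pkg_resources.require('prompt_toolkit>=1.0.10,<1.1.0')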
pgcli-1.6.0/README.rst

A REPL for Postgres
-------------------

|Build Status| |PyPI| |Gitter|

This is a postgres client that does auto-completion and syntax highlighting.

Home Page: http://pgcli.com
MySQL Equivalent: http://mycli.net

.. image:: screenshots/pgcli.gif
.. image:: screenshots/image01.png

Quick Start
-----------

If you already know how to install python packages, then you can simply do::

    $ pip install -U pgcli

    or

    $ brew install pgcli  # Only on OS X

If you don't know how to install python packages, please check the `detailed instructions`__.

__ https://github.com/dbcli/pgcli#detailed-installation-instructions

Usage
-----

::

    $ pgcli [database_name]

    or

    $ pgcli postgresql://[user[:password]@][netloc][:port][/dbname]

Examples::

    $ pgcli local_database

    $ pgcli postgres://amjith:pa$$w0rd@example.com:5432/app_db

Features
--------

``pgcli`` is written using prompt_toolkit_.

* Auto-completes as you type for SQL keywords as well as tables and columns in the database.
* Syntax highlighting using Pygments.
* Smart-completion (enabled by default) will suggest context-sensitive completion.

    - ``SELECT * FROM <tab>`` will only show table names.
    - ``SELECT * FROM users WHERE <tab>`` will only show column names.

* Primitive support for ``psql`` back-slash commands.
* Pretty prints tabular data.

Note: ``pgcli`` uses the `tabulate <https://github.com/dbcli/pgcli/blob/master/pgcli/packages/tabulate.py>`_ package to pretty-print tables. This library does smart formatting of numbers, which can sometimes lead to unexpected output. See `this issue <https://github.com/dbcli/pgcli/issues/617>`_ for more details.

.. _prompt_toolkit: https://github.com/jonathanslenders/python-prompt-toolkit
Config
------

A config file is automatically created at ``~/.config/pgcli/config`` at first launch. See the file itself for a description of all available options.

Contributions:
--------------

If you're interested in contributing to this project, first of all I would like to extend my heartfelt gratitude. I've written a small doc to describe how to get this running in a development setup: https://github.com/dbcli/pgcli/blob/master/DEVELOP.rst

Please feel free to reach out to me if you need help. My email: amjith.r@gmail.com, Twitter: `@amjithr <http://twitter.com/amjithr>`_

Detailed Installation Instructions:
-----------------------------------

OS X:
=====

The easiest way to install pgcli is using brew. Please be aware that this will install postgres via brew if it wasn't already installed via brew::

    $ brew install pgcli

Done!

If you have postgres installed via a different means (such as PostgresApp), you can ``brew install --build-from-source pgcli``, which will skip installing postgres via brew if postgres is available in the path.

Alternatively, you can install ``pgcli`` as a python package using a package manager called ``pip``. You will need postgres installed on your system for this to work. In-depth getting started guide for ``pip``: https://pip.pypa.io/en/latest/installing.html.

::

    $ which pip

If it is installed then you can do::

    $ pip install pgcli

If that fails due to permission issues, you might need to run the command with sudo permissions::

    $ sudo pip install pgcli

If pip is not installed, check if easy_install is available on the system::

    $ which easy_install

    $ sudo easy_install pgcli

Linux:
======

In-depth getting started guide for ``pip``: https://pip.pypa.io/en/latest/installing.html.

Check if pip is already available in your system::

    $ which pip

If it doesn't exist, use your Linux package manager to install ``pip``. This might look something like::

    $ sudo apt-get install python-pip  # Debian, Ubuntu, Mint etc

    or

    $ sudo yum install python-pip  # RHEL, CentOS, Fedora etc

``pgcli`` requires the python-dev, libpq-dev and libevent-dev packages. You can install these via your operating system package manager::

    $ sudo apt-get install python-dev libpq-dev libevent-dev

    or

    $ sudo yum install python-devel postgresql-devel

Then you can install pgcli::

    $ sudo pip install pgcli
Docker
======

Pgcli can be run from within Docker. This can be useful to try pgcli without installing it, or any dependencies, system-wide.

To build the image::

    $ docker build -t pgcli .

To create a container from the image::

    $ docker run --rm -ti pgcli pgcli

To access postgresql databases listening on localhost, make sure to run docker in "host net mode". E.g. to access a database called "foo" on the postgresql server running on localhost:5432 (the standard port)::

    $ docker run --rm -ti --net host pgcli pgcli -h localhost foo

To connect to a locally running instance over a unix socket, bind the socket to the docker container::

    $ docker run --rm -ti -v /var/run/postgres:/var/run/postgres pgcli pgcli foo

Thanks:
-------

A special thanks to `Jonathan Slenders <https://twitter.com/jonathan_s>`_ for creating `Python Prompt Toolkit <http://github.com/jonathanslenders/python-prompt-toolkit>`_, which is quite literally the backbone library that made this app possible. Jonathan has also provided valuable feedback and support during the development of this app.

This app includes the awesome `tabulate <https://pypi.python.org/pypi/tabulate>`_ library for pretty printing the output of tables. The reason for vendoring this library rather than listing it as a dependency in setup.py is that I had to make a change to the table format, which has been merged back into the original repo but not yet released on PyPI.

`Click <http://click.pocoo.org/>`_ is used for command line option parsing and printing error messages.

Thanks to `psycopg <http://initd.org/psycopg/>`_ for providing a rock-solid interface to the Postgres database.

Thanks to all the beta testers and contributors for your time and patience. :)

.. |Build Status| image:: https://api.travis-ci.org/dbcli/pgcli.svg?branch=master
   :target: https://travis-ci.org/dbcli/pgcli

.. |PyPI| image:: https://img.shields.io/pypi/v/pgcli.svg
   :target: https://pypi.python.org/pypi/pgcli/
   :alt: Latest Version

.. |Gitter| image:: https://badges.gitter.im/Join%20Chat.svg
   :target: https://gitter.im/dbcli/pgcli?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
   :alt: Gitter Chat

pgcli-1.6.0/setup.cfg

[egg_info]
tag_build =
tag_date = 0

pgcli-1.6.0/setup.py

import re
import ast
import platform
from setuptools import setup, find_packages

_version_re = re.compile(r'__version__\s+=\s+(.*)')

with open('pgcli/__init__.py', 'rb') as f:
    version = str(ast.literal_eval(_version_re.search(
        f.read().decode('utf-8')).group(1)))
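# For example, given the line "__version__ = '1.6.0'" in pgcli/__init__.py,
# the regex captures the source literal "'1.6.0'" and ast.literal_eval turns
# it into the plain string '1.6.0', without importing the package (which
# would require its dependencies to already be installed).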
description = 'CLI for Postgres Database. With auto-completion and syntax highlighting.'

install_requirements = [
    'pgspecial>=1.8.0',
    'click >= 4.1',
    'Pygments >= 2.0',  # Pygments has to be Capitalcased. WTF?
    'prompt_toolkit>=1.0.10,<1.1.0',
    'psycopg2 >= 2.5.4',
    'sqlparse >=0.2.2,<0.3.0',
    'configobj >= 5.0.6',
    'humanize >= 0.5.1',
    'wcwidth >= 0.1.6',
]

# setproctitle is used to mask the password when running `ps` in command line.
# But this is not necessary in Windows since the password is never shown in the
# task manager. Also setproctitle is a hard dependency to install in Windows,
# so we'll only install it if we're not in Windows.
if platform.system() != 'Windows' and not platform.system().startswith("CYGWIN"):
    install_requirements.append('setproctitle >= 1.1.9')

setup(
    name='pgcli',
    author='Pgcli Core Team',
    author_email='pgcli-dev@googlegroups.com',
    version=version,
    license='LICENSE.txt',
    url='http://pgcli.com',
    packages=find_packages(),
    package_data={'pgcli': ['pgclirc',
                            'packages/pgliterals/pgliterals.json']},
    description=description,
    long_description=open('README.rst').read(),
    install_requires=install_requirements,
    entry_points='''
        [console_scripts]
        pgcli=pgcli.main:cli
    ''',
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: Unix',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: SQL',
        'Topic :: Database',
        'Topic :: Database :: Front-Ends',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
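# Typical commands against this file (assuming pip and setuptools are
# available; illustrative, not part of the original source):
#
#     pip install .              # install pgcli from a source checkout
#     python setup.py sdist      # build a source distribution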