pgcli-0.20.1/0000755000076600000240000000000012621112650013264 5ustar amjithstaff00000000000000pgcli-0.20.1/pgcli/0000755000076600000240000000000012621112650014362 5ustar amjithstaff00000000000000pgcli-0.20.1/pgcli/__init__.py0000644000076600000240000000002712620265100016470 0ustar amjithstaff00000000000000__version__ = '0.20.1' pgcli-0.20.1/pgcli/completion_refresher.py0000644000076600000240000000764412621112637021172 0ustar amjithstaff00000000000000import threading try: from collections import OrderedDict except ImportError: from .packages.ordereddict import OrderedDict from .pgcompleter import PGCompleter from .pgexecute import PGExecute class CompletionRefresher(object): refreshers = OrderedDict() def __init__(self): self._completer_thread = None self._restart_refresh = threading.Event() def refresh(self, executor, special, callbacks): """ Creates a PGCompleter object and populates it with the relevant completion suggestions in a background thread. executor - PGExecute object, used to extract the credentials to connect to the database. special - PGSpecial object used for creating a new completion object. callbacks - A function or a list of functions to call after the thread has completed the refresh. The newly created completion object will be passed in as an argument to each callback. """ if self.is_refreshing(): self._restart_refresh.set() return [(None, None, None, 'Auto-completion refresh restarted.')] else: self._completer_thread = threading.Thread(target=self._bg_refresh, args=(executor, special, callbacks), name='completion_refresh') self._completer_thread.setDaemon(True) self._completer_thread.start() return [(None, None, None, 'Auto-completion refresh started in the background.')] def is_refreshing(self): return self._completer_thread and self._completer_thread.is_alive() def _bg_refresh(self, pgexecute, special, callbacks): completer = PGCompleter(smart_completion=True, pgspecial=special) # Create a new pgexecute method to popoulate the completions. e = pgexecute executor = PGExecute(e.dbname, e.user, e.password, e.host, e.port, e.dsn) # If callbacks is a single function then push it into a list. if callable(callbacks): callbacks = [callbacks] while 1: for refresher in self.refreshers.values(): refresher(completer, executor) if self._restart_refresh.is_set(): self._restart_refresh.clear() break else: # Break out of while loop if the for loop finishes natually # without hitting the break statement. break # Start over the refresh from the beginning if the for loop hit the # break statement. continue for callback in callbacks: callback(completer) def refresher(name, refreshers=CompletionRefresher.refreshers): """Decorator to populate the dictionary of refreshers with the current function. """ def wrapper(wrapped): refreshers[name] = wrapped return wrapped return wrapper @refresher('schemata') def refresh_schemata(completer, executor): completer.set_search_path(executor.search_path()) completer.extend_schemata(executor.schemata()) @refresher('tables') def refresh_tables(completer, executor): completer.extend_relations(executor.tables(), kind='tables') completer.extend_columns(executor.table_columns(), kind='tables') @refresher('views') def refresh_views(completer, executor): completer.extend_relations(executor.views(), kind='views') completer.extend_columns(executor.view_columns(), kind='views') @refresher('functions') def refresh_functions(completer, executor): completer.extend_functions(executor.functions()) @refresher('types') def refresh_types(completer, executor): completer.extend_datatypes(executor.datatypes()) @refresher('databases') def refresh_databases(completer, executor): completer.extend_database_names(executor.databases()) pgcli-0.20.1/pgcli/config.py0000644000076600000240000000153012620265100016176 0ustar amjithstaff00000000000000import shutil import os import platform from os.path import expanduser, exists from configobj import ConfigObj def config_location(): if platform.system() == 'Windows': return os.getenv('USERPROFILE') + '\\AppData\\Local\\dbcli\\pgcli\\' else: return expanduser('~/.config/pgcli/') def load_config(usr_cfg, def_cfg=None): cfg = ConfigObj() cfg.merge(ConfigObj(def_cfg, interpolation=False)) cfg.merge(ConfigObj(expanduser(usr_cfg), interpolation=False)) cfg.filename = expanduser(usr_cfg) return cfg def write_default_config(source, destination, overwrite=False): destination = expanduser(destination) if not overwrite and exists(destination): return shutil.copyfile(source, destination) def upgrade_config(config, def_config): cfg = load_config(config, def_config) cfg.write() pgcli-0.20.1/pgcli/encodingutils.py0000644000076600000240000000107612564133707017623 0ustar amjithstaff00000000000000import sys PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 def unicode2utf8(arg): """ Only in Python 2. Psycopg2 expects the args as bytes not unicode. In Python 3 the args are expected as unicode. """ if PY2 and isinstance(arg, unicode): return arg.encode('utf-8') return arg def utf8tounicode(arg): """ Only in Python 2. Psycopg2 returns the error message as utf-8. In Python 3 the errors are returned as unicode. """ if PY2 and isinstance(arg, str): return arg.decode('utf-8') return arg pgcli-0.20.1/pgcli/filters.py0000644000076600000240000000063512620265070016414 0ustar amjithstaff00000000000000from prompt_toolkit.filters import Filter class HasSelectedCompletion(Filter): """Enable when the current buffer has a selected completion.""" def __call__(self, cli): complete_state = cli.current_buffer.complete_state return (complete_state is not None and complete_state.current_completion is not None) def __repr__(self): return "HasSelectedCompletion()" pgcli-0.20.1/pgcli/key_bindings.py0000644000076600000240000000530412620265070017407 0ustar amjithstaff00000000000000import logging from prompt_toolkit.keys import Keys from prompt_toolkit.key_binding.manager import KeyBindingManager from prompt_toolkit.filters import Condition from .filters import HasSelectedCompletion _logger = logging.getLogger(__name__) def pgcli_bindings(get_vi_mode_enabled, set_vi_mode_enabled): """ Custom key bindings for pgcli. """ assert callable(get_vi_mode_enabled) assert callable(set_vi_mode_enabled) key_binding_manager = KeyBindingManager( enable_open_in_editor=True, enable_system_bindings=True, enable_vi_mode=Condition(lambda cli: get_vi_mode_enabled())) @key_binding_manager.registry.add_binding(Keys.F2) def _(event): """ Enable/Disable SmartCompletion Mode. """ _logger.debug('Detected F2 key.') buf = event.cli.current_buffer buf.completer.smart_completion = not buf.completer.smart_completion @key_binding_manager.registry.add_binding(Keys.F3) def _(event): """ Enable/Disable Multiline Mode. """ _logger.debug('Detected F3 key.') buf = event.cli.current_buffer buf.always_multiline = not buf.always_multiline @key_binding_manager.registry.add_binding(Keys.F4) def _(event): """ Toggle between Vi and Emacs mode. """ _logger.debug('Detected F4 key.') set_vi_mode_enabled(not get_vi_mode_enabled()) @key_binding_manager.registry.add_binding(Keys.Tab) def _(event): """ Force autocompletion at cursor. """ _logger.debug('Detected key.') b = event.cli.current_buffer if b.complete_state: b.complete_next() else: event.cli.start_completion(select_first=True) @key_binding_manager.registry.add_binding(Keys.ControlSpace) def _(event): """ Initialize autocompletion at cursor. If the autocompletion menu is not showing, display it with the appropriate completions for the context. If the menu is showing, select the next completion. """ _logger.debug('Detected key.') b = event.cli.current_buffer if b.complete_state: b.complete_next() else: event.cli.start_completion(select_first=False) @key_binding_manager.registry.add_binding(Keys.ControlJ, filter=HasSelectedCompletion()) def _(event): """ Makes the enter key work as the tab key only when showing the menu. """ _logger.debug('Detected key.') event.current_buffer.complete_state = None b = event.cli.current_buffer b.complete_state = None return key_binding_manager pgcli-0.20.1/pgcli/magic.py0000644000076600000240000000337312620265070016026 0ustar amjithstaff00000000000000from .main import PGCli import sql.parse import sql.connection import logging _logger = logging.getLogger(__name__) def load_ipython_extension(ipython): """This is called via the ipython command '%load_ext pgcli.magic'""" # first, load the sql magic if it isn't already loaded if not ipython.find_line_magic('sql'): ipython.run_line_magic('load_ext', 'sql') # register our own magic ipython.register_magic_function(pgcli_line_magic, 'line', 'pgcli') def pgcli_line_magic(line): _logger.debug('pgcli magic called: %r', line) parsed = sql.parse.parse(line, {}) conn = sql.connection.Connection.get(parsed['connection']) try: # A corresponding pgcli object already exists pgcli = conn._pgcli _logger.debug('Reusing existing pgcli') except AttributeError: # I can't figure out how to get the underylying psycopg2 connection # from the sqlalchemy connection, so just grab the url and make a # new connection pgcli = PGCli() u = conn.session.engine.url _logger.debug('New pgcli: %r', str(u)) pgcli.connect(u.database, u.host, u.username, u.port, u.password) conn._pgcli = pgcli # For convenience, print the connection alias print('Connected: {}'.format(conn.name)) try: pgcli.run_cli() except SystemExit: pass if not pgcli.query_history: return q = pgcli.query_history[-1] if not q.successful: _logger.debug('Unsuccessful query - ignoring') return if q.meta_changed or q.db_changed or q.path_changed: _logger.debug('Dangerous query detected -- ignoring') return ipython = get_ipython() return ipython.run_cell_magic('sql', line, q.query) pgcli-0.20.1/pgcli/main.py0000755000076600000240000006341412621112637015700 0ustar amjithstaff00000000000000#!/usr/bin/env python from __future__ import unicode_literals from __future__ import print_function import os import re import sys import traceback import logging import threading import shutil from time import time from codecs import open import click try: import setproctitle except ImportError: setproctitle = None import sqlparse from prompt_toolkit import CommandLineInterface, Application, AbortAction from prompt_toolkit.enums import DEFAULT_BUFFER from prompt_toolkit.shortcuts import create_default_layout, create_eventloop from prompt_toolkit.document import Document from prompt_toolkit.filters import Always, HasFocus, IsDone from prompt_toolkit.layout.processors import (ConditionalProcessor, HighlightMatchingBracketProcessor) from prompt_toolkit.history import FileHistory from pygments.lexers.sql import PostgresLexer from pygments.token import Token from .packages.tabulate import tabulate from .packages.expanded import expanded_table from pgspecial.main import (PGSpecial, NO_QUERY, content_exceeds_width) import pgspecial as special from .pgcompleter import PGCompleter from .pgtoolbar import create_toolbar_tokens_func from .pgstyle import style_factory from .pgexecute import PGExecute from .pgbuffer import PGBuffer from .completion_refresher import CompletionRefresher from .config import write_default_config, load_config, config_location from .key_bindings import pgcli_bindings from .encodingutils import utf8tounicode from .__init__ import __version__ click.disable_unicode_literals_warning = True try: from urlparse import urlparse except ImportError: from urllib.parse import urlparse from getpass import getuser from psycopg2 import OperationalError from collections import namedtuple # Query tuples are used for maintaining history MetaQuery = namedtuple( 'Query', [ 'query', # The entire text of the command 'successful', # True If all subqueries were successful 'total_time', # Time elapsed executing the query 'meta_changed', # True if any subquery executed create/alter/drop 'db_changed', # True if any subquery changed the database 'path_changed', # True if any subquery changed the search path 'mutated', # True if any subquery executed insert/update/delete ]) MetaQuery.__new__.__defaults__ = ('', False, 0, False, False, False, False) class PGCli(object): def __init__(self, force_passwd_prompt=False, never_passwd_prompt=False, pgexecute=None, pgclirc_file=None): self.force_passwd_prompt = force_passwd_prompt self.never_passwd_prompt = never_passwd_prompt self.pgexecute = pgexecute from pgcli import __file__ as package_root package_root = os.path.dirname(package_root) default_config = os.path.join(package_root, 'pgclirc') write_default_config(default_config, pgclirc_file) self.pgspecial = PGSpecial() # Load config. c = self.config = load_config(pgclirc_file, default_config) self.multi_line = c['main'].as_bool('multi_line') self.vi_mode = c['main'].as_bool('vi') self.pgspecial.timing_enabled = c['main'].as_bool('timing') self.table_format = c['main']['table_format'] self.syntax_style = c['main']['syntax_style'] self.cli_style = c['colors'] self.wider_completion_menu = c['main'].as_bool('wider_completion_menu') self.on_error = c['main']['on_error'].upper() self.completion_refresher = CompletionRefresher() self.logger = logging.getLogger(__name__) self.initialize_logging() self.query_history = [] # Initialize completer smart_completion = c['main'].as_bool('smart_completion') completer = PGCompleter(smart_completion, pgspecial=self.pgspecial) self.completer = completer self._completer_lock = threading.Lock() self.register_special_commands() self.cli = None def register_special_commands(self): self.pgspecial.register(self.change_db, '\\c', '\\c[onnect] database_name', 'Change to a new database.', aliases=('use', '\\connect', 'USE')) self.pgspecial.register(self.refresh_completions, '\\#', '\\#', 'Refresh auto-completions.', arg_type=NO_QUERY) self.pgspecial.register(self.refresh_completions, '\\refresh', '\\refresh', 'Refresh auto-completions.', arg_type=NO_QUERY) self.pgspecial.register(self.execute_from_file, '\\i', '\\i filename', 'Execute commands from file.') def change_db(self, pattern, **_): if pattern: db = pattern[1:-1] if pattern[0] == pattern[-1] == '"' else pattern self.pgexecute.connect(database=db) else: self.pgexecute.connect() yield (None, None, None, 'You are now connected to database "%s" as ' 'user "%s"' % (self.pgexecute.dbname, self.pgexecute.user)) def execute_from_file(self, pattern, **_): if not pattern: message = '\\i: missing required argument' return [(None, None, None, message)] try: with open(os.path.expanduser(pattern), encoding='utf-8') as f: query = f.read() except IOError as e: return [(None, None, None, str(e))] return self.pgexecute.run(query, self.pgspecial, on_error=self.on_error) def initialize_logging(self): log_file = self.config['main']['log_file'] if log_file == 'default': log_file = config_location() + 'log' log_level = self.config['main']['log_level'] level_map = {'CRITICAL': logging.CRITICAL, 'ERROR': logging.ERROR, 'WARNING': logging.WARNING, 'INFO': logging.INFO, 'DEBUG': logging.DEBUG } handler = logging.FileHandler(os.path.expanduser(log_file)) formatter = logging.Formatter( '%(asctime)s (%(process)d/%(threadName)s) ' '%(name)s %(levelname)s - %(message)s') handler.setFormatter(formatter) root_logger = logging.getLogger('pgcli') root_logger.addHandler(handler) root_logger.setLevel(level_map[log_level.upper()]) root_logger.debug('Initializing pgcli logging.') root_logger.debug('Log file %r.', log_file) def connect_dsn(self, dsn): self.connect(dsn=dsn) def connect_uri(self, uri): uri = urlparse(uri) database = uri.path[1:] # ignore the leading fwd slash self.connect(database, uri.hostname, uri.username, uri.port, uri.password) def connect(self, database='', host='', user='', port='', passwd='', dsn=''): # Connect to the database. if not user: user = getuser() if not database: database = user # If password prompt is not forced but no password is provided, try # getting it from environment variable. if not self.force_passwd_prompt and not passwd: passwd = os.environ.get('PGPASSWORD', '') # Prompt for a password immediately if requested via the -W flag. This # avoids wasting time trying to connect to the database and catching a # no-password exception. # If we successfully parsed a password from a URI, there's no need to # prompt for it, even with the -W flag if self.force_passwd_prompt and not passwd: passwd = click.prompt('Password', hide_input=True, show_default=False, type=str) # Prompt for a password after 1st attempt to connect without a password # fails. Don't prompt if the -w flag is supplied auto_passwd_prompt = not passwd and not self.never_passwd_prompt # Attempt to connect to the database. # Note that passwd may be empty on the first attempt. If connection # fails because of a missing password, but we're allowed to prompt for # a password (no -w flag), prompt for a passwd and try again. try: try: pgexecute = PGExecute(database, user, passwd, host, port, dsn) except OperationalError as e: if ('no password supplied' in utf8tounicode(e.args[0]) and auto_passwd_prompt): passwd = click.prompt('Password', hide_input=True, show_default=False, type=str) pgexecute = PGExecute(database, user, passwd, host, port, dsn) else: raise e except Exception as e: # Connecting to a database could fail. self.logger.debug('Database connection failed: %r.', e) self.logger.error("traceback: %r", traceback.format_exc()) click.secho(str(e), err=True, fg='red') exit(1) self.pgexecute = pgexecute def handle_editor_command(self, cli, document): """ Editor command is any query that is prefixed or suffixed by a '\e'. The reason for a while loop is because a user might edit a query multiple times. For eg: "select * from \e" to edit it in vim, then come back to the prompt with the edited query "select * from blah where q = 'abc'\e" to edit it again. :param cli: CommandLineInterface :param document: Document :return: Document """ while special.editor_command(document.text): filename = special.get_filename(document.text) sql, message = special.open_external_editor(filename, sql=document.text) if message: # Something went wrong. Raise an exception and bail. raise RuntimeError(message) cli.current_buffer.document = Document(sql, cursor_position=len(sql)) document = cli.run(False) continue return document def run_cli(self): logger = self.logger original_less_opts = self.adjust_less_opts() self.refresh_completions() self.cli = self._build_cli() print('Version:', __version__) print('Chat: https://gitter.im/dbcli/pgcli') print('Mail: https://groups.google.com/forum/#!forum/pgcli') print('Home: http://pgcli.com') try: while True: document = self.cli.run() # The reason we check here instead of inside the pgexecute is # because we want to raise the Exit exception which will be # caught by the try/except block that wraps the pgexecute.run() # statement. if quit_command(document.text): raise EOFError try: document = self.handle_editor_command(self.cli, document) except RuntimeError as e: logger.error("sql: %r, error: %r", document.text, e) logger.error("traceback: %r", traceback.format_exc()) click.secho(str(e), err=True, fg='red') continue # Initialize default metaquery in case execution fails query = MetaQuery(query=document.text, successful=False) try: output, query = self._evaluate_command(document.text) except KeyboardInterrupt: # Restart connection to the database self.pgexecute.connect() logger.debug("cancelled query, sql: %r", document.text) click.secho("cancelled query", err=True, fg='red') except NotImplementedError: click.secho('Not Yet Implemented.', fg="yellow") except OperationalError as e: if ('server closed the connection' in utf8tounicode(e.args[0])): self._handle_server_closed_connection() else: logger.error("sql: %r, error: %r", document.text, e) logger.error("traceback: %r", traceback.format_exc()) click.secho(str(e), err=True, fg='red') except Exception as e: logger.error("sql: %r, error: %r", document.text, e) logger.error("traceback: %r", traceback.format_exc()) click.secho(str(e), err=True, fg='red') else: try: click.echo_via_pager('\n'.join(output)) except KeyboardInterrupt: pass if self.pgspecial.timing_enabled: print('Time: %0.03fs' % query.total_time) # Check if we need to update completions, in order of most # to least drastic changes if query.db_changed: self.refresh_completions(reset=True) elif query.meta_changed: self.refresh_completions(reset=False) elif query.path_changed: logger.debug('Refreshing search path') with self._completer_lock: self.completer.set_search_path( self.pgexecute.search_path()) logger.debug('Search path: %r', self.completer.search_path) self.query_history.append(query) except EOFError: print ('Goodbye!') finally: # Reset the less opts back to original. logger.debug('Restoring env var LESS to %r.', original_less_opts) os.environ['LESS'] = original_less_opts def _build_cli(self): def set_vi_mode(value): self.vi_mode = value key_binding_manager = pgcli_bindings( get_vi_mode_enabled=lambda: self.vi_mode, set_vi_mode_enabled=set_vi_mode) def prompt_tokens(_): return [(Token.Prompt, '%s> ' % self.pgexecute.dbname)] get_toolbar_tokens = create_toolbar_tokens_func( lambda: self.vi_mode, self.completion_refresher.is_refreshing) layout = create_default_layout( lexer=PostgresLexer, reserve_space_for_menu=True, get_prompt_tokens=prompt_tokens, get_bottom_toolbar_tokens=get_toolbar_tokens, display_completions_in_columns=self.wider_completion_menu, multiline=True, extra_input_processors=[ # Highlight matching brackets while editing. ConditionalProcessor( processor=HighlightMatchingBracketProcessor(chars='[](){}'), filter=HasFocus(DEFAULT_BUFFER) & ~IsDone()), ]) history_file = self.config['main']['history_file'] if history_file == 'default': history_file = config_location() + 'history' with self._completer_lock: buf = PGBuffer( always_multiline=self.multi_line, completer=self.completer, history=FileHistory(os.path.expanduser(history_file)), complete_while_typing=Always()) application = Application( style=style_factory(self.syntax_style, self.cli_style), layout=layout, buffer=buf, key_bindings_registry=key_binding_manager.registry, on_exit=AbortAction.RAISE_EXCEPTION, ignore_case=True) cli = CommandLineInterface( application=application, eventloop=create_eventloop()) return cli def _evaluate_command(self, text): """Used to run a command entered by the user during CLI operation (Puts the E in REPL) returns (results, MetaQuery) """ logger = self.logger logger.debug('sql: %r', text) all_success = True meta_changed = False # CREATE, ALTER, DROP, etc mutated = False # INSERT, DELETE, etc db_changed = False path_changed = False output = [] total = 0 # Run the query. start = time() on_error_resume = self.on_error == 'RESUME' res = self.pgexecute.run(text, self.pgspecial, exception_formatter, on_error_resume) for title, cur, headers, status, sql, success in res: logger.debug("headers: %r", headers) logger.debug("rows: %r", cur) logger.debug("status: %r", status) threshold = 1000 if (is_select(status) and cur and cur.rowcount > threshold): click.secho('The result set has more than %s rows.' % threshold, fg='red') if not click.confirm('Do you want to continue?'): click.secho("Aborted!", err=True, fg='red') break if self.pgspecial.auto_expand: max_width = self.cli.output.get_size().columns else: max_width = None formatted = format_output( title, cur, headers, status, self.table_format, self.pgspecial.expanded_output, max_width) output.extend(formatted) end = time() total += end - start # Keep track of whether any of the queries are mutating or changing # the database if success: mutated = mutated or is_mutating(status) db_changed = db_changed or has_change_db_cmd(sql) meta_changed = meta_changed or has_meta_cmd(sql) path_changed = path_changed or has_change_path_cmd(sql) else: all_success = False meta_query = MetaQuery(text, all_success, total, meta_changed, db_changed, path_changed, mutated) return output, meta_query def _handle_server_closed_connection(self): """Used during CLI execution""" reconnect = click.prompt( 'Connection reset. Reconnect (Y/n)', show_default=False, type=bool, default=True) if reconnect: try: self.pgexecute.connect() click.secho('Reconnected!\nTry the command again.', fg='green') except OperationalError as e: click.secho(str(e), err=True, fg='red') def adjust_less_opts(self): less_opts = os.environ.get('LESS', '') self.logger.debug('Original value for LESS env var: %r', less_opts) os.environ['LESS'] = '-SRXF' return less_opts def refresh_completions(self, reset=False): if reset: with self._completer_lock: self.completer.reset_completions() self.completion_refresher.refresh(self.pgexecute, self.pgspecial, self._on_completions_refreshed) return [(None, None, None, 'Auto-completion refresh started in the background.')] def _on_completions_refreshed(self, new_completer): self._swap_completer_objects(new_completer) if self.cli: # After refreshing, redraw the CLI to clear the statusbar # "Refreshing completions..." indicator self.cli.request_redraw() def _swap_completer_objects(self, new_completer): """Swap the completer object in cli with the newly created completer. """ with self._completer_lock: self.completer = new_completer # When pgcli is first launched we call refresh_completions before # instantiating the cli object. So it is necessary to check if cli # exists before trying the replace the completer object in cli. if self.cli: self.cli.current_buffer.completer = new_completer def get_completions(self, text, cursor_positition): with self._completer_lock: return self.completer.get_completions( Document(text=text, cursor_position=cursor_positition), None) @click.command() # Default host is '' so psycopg2 can default to either localhost or unix socket @click.option('-h', '--host', default='', envvar='PGHOST', help='Host address of the postgres database.') @click.option('-p', '--port', default=5432, help='Port number at which the ' 'postgres instance is listening.', envvar='PGPORT') @click.option('-U', '--user', envvar='PGUSER', help='User name to ' 'connect to the postgres database.') @click.option('-W', '--password', 'prompt_passwd', is_flag=True, default=False, help='Force password prompt.') @click.option('-w', '--no-password', 'never_prompt', is_flag=True, default=False, help='Never prompt for password.') @click.option('-v', '--version', is_flag=True, help='Version of pgcli.') @click.option('-d', '--dbname', default='', envvar='PGDATABASE', help='database name to connect to.') @click.option('--pgclirc', default=config_location() + 'config', envvar='PGCLIRC', help='Location of pgclirc file.') @click.argument('database', default=lambda: None, envvar='PGDATABASE', nargs=1) @click.argument('username', default=lambda: None, envvar='PGUSER', nargs=1) def cli(database, user, host, port, prompt_passwd, never_prompt, dbname, username, version, pgclirc): if version: print('Version:', __version__) sys.exit(0) config_dir = os.path.dirname(config_location()) if not os.path.exists(config_dir): os.makedirs(config_dir) # Migrate the config file from old location. config_full_path = config_location() + 'config' if os.path.exists(os.path.expanduser('~/.pgclirc')): if not os.path.exists(config_full_path): shutil.move(os.path.expanduser('~/.pgclirc'), config_full_path) print ('Config file (~/.pgclirc) moved to new location', config_full_path) else: print ('Config file is now located at', config_full_path) print ('Please move the existing config file ~/.pgclirc to', config_full_path) pgcli = PGCli(prompt_passwd, never_prompt, pgclirc_file=pgclirc) # Choose which ever one has a valid value. database = database or dbname user = username or user if '://' in database: pgcli.connect_uri(database) elif "=" in database: pgcli.connect_dsn(database) elif os.environ.get('PGSERVICE', None): pgcli.connect_dsn('service={0}'.format(os.environ['PGSERVICE'])) else: pgcli.connect(database, host, user, port) pgcli.logger.debug('Launch Params: \n' '\tdatabase: %r' '\tuser: %r' '\thost: %r' '\tport: %r', database, user, host, port) if setproctitle: obfuscate_process_password() pgcli.run_cli() def obfuscate_process_password(): process_title = setproctitle.getproctitle() if '://' in process_title: process_title = re.sub(r":(.*):(.*)@", r":\1:xxxx@", process_title) elif "=" in process_title: process_title = re.sub(r"password=(.+?)((\s[a-zA-Z]+=)|$)", r"password=xxxx\2", process_title) setproctitle.setproctitle(process_title) def format_output(title, cur, headers, status, table_format, expanded=False, max_width=None): output = [] if title: # Only print the title if it's not None. output.append(title) if cur: headers = [utf8tounicode(x) for x in headers] if expanded and headers: output.append(expanded_table(cur, headers)) else: tabulated, rows = tabulate(cur, headers, tablefmt=table_format, missingval='') if (max_width and content_exceeds_width(rows[0], max_width) and headers): output.append(expanded_table(rows, headers)) else: output.append(tabulated) if status: # Only print the status if it's not None. output.append(status) return output def has_meta_cmd(query): """Determines if the completion needs a refresh by checking if the sql statement is an alter, create, or drop""" try: first_token = query.split()[0] if first_token.lower() in ('alter', 'create', 'drop'): return True except Exception: return False return False def has_change_db_cmd(query): """Determines if the statement is a database switch such as 'use' or '\\c'""" try: first_token = query.split()[0] if first_token.lower() in ('use', '\\c', '\\connect'): return True except Exception: return False return False def has_change_path_cmd(sql): """Determines if the search_path should be refreshed by checking if the sql has 'set search_path'.""" return 'set search_path' in sql.lower() def is_mutating(status): """Determines if the statement is mutating based on the status.""" if not status: return False mutating = set(['insert', 'update', 'delete']) return status.split(None, 1)[0].lower() in mutating def is_select(status): """Returns true if the first word in status is 'select'.""" if not status: return False return status.split(None, 1)[0].lower() == 'select' def quit_command(sql): return (sql.strip().lower() == 'exit' or sql.strip().lower() == 'quit' or sql.strip() == '\q' or sql.strip() == ':q') def exception_formatter(e): return click.style(utf8tounicode(str(e)), fg='red') if __name__ == "__main__": cli() pgcli-0.20.1/pgcli/packages/0000755000076600000240000000000012621112650016140 5ustar amjithstaff00000000000000pgcli-0.20.1/pgcli/packages/__init__.py0000644000076600000240000000000012520071117020237 0ustar amjithstaff00000000000000pgcli-0.20.1/pgcli/packages/counter.py0000644000076600000240000001420112564133707020203 0ustar amjithstaff00000000000000#copied from http://code.activestate.com/recipes/576611-counter-class/ from operator import itemgetter from heapq import nlargest from itertools import repeat, ifilter class Counter(dict): '''Dict subclass for counting hashable objects. Sometimes called a bag or multiset. Elements are stored as dictionary keys and their counts are stored as dictionary values. >>> Counter('zyzygy') Counter({'y': 3, 'z': 2, 'g': 1}) ''' def __init__(self, iterable=None, **kwds): '''Create a new, empty Counter object. And if given, count elements from an input iterable. Or, initialize the count from another mapping of elements to their counts. >>> c = Counter() # a new, empty counter >>> c = Counter('gallahad') # a new counter from an iterable >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping >>> c = Counter(a=4, b=2) # a new counter from keyword args ''' self.update(iterable, **kwds) def __missing__(self, key): return 0 def most_common(self, n=None): '''List the n most common elements and their counts from the most common to the least. If n is None, then list all element counts. >>> Counter('abracadabra').most_common(3) [('a', 5), ('r', 2), ('b', 2)] ''' if n is None: return sorted(self.iteritems(), key=itemgetter(1), reverse=True) return nlargest(n, self.iteritems(), key=itemgetter(1)) def elements(self): '''Iterator over elements repeating each as many times as its count. >>> c = Counter('ABCABC') >>> sorted(c.elements()) ['A', 'A', 'B', 'B', 'C', 'C'] If an element's count has been set to zero or is a negative number, elements() will ignore it. ''' for elem, count in self.iteritems(): for _ in repeat(None, count): yield elem # Override dict methods where the meaning changes for Counter objects. @classmethod def fromkeys(cls, iterable, v=None): raise NotImplementedError( 'Counter.fromkeys() is undefined. Use Counter(iterable) instead.') def update(self, iterable=None, **kwds): '''Like dict.update() but add counts instead of replacing them. Source can be an iterable, a dictionary, or another Counter instance. >>> c = Counter('which') >>> c.update('witch') # add elements from another iterable >>> d = Counter('watch') >>> c.update(d) # add elements from another counter >>> c['h'] # four 'h' in which, witch, and watch 4 ''' if iterable is not None: if hasattr(iterable, 'iteritems'): if self: self_get = self.get for elem, count in iterable.iteritems(): self[elem] = self_get(elem, 0) + count else: dict.update(self, iterable) # fast path when counter is empty else: self_get = self.get for elem in iterable: self[elem] = self_get(elem, 0) + 1 if kwds: self.update(kwds) def copy(self): 'Like dict.copy() but returns a Counter instance instead of a dict.' return Counter(self) def __delitem__(self, elem): 'Like dict.__delitem__() but does not raise KeyError for missing values.' if elem in self: dict.__delitem__(self, elem) def __repr__(self): if not self: return '%s()' % self.__class__.__name__ items = ', '.join(map('%r: %r'.__mod__, self.most_common())) return '%s({%s})' % (self.__class__.__name__, items) # Multiset-style mathematical operations discussed in: # Knuth TAOCP Volume II section 4.6.3 exercise 19 # and at http://en.wikipedia.org/wiki/Multiset # # Outputs guaranteed to only include positive counts. # # To strip negative and zero counts, add-in an empty counter: # c += Counter() def __add__(self, other): '''Add counts from two counters. >>> Counter('abbb') + Counter('bcc') Counter({'b': 4, 'c': 2, 'a': 1}) ''' if not isinstance(other, Counter): return NotImplemented result = Counter() for elem in set(self) | set(other): newcount = self[elem] + other[elem] if newcount > 0: result[elem] = newcount return result def __sub__(self, other): ''' Subtract count, but keep only results with positive counts. >>> Counter('abbbc') - Counter('bccd') Counter({'b': 2, 'a': 1}) ''' if not isinstance(other, Counter): return NotImplemented result = Counter() for elem in set(self) | set(other): newcount = self[elem] - other[elem] if newcount > 0: result[elem] = newcount return result def __or__(self, other): '''Union is the maximum of value in either of the input counters. >>> Counter('abbb') | Counter('bcc') Counter({'b': 3, 'c': 2, 'a': 1}) ''' if not isinstance(other, Counter): return NotImplemented _max = max result = Counter() for elem in set(self) | set(other): newcount = _max(self[elem], other[elem]) if newcount > 0: result[elem] = newcount return result def __and__(self, other): ''' Intersection is the minimum of corresponding counts. >>> Counter('abbb') & Counter('bcc') Counter({'b': 1}) ''' if not isinstance(other, Counter): return NotImplemented _min = min result = Counter() if len(self) < len(other): self, other = other, self for elem in ifilter(self.__contains__, other): newcount = _min(self[elem], other[elem]) if newcount > 0: result[elem] = newcount return result pgcli-0.20.1/pgcli/packages/expanded.py0000644000076600000240000000162512616534745020327 0ustar amjithstaff00000000000000from .tabulate import _text_type def pad(field, total, char=u" "): return field + (char * (total - len(field))) def expanded_table(rows, headers): header_len = max([len(x) for x in headers]) max_row_len = 0 results = [] sep = u"-[ RECORD {0} ]-------------------------\n" padded_headers = [pad(x, header_len) + u" |" for x in headers] header_len += 2 for row in rows: row_len = max([len(_text_type(x)) for x in row]) row_result = [] if row_len > max_row_len: max_row_len = row_len for header, value in zip(padded_headers, row): row_result.append((u"%s" % header) + " " + (u"%s" % value).strip()) results.append('\n'.join(row_result)) output = [] for i, result in enumerate(results): output.append(sep.format(i)) output.append(result) output.append('\n') return ''.join(output) pgcli-0.20.1/pgcli/packages/function_metadata.py0000644000076600000240000001226312616534745022224 0ustar amjithstaff00000000000000import re import sqlparse from sqlparse.tokens import Whitespace, Comment, Keyword, Name, Punctuation table_def_regex = re.compile(r'^TABLE\s*\((.+)\)$', re.IGNORECASE) class FunctionMetadata(object): def __init__(self, schema_name, func_name, arg_list, return_type, is_aggregate, is_window, is_set_returning): """Class for describing a postgresql function""" self.schema_name = schema_name self.func_name = func_name self.arg_list = arg_list.strip() self.return_type = return_type.strip() self.is_aggregate = is_aggregate self.is_window = is_window self.is_set_returning = is_set_returning def __eq__(self, other): return (isinstance(other, self.__class__) and self.__dict__ == other.__dict__) def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash((self.schema_name, self.func_name, self.arg_list, self.return_type, self.is_aggregate, self.is_window, self.is_set_returning)) def __repr__(self): return (('%s(schema_name=%r, func_name=%r, arg_list=%r, return_type=%r,' ' is_aggregate=%r, is_window=%r, is_set_returning=%r)') % (self.__class__.__name__, self.schema_name, self.func_name, self.arg_list, self.return_type, self.is_aggregate, self.is_window, self.is_set_returning)) def fieldnames(self): """Returns a list of output field names""" if self.return_type.lower() == 'void': return [] match = table_def_regex.match(self.return_type) if match: # Function returns a table -- get the column names return list(field_names(match.group(1), mode_filter=None)) # Function may have named output arguments -- find them and return # their names return list(field_names(self.arg_list, mode_filter=('OUT', 'INOUT'))) class TypedFieldMetadata(object): """Describes typed field from a function signature or table definition Attributes are: name The name of the argument/column mode 'IN', 'OUT', 'INOUT', 'VARIADIC' type A list of tokens denoting the type default A list of tokens denoting the default value unknown A list of tokens not assigned to type or default """ __slots__ = ['name', 'mode', 'type', 'default', 'unknown'] def __init__(self): self.name = None self.mode = 'IN' self.type = [] self.default = [] self.unknown = [] def __getitem__(self, attr): return getattr(self, attr) def parse_typed_field_list(tokens): """Parses a argument/column list, yielding TypedFieldMetadata objects Field/column lists are used in function signatures and table definitions. This function parses a flattened list of sqlparse tokens and yields one metadata argument per argument / column. """ # postgres function argument list syntax: # " ( [ [ argmode ] [ argname ] argtype # [ { DEFAULT | = } default_expr ] [, ...] ] )" mode_names = set(('IN', 'OUT', 'INOUT', 'VARIADIC')) parse_state = 'type' parens = 0 field = TypedFieldMetadata() for tok in tokens: if tok.ttype in Whitespace or tok.ttype in Comment: continue elif tok.ttype in Punctuation: if parens == 0 and tok.value == ',': # End of the current field specification if field.type: yield field # Initialize metadata holder for the next field field, parse_state = TypedFieldMetadata(), 'type' elif parens == 0 and tok.value == '=': parse_state = 'default' else: field[parse_state].append(tok) if tok.value == '(': parens += 1 elif tok.value == ')': parens -= 1 elif parens == 0: if tok.ttype in Keyword: if not field.name and tok.value.upper() in mode_names: # No other keywords allowed before arg name field.mode = tok.value.upper() elif tok.value.upper() == 'DEFAULT': parse_state = 'default' else: parse_state = 'unknown' elif tok.ttype == Name and not field.name: # note that `ttype in Name` would also match Name.Builtin field.name = tok.value else: field[parse_state].append(tok) else: field[parse_state].append(tok) # Final argument won't be followed by a comma, so make sure it gets yielded if field.type: yield field def field_names(sql, mode_filter=('IN', 'OUT', 'INOUT', 'VARIADIC')): """Yields field names from a table declaration""" # sql is something like "x int, y text, ..." tokens = sqlparse.parse(sql)[0].flatten() for f in parse_typed_field_list(tokens): if f.name and (not mode_filter or f.mode in mode_filter): yield f.name pgcli-0.20.1/pgcli/packages/ordereddict.py0000644000076600000240000001017512616534745021027 0ustar amjithstaff00000000000000# Copyright (c) 2009 Raymond Hettinger # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. from UserDict import DictMixin class OrderedDict(dict, DictMixin): def __init__(self, *args, **kwds): if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) try: self.__end except AttributeError: self.clear() self.update(*args, **kwds) def clear(self): self.__end = end = [] end += [None, end, end] # sentinel node for doubly linked list self.__map = {} # key --> [key, prev, next] dict.clear(self) def __setitem__(self, key, value): if key not in self: end = self.__end curr = end[1] curr[2] = end[1] = self.__map[key] = [key, curr, end] dict.__setitem__(self, key, value) def __delitem__(self, key): dict.__delitem__(self, key) key, prev, next = self.__map.pop(key) prev[2] = next next[1] = prev def __iter__(self): end = self.__end curr = end[2] while curr is not end: yield curr[0] curr = curr[2] def __reversed__(self): end = self.__end curr = end[1] while curr is not end: yield curr[0] curr = curr[1] def popitem(self, last=True): if not self: raise KeyError('dictionary is empty') if last: key = reversed(self).next() else: key = iter(self).next() value = self.pop(key) return key, value def __reduce__(self): items = [[k, self[k]] for k in self] tmp = self.__map, self.__end del self.__map, self.__end inst_dict = vars(self).copy() self.__map, self.__end = tmp if inst_dict: return (self.__class__, (items,), inst_dict) return self.__class__, (items,) def keys(self): return list(self) setdefault = DictMixin.setdefault update = DictMixin.update pop = DictMixin.pop values = DictMixin.values items = DictMixin.items iterkeys = DictMixin.iterkeys itervalues = DictMixin.itervalues iteritems = DictMixin.iteritems def __repr__(self): if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, self.items()) def copy(self): return self.__class__(self) @classmethod def fromkeys(cls, iterable, value=None): d = cls() for key in iterable: d[key] = value return d def __eq__(self, other): if isinstance(other, OrderedDict): if len(self) != len(other): return False for p, q in zip(self.items(), other.items()): if p != q: return False return True return dict.__eq__(self, other) def __ne__(self, other): return not self == other pgcli-0.20.1/pgcli/packages/parseutils.py0000644000076600000240000002252212616534745020731 0ustar amjithstaff00000000000000from __future__ import print_function import re import sqlparse from collections import namedtuple from sqlparse.sql import IdentifierList, Identifier, Function from sqlparse.tokens import Keyword, DML, Punctuation, Token cleanup_regex = { # This matches only alphanumerics and underscores. 'alphanum_underscore': re.compile(r'(\w+)$'), # This matches everything except spaces, parens, colon, and comma 'many_punctuations': re.compile(r'([^():,\s]+)$'), # This matches everything except spaces, parens, colon, comma, and period 'most_punctuations': re.compile(r'([^\.():,\s]+)$'), # This matches everything except a space. 'all_punctuations': re.compile('([^\s]+)$'), } def last_word(text, include='alphanum_underscore'): """ Find the last word in a sentence. >>> last_word('abc') 'abc' >>> last_word(' abc') 'abc' >>> last_word('') '' >>> last_word(' ') '' >>> last_word('abc ') '' >>> last_word('abc def') 'def' >>> last_word('abc def ') '' >>> last_word('abc def;') '' >>> last_word('bac $def') 'def' >>> last_word('bac $def', include='most_punctuations') '$def' >>> last_word('bac \def', include='most_punctuations') '\\\\def' >>> last_word('bac \def;', include='most_punctuations') '\\\\def;' >>> last_word('bac::def', include='most_punctuations') 'def' """ if not text: # Empty string return '' if text[-1].isspace(): return '' else: regex = cleanup_regex[include] matches = regex.search(text) if matches: return matches.group(0) else: return '' TableReference = namedtuple('TableReference', ['schema', 'name', 'alias', 'is_function']) # This code is borrowed from sqlparse example script. # def is_subselect(parsed): if not parsed.is_group(): return False for item in parsed.tokens: if item.ttype is DML and item.value.upper() in ('SELECT', 'INSERT', 'UPDATE', 'CREATE', 'DELETE'): return True return False def _identifier_is_function(identifier): return any(isinstance(t, Function) for t in identifier.tokens) def extract_from_part(parsed, stop_at_punctuation=True): tbl_prefix_seen = False for item in parsed.tokens: if tbl_prefix_seen: if is_subselect(item): for x in extract_from_part(item, stop_at_punctuation): yield x elif stop_at_punctuation and item.ttype is Punctuation: raise StopIteration # An incomplete nested select won't be recognized correctly as a # sub-select. eg: 'SELECT * FROM (SELECT id FROM user'. This causes # the second FROM to trigger this elif condition resulting in a # StopIteration. So we need to ignore the keyword if the keyword # FROM. # Also 'SELECT * FROM abc JOIN def' will trigger this elif # condition. So we need to ignore the keyword JOIN and its variants # INNER JOIN, FULL OUTER JOIN, etc. elif item.ttype is Keyword and ( not item.value.upper() == 'FROM') and ( not item.value.upper().endswith('JOIN')): raise StopIteration else: yield item elif ((item.ttype is Keyword or item.ttype is Keyword.DML) and item.value.upper() in ('COPY', 'FROM', 'INTO', 'UPDATE', 'TABLE', 'JOIN',)): tbl_prefix_seen = True # 'SELECT a, FROM abc' will detect FROM as part of the column list. # So this check here is necessary. elif isinstance(item, IdentifierList): for identifier in item.get_identifiers(): if (identifier.ttype is Keyword and identifier.value.upper() == 'FROM'): tbl_prefix_seen = True break def extract_table_identifiers(token_stream, allow_functions=True): """yields tuples of TableReference namedtuples""" for item in token_stream: if isinstance(item, IdentifierList): for identifier in item.get_identifiers(): # Sometimes Keywords (such as FROM ) are classified as # identifiers which don't have the get_real_name() method. try: schema_name = identifier.get_parent_name() real_name = identifier.get_real_name() is_function = (allow_functions and _identifier_is_function(identifier)) except AttributeError: continue if real_name: yield TableReference(schema_name, real_name, identifier.get_alias(), is_function) elif isinstance(item, Identifier): real_name = item.get_real_name() schema_name = item.get_parent_name() is_function = allow_functions and _identifier_is_function(item) if real_name: yield TableReference(schema_name, real_name, item.get_alias(), is_function) else: name = item.get_name() yield TableReference(None, name, item.get_alias() or name, is_function) elif isinstance(item, Function): yield TableReference(None, item.get_real_name(), item.get_alias(), allow_functions) # extract_tables is inspired from examples in the sqlparse lib. def extract_tables(sql): """Extract the table names from an SQL statment. Returns a list of TableReference namedtuples """ parsed = sqlparse.parse(sql) if not parsed: return [] # INSERT statements must stop looking for tables at the sign of first # Punctuation. eg: INSERT INTO abc (col1, col2) VALUES (1, 2) # abc is the table name, but if we don't stop at the first lparen, then # we'll identify abc, col1 and col2 as table names. insert_stmt = parsed[0].token_first().value.lower() == 'insert' stream = extract_from_part(parsed[0], stop_at_punctuation=insert_stmt) # Kludge: sqlparse mistakenly identifies insert statements as # function calls due to the parenthesized column list, e.g. interprets # "insert into foo (bar, baz)" as a function call to foo with arguments # (bar, baz). So don't allow any identifiers in insert statements # to have is_function=True identifiers = extract_table_identifiers(stream, allow_functions=not insert_stmt) return list(identifiers) def find_prev_keyword(sql): """ Find the last sql keyword in an SQL statement Returns the value of the last keyword, and the text of the query with everything after the last keyword stripped """ if not sql.strip(): return None, '' parsed = sqlparse.parse(sql)[0] flattened = list(parsed.flatten()) logical_operators = ('AND', 'OR', 'NOT', 'BETWEEN') for t in reversed(flattened): if t.value == '(' or (t.is_keyword and ( t.value.upper() not in logical_operators)): # Find the location of token t in the original parsed statement # We can't use parsed.token_index(t) because t may be a child token # inside a TokenList, in which case token_index thows an error # Minimal example: # p = sqlparse.parse('select * from foo where bar') # t = list(p.flatten())[-3] # The "Where" token # p.token_index(t) # Throws ValueError: not in list idx = flattened.index(t) # Combine the string values of all tokens in the original list # up to and including the target keyword token t, to produce a # query string with everything after the keyword token removed text = ''.join(tok.value for tok in flattened[:idx+1]) return t, text return None, '' # Postgresql dollar quote signs look like `$$` or `$tag$` dollar_quote_regex = re.compile(r'^\$[^$]*\$$') def is_open_quote(sql): """Returns true if the query contains an unclosed quote""" # parsed can contain one or more semi-colon separated commands parsed = sqlparse.parse(sql) return any(_parsed_is_open_quote(p) for p in parsed) def _parsed_is_open_quote(parsed): tokens = list(parsed.flatten()) i = 0 while i < len(tokens): tok = tokens[i] if tok.match(Token.Error, "'"): # An unmatched single quote return True elif (tok.ttype in Token.Name.Builtin and dollar_quote_regex.match(tok.value)): # Find the matching closing dollar quote sign for (j, tok2) in enumerate(tokens[i+1:], i+1): if tok2.match(Token.Name.Builtin, tok.value): # Found the matching closing quote - continue our scan for # open quotes thereafter i = j break else: # No matching dollar sign quote return True i += 1 return False if __name__ == '__main__': sql = 'select * from (select t. from tabl t' print (extract_tables(sql)) pgcli-0.20.1/pgcli/packages/pgliterals/0000755000076600000240000000000012621112650020306 5ustar amjithstaff00000000000000pgcli-0.20.1/pgcli/packages/pgliterals/__init__.py0000644000076600000240000000000212616534745022430 0ustar amjithstaff00000000000000 pgcli-0.20.1/pgcli/packages/pgliterals/main.py0000644000076600000240000000057112616534745021630 0ustar amjithstaff00000000000000import os import json root = os.path.dirname(__file__) literal_file = os.path.join(root, 'pgliterals.json') with open(literal_file) as f: literals = json.load(f) def get_literals(literal_type): """Where `literal_type` is one of 'keywords', 'functions', 'datatypes', returns a tuple of literal values of that type""" return tuple(literals[literal_type])pgcli-0.20.1/pgcli/packages/pgliterals/pgliterals.json0000644000076600000240000000574112616534745023377 0ustar amjithstaff00000000000000{ "keywords": [ "ACCESS", "ADD", "ALL", "ALTER TABLE", "AND", "ANY", "AS", "ASC", "AUDIT", "BETWEEN", "BY", "CASE", "CHAR", "CHECK", "CLUSTER", "COLUMN", "COMMENT", "COMPRESS", "CONCURRENTLY", "CONNECT", "COPY", "CREATE", "CURRENT", "DATABASE", "DATE", "DECIMAL", "DEFAULT", "DELETE FROM", "DELIMITER", "DESC", "DESCRIBE", "DISTINCT", "DROP", "ELSE", "ENCODING", "ESCAPE", "EXCLUSIVE", "EXISTS", "EXTENSION", "FILE", "FLOAT", "FOR", "FORMAT", "FORCE_QUOTE", "FORCE_NOT_NULL", "FREEZE", "FROM", "FULL", "FUNCTION", "GRANT", "GROUP BY", "HAVING", "HEADER", "IDENTIFIED", "IMMEDIATE", "IN", "INCREMENT", "INDEX", "INITIAL", "INSERT INTO", "INTEGER", "INTERSECT", "INTERVAL", "INTO", "IS", "JOIN", "LANGUAGE", "LEFT", "LEVEL", "LIKE", "LIMIT", "LOCK", "LONG", "MAXEXTENTS", "MINUS", "MLSLABEL", "MODE", "MODIFY", "NOAUDIT", "NOCOMPRESS", "NOT", "NOWAIT", "NULL", "NUMBER", "OIDS", "OF", "OFFLINE", "ON", "ONLINE", "OPTION", "OR", "ORDER BY", "OUTER", "OWNER", "PCTFREE", "PRIMARY", "PRIOR", "PRIVILEGES", "QUOTE", "RAW", "RENAME", "RESOURCE", "RETURNS", "REVOKE", "RIGHT", "ROW", "ROWID", "ROWNUM", "ROWS", "SELECT", "SESSION", "SET", "SHARE", "SIZE", "SMALLINT", "START", "SUCCESSFUL", "SYNONYM", "SYSDATE", "TABLE", "TEMPLATE", "THEN", "TO", "TRIGGER", "TRUNCATE", "UID", "UNION", "UNIQUE", "UPDATE", "USE", "USER", "USING", "VALIDATE", "VALUES", "VARCHAR", "VARCHAR2", "VIEW", "WHEN", "WHENEVER", "WHERE", "WITH" ], "functions": [ "AVG", "COUNT", "FIRST", "FORMAT", "LAST", "LCASE", "LEN", "MAX", "MIN", "MID", "NOW", "ROUND", "SUM", "TOP", "UCASE" ], "datatypes": [ "BIGINT", "BOOLEAN", "CHAR", "DATE", "DOUBLE PRECISION", "INT", "INTEGER", "NUMERIC", "REAL", "TEXT", "VARCHAR" ] }pgcli-0.20.1/pgcli/packages/sqlcompletion.py0000644000076600000240000003356112616534745021434 0ustar amjithstaff00000000000000from __future__ import print_function import sys import re import sqlparse from sqlparse.sql import Comparison, Identifier, Where from .parseutils import last_word, extract_tables, find_prev_keyword from pgspecial.main import parse_special_command PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 if PY3: string_types = str else: string_types = basestring def suggest_type(full_text, text_before_cursor): """Takes the full_text that is typed so far and also the text before the cursor to suggest completion type and scope. Returns a tuple with a type of entity ('table', 'column' etc) and a scope. A scope for a column category will be a list of tables. """ word_before_cursor = last_word(text_before_cursor, include='many_punctuations') identifier = None def strip_named_query(txt): """ This will strip "save named query" command in the beginning of the line: '\ns zzz SELECT * FROM abc' -> 'SELECT * FROM abc' ' \ns zzz SELECT * FROM abc' -> 'SELECT * FROM abc' """ pattern = re.compile(r'^\s*\\ns\s+[A-z0-9\-_]+\s+') if pattern.match(txt): txt = pattern.sub('', txt) return txt full_text = strip_named_query(full_text) text_before_cursor = strip_named_query(text_before_cursor) # If we've partially typed a word then word_before_cursor won't be an empty # string. In that case we want to remove the partially typed string before # sending it to the sqlparser. Otherwise the last token will always be the # partially typed string which renders the smart completion useless because # it will always return the list of keywords as completion. if word_before_cursor: if word_before_cursor[-1] == '(' or word_before_cursor[0] == '\\': parsed = sqlparse.parse(text_before_cursor) else: parsed = sqlparse.parse( text_before_cursor[:-len(word_before_cursor)]) # word_before_cursor may include a schema qualification, like # "schema_name.partial_name" or "schema_name.", so parse it # separately p = sqlparse.parse(word_before_cursor)[0] if p.tokens and isinstance(p.tokens[0], Identifier): identifier = p.tokens[0] else: parsed = sqlparse.parse(text_before_cursor) if len(parsed) > 1: # Multiple statements being edited -- isolate the current one by # cumulatively summing statement lengths to find the one that bounds the # current position current_pos = len(text_before_cursor) stmt_start, stmt_end = 0, 0 for statement in parsed: stmt_len = len(statement.to_unicode()) stmt_start, stmt_end = stmt_end, stmt_end + stmt_len if stmt_end >= current_pos: text_before_cursor = full_text[stmt_start:current_pos] full_text = full_text[stmt_start:] break elif parsed: # A single statement statement = parsed[0] else: # The empty string statement = None # Check for special commands and handle those separately if statement: # Be careful here because trivial whitespace is parsed as a statement, # but the statement won't have a first token tok1 = statement.token_first() if tok1 and tok1.value == '\\': return suggest_special(text_before_cursor) last_token = statement and statement.token_prev(len(statement.tokens)) or '' return suggest_based_on_last_token(last_token, text_before_cursor, full_text, identifier) def suggest_special(text): text = text.lstrip() cmd, _, arg = parse_special_command(text) if cmd == text: # Trying to complete the special command itself return [{'type': 'special'}] if cmd in ('\\c', '\\connect'): return [{'type': 'database'}] if cmd == '\\dn': return [{'type': 'schema'}] if arg: # Try to distinguish "\d name" from "\d schema.name" # Note that this will fail to obtain a schema name if wildcards are # used, e.g. "\d schema???.name" parsed = sqlparse.parse(arg)[0].tokens[0] try: schema = parsed.get_parent_name() except AttributeError: schema = None else: schema = None if cmd[1:] == 'd': # \d can descibe tables or views if schema: return [{'type': 'table', 'schema': schema}, {'type': 'view', 'schema': schema}] else: return [{'type': 'schema'}, {'type': 'table', 'schema': []}, {'type': 'view', 'schema': []}] elif cmd[1:] in ('dt', 'dv', 'df', 'dT'): rel_type = {'dt': 'table', 'dv': 'view', 'df': 'function', 'dT': 'datatype', }[cmd[1:]] if schema: return [{'type': rel_type, 'schema': schema}] else: return [{'type': 'schema'}, {'type': rel_type, 'schema': []}] if cmd in ['\\n', '\\ns', '\\nd']: return [{'type': 'namedquery'}] return [{'type': 'keyword'}, {'type': 'special'}] def suggest_based_on_last_token(token, text_before_cursor, full_text, identifier): if isinstance(token, string_types): token_v = token.lower() elif isinstance(token, Comparison): # If 'token' is a Comparison type such as # 'select * FROM abc a JOIN def d ON a.id = d.'. Then calling # token.value on the comparison type will only return the lhs of the # comparison. In this case a.id. So we need to do token.tokens to get # both sides of the comparison and pick the last token out of that # list. token_v = token.tokens[-1].value.lower() elif isinstance(token, Where): # sqlparse groups all tokens from the where clause into a single token # list. This means that token.value may be something like # 'where foo > 5 and '. We need to look "inside" token.tokens to handle # suggestions in complicated where clauses correctly prev_keyword, text_before_cursor = find_prev_keyword(text_before_cursor) return suggest_based_on_last_token(prev_keyword, text_before_cursor, full_text, identifier) elif isinstance(token, Identifier): # If the previous token is an identifier, we can suggest datatypes if # we're in a parenthesized column/field list, e.g.: # CREATE TABLE foo (Identifier # CREATE FUNCTION foo (Identifier # If we're not in a parenthesized list, the most likely scenario is the # user is about to specify an alias, e.g.: # SELECT Identifier # SELECT foo FROM Identifier prev_keyword, _ = find_prev_keyword(text_before_cursor) if prev_keyword and prev_keyword.value == '(': # Suggest datatypes return suggest_based_on_last_token('type', text_before_cursor, full_text, identifier) else: return [{'type': 'keyword'}] else: token_v = token.value.lower() if not token: return [{'type': 'keyword'}, {'type': 'special'}] elif token_v.endswith('('): p = sqlparse.parse(text_before_cursor)[0] if p.tokens and isinstance(p.tokens[-1], Where): # Four possibilities: # 1 - Parenthesized clause like "WHERE foo AND (" # Suggest columns/functions # 2 - Function call like "WHERE foo(" # Suggest columns/functions # 3 - Subquery expression like "WHERE EXISTS (" # Suggest keywords, in order to do a subquery # 4 - Subquery OR array comparison like "WHERE foo = ANY(" # Suggest columns/functions AND keywords. (If we wanted to be # really fancy, we could suggest only array-typed columns) column_suggestions = suggest_based_on_last_token('where', text_before_cursor, full_text, identifier) # Check for a subquery expression (cases 3 & 4) where = p.tokens[-1] prev_tok = where.token_prev(len(where.tokens) - 1) if isinstance(prev_tok, Comparison): # e.g. "SELECT foo FROM bar WHERE foo = ANY(" prev_tok = prev_tok.tokens[-1] prev_tok = prev_tok.value.lower() if prev_tok == 'exists': return [{'type': 'keyword'}] else: return column_suggestions # Get the token before the parens prev_tok = p.token_prev(len(p.tokens) - 1) if prev_tok and prev_tok.value and prev_tok.value.lower() == 'using': # tbl1 INNER JOIN tbl2 USING (col1, col2) tables = extract_tables(full_text) # suggest columns that are present in more than one table return [{'type': 'column', 'tables': tables, 'drop_unique': True}] elif p.token_first().value.lower() == 'select': # If the lparen is preceeded by a space chances are we're about to # do a sub-select. if last_word(text_before_cursor, 'all_punctuations').startswith('('): return [{'type': 'keyword'}] # We're probably in a function argument list return [{'type': 'column', 'tables': extract_tables(full_text)}] elif token_v in ('set', 'by', 'distinct'): return [{'type': 'column', 'tables': extract_tables(full_text)}] elif token_v in ('select', 'where', 'having'): # Check for a table alias or schema qualification parent = (identifier and identifier.get_parent_name()) or [] if parent: tables = extract_tables(full_text) tables = [t for t in tables if identifies(parent, t)] return [{'type': 'column', 'tables': tables}, {'type': 'table', 'schema': parent}, {'type': 'view', 'schema': parent}, {'type': 'function', 'schema': parent}] else: return [{'type': 'column', 'tables': extract_tables(full_text)}, {'type': 'function', 'schema': []}, {'type': 'keyword'}] elif (token_v.endswith('join') and token.is_keyword) or (token_v in ('copy', 'from', 'update', 'into', 'describe', 'truncate')): schema = (identifier and identifier.get_parent_name()) or [] # Suggest tables from either the currently-selected schema or the # public schema if no schema has been specified suggest = [{'type': 'table', 'schema': schema}] if not schema: # Suggest schemas suggest.insert(0, {'type': 'schema'}) # Only tables can be TRUNCATED, otherwise suggest views if token_v != 'truncate': suggest.append({'type': 'view', 'schema': schema}) # Suggest set-returning functions in the FROM clause if token_v == 'from' or (token_v.endswith('join') and token.is_keyword): suggest.append({'type': 'function', 'schema': schema, 'filter': 'is_set_returning'}) return suggest elif token_v in ('table', 'view', 'function'): # E.g. 'DROP FUNCTION ', 'ALTER TABLE ' rel_type = token_v schema = (identifier and identifier.get_parent_name()) or [] if schema: return [{'type': rel_type, 'schema': schema}] else: return [{'type': 'schema'}, {'type': rel_type, 'schema': []}] elif token_v == 'on': tables = extract_tables(full_text) # [(schema, table, alias), ...] parent = (identifier and identifier.get_parent_name()) or [] if parent: # "ON parent." # parent can be either a schema name or table alias tables = [t for t in tables if identifies(parent, t)] return [{'type': 'column', 'tables': tables}, {'type': 'table', 'schema': parent}, {'type': 'view', 'schema': parent}, {'type': 'function', 'schema': parent}] else: # ON # Use table alias if there is one, otherwise the table name aliases = [t.alias or t.name for t in tables] return [{'type': 'alias', 'aliases': aliases}] elif token_v in ('c', 'use', 'database', 'template'): # "\c ", "DROP DATABASE ", # "CREATE DATABASE WITH TEMPLATE " return [{'type': 'database'}] elif token_v == 'schema': # DROP SCHEMA schema_name return [{'type': 'schema'}] elif token_v.endswith(',') or token_v in ('=', 'and', 'or'): prev_keyword, text_before_cursor = find_prev_keyword(text_before_cursor) if prev_keyword: return suggest_based_on_last_token( prev_keyword, text_before_cursor, full_text, identifier) else: return [] elif token_v in ('type', '::'): # ALTER TABLE foo SET DATA TYPE bar # SELECT foo::bar # Note that tables are a form of composite type in postgresql, so # they're suggested here as well schema = (identifier and identifier.get_parent_name()) or [] suggestions = [{'type': 'datatype', 'schema': schema}, {'type': 'table', 'schema': schema}] if not schema: suggestions.append({'type': 'schema'}) return suggestions else: return [{'type': 'keyword'}] def identifies(id, ref): """Returns true if string `id` matches TableReference `ref`""" return id == ref.alias or id == ref.name or ( ref.schema and (id == ref.schema + '.' + ref.name)) pgcli-0.20.1/pgcli/packages/tabulate.py0000644000076600000240000011216712620265070020327 0ustar amjithstaff00000000000000# -*- coding: utf-8 -*- """Pretty-print tabular data.""" from __future__ import print_function from __future__ import unicode_literals from collections import namedtuple from decimal import Decimal from platform import python_version_tuple from wcwidth import wcswidth import re if python_version_tuple()[0] < "3": from itertools import izip_longest from functools import partial _none_type = type(None) _int_type = int _long_type = long _float_type = float _text_type = unicode _binary_type = str def _is_file(f): return isinstance(f, file) else: from itertools import zip_longest as izip_longest from functools import reduce, partial _none_type = type(None) _int_type = int _long_type = int _float_type = float _text_type = str _binary_type = bytes import io def _is_file(f): return isinstance(f, io.IOBase) __all__ = ["tabulate", "tabulate_formats", "simple_separated_format"] __version__ = "0.7.4" MIN_PADDING = 2 Line = namedtuple("Line", ["begin", "hline", "sep", "end"]) DataRow = namedtuple("DataRow", ["begin", "sep", "end"]) # A table structure is suppposed to be: # # --- lineabove --------- # headerrow # --- linebelowheader --- # datarow # --- linebewteenrows --- # ... (more datarows) ... # --- linebewteenrows --- # last datarow # --- linebelow --------- # # TableFormat's line* elements can be # # - either None, if the element is not used, # - or a Line tuple, # - or a function: [col_widths], [col_alignments] -> string. # # TableFormat's *row elements can be # # - either None, if the element is not used, # - or a DataRow tuple, # - or a function: [cell_values], [col_widths], [col_alignments] -> string. # # padding (an integer) is the amount of white space around data values. # # with_header_hide: # # - either None, to display all table elements unconditionally, # - or a list of elements not to be displayed if the table has column headers. # TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader", "linebetweenrows", "linebelow", "headerrow", "datarow", "padding", "with_header_hide"]) def _pipe_segment_with_colons(align, colwidth): """Return a segment of a horizontal line with optional colons which indicate column's alignment (as in `pipe` output format).""" w = colwidth if align in ["right", "decimal"]: return ('-' * (w - 1)) + ":" elif align == "center": return ":" + ('-' * (w - 2)) + ":" elif align == "left": return ":" + ('-' * (w - 1)) else: return '-' * w def _pipe_line_with_colons(colwidths, colaligns): """Return a horizontal line with optional colons to indicate column's alignment (as in `pipe` output format).""" segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)] return "|" + "|".join(segments) + "|" def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns): alignment = { "left": '', "right": 'align="right"| ', "center": 'align="center"| ', "decimal": 'align="right"| ' } # hard-coded padding _around_ align attribute and value together # rather than padding parameter which affects only the value values_with_attrs = [' ' + alignment.get(a, '') + c + ' ' for c, a in zip(cell_values, colaligns)] colsep = separator*2 return (separator + colsep.join(values_with_attrs)).rstrip() def _html_row_with_attrs(celltag, cell_values, colwidths, colaligns): alignment = { "left": '', "right": ' style="text-align: right;"', "center": ' style="text-align: center;"', "decimal": ' style="text-align: right;"' } values_with_attrs = ["<{0}{1}>{2}".format(celltag, alignment.get(a, ''), c) for c, a in zip(cell_values, colaligns)] return "" + "".join(values_with_attrs).rstrip() + "" def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False): alignment = { "left": "l", "right": "r", "center": "c", "decimal": "r" } tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns]) return "\n".join(["\\begin{tabular}{" + tabular_columns_fmt + "}", "\\toprule" if booktabs else "\hline"]) LATEX_ESCAPE_RULES = {r"&": r"\&", r"%": r"\%", r"$": r"\$", r"#": r"\#", r"_": r"\_", r"^": r"\^{}", r"{": r"\{", r"}": r"\}", r"~": r"\textasciitilde{}", "\\": r"\textbackslash{}", r"<": r"\ensuremath{<}", r">": r"\ensuremath{>}"} def _latex_row(cell_values, colwidths, colaligns): def escape_char(c): return LATEX_ESCAPE_RULES.get(c, c) escaped_values = ["".join(map(escape_char, cell)) for cell in cell_values] rowfmt = DataRow("", "&", "\\\\") return _build_simple_row(escaped_values, rowfmt) _table_formats = {"simple": TableFormat(lineabove=Line("", "-", " ", ""), linebelowheader=Line("", "-", " ", ""), linebetweenrows=None, linebelow=Line("", "-", " ", ""), headerrow=DataRow("", " ", ""), datarow=DataRow("", " ", ""), padding=0, with_header_hide=["lineabove", "linebelow"]), "plain": TableFormat(lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow("", " ", ""), datarow=DataRow("", " ", ""), padding=0, with_header_hide=None), "grid": TableFormat(lineabove=Line("+", "-", "+", "+"), linebelowheader=Line("+", "=", "+", "+"), linebetweenrows=Line("+", "-", "+", "+"), linebelow=Line("+", "-", "+", "+"), headerrow=DataRow("|", "|", "|"), datarow=DataRow("|", "|", "|"), padding=1, with_header_hide=None), "fancy_grid": TableFormat(lineabove=Line("╒", "═", "╤", "╕"), linebelowheader=Line("╞", "═", "╪", "╡"), linebetweenrows=Line("├", "─", "┼", "┤"), linebelow=Line("╘", "═", "╧", "╛"), headerrow=DataRow("│", "│", "│"), datarow=DataRow("│", "│", "│"), padding=1, with_header_hide=None), "pipe": TableFormat(lineabove=_pipe_line_with_colons, linebelowheader=_pipe_line_with_colons, linebetweenrows=None, linebelow=None, headerrow=DataRow("|", "|", "|"), datarow=DataRow("|", "|", "|"), padding=1, with_header_hide=["lineabove"]), "orgtbl": TableFormat(lineabove=None, linebelowheader=Line("|", "-", "+", "|"), linebetweenrows=None, linebelow=None, headerrow=DataRow("|", "|", "|"), datarow=DataRow("|", "|", "|"), padding=1, with_header_hide=None), "psql": TableFormat(lineabove=Line("+", "-", "+", "+"), linebelowheader=Line("|", "-", "+", "|"), linebetweenrows=None, linebelow=Line("+", "-", "+", "+"), headerrow=DataRow("|", "|", "|"), datarow=DataRow("|", "|", "|"), padding=1, with_header_hide=None), "rst": TableFormat(lineabove=Line("", "=", " ", ""), linebelowheader=Line("", "=", " ", ""), linebetweenrows=None, linebelow=Line("", "=", " ", ""), headerrow=DataRow("", " ", ""), datarow=DataRow("", " ", ""), padding=0, with_header_hide=None), "mediawiki": TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"", "", "", "\n|+ \n|-"), linebelowheader=Line("|-", "", "", ""), linebetweenrows=Line("|-", "", "", ""), linebelow=Line("|}", "", "", ""), headerrow=partial(_mediawiki_row_with_attrs, "!"), datarow=partial(_mediawiki_row_with_attrs, "|"), padding=0, with_header_hide=None), "html": TableFormat(lineabove=Line("", "", "", ""), linebelowheader=None, linebetweenrows=None, linebelow=Line("
", "", "", ""), headerrow=partial(_html_row_with_attrs, "th"), datarow=partial(_html_row_with_attrs, "td"), padding=0, with_header_hide=None), "latex": TableFormat(lineabove=_latex_line_begin_tabular, linebelowheader=Line("\\hline", "", "", ""), linebetweenrows=None, linebelow=Line("\\hline\n\\end{tabular}", "", "", ""), headerrow=_latex_row, datarow=_latex_row, padding=1, with_header_hide=None), "latex_booktabs": TableFormat(lineabove=partial(_latex_line_begin_tabular, booktabs=True), linebelowheader=Line("\\midrule", "", "", ""), linebetweenrows=None, linebelow=Line("\\bottomrule\n\\end{tabular}", "", "", ""), headerrow=_latex_row, datarow=_latex_row, padding=1, with_header_hide=None), "tsv": TableFormat(lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow("", "\t", ""), datarow=DataRow("", "\t", ""), padding=0, with_header_hide=None)} tabulate_formats = list(sorted(_table_formats.keys())) _invisible_codes = re.compile(r"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes _invisible_codes_bytes = re.compile(b"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes def simple_separated_format(separator): """Construct a simple TableFormat with columns separated by a separator. >>> tsv = simple_separated_format("\\t") ; \ tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23' True """ return TableFormat(None, None, None, None, headerrow=DataRow('', separator, ''), datarow=DataRow('', separator, ''), padding=0, with_header_hide=None) def _isconvertible(conv, string): try: n = conv(string) return True except (ValueError, TypeError): return False def _isnumber(string): """ >>> _isnumber("123.45") True >>> _isnumber("123") True >>> _isnumber("spam") False """ return _isconvertible(float, string) def _isint(string): """ >>> _isint("123") True >>> _isint("123.45") False """ return type(string) is _int_type or type(string) is _long_type or \ (isinstance(string, _binary_type) or isinstance(string, _text_type)) and \ _isconvertible(int, string) def _type(string, has_invisible=True): """The least generic type (type(None), int, float, str, unicode). >>> _type(None) is type(None) True >>> _type("foo") is type("") True >>> _type("1") is type(1) True >>> _type('\x1b[31m42\x1b[0m') is type(42) True >>> _type('\x1b[31m42\x1b[0m') is type(42) True """ if has_invisible and \ (isinstance(string, _text_type) or isinstance(string, _binary_type)): string = _strip_invisible(string) if string is None: return _none_type if isinstance(string, (bool, Decimal,)): return _text_type elif hasattr(string, "isoformat"): # datetime.datetime, date, and time return _text_type elif _isint(string): return int elif _isnumber(string): return float elif isinstance(string, _binary_type): return _binary_type else: return _text_type def _afterpoint(string): """Symbols after a decimal point, -1 if the string lacks the decimal point. >>> _afterpoint("123.45") 2 >>> _afterpoint("1001") -1 >>> _afterpoint("eggs") -1 >>> _afterpoint("123e45") 2 """ if _isnumber(string): if _isint(string): return -1 else: pos = string.rfind(".") pos = string.lower().rfind("e") if pos < 0 else pos if pos >= 0: return len(string) - pos - 1 else: return -1 # no point else: return -1 # not a number def _padleft(width, s, has_invisible=True): """Flush right. >>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430' True """ lwidth = width - wcswidth(_strip_invisible(s) if has_invisible else s) return ' ' * lwidth + s def _padright(width, s, has_invisible=True): """Flush left. >>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 ' True """ rwidth = width - wcswidth(_strip_invisible(s) if has_invisible else s) return s + ' ' * rwidth def _padboth(width, s, has_invisible=True): """Center string. >>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 ' True """ xwidth = width - wcswidth(_strip_invisible(s) if has_invisible else s) lwidth = xwidth // 2 rwidth = 0 if xwidth <= 0 else lwidth + xwidth % 2 return ' ' * lwidth + s + ' ' * rwidth def _strip_invisible(s): "Remove invisible ANSI color codes." if isinstance(s, _text_type): return re.sub(_invisible_codes, "", s) else: # a bytestring return re.sub(_invisible_codes_bytes, "", s) def _visible_width(s): """Visible width of a printed string. ANSI color codes are removed. >>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world") (5, 5) """ if isinstance(s, _text_type) or isinstance(s, _binary_type): return wcswidth(_strip_invisible(s)) else: return wcswidth(_text_type(s)) def _align_column(strings, alignment, minwidth=0, has_invisible=True): """[string] -> [padded_string] >>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal"))) [' 12.345 ', '-1234.5 ', ' 1.23 ', ' 1234.5 ', ' 1e+234 ', ' 1.0e234'] >>> list(map(str,_align_column(['123.4', '56.7890'], None))) ['123.4', '56.7890'] """ if alignment == "right": padfn = _padleft elif alignment == "center": padfn = _padboth elif alignment == "decimal": decimals = [_afterpoint(s) for s in strings] maxdecimals = max(decimals) strings = [s + (maxdecimals - decs) * " " for s, decs in zip(strings, decimals)] padfn = _padleft elif not alignment: return strings else: padfn = _padright if has_invisible: width_fn = _visible_width else: width_fn = wcswidth maxwidth = max(max(map(width_fn, strings)), minwidth) padded_strings = [padfn(maxwidth, s, has_invisible) for s in strings] return padded_strings def _more_generic(type1, type2): types = { _none_type: 0, int: 1, float: 2, _binary_type: 3, _text_type: 4 } invtypes = { 4: _text_type, 3: _binary_type, 2: float, 1: int, 0: _none_type } moregeneric = max(types.get(type1, 4), types.get(type2, 4)) return invtypes[moregeneric] def _column_type(strings, has_invisible=True): """The least generic type all column values are convertible to. >>> _column_type(["1", "2"]) is _int_type True >>> _column_type(["1", "2.3"]) is _float_type True >>> _column_type(["1", "2.3", "four"]) is _text_type True >>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type True >>> _column_type([None, "brux"]) is _text_type True >>> _column_type([1, 2, None]) is _int_type True >>> import datetime as dt >>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type True """ types = [_type(s, has_invisible) for s in strings ] return reduce(_more_generic, types, int) def _format(val, valtype, floatfmt, missingval=""): """Format a value accoding to its type. Unicode is supported: >>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \ tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \ good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \ tabulate(tbl, headers=hrow) == good_result True """ if val is None: return missingval if valtype in [int, _text_type]: return "{0}".format(val) elif valtype is _binary_type: try: return _text_type(val, "ascii") except TypeError: return _text_type(val) elif valtype is float: return format(float(val), floatfmt) else: return "{0}".format(val) def _align_header(header, alignment, width): if alignment == "left": return _padright(width, header) elif alignment == "center": return _padboth(width, header) elif not alignment: return "{0}".format(header) else: return _padleft(width, header) def _normalize_tabular_data(tabular_data, headers): """Transform a supported data type to a list of lists, and a list of headers. Supported tabular data types: * list-of-lists or another iterable of iterables * list of named tuples (usually used with headers="keys") * list of dicts (usually used with headers="keys") * list of OrderedDicts (usually used with headers="keys") * 2D NumPy arrays * NumPy record arrays (usually used with headers="keys") * dict of iterables (usually used with headers="keys") * pandas.DataFrame (usually used with headers="keys") The first row can be used as headers if headers="firstrow", column indices can be used as headers if headers="keys". """ if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"): # dict-like and pandas.DataFrame? if hasattr(tabular_data.values, "__call__"): # likely a conventional dict keys = tabular_data.keys() rows = list(izip_longest(*tabular_data.values())) # columns have to be transposed elif hasattr(tabular_data, "index"): # values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0) keys = tabular_data.keys() vals = tabular_data.values # values matrix doesn't need to be transposed names = tabular_data.index rows = [[v]+list(row) for v,row in zip(names, vals)] else: raise ValueError("tabular data doesn't appear to be a dict or a DataFrame") if headers == "keys": headers = list(map(_text_type,keys)) # headers should be strings else: # it's a usual an iterable of iterables, or a NumPy array rows = list(tabular_data) if (headers == "keys" and hasattr(tabular_data, "dtype") and getattr(tabular_data.dtype, "names")): # numpy record array headers = tabular_data.dtype.names elif (headers == "keys" and len(rows) > 0 and isinstance(rows[0], tuple) and hasattr(rows[0], "_fields")): # namedtuple headers = list(map(_text_type, rows[0]._fields)) elif (len(rows) > 0 and isinstance(rows[0], dict)): # dict or OrderedDict uniq_keys = set() # implements hashed lookup keys = [] # storage for set if headers == "firstrow": firstdict = rows[0] if len(rows) > 0 else {} keys.extend(firstdict.keys()) uniq_keys.update(keys) rows = rows[1:] for row in rows: for k in row.keys(): #Save unique items in input order if k not in uniq_keys: keys.append(k) uniq_keys.add(k) if headers == 'keys': headers = keys elif isinstance(headers, dict): # a dict of headers for a list of dicts headers = [headers.get(k, k) for k in keys] headers = list(map(_text_type, headers)) elif headers == "firstrow": if len(rows) > 0: headers = [firstdict.get(k, k) for k in keys] headers = list(map(_text_type, headers)) else: headers = [] elif headers: raise ValueError('headers for a list of dicts is not a dict or a keyword') rows = [[row.get(k) for k in keys] for row in rows] elif headers == "keys" and len(rows) > 0: # keys are column indices headers = list(map(_text_type, range(len(rows[0])))) # take headers from the first row if necessary if headers == "firstrow" and len(rows) > 0: headers = list(map(_text_type, rows[0])) # headers should be strings rows = rows[1:] headers = list(map(_text_type,headers)) rows = list(map(list,rows)) # pad with empty headers for initial columns if necessary if headers and len(rows) > 0: nhs = len(headers) ncols = len(rows[0]) if nhs < ncols: headers = [""]*(ncols - nhs) + headers return rows, headers def tabulate(tabular_data, headers=[], tablefmt="simple", floatfmt="g", numalign="decimal", stralign="left", missingval=""): """Format a fixed width table for pretty printing. >>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]])) --- --------- 1 2.34 -56 8.999 2 10001 --- --------- The first required argument (`tabular_data`) can be a list-of-lists (or another iterable of iterables), a list of named tuples, a dictionary of iterables, an iterable of dictionaries, a two-dimensional NumPy array, NumPy record array, or a Pandas' dataframe. Table headers ------------- To print nice column headers, supply the second argument (`headers`): - `headers` can be an explicit list of column headers - if `headers="firstrow"`, then the first row of data is used - if `headers="keys"`, then dictionary keys or column indices are used Otherwise a headerless table is produced. If the number of headers is less than the number of columns, they are supposed to be names of the last columns. This is consistent with the plain-text format of R and Pandas' dataframes. >>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]], ... headers="firstrow")) sex age ----- ----- ----- Alice F 24 Bob M 19 Column alignment ---------------- `tabulate` tries to detect column types automatically, and aligns the values properly. By default it aligns decimal points of the numbers (or flushes integer numbers to the right), and flushes everything else to the left. Possible column alignments (`numalign`, `stralign`) are: "right", "center", "left", "decimal" (only for `numalign`), and None (to disable alignment). Table formats ------------- `floatfmt` is a format specification used for columns which contain numeric data with a decimal point. `None` values are replaced with a `missingval` string: >>> print(tabulate([["spam", 1, None], ... ["eggs", 42, 3.14], ... ["other", None, 2.7]], missingval="?")) ----- -- ---- spam 1 ? eggs 42 3.14 other ? 2.7 ----- -- ---- Various plain-text table formats (`tablefmt`) are supported: 'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki', 'latex', and 'latex_booktabs'. Variable `tabulate_formats` contains the list of currently supported formats. "plain" format doesn't use any pseudographics to draw tables, it separates columns with a double space: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "plain")) strings numbers spam 41.9999 eggs 451 >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain")) spam 41.9999 eggs 451 "simple" format is like Pandoc simple_tables: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "simple")) strings numbers --------- --------- spam 41.9999 eggs 451 >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple")) ---- -------- spam 41.9999 eggs 451 ---- -------- "grid" is similar to tables produced by Emacs table.el package or Pandoc grid_tables: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "grid")) +-----------+-----------+ | strings | numbers | +===========+===========+ | spam | 41.9999 | +-----------+-----------+ | eggs | 451 | +-----------+-----------+ >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid")) +------+----------+ | spam | 41.9999 | +------+----------+ | eggs | 451 | +------+----------+ "fancy_grid" draws a grid using box-drawing characters: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "fancy_grid")) ╒═══════════╤═══════════╕ │ strings │ numbers │ ╞═══════════╪═══════════╡ │ spam │ 41.9999 │ ├───────────┼───────────┤ │ eggs │ 451 │ ╘═══════════╧═══════════╛ "pipe" is like tables in PHP Markdown Extra extension or Pandoc pipe_tables: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "pipe")) | strings | numbers | |:----------|----------:| | spam | 41.9999 | | eggs | 451 | >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe")) |:-----|---------:| | spam | 41.9999 | | eggs | 451 | "orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They are slightly different from "pipe" format by not using colons to define column alignment, and using a "+" sign to indicate line intersections: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "orgtbl")) | strings | numbers | |-----------+-----------| | spam | 41.9999 | | eggs | 451 | >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl")) | spam | 41.9999 | | eggs | 451 | "rst" is like a simple table format from reStructuredText; please note that reStructuredText accepts also "grid" tables: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "rst")) ========= ========= strings numbers ========= ========= spam 41.9999 eggs 451 ========= ========= >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst")) ==== ======== spam 41.9999 eggs 451 ==== ======== "mediawiki" produces a table markup used in Wikipedia and on other MediaWiki-based sites: >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]], ... headers="firstrow", tablefmt="mediawiki")) {| class="wikitable" style="text-align: left;" |+ |- ! strings !! align="right"| numbers |- | spam || align="right"| 41.9999 |- | eggs || align="right"| 451 |} "html" produces HTML markup: >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]], ... headers="firstrow", tablefmt="html"))
strings numbers
spam 41.9999
eggs 451
"latex" produces a tabular environment of LaTeX document markup: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex")) \\begin{tabular}{lr} \\hline spam & 41.9999 \\\\ eggs & 451 \\\\ \\hline \\end{tabular} "latex_booktabs" produces a tabular environment of LaTeX document markup using the booktabs.sty package: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs")) \\begin{tabular}{lr} \\toprule spam & 41.9999 \\\\ eggs & 451 \\\\ \\bottomrule \end{tabular} Also returns a tuple of the raw rows pulled from tabular_data """ if tabular_data is None: tabular_data = [] list_of_lists, headers = _normalize_tabular_data(tabular_data, headers) # optimization: look for ANSI control codes once, # enable smart width functions only if a control code is found plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] + \ ['\t'.join(map(_text_type, row)) for row in list_of_lists]) has_invisible = re.search(_invisible_codes, plain_text) if has_invisible: width_fn = _visible_width else: width_fn = wcswidth # format rows and columns, convert numeric values to strings cols = list(zip(*list_of_lists)) coltypes = list(map(_column_type, cols)) cols = [[_format(v, ct, floatfmt, missingval) for v in c] for c,ct in zip(cols, coltypes)] # align columns aligns = [numalign if ct in [int,float] else stralign for ct in coltypes] minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0]*len(cols) cols = [_align_column(c, a, minw, has_invisible) for c, a, minw in zip(cols, aligns, minwidths)] if headers: # align headers and add headers t_cols = cols or [['']] * len(headers) t_aligns = aligns or [stralign] * len(headers) minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, t_cols)] headers = [_align_header(h, a, minw) for h, a, minw in zip(headers, t_aligns, minwidths)] rows = list(zip(*cols)) else: minwidths = [width_fn(c[0]) for c in cols] rows = list(zip(*cols)) if not isinstance(tablefmt, TableFormat): tablefmt = _table_formats.get(tablefmt, _table_formats["simple"]) return _format_table(tablefmt, headers, rows, minwidths, aligns), rows def _build_simple_row(padded_cells, rowfmt): "Format row according to DataRow format without padding." begin, sep, end = rowfmt return (begin + sep.join(padded_cells) + end).rstrip() def _build_row(padded_cells, colwidths, colaligns, rowfmt): "Return a string which represents a row of data cells." if not rowfmt: return None if hasattr(rowfmt, "__call__"): return rowfmt(padded_cells, colwidths, colaligns) else: return _build_simple_row(padded_cells, rowfmt) def _build_line(colwidths, colaligns, linefmt): "Return a string which represents a horizontal line." if not linefmt: return None if hasattr(linefmt, "__call__"): return linefmt(colwidths, colaligns) else: begin, fill, sep, end = linefmt cells = [fill*w for w in colwidths] return _build_simple_row(cells, (begin, sep, end)) def _pad_row(cells, padding): if cells: pad = " "*padding padded_cells = [pad + cell + pad for cell in cells] return padded_cells else: return cells def _format_table(fmt, headers, rows, colwidths, colaligns): """Produce a plain-text representation of the table.""" lines = [] hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else [] pad = fmt.padding headerrow = fmt.headerrow padded_widths = [(w + 2*pad) for w in colwidths] padded_headers = _pad_row(headers, pad) padded_rows = [_pad_row(row, pad) for row in rows] if fmt.lineabove and "lineabove" not in hidden: lines.append(_build_line(padded_widths, colaligns, fmt.lineabove)) if padded_headers: lines.append(_build_row(padded_headers, padded_widths, colaligns, headerrow)) if fmt.linebelowheader and "linebelowheader" not in hidden: lines.append(_build_line(padded_widths, colaligns, fmt.linebelowheader)) if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden: # initial rows with a line below for row in padded_rows[:-1]: lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow)) lines.append(_build_line(padded_widths, colaligns, fmt.linebetweenrows)) # the last row without a line below lines.append(_build_row(padded_rows[-1], padded_widths, colaligns, fmt.datarow)) else: for row in padded_rows: lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow)) if fmt.linebelow and "linebelow" not in hidden: lines.append(_build_line(padded_widths, colaligns, fmt.linebelow)) return "\n".join(lines) def _main(): """\ Usage: tabulate [options] [FILE ...] Pretty-print tabular data. See also https://bitbucket.org/astanin/python-tabulate FILE a filename of the file with tabular data; if "-" or missing, read data from stdin. Options: -h, --help show this message -1, --header use the first row of data as a table header -s REGEXP, --sep REGEXP use a custom column separator (default: whitespace) -f FMT, --format FMT set output table format; supported formats: plain, simple, grid, fancy_grid, pipe, orgtbl, rst, mediawiki, html, latex, latex_booktabs, tsv (default: simple) """ import getopt import sys import textwrap usage = textwrap.dedent(_main.__doc__) try: opts, args = getopt.getopt(sys.argv[1:], "h1f:s:", ["help", "header", "format", "separator"]) except getopt.GetoptError as e: print(e) print(usage) sys.exit(2) headers = [] tablefmt = "simple" sep = r"\s+" for opt, value in opts: if opt in ["-1", "--header"]: headers = "firstrow" elif opt in ["-f", "--format"]: if value not in tabulate_formats: print("%s is not a supported table format" % value) print(usage) sys.exit(3) tablefmt = value elif opt in ["-s", "--sep"]: sep = value elif opt in ["-h", "--help"]: print(usage) sys.exit(0) files = [sys.stdin] if not args else args for f in files: if f == "-": f = sys.stdin if _is_file(f): _pprint_file(f, headers=headers, tablefmt=tablefmt, sep=sep) else: with open(f) as fobj: _pprint_file(fobj) def _pprint_file(fobject, headers, tablefmt, sep): rows = fobject.readlines() table = [re.split(sep, r.rstrip()) for r in rows] print(tabulate(table, headers, tablefmt)) if __name__ == "__main__": _main() pgcli-0.20.1/pgcli/pgbuffer.py0000644000076600000240000000261112616534745016555 0ustar amjithstaff00000000000000from prompt_toolkit.buffer import Buffer from prompt_toolkit.filters import Condition from .packages.parseutils import is_open_quote class PGBuffer(Buffer): def __init__(self, always_multiline, *args, **kwargs): self.always_multiline = always_multiline @Condition def is_multiline(): doc = self.document return self.always_multiline and not _multiline_exception(doc.text) super(self.__class__, self).__init__(*args, is_multiline=is_multiline, tempfile_suffix='.sql', **kwargs) def _is_complete(sql): # A complete command is an sql statement that ends with a semicolon, unless # there's an open quote surrounding it, as is common when writing a # CREATE FUNCTION command return sql.endswith(';') and not is_open_quote(sql) def _multiline_exception(text): text = text.strip() return (text.startswith('\\') or # Special Command text.endswith('\e') or # Ended with \e which should launch the editor. _is_complete(text) or # A complete SQL command (text == 'exit') or # Exit doesn't need semi-colon (text == 'quit') or # Quit doesn't need semi-colon (text == ':q') or # To all the vim fans out there (text == '') # Just a plain enter without any text ) pgcli-0.20.1/pgcli/pgclirc0000644000076600000240000000567612620265100015744 0ustar amjithstaff00000000000000# vi: ft=dosini [main] # Enables context sensitive auto-completion. If this is disabled the all # possible completions will be listed. smart_completion = True # Display the completions in several columns. (More completions will be # visible.) wider_completion_menu = False # Multi-line mode allows breaking up the sql statements into multiple lines. If # this is set to True, then the end of the statements must have a semi-colon. # If this is set to False then sql statements can't be split into multiple # lines. End of line (return) is considered as the end of the statement. multi_line = False # log_file location. # In Unix/Linux: ~/.config/pgcli/log # In Windows: %USERPROFILE%\AppData\Local\dbcli\pgcli\log # %USERPROFILE% is typically C:\Users\{username} log_file = default # history_file location. # In Unix/Linux: ~/.config/pgcli/history # In Windows: %USERPROFILE%\AppData\Local\dbcli\pgcli\history # %USERPROFILE% is typically C:\Users\{username} history_file = default # Default log level. Possible values: "CRITICAL", "ERROR", "WARNING", "INFO" # and "DEBUG". log_level = INFO # Timing of sql statments and table rendering. timing = True # Table format. Possible values: psql, plain, simple, grid, fancy_grid, pipe, # orgtbl, rst, mediawiki, html, latex, latex_booktabs. # Recommended: psql, fancy_grid and grid. table_format = psql # Syntax Style. Possible values: manni, igor, xcode, vim, autumn, vs, rrt, # native, perldoc, borland, tango, emacs, friendly, monokai, paraiso-dark, # colorful, murphy, bw, pastie, paraiso-light, trac, default, fruity syntax_style = default # Keybindings: # When Vi mode is enabled you can use modal editing features offered by Vi in the REPL. # When Vi mode is disabled emacs keybindings such as Ctrl-A for home and Ctrl-E # for end are available in the REPL. vi = False # Error handling # When one of multiple SQL statements causes an error, choose to either # continue executing the remaining statements, or stopping # Possible values "STOP" or "RESUME" on_error = STOP # Custom colors for the completion menu, toolbar, etc. [colors] Token.Menu.Completions.Completion.Current = 'bg:#ffffff #000000' Token.Menu.Completions.Completion = 'bg:#008888 #ffffff' Token.Menu.Completions.Meta.Current = 'bg:#44aaaa #000000' Token.Menu.Completions.Meta = 'bg:#448888 #ffffff' Token.Menu.Completions.MultiColumnMeta = 'bg:#aaffff #000000' Token.Menu.Completions.ProgressButton = 'bg:#003333' Token.Menu.Completions.ProgressBar = 'bg:#00aaaa' Token.SelectedText = '#ffffff bg:#6666aa' Token.SearchMatch = '#ffffff bg:#4444aa' Token.SearchMatch.Current = '#ffffff bg:#44aa44' Token.Toolbar = 'bg:#222222 #aaaaaa' Token.Toolbar.Off = 'bg:#222222 #888888' Token.Toolbar.On = 'bg:#222222 #ffffff' Token.Toolbar.Search = 'noinherit bold' Token.Toolbar.Search.Text = 'nobold' Token.Toolbar.System = 'noinherit bold' Token.Toolbar.Arg = 'noinherit bold' Token.Toolbar.Arg.Text = 'nobold' # Named queries are queries you can execute by name. [named queries] pgcli-0.20.1/pgcli/pgcompleter.py0000644000076600000240000004676312621112637017302 0ustar amjithstaff00000000000000from __future__ import print_function, unicode_literals import logging import re import itertools import operator from pgspecial.namedqueries import NamedQueries from prompt_toolkit.completion import Completer, Completion from .packages.sqlcompletion import suggest_type from .packages.parseutils import last_word from .packages.pgliterals.main import get_literals from .config import load_config, config_location try: from collections import Counter except ImportError: # python 2.6 from .packages.counter import Counter _logger = logging.getLogger(__name__) NamedQueries.instance = NamedQueries.from_config( load_config(config_location() + 'config')) class PGCompleter(Completer): keywords = get_literals('keywords') functions = get_literals('functions') datatypes = get_literals('datatypes') def __init__(self, smart_completion=True, pgspecial=None): super(PGCompleter, self).__init__() self.smart_completion = smart_completion self.pgspecial = pgspecial self.reserved_words = set() for x in self.keywords: self.reserved_words.update(x.split()) self.name_pattern = re.compile("^[_a-z][_a-z0-9\$]*$") self.databases = [] self.dbmetadata = {'tables': {}, 'views': {}, 'functions': {}, 'datatypes': {}} self.search_path = [] self.all_completions = set(self.keywords + self.functions) def escape_name(self, name): if name and ((not self.name_pattern.match(name)) or (name.upper() in self.reserved_words) or (name.upper() in self.functions)): name = '"%s"' % name return name def unescape_name(self, name): """ Unquote a string.""" if name and name[0] == '"' and name[-1] == '"': name = name[1:-1] return name def escaped_names(self, names): return [self.escape_name(name) for name in names] def extend_database_names(self, databases): databases = self.escaped_names(databases) self.databases.extend(databases) def extend_keywords(self, additional_keywords): self.keywords.extend(additional_keywords) self.all_completions.update(additional_keywords) def extend_schemata(self, schemata): # schemata is a list of schema names schemata = self.escaped_names(schemata) metadata = self.dbmetadata['tables'] for schema in schemata: metadata[schema] = {} # dbmetadata.values() are the 'tables' and 'functions' dicts for metadata in self.dbmetadata.values(): for schema in schemata: metadata[schema] = {} self.all_completions.update(schemata) def extend_relations(self, data, kind): """ extend metadata for tables or views :param data: list of (schema_name, rel_name) tuples :param kind: either 'tables' or 'views' :return: """ data = [self.escaped_names(d) for d in data] # dbmetadata['tables']['schema_name']['table_name'] should be a list of # column names. Default to an asterisk metadata = self.dbmetadata[kind] for schema, relname in data: try: metadata[schema][relname] = ['*'] except KeyError: _logger.error('%r %r listed in unrecognized schema %r', kind, relname, schema) self.all_completions.add(relname) def extend_columns(self, column_data, kind): """ extend column metadata :param column_data: list of (schema_name, rel_name, column_name) tuples :param kind: either 'tables' or 'views' :return: """ column_data = [self.escaped_names(d) for d in column_data] metadata = self.dbmetadata[kind] for schema, relname, column in column_data: metadata[schema][relname].append(column) self.all_completions.add(column) def extend_functions(self, func_data): # func_data is a list of function metadata namedtuples # with fields schema_name, func_name, arg_list, result, # is_aggregate, is_window, is_set_returning # dbmetadata['schema_name']['functions']['function_name'] should return # the function metadata namedtuple for the corresponding function metadata = self.dbmetadata['functions'] for f in func_data: schema, func = self.escaped_names([f.schema_name, f.func_name]) if func in metadata[schema]: metadata[schema][func].append(f) else: metadata[schema][func] = [f] self.all_completions.add(func) def extend_datatypes(self, type_data): # dbmetadata['datatypes'][schema_name][type_name] should store type # metadata, such as composite type field names. Currently, we're not # storing any metadata beyond typename, so just store None meta = self.dbmetadata['datatypes'] for t in type_data: schema, type_name = self.escaped_names(t) meta[schema][type_name] = None self.all_completions.add(type_name) def set_search_path(self, search_path): self.search_path = self.escaped_names(search_path) def reset_completions(self): self.databases = [] self.special_commands = [] self.search_path = [] self.dbmetadata = {'tables': {}, 'views': {}, 'functions': {}, 'datatypes': {}} self.all_completions = set(self.keywords + self.functions) def find_matches(self, text, collection, start_only=False, fuzzy=True, meta=None, meta_collection=None): """Find completion matches for the given text. Given the user's input text and a collection of available completions, find completions matching the last word of the text. If `start_only` is True, the text will match an available completion only at the beginning. Otherwise, a completion is considered a match if the text appears anywhere within it. yields prompt_toolkit Completion instances for any matches found in the collection of available completions. """ text = last_word(text, include='most_punctuations').lower() # Construct a `_match` function for either fuzzy or non-fuzzy matching # The match function returns a 2-tuple used for sorting the matches, # or None if the item doesn't match if fuzzy: regex = '.*?'.join(map(re.escape, text)) pat = re.compile('(%s)' % regex) def _match(item): r = pat.search(self.unescape_name(item)) if r: return len(r.group()), r.start() else: match_end_limit = len(text) if start_only else None def _match(item): match_point = item.lower().find(text, 0, match_end_limit) if match_point >= 0: return match_point, 0 if meta_collection: # Each possible completion in the collection has a corresponding # meta-display string collection = zip(collection, meta_collection) else: # All completions have an identical meta collection = zip(collection, itertools.repeat(meta)) completions = [] for item, meta in collection: sort_key = _match(item) if sort_key: if meta and len(meta) > 50: # Truncate meta-text to 50 characters, if necessary meta = meta[:47] + u'...' completions.append((sort_key, item, meta)) return [Completion(item, -len(text), display_meta=meta) for sort_key, item, meta in sorted(completions)] def get_completions(self, document, complete_event, smart_completion=None): word_before_cursor = document.get_word_before_cursor(WORD=True) if smart_completion is None: smart_completion = self.smart_completion # If smart_completion is off then match any word that starts with # 'word_before_cursor'. if not smart_completion: return self.find_matches(word_before_cursor, self.all_completions, start_only=True, fuzzy=False) completions = [] suggestions = suggest_type(document.text, document.text_before_cursor) for suggestion in suggestions: _logger.debug('Suggestion type: %r', suggestion['type']) if suggestion['type'] == 'column': tables = suggestion['tables'] _logger.debug("Completion column scope: %r", tables) scoped_cols = self.populate_scoped_cols(tables) if suggestion.get('drop_unique'): # drop_unique is used for 'tb11 JOIN tbl2 USING (...' which # should suggest only columns that appear in more than one # table scoped_cols = [col for (col, count) in Counter(scoped_cols).items() if count > 1 and col != '*'] cols = self.find_matches(word_before_cursor, scoped_cols, meta='column') completions.extend(cols) elif suggestion['type'] == 'function': if suggestion.get('filter') == 'is_set_returning': # Only suggest set-returning functions filt = operator.attrgetter('is_set_returning') funcs = self.populate_functions(suggestion['schema'], filt) else: funcs = self.populate_schema_objects( suggestion['schema'], 'functions') # Function overloading means we way have multiple functions # of the same name at this point, so keep unique names only funcs = set(funcs) funcs = self.find_matches(word_before_cursor, funcs, meta='function') completions.extend(funcs) if not suggestion['schema'] and 'filter' not in suggestion: # also suggest hardcoded functions using startswith # matching predefined_funcs = self.find_matches(word_before_cursor, self.functions, start_only=True, fuzzy=False, meta='function') completions.extend(predefined_funcs) elif suggestion['type'] == 'schema': schema_names = self.dbmetadata['tables'].keys() # Unless we're sure the user really wants them, hide schema # names starting with pg_, which are mostly temporary schemas if not word_before_cursor.startswith('pg_'): schema_names = [s for s in schema_names if not s.startswith('pg_')] schema_names = self.find_matches(word_before_cursor, schema_names, meta='schema') completions.extend(schema_names) elif suggestion['type'] == 'table': tables = self.populate_schema_objects( suggestion['schema'], 'tables') # Unless we're sure the user really wants them, don't suggest # the pg_catalog tables that are implicitly on the search path if not suggestion['schema'] and ( not word_before_cursor.startswith('pg_')): tables = [t for t in tables if not t.startswith('pg_')] tables = self.find_matches(word_before_cursor, tables, meta='table') completions.extend(tables) elif suggestion['type'] == 'view': views = self.populate_schema_objects( suggestion['schema'], 'views') if not suggestion['schema'] and ( not word_before_cursor.startswith('pg_')): views = [v for v in views if not v.startswith('pg_')] views = self.find_matches(word_before_cursor, views, meta='view') completions.extend(views) elif suggestion['type'] == 'alias': aliases = suggestion['aliases'] aliases = self.find_matches(word_before_cursor, aliases, meta='table alias') completions.extend(aliases) elif suggestion['type'] == 'database': dbs = self.find_matches(word_before_cursor, self.databases, meta='database') completions.extend(dbs) elif suggestion['type'] == 'keyword': keywords = self.find_matches(word_before_cursor, self.keywords, start_only=True, fuzzy=False, meta='keyword') completions.extend(keywords) elif suggestion['type'] == 'special': if not self.pgspecial: continue commands = self.pgspecial.commands cmd_names = commands.keys() desc = [commands[cmd].description for cmd in cmd_names] special = self.find_matches(word_before_cursor, cmd_names, start_only=True, fuzzy=False, meta_collection=desc) completions.extend(special) elif suggestion['type'] == 'datatype': # suggest custom datatypes types = self.populate_schema_objects( suggestion['schema'], 'datatypes') types = self.find_matches(word_before_cursor, types, meta='datatype') completions.extend(types) if not suggestion['schema']: # Also suggest hardcoded types types = self.find_matches(word_before_cursor, self.datatypes, start_only=True, fuzzy=False, meta='datatype') completions.extend(types) elif suggestion['type'] == 'namedquery': queries = self.find_matches( word_before_cursor, NamedQueries.instance.list(), start_only=False, fuzzy=True, meta='named query') completions.extend(queries) return completions def populate_scoped_cols(self, scoped_tbls): """ Find all columns in a set of scoped_tables :param scoped_tbls: list of TableReference namedtuples :return: list of column names """ columns = [] meta = self.dbmetadata for tbl in scoped_tbls: if tbl.schema: # A fully qualified schema.relname reference schema = self.escape_name(tbl.schema) relname = self.escape_name(tbl.name) if tbl.is_function: # Return column names from a set-returning function try: # Get an array of FunctionMetadata objects functions = meta['functions'][schema][relname] except KeyError: # No such function name continue for func in functions: # func is a FunctionMetadata object columns.extend(func.fieldnames()) else: # We don't know if schema.relname is a table or view. Since # tables and views cannot share the same name, we can check # one at a time try: columns.extend(meta['tables'][schema][relname]) # Table exists, so don't bother checking for a view continue except KeyError: pass try: columns.extend(meta['views'][schema][relname]) except KeyError: pass else: # Schema not specified, so traverse the search path looking for # a table or view that matches. Note that in order to get proper # shadowing behavior, we need to check both views and tables for # each schema before checking the next schema for schema in self.search_path: relname = self.escape_name(tbl.name) if tbl.is_function: try: functions = meta['functions'][schema][relname] except KeyError: continue for func in functions: # func is a FunctionMetadata object columns.extend(func.fieldnames()) else: try: columns.extend(meta['tables'][schema][relname]) break except KeyError: pass try: columns.extend(meta['views'][schema][relname]) break except KeyError: pass return columns def populate_schema_objects(self, schema, obj_type): """Returns list of tables or functions for a (optional) schema""" metadata = self.dbmetadata[obj_type] if schema: try: objects = metadata[schema].keys() except KeyError: # schema doesn't exist objects = [] else: schemas = self.search_path objects = [obj for schema in schemas for obj in metadata[schema].keys()] return objects def populate_functions(self, schema, filter_func): """Returns a list of function names filter_func is a function that accepts a FunctionMetadata namedtuple and returns a boolean indicating whether that function should be kept or discarded """ metadata = self.dbmetadata['functions'] # Because of multiple dispatch, we can have multiple functions # with the same name, which is why `for meta in metas` is necessary # in the comprehensions below if schema: try: return [func for (func, metas) in metadata[schema].items() for meta in metas if filter_func(meta)] except KeyError: return [] else: return [func for schema in self.search_path for (func, metas) in metadata[schema].items() for meta in metas if filter_func(meta)] pgcli-0.20.1/pgcli/pgexecute.py0000644000076600000240000003403412620265070016735 0ustar amjithstaff00000000000000import traceback import logging import psycopg2 import psycopg2.extras import psycopg2.extensions as ext import sqlparse import pgspecial as special from .packages.function_metadata import FunctionMetadata from .encodingutils import unicode2utf8, PY2 _logger = logging.getLogger(__name__) # Cast all database input to unicode automatically. # See http://initd.org/psycopg/docs/usage.html#unicode-handling for more info. ext.register_type(ext.UNICODE) ext.register_type(ext.UNICODEARRAY) ext.register_type(ext.new_type((705,), "UNKNOWN", ext.UNICODE)) # Cast bytea fields to text. By default, this will render as hex strings with # Postgres 9+ and as escaped binary in earlier versions. ext.register_type(ext.new_type((17,), 'BYTEA_TEXT', psycopg2.STRING)) # When running a query, make pressing CTRL+C raise a KeyboardInterrupt # See http://initd.org/psycopg/articles/2014/07/20/cancelling-postgresql-statements-python/ ext.set_wait_callback(psycopg2.extras.wait_select) def register_json_typecasters(conn, loads_fn): """Set the function for converting JSON data for a connection. Use the supplied function to decode JSON data returned from the database via the given connection. The function should accept a single argument of the data as a string encoded in the database's character encoding. psycopg2's default handler for JSON data is json.loads. http://initd.org/psycopg/docs/extras.html#json-adaptation This function attempts to register the typecaster for both JSON and JSONB types. Returns a set that is a subset of {'json', 'jsonb'} indicating which types (if any) were successfully registered. """ available = set() for name in ['json', 'jsonb']: try: psycopg2.extras.register_json(conn, loads=loads_fn, name=name) available.add(name) except psycopg2.ProgrammingError: pass return available def register_hstore_typecaster(conn): """ Instead of using register_hstore() which converts hstore into a python dict, we query the 'oid' of hstore which will be different for each database and register a type caster that converts it to unicode. http://initd.org/psycopg/docs/extras.html#psycopg2.extras.register_hstore """ with conn.cursor() as cur: try: cur.execute("SELECT 'hstore'::regtype::oid") oid = cur.fetchone()[0] ext.register_type(ext.new_type((oid,), "HSTORE", ext.UNICODE)) except Exception: pass class PGExecute(object): # The boolean argument to the current_schemas function indicates whether # implicit schemas, e.g. pg_catalog search_path_query = ''' SELECT * FROM unnest(current_schemas(true))''' schemata_query = ''' SELECT nspname FROM pg_catalog.pg_namespace ORDER BY 1 ''' tables_query = ''' SELECT n.nspname schema_name, c.relname table_name FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE c.relkind = ANY(%s) ORDER BY 1,2;''' columns_query = ''' SELECT nsp.nspname schema_name, cls.relname table_name, att.attname column_name FROM pg_catalog.pg_attribute att INNER JOIN pg_catalog.pg_class cls ON att.attrelid = cls.oid INNER JOIN pg_catalog.pg_namespace nsp ON cls.relnamespace = nsp.oid WHERE cls.relkind = ANY(%s) AND NOT att.attisdropped AND att.attnum > 0 ORDER BY 1, 2, 3''' functions_query = ''' SELECT n.nspname schema_name, p.proname func_name, pg_catalog.pg_get_function_arguments(p.oid) arg_list, pg_catalog.pg_get_function_result(p.oid) return_type, p.proisagg is_aggregate, p.proiswindow is_window, p.proretset is_set_returning FROM pg_catalog.pg_proc p INNER JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace ORDER BY 1, 2''' databases_query = """SELECT d.datname as "Name", pg_catalog.pg_get_userbyid(d.datdba) as "Owner", pg_catalog.pg_encoding_to_char(d.encoding) as "Encoding", d.datcollate as "Collate", d.datctype as "Ctype", pg_catalog.array_to_string(d.datacl, E'\n') AS "Access privileges" FROM pg_catalog.pg_database d ORDER BY 1;""" datatypes_query = ''' SELECT n.nspname schema_name, t.typname type_name FROM pg_catalog.pg_type t INNER JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace WHERE ( t.typrelid = 0 -- non-composite types OR ( -- composite type, but not a table SELECT c.relkind = 'c' FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid ) ) AND NOT EXISTS( -- ignore array types SELECT 1 FROM pg_catalog.pg_type el WHERE el.oid = t.typelem AND el.typarray = t.oid ) AND n.nspname <> 'pg_catalog' AND n.nspname <> 'information_schema' ORDER BY 1, 2;''' def __init__(self, database, user, password, host, port, dsn): self.dbname = database self.user = user self.password = password self.host = host self.port = port self.dsn = dsn self.connect() def connect(self, database=None, user=None, password=None, host=None, port=None, dsn=None): db = (database or self.dbname) user = (user or self.user) password = (password or self.password) host = (host or self.host) port = (port or self.port) dsn = (dsn or self.dsn) if dsn: if password: dsn = "{0} password={1}".format(dsn, password) conn = psycopg2.connect(dsn=unicode2utf8(dsn)) cursor = conn.cursor() # When we connect using a DSN, we don't really know what db, # user, etc. we connected to. Let's read it. db = self._select_one(cursor, 'select current_database()') user = self._select_one(cursor, 'select current_user') host = self._select_one(cursor, 'select inet_server_addr()') port = self._select_one(cursor, 'select inet_server_port()') else: conn = psycopg2.connect( database=unicode2utf8(db), user=unicode2utf8(user), password=unicode2utf8(password), host=unicode2utf8(host), port=unicode2utf8(port)) conn.set_client_encoding('utf8') if hasattr(self, 'conn'): self.conn.close() self.conn = conn self.conn.autocommit = True self.dbname = db self.user = user self.password = password self.host = host self.port = port register_json_typecasters(self.conn, self._json_typecaster) register_hstore_typecaster(self.conn) def _select_one(self, cur, sql): """ Helper method to run a select and retrieve a single field value :param cur: cursor :param sql: string :return: string """ cur.execute(sql) return cur.fetchone() def _json_typecaster(self, json_data): """Interpret incoming JSON data as a string. The raw data is decoded using the connection's encoding, which defaults to the database's encoding. See http://initd.org/psycopg/docs/connection.html#connection.encoding """ if PY2: return json_data.decode(self.conn.encoding) else: return json_data def run(self, statement, pgspecial=None, exception_formatter=None, on_error_resume=False): """Execute the sql in the database and return the results. :param statement: A string containing one or more sql statements :param pgspecial: PGSpecial object :param exception_formatter: A callable that accepts an Exception and returns a formatted (title, rows, headers, status) tuple that can act as a query result. If an exception_formatter is not supplied, psycopg2 exceptions are always raised. :param on_error_resume: Bool. If true, queries following an exception (assuming exception_formatter has been supplied) continue to execute. :return: Generator yielding tuples containing (title, rows, headers, status, query, success) """ # Remove spaces and EOL statement = statement.strip() if not statement: # Empty string yield (None, None, None, None, statement, False) # Split the sql into separate queries and run each one. for sql in sqlparse.split(statement): # Remove spaces, eol and semi-colons. sql = sql.rstrip(';') try: if pgspecial: # First try to run each query as special _logger.debug('Trying a pgspecial command. sql: %r', sql) cur = self.conn.cursor() try: for result in pgspecial.execute(cur, sql): yield result + (sql, True) continue except special.CommandNotFound: pass # Not a special command, so execute as normal sql yield self.execute_normal_sql(sql) + (sql, True) except psycopg2.DatabaseError as e: _logger.error("sql: %r, error: %r", sql, e) _logger.error("traceback: %r", traceback.format_exc()) if (isinstance(e, psycopg2.OperationalError) or not exception_formatter): # Always raise operational errors, regardless of on_error # specification raise yield None, None, None, exception_formatter(e), sql, False if not on_error_resume: break def execute_normal_sql(self, split_sql): """Returns tuple (title, rows, headers, status)""" _logger.debug('Regular sql statement. sql: %r', split_sql) cur = self.conn.cursor() cur.execute(split_sql) try: title = self.conn.notices.pop() except IndexError: title = None # cur.description will be None for operations that do not return # rows. if cur.description: headers = [x[0] for x in cur.description] return title, cur, headers, cur.statusmessage else: _logger.debug('No rows in result.') return title, None, None, cur.statusmessage def search_path(self): """Returns the current search path as a list of schema names""" with self.conn.cursor() as cur: _logger.debug('Search path query. sql: %r', self.search_path_query) cur.execute(self.search_path_query) return [x[0] for x in cur.fetchall()] def schemata(self): """Returns a list of schema names in the database""" with self.conn.cursor() as cur: _logger.debug('Schemata Query. sql: %r', self.schemata_query) cur.execute(self.schemata_query) return [x[0] for x in cur.fetchall()] def _relations(self, kinds=('r', 'v', 'm')): """Get table or view name metadata :param kinds: list of postgres relkind filters: 'r' - table 'v' - view 'm' - materialized view :return: (schema_name, rel_name) tuples """ with self.conn.cursor() as cur: sql = cur.mogrify(self.tables_query, [kinds]) _logger.debug('Tables Query. sql: %r', sql) cur.execute(sql) for row in cur: yield row def tables(self): """Yields (schema_name, table_name) tuples""" for row in self._relations(kinds=['r']): yield row def views(self): """Yields (schema_name, view_name) tuples. Includes both views and and materialized views """ for row in self._relations(kinds=['v', 'm']): yield row def _columns(self, kinds=('r', 'v', 'm')): """Get column metadata for tables and views :param kinds: kinds: list of postgres relkind filters: 'r' - table 'v' - view 'm' - materialized view :return: list of (schema_name, relation_name, column_name) tuples """ with self.conn.cursor() as cur: sql = cur.mogrify(self.columns_query, [kinds]) _logger.debug('Columns Query. sql: %r', sql) cur.execute(sql) for row in cur: yield row def table_columns(self): for row in self._columns(kinds=['r']): yield row def view_columns(self): for row in self._columns(kinds=['v', 'm']): yield row def databases(self): with self.conn.cursor() as cur: _logger.debug('Databases Query. sql: %r', self.databases_query) cur.execute(self.databases_query) return [x[0] for x in cur.fetchall()] def functions(self): """Yields FunctionMetadata named tuples""" with self.conn.cursor() as cur: _logger.debug('Functions Query. sql: %r', self.functions_query) cur.execute(self.functions_query) for row in cur: yield FunctionMetadata(*row) def datatypes(self): """Yields tuples of (schema_name, type_name)""" with self.conn.cursor() as cur: _logger.debug('Datatypes Query. sql: %r', self.datatypes_query) cur.execute(self.datatypes_query) for row in cur: yield row pgcli-0.20.1/pgcli/pgstyle.py0000644000076600000240000000127512616534745016451 0ustar amjithstaff00000000000000from pygments.token import string_to_tokentype from pygments.style import Style from pygments.util import ClassNotFound from prompt_toolkit.styles import default_style_extensions import pygments.styles def style_factory(name, cli_style): try: style = pygments.styles.get_style_by_name(name) except ClassNotFound: style = pygments.styles.get_style_by_name('native') class PGStyle(Style): styles = {} styles.update(style.styles) styles.update(default_style_extensions) custom_styles = dict([(string_to_tokentype(x), y) for x, y in cli_style.items()]) styles.update(custom_styles) return PGStyle pgcli-0.20.1/pgcli/pgtoolbar.py0000644000076600000240000000226612616534745016754 0ustar amjithstaff00000000000000from pygments.token import Token def create_toolbar_tokens_func(get_vi_mode_enabled, get_is_refreshing): """ Return a function that generates the toolbar tokens. """ assert callable(get_vi_mode_enabled) token = Token.Toolbar def get_toolbar_tokens(cli): result = [] result.append((token, ' ')) if cli.buffers['default'].completer.smart_completion: result.append((token.On, '[F2] Smart Completion: ON ')) else: result.append((token.Off, '[F2] Smart Completion: OFF ')) if cli.buffers['default'].always_multiline: result.append((token.On, '[F3] Multiline: ON ')) else: result.append((token.Off, '[F3] Multiline: OFF ')) if cli.buffers['default'].always_multiline: result.append((token, ' (Semi-colon [;] will end the line)')) if get_vi_mode_enabled(): result.append((token.On, '[F4] Vi-mode')) else: result.append((token.On, '[F4] Emacs-mode')) if get_is_refreshing(): result.append((token, ' Refreshing completions...')) return result return get_toolbar_tokens pgcli-0.20.1/pgcli.egg-info/0000755000076600000240000000000012621112650016054 5ustar amjithstaff00000000000000pgcli-0.20.1/pgcli.egg-info/dependency_links.txt0000644000076600000240000000000112621112650022122 0ustar amjithstaff00000000000000 pgcli-0.20.1/pgcli.egg-info/entry_points.txt0000644000076600000240000000011012621112650021342 0ustar amjithstaff00000000000000 [console_scripts] pgcli=pgcli.main:cli pgcli-0.20.1/pgcli.egg-info/pbr.json0000644000076600000240000000005712615313736017547 0ustar amjithstaff00000000000000{"is_release": false, "git_version": "0ff02e9"}pgcli-0.20.1/pgcli.egg-info/PKG-INFO0000644000076600000240000001730212621112650017154 0ustar amjithstaff00000000000000Metadata-Version: 1.1 Name: pgcli Version: 0.20.1 Summary: CLI for Postgres Database. With auto-completion and syntax highlighting. Home-page: http://pgcli.com Author: Amjith Ramanujam Author-email: amjith[dot]r[at]gmail.com License: LICENSE.txt Description: A REPL for Postgres ------------------- |Build Status| |PyPI| |Gitter| This is a postgres client that does auto-completion and syntax highlighting. Home Page: http://pgcli.com MySQL Equivalent: http://mysql-cli.com .. image:: screenshots/pgcli.gif .. image:: screenshots/image01.png Quick Start ----------- If you already know how to install python packages, then you can simply do: :: $ pip install pgcli or $ brew install pgcli # Only on OS X If you don't know how to install python packages, please check the `detailed instructions`__. __ https://github.com/dbcli/pgcli#detailed-installation-instructions Usage ----- :: $ pgcli [database_name] or $ pgcli postgresql://[user[:password]@][netloc][:port][/dbname] Examples: :: $ pgcli local_database $ pgcli postgres://amjith:pa$$w0rd@example.com:5432/app_db Features -------- The `pgcli` is written using prompt_toolkit_. * Auto-completes as you type for SQL keywords as well as tables and columns in the database. * Syntax highlighting using Pygments. * Smart-completion (enabled by default) will suggest context-sensitive completion. - ``SELECT * FROM `` will only show table names. - ``SELECT * FROM users WHERE `` will only show column names. * Config file is automatically created at ``~/.pgclirc`` at first launch. * Primitive support for ``psql`` back-slash commands. * Pretty prints tabular data. .. _prompt_toolkit: https://github.com/jonathanslenders/python-prompt-toolkit Contributions: -------------- If you're interested in contributing to this project, first of all I would like to extend my heartfelt gratitude. I've written a small doc to describe how to get this running in a development setup. https://github.com/dbcli/pgcli/blob/master/DEVELOP.rst Please feel free to reach out to me if you need help. My email: amjith.r@gmail.com, Twitter: `@amjithr `_ Detailed Installation Instructions: ----------------------------------- OS X: ===== Easiest way to install pgcli is using brew. Please be aware that this will install postgres via brew if it wasn't installed via brew. :: $ brew install pgcli Done! If you have postgres installed via a different means (such as PostgresApp), you can ``brew install --build-from-source pgcli`` which will skip installing postgres via brew if postgres is available in the path. Alternatively, you can install ``pgcli`` as a python package using a package manager called called ``pip``. You will need postgres installed on your system for this to work. In depth getting started guide for ``pip`` - https://pip.pypa.io/en/latest/installing.html. :: $ which pip If it is installed then you can do: :: $ pip install pgcli If that fails due to permission issues, you might need to run the command with sudo permissions. :: $ sudo pip install pgcli If pip is not installed check if easy_install is available on the system. :: $ which easy_install $ sudo easy_install pgcli Linux: ====== In depth getting started guide for ``pip`` - https://pip.pypa.io/en/latest/installing.html. Check if pip is already available in your system. :: $ which pip If it doesn't exist, use your linux package manager to install `pip`. This might look something like: :: $ sudo apt-get install python-pip # Debian, Ubuntu, Mint etc or $ sudo yum install python-pip # RHEL, Centos, Fedora etc ``pgcli`` requires python-dev, libpq-dev and libevent-dev packages. You can install these via your operating system package manager. :: $ sudo apt-get install python-dev libpq-dev libevent-dev or $ sudo yum install python-devel postgresql-devel Then you can install pgcli: :: $ sudo pip install pgcli Thanks: ------- A special thanks to `Jonathan Slenders `_ for creating `Python Prompt Toolkit `_, which is quite literally the backbone library, that made this app possible. Jonathan has also provided valuable feedback and support during the development of this app. This app includes the awesome `tabulate `_ library for pretty printing the output of tables. The reason for vendoring this library rather than listing it as a dependency in setup.py, is because I had to make a change to the table format which is merged back into the original repo, but not yet released in PyPI. `Click `_ is used for command line option parsing and printing error messages. Thanks to `psycopg `_ for providing a rock solid interface to Postgres database. Thanks to all the beta testers and contributors for your time and patience. :) .. |Build Status| image:: https://api.travis-ci.org/dbcli/pgcli.svg?branch=master :target: https://travis-ci.org/dbcli/pgcli .. |PyPI| image:: https://img.shields.io/pypi/v/pgcli.svg :target: https://pypi.python.org/pypi/pgcli/ :alt: Latest Version .. |Gitter| image:: https://badges.gitter.im/Join%20Chat.svg :target: https://gitter.im/dbcli/pgcli?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge :alt: Gitter Chat Platform: UNKNOWN Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: BSD License Classifier: Operating System :: Unix Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: SQL Classifier: Topic :: Database Classifier: Topic :: Database :: Front-Ends Classifier: Topic :: Software Development Classifier: Topic :: Software Development :: Libraries :: Python Modules pgcli-0.20.1/pgcli.egg-info/requires.txt0000644000076600000240000000022112621112650020447 0ustar amjithstaff00000000000000pgspecial>=1.1.0 click >= 4.1 Pygments >= 2.0 prompt_toolkit==0.46 psycopg2 >= 2.5.4 sqlparse == 0.1.16 configobj >= 5.0.6 setproctitle >= 1.1.9 pgcli-0.20.1/pgcli.egg-info/SOURCES.txt0000644000076600000240000000147612621112650017750 0ustar amjithstaff00000000000000README.rst setup.py pgcli/__init__.py pgcli/completion_refresher.py pgcli/config.py pgcli/encodingutils.py pgcli/filters.py pgcli/key_bindings.py pgcli/magic.py pgcli/main.py pgcli/pgbuffer.py pgcli/pgclirc pgcli/pgcompleter.py pgcli/pgexecute.py pgcli/pgstyle.py pgcli/pgtoolbar.py pgcli.egg-info/PKG-INFO pgcli.egg-info/SOURCES.txt pgcli.egg-info/dependency_links.txt pgcli.egg-info/entry_points.txt pgcli.egg-info/pbr.json pgcli.egg-info/requires.txt pgcli.egg-info/top_level.txt pgcli/packages/__init__.py pgcli/packages/counter.py pgcli/packages/expanded.py pgcli/packages/function_metadata.py pgcli/packages/ordereddict.py pgcli/packages/parseutils.py pgcli/packages/sqlcompletion.py pgcli/packages/tabulate.py pgcli/packages/pgliterals/__init__.py pgcli/packages/pgliterals/main.py pgcli/packages/pgliterals/pgliterals.jsonpgcli-0.20.1/pgcli.egg-info/top_level.txt0000644000076600000240000000000612621112650020602 0ustar amjithstaff00000000000000pgcli pgcli-0.20.1/PKG-INFO0000644000076600000240000001730212621112650014364 0ustar amjithstaff00000000000000Metadata-Version: 1.1 Name: pgcli Version: 0.20.1 Summary: CLI for Postgres Database. With auto-completion and syntax highlighting. Home-page: http://pgcli.com Author: Amjith Ramanujam Author-email: amjith[dot]r[at]gmail.com License: LICENSE.txt Description: A REPL for Postgres ------------------- |Build Status| |PyPI| |Gitter| This is a postgres client that does auto-completion and syntax highlighting. Home Page: http://pgcli.com MySQL Equivalent: http://mysql-cli.com .. image:: screenshots/pgcli.gif .. image:: screenshots/image01.png Quick Start ----------- If you already know how to install python packages, then you can simply do: :: $ pip install pgcli or $ brew install pgcli # Only on OS X If you don't know how to install python packages, please check the `detailed instructions`__. __ https://github.com/dbcli/pgcli#detailed-installation-instructions Usage ----- :: $ pgcli [database_name] or $ pgcli postgresql://[user[:password]@][netloc][:port][/dbname] Examples: :: $ pgcli local_database $ pgcli postgres://amjith:pa$$w0rd@example.com:5432/app_db Features -------- The `pgcli` is written using prompt_toolkit_. * Auto-completes as you type for SQL keywords as well as tables and columns in the database. * Syntax highlighting using Pygments. * Smart-completion (enabled by default) will suggest context-sensitive completion. - ``SELECT * FROM `` will only show table names. - ``SELECT * FROM users WHERE `` will only show column names. * Config file is automatically created at ``~/.pgclirc`` at first launch. * Primitive support for ``psql`` back-slash commands. * Pretty prints tabular data. .. _prompt_toolkit: https://github.com/jonathanslenders/python-prompt-toolkit Contributions: -------------- If you're interested in contributing to this project, first of all I would like to extend my heartfelt gratitude. I've written a small doc to describe how to get this running in a development setup. https://github.com/dbcli/pgcli/blob/master/DEVELOP.rst Please feel free to reach out to me if you need help. My email: amjith.r@gmail.com, Twitter: `@amjithr `_ Detailed Installation Instructions: ----------------------------------- OS X: ===== Easiest way to install pgcli is using brew. Please be aware that this will install postgres via brew if it wasn't installed via brew. :: $ brew install pgcli Done! If you have postgres installed via a different means (such as PostgresApp), you can ``brew install --build-from-source pgcli`` which will skip installing postgres via brew if postgres is available in the path. Alternatively, you can install ``pgcli`` as a python package using a package manager called called ``pip``. You will need postgres installed on your system for this to work. In depth getting started guide for ``pip`` - https://pip.pypa.io/en/latest/installing.html. :: $ which pip If it is installed then you can do: :: $ pip install pgcli If that fails due to permission issues, you might need to run the command with sudo permissions. :: $ sudo pip install pgcli If pip is not installed check if easy_install is available on the system. :: $ which easy_install $ sudo easy_install pgcli Linux: ====== In depth getting started guide for ``pip`` - https://pip.pypa.io/en/latest/installing.html. Check if pip is already available in your system. :: $ which pip If it doesn't exist, use your linux package manager to install `pip`. This might look something like: :: $ sudo apt-get install python-pip # Debian, Ubuntu, Mint etc or $ sudo yum install python-pip # RHEL, Centos, Fedora etc ``pgcli`` requires python-dev, libpq-dev and libevent-dev packages. You can install these via your operating system package manager. :: $ sudo apt-get install python-dev libpq-dev libevent-dev or $ sudo yum install python-devel postgresql-devel Then you can install pgcli: :: $ sudo pip install pgcli Thanks: ------- A special thanks to `Jonathan Slenders `_ for creating `Python Prompt Toolkit `_, which is quite literally the backbone library, that made this app possible. Jonathan has also provided valuable feedback and support during the development of this app. This app includes the awesome `tabulate `_ library for pretty printing the output of tables. The reason for vendoring this library rather than listing it as a dependency in setup.py, is because I had to make a change to the table format which is merged back into the original repo, but not yet released in PyPI. `Click `_ is used for command line option parsing and printing error messages. Thanks to `psycopg `_ for providing a rock solid interface to Postgres database. Thanks to all the beta testers and contributors for your time and patience. :) .. |Build Status| image:: https://api.travis-ci.org/dbcli/pgcli.svg?branch=master :target: https://travis-ci.org/dbcli/pgcli .. |PyPI| image:: https://img.shields.io/pypi/v/pgcli.svg :target: https://pypi.python.org/pypi/pgcli/ :alt: Latest Version .. |Gitter| image:: https://badges.gitter.im/Join%20Chat.svg :target: https://gitter.im/dbcli/pgcli?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge :alt: Gitter Chat Platform: UNKNOWN Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: BSD License Classifier: Operating System :: Unix Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: SQL Classifier: Topic :: Database Classifier: Topic :: Database :: Front-Ends Classifier: Topic :: Software Development Classifier: Topic :: Software Development :: Libraries :: Python Modules pgcli-0.20.1/README.rst0000644000076600000240000001224012620265070014756 0ustar amjithstaff00000000000000A REPL for Postgres ------------------- |Build Status| |PyPI| |Gitter| This is a postgres client that does auto-completion and syntax highlighting. Home Page: http://pgcli.com MySQL Equivalent: http://mysql-cli.com .. image:: screenshots/pgcli.gif .. image:: screenshots/image01.png Quick Start ----------- If you already know how to install python packages, then you can simply do: :: $ pip install pgcli or $ brew install pgcli # Only on OS X If you don't know how to install python packages, please check the `detailed instructions`__. __ https://github.com/dbcli/pgcli#detailed-installation-instructions Usage ----- :: $ pgcli [database_name] or $ pgcli postgresql://[user[:password]@][netloc][:port][/dbname] Examples: :: $ pgcli local_database $ pgcli postgres://amjith:pa$$w0rd@example.com:5432/app_db Features -------- The `pgcli` is written using prompt_toolkit_. * Auto-completes as you type for SQL keywords as well as tables and columns in the database. * Syntax highlighting using Pygments. * Smart-completion (enabled by default) will suggest context-sensitive completion. - ``SELECT * FROM `` will only show table names. - ``SELECT * FROM users WHERE `` will only show column names. * Config file is automatically created at ``~/.pgclirc`` at first launch. * Primitive support for ``psql`` back-slash commands. * Pretty prints tabular data. .. _prompt_toolkit: https://github.com/jonathanslenders/python-prompt-toolkit Contributions: -------------- If you're interested in contributing to this project, first of all I would like to extend my heartfelt gratitude. I've written a small doc to describe how to get this running in a development setup. https://github.com/dbcli/pgcli/blob/master/DEVELOP.rst Please feel free to reach out to me if you need help. My email: amjith.r@gmail.com, Twitter: `@amjithr `_ Detailed Installation Instructions: ----------------------------------- OS X: ===== Easiest way to install pgcli is using brew. Please be aware that this will install postgres via brew if it wasn't installed via brew. :: $ brew install pgcli Done! If you have postgres installed via a different means (such as PostgresApp), you can ``brew install --build-from-source pgcli`` which will skip installing postgres via brew if postgres is available in the path. Alternatively, you can install ``pgcli`` as a python package using a package manager called called ``pip``. You will need postgres installed on your system for this to work. In depth getting started guide for ``pip`` - https://pip.pypa.io/en/latest/installing.html. :: $ which pip If it is installed then you can do: :: $ pip install pgcli If that fails due to permission issues, you might need to run the command with sudo permissions. :: $ sudo pip install pgcli If pip is not installed check if easy_install is available on the system. :: $ which easy_install $ sudo easy_install pgcli Linux: ====== In depth getting started guide for ``pip`` - https://pip.pypa.io/en/latest/installing.html. Check if pip is already available in your system. :: $ which pip If it doesn't exist, use your linux package manager to install `pip`. This might look something like: :: $ sudo apt-get install python-pip # Debian, Ubuntu, Mint etc or $ sudo yum install python-pip # RHEL, Centos, Fedora etc ``pgcli`` requires python-dev, libpq-dev and libevent-dev packages. You can install these via your operating system package manager. :: $ sudo apt-get install python-dev libpq-dev libevent-dev or $ sudo yum install python-devel postgresql-devel Then you can install pgcli: :: $ sudo pip install pgcli Thanks: ------- A special thanks to `Jonathan Slenders `_ for creating `Python Prompt Toolkit `_, which is quite literally the backbone library, that made this app possible. Jonathan has also provided valuable feedback and support during the development of this app. This app includes the awesome `tabulate `_ library for pretty printing the output of tables. The reason for vendoring this library rather than listing it as a dependency in setup.py, is because I had to make a change to the table format which is merged back into the original repo, but not yet released in PyPI. `Click `_ is used for command line option parsing and printing error messages. Thanks to `psycopg `_ for providing a rock solid interface to Postgres database. Thanks to all the beta testers and contributors for your time and patience. :) .. |Build Status| image:: https://api.travis-ci.org/dbcli/pgcli.svg?branch=master :target: https://travis-ci.org/dbcli/pgcli .. |PyPI| image:: https://img.shields.io/pypi/v/pgcli.svg :target: https://pypi.python.org/pypi/pgcli/ :alt: Latest Version .. |Gitter| image:: https://badges.gitter.im/Join%20Chat.svg :target: https://gitter.im/dbcli/pgcli?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge :alt: Gitter Chat pgcli-0.20.1/setup.cfg0000644000076600000240000000007312621112650015105 0ustar amjithstaff00000000000000[egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 pgcli-0.20.1/setup.py0000644000076600000240000000452312620265070015006 0ustar amjithstaff00000000000000import re import ast import platform from setuptools import setup, find_packages _version_re = re.compile(r'__version__\s+=\s+(.*)') with open('pgcli/__init__.py', 'rb') as f: version = str(ast.literal_eval(_version_re.search( f.read().decode('utf-8')).group(1))) description = 'CLI for Postgres Database. With auto-completion and syntax highlighting.' install_requirements = [ 'pgspecial>=1.1.0', 'click >= 4.1', 'Pygments >= 2.0', # Pygments has to be Capitalcased. WTF? 'prompt_toolkit==0.46', 'psycopg2 >= 2.5.4', 'sqlparse == 0.1.16', 'configobj >= 5.0.6', ] # setproctitle is used to mask the password when running `ps` in command line. # But this is not necessary in Windows since the password is never shown in the # task manager. Also setproctitle is a hard dependency to install in Windows, # so we'll only install it if we're not in Windows. if platform.system() != 'Windows': install_requirements.append('setproctitle >= 1.1.9') setup( name='pgcli', author='Amjith Ramanujam', author_email='amjith[dot]r[at]gmail.com', version=version, license='LICENSE.txt', url='http://pgcli.com', packages=find_packages(), package_data={'pgcli': ['pgclirc', 'packages/pgliterals/pgliterals.json']}, description=description, long_description=open('README.rst').read(), install_requires=install_requirements, entry_points=''' [console_scripts] pgcli=pgcli.main:cli ''', classifiers=[ 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: Unix', 'Programming Language :: Python', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: SQL', 'Topic :: Database', 'Topic :: Database :: Front-Ends', 'Topic :: Software Development', 'Topic :: Software Development :: Libraries :: Python Modules', ], )