google-apputils-0.4.1/README

Google Application Utilities for Python
=======================================

This project is a small collection of utilities for building Python
applications. It includes some of the same utilities used to build and run
internal Python apps at Google.

Features:

  * Simple application startup integrated with python-gflags.
  * Subcommands for command-line applications.
  * Option to drop into pdb on uncaught exceptions.
  * Helper functions for dealing with files.
  * High-level profiling tools.
  * Timezone-aware wrappers for datetime.datetime classes.
  * Improved TestCase with the same methods as unittest2, plus helpful flags
    for test startup.
  * google_test setuptools command for running tests.
  * Helper module for creating application stubs.

Installation
============

To install the package, simply run:

  python setup.py install

Google-Style Tests
==================

Google-style tests (those run with basetest.main()) differ from
setuptools-style tests in that each test module is designed to be run as
__main__. Setting up your project to use Google-style tests is easy:

1. Create one or more test modules named '*_test.py' in a directory. Each test
   module should have a main block that runs basetest.main():

     # In tests/my_test.py
     from google.apputils import basetest

     class MyTest(basetest.TestCase):
       def testSomething(self):
         self.assertTrue('my test')

     if __name__ == '__main__':
       basetest.main()

2. Add a setup requirement on google-apputils and set the google_test_dir
   option:

     # In setup.py
     setup(
         ...
         setup_requires = ['google-apputils>=0.2'],
         google_test_dir = 'tests',
         )

3. Run your tests:

     python setup.py google_test

Google-Style Stub Scripts
=========================

Google-style binaries (run with app.run()) are intended to be executed directly
at the top level, so you should not use a setuptools console_script entry point
that points at your main(). You can use distutils-style scripts if you want.

Another alternative is google.apputils.run_script_module, a handy wrapper that
executes a module directly as if it were a script:

1. Create a module like 'stubs.py' in your project:

     # In my/stubs.py
     from google.apputils import run_script_module

     def RunMyScript():
       import my.script
       run_script_module.RunScriptModule(my.script)

     def RunMyOtherScript():
       import my.other_script
       run_script_module.RunScriptModule(my.other_script)

2. Set up entry points in setup.py that point to the functions in your stubs
   module:

     # In setup.py
     setup(
         ...
         entry_points = {
             'console_scripts': [
                 'my_script = my.stubs:RunMyScript',
                 'my_other_script = my.stubs:RunMyOtherScript',
                 ],
             },
         )

There are also useful flags you can pass to your scripts to help you debug your
binaries; run your binary with --helpstub to see the full list.

google-apputils-0.4.1/google/apputils/setup_command.py

#!/usr/bin/env python
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Setuptools extension for running Google-style Python tests. Google-style Python tests differ from normal Python tests in that each test module is intended to be executed as an independent script. In particular, the test fixture code in basetest.main() that executes module-wide setUp() and tearDown() depends on __main__ being the module under test. This conflicts with the usual setuptools test style, which uses a single TestSuite to run all of a package's tests. This package provides a new setuptools command, google_test, that runs all of the google-style tests found in a specified directory. NOTE: This works by overriding sys.modules['__main__'] with the module under test, but still runs tests in the same process. Thus it will *not* work if your tests depend on any of the following: - Per-process (as opposed to per-module) initialization. - Any entry point that is not basetest.main(). To use the google_test command in your project, do something like the following: In setup.py: setup( name = "mypackage", ... setup_requires = ["google-apputils>=0.2"], google_test_dir = "tests", ) Run: $ python setup.py google_test """ from distutils import errors import imp import os import re import shlex import sys import traceback from setuptools.command import test def ValidateGoogleTestDir(unused_dist, unused_attr, value): """Validate that the test directory is a directory.""" if not os.path.isdir(value): raise errors.DistutilsSetupError('%s is not a directory' % value) class GoogleTest(test.test): """Command to run Google-style tests after in-place build.""" description = 'run Google-style tests after in-place build' _DEFAULT_PATTERN = r'_(?:unit|reg)?test\.py$' user_options = [ ('test-dir=', 'd', 'Look for test modules in specified directory.'), ('test-module-pattern=', 'p', ('Pattern for matching test modules. Defaults to %r. ' 'Only source files (*.py) will be considered, even if more files match ' 'this pattern.' % _DEFAULT_PATTERN)), ('test-args=', 'a', ('Arguments to pass to basetest.main(). May only make sense if ' 'test_module_pattern matches exactly one test.')), ] def initialize_options(self): self.test_dir = None self.test_module_pattern = self._DEFAULT_PATTERN self.test_args = '' # Set to a dummy value, since we don't call the superclass methods for # options parsing. self.test_suite = True def finalize_options(self): if self.test_dir is None: if self.distribution.google_test_dir: self.test_dir = self.distribution.google_test_dir else: raise errors.DistutilsOptionError('No test directory specified') self.test_module_pattern = re.compile(self.test_module_pattern) self.test_args = shlex.split(self.test_args) def _RunTestModule(self, module_path): """Run a module as a test module given its path. Args: module_path: The path to the module to test; must end in '.py'. Returns: True if the tests in this module pass, False if not or if an error occurs. """ path, filename = os.path.split(module_path) old_argv = sys.argv[:] old_path = sys.path[:] old_modules = sys.modules.copy() # Make relative imports in test modules work with our mangled sys.path. 
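    # The module is then loaded and installed as sys.modules['__main__'] so that
    # basetest.main() treats it as the module under test (running its
    # module-level setUp/tearDown), and sys.argv is rewritten so flag parsing
    # behaves as if the module had been launched directly as a script.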
sys.path.insert(0, path) module_name = filename.replace('.py', '') import_tuple = imp.find_module(module_name, [path]) module = imp.load_module(module_name, *import_tuple) sys.modules['__main__'] = module sys.argv = [module.__file__] + self.test_args # Late import since this must be run with the project's sys.path. import basetest try: try: sys.stderr.write('Testing %s\n' % module_name) basetest.main() # basetest.main() should always call sys.exit, so this is very bad. return False except SystemExit as e: returncode, = e.args return not returncode except: traceback.print_exc() return False finally: sys.argv[:] = old_argv sys.path[:] = old_path sys.modules.clear() sys.modules.update(old_modules) def run_tests(self): ok = True for path, _, filenames in os.walk(self.test_dir): for filename in filenames: if not filename.endswith('.py'): continue file_path = os.path.join(path, filename) if self.test_module_pattern.search(file_path): ok &= self._RunTestModule(file_path) sys.exit(int(not ok)) google-apputils-0.4.1/google/apputils/humanize.py0000640033465300116100000003570102263613200023062 0ustar craigcitroeng00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2008 Google Inc. All Rights Reserved. """Lightweight routines for producing more friendly output. Usage examples: 'New messages: %s' % humanize.Commas(star_count) -> 'New messages: 58,192' 'Found %s.' % humanize.Plural(error_count, 'error') -> 'Found 2 errors.' 'Found %s.' % humanize.Plural(error_count, 'ox', 'oxen') -> 'Found 2 oxen.' 'Copied at %s.' % humanize.DecimalPrefix(rate, 'bps', precision=3) -> 'Copied at 42.6 Mbps.' 'Free RAM: %s' % humanize.BinaryPrefix(bytes_free, 'B') -> 'Free RAM: 742 MiB' 'Finished all tasks in %s.' % humanize.Duration(elapsed_time) -> 'Finished all tasks in 34m 5s.' These libraries are not a substitute for full localization. If you need localization, then you will have to think about translating strings, formatting numbers in different ways, and so on. Use ICU if your application is user-facing. Use these libraries if your application is an English-only internal tool, and you are tired of seeing "1 results" or "3450134804 bytes used". Compare humanize.*Prefix() to C++ utilites HumanReadableNumBytes and HumanReadableInt in strings/human_readable.h. """ import datetime import math import re SIBILANT_ENDINGS = frozenset(['sh', 'ss', 'tch', 'ax', 'ix', 'ex']) DIGIT_SPLITTER = re.compile(r'\d+|\D+').findall # These are included because they are common technical terms. SPECIAL_PLURALS = { 'index': 'indices', 'matrix': 'matrices', 'vertex': 'vertices', } VOWELS = frozenset('AEIOUaeiou') def Commas(value): """Formats an integer with thousands-separating commas. Args: value: An integer. Returns: A string. """ if value < 0: sign = '-' value = -value else: sign = '' result = [] while value >= 1000: result.append('%03d' % (value % 1000)) value /= 1000 result.append('%d' % value) return sign + ','.join(reversed(result)) def Plural(quantity, singular, plural=None): """Formats an integer and a string into a single pluralized string. Args: quantity: An integer. singular: A string, the singular form of a noun. plural: A string, the plural form. If not specified, then simple English rules of regular pluralization will be used. Returns: A string. """ return '%d %s' % (quantity, PluralWord(quantity, singular, plural)) def PluralWord(quantity, singular, plural=None): """Builds the plural of an English word. Args: quantity: An integer. singular: A string, the singular form of a noun. 
plural: A string, the plural form. If not specified, then simple English rules of regular pluralization will be used. Returns: the plural form of the word. """ if quantity == 1: return singular if plural: return plural if singular in SPECIAL_PLURALS: return SPECIAL_PLURALS[singular] # We need to guess what the English plural might be. Keep this # function simple! It doesn't need to know about every possiblity; # only regular rules and the most common special cases. # # Reference: http://en.wikipedia.org/wiki/English_plural for ending in SIBILANT_ENDINGS: if singular.endswith(ending): return '%ses' % singular if singular.endswith('o') and singular[-2:-1] not in VOWELS: return '%ses' % singular if singular.endswith('y') and singular[-2:-1] not in VOWELS: return '%sies' % singular[:-1] return '%ss' % singular def WordSeries(words, conjunction='and'): """Convert a list of words to an English-language word series. Args: words: A list of word strings. conjunction: A coordinating conjunction. Returns: A single string containing all the words in the list separated by commas, the coordinating conjunction, and a serial comma, as appropriate. """ num_words = len(words) if num_words == 0: return '' elif num_words == 1: return words[0] elif num_words == 2: return (' %s ' % conjunction).join(words) else: return '%s, %s %s' % (', '.join(words[:-1]), conjunction, words[-1]) def AddIndefiniteArticle(noun): """Formats a noun with an appropriate indefinite article. Args: noun: A string representing a noun. Returns: A string containing noun prefixed with an indefinite article, e.g., "a thing" or "an object". """ if not noun: raise ValueError('argument must be a word: {!r}'.format(noun)) if noun[0] in VOWELS: return 'an ' + noun else: return 'a ' + noun def DecimalPrefix(quantity, unit, precision=1, min_scale=0, max_scale=None): """Formats an integer and a unit into a string, using decimal prefixes. The unit will be prefixed with an appropriate multiplier such that the formatted integer is less than 1,000 (as long as the raw integer is less than 10**27). For example: DecimalPrefix(576012, 'bps') -> '576 kbps' DecimalPrefix(576012, '') -> '576 k' DecimalPrefix(576, '') -> '576' DecimalPrefix(1574215, 'bps', 2) -> '1.6 Mbps' Only the SI prefixes which are powers of 10**3 will be used, so DecimalPrefix(100, 'thread') is '100 thread', not '1 hthread'. See also: BinaryPrefix() Args: quantity: A number. unit: A string, the dimension for quantity, with no multipliers (e.g. "bps"). If quantity is dimensionless, the empty string. precision: An integer, the minimum number of digits to display. min_scale: minimum power of 1000 to scale to, (None = unbounded). max_scale: maximum power of 1000 to scale to, (None = unbounded). Returns: A string, composed by the decimal (scaled) representation of quantity at the required precision, possibly followed by a space, the appropriate multiplier and the unit. """ return _Prefix(quantity, unit, precision, DecimalScale, min_scale=min_scale, max_scale=max_scale) def BinaryPrefix(quantity, unit, precision=1): """Formats an integer and a unit into a string, using binary prefixes. The unit will be prefixed with an appropriate multiplier such that the formatted integer is less than 1,024 (as long as the raw integer is less than 2**90). For example: BinaryPrefix(576012, 'B') -> '562 KiB' BinaryPrefix(576012, '') -> '562 Ki' See also: DecimalPrefix() Args: quantity: A number. unit: A string, the dimension for quantity, with no multipliers (e.g. "B"). 
If quantity is dimensionless, the empty string. precision: An integer, the minimum number of digits to display. Returns: A string, composed by the decimal (scaled) representation of quantity at the required precision, possibly followed by a space, the appropriate multiplier and the unit. """ return _Prefix(quantity, unit, precision, BinaryScale) def _Prefix(quantity, unit, precision, scale_callable, **args): """Formats an integer and a unit into a string. Args: quantity: A number. unit: A string, the dimension for quantity, with no multipliers (e.g. "bps"). If quantity is dimensionless, the empty string. precision: An integer, the minimum number of digits to display. scale_callable: A callable, scales the number and units. **args: named arguments passed to scale_callable. Returns: A string. """ separator = ' ' if unit else '' if not quantity: return '0%s%s' % (separator, unit) if quantity in [float('inf'), float('-inf')] or math.isnan(quantity): return '%f%s%s' % (quantity, separator, unit) scaled_quantity, scaled_unit = scale_callable(quantity, unit, **args) if scaled_unit: separator = ' ' print_pattern = '%%.%df%%s%%s' % max(0, (precision - int( math.log(abs(scaled_quantity), 10)) - 1)) return print_pattern % (scaled_quantity, separator, scaled_unit) # Prefixes and corresponding min_scale and max_scale for decimal formating. DECIMAL_PREFIXES = ('y', 'z', 'a', 'f', 'p', 'n', u'µ', 'm', '', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') DECIMAL_MIN_SCALE = -8 DECIMAL_MAX_SCALE = 8 def DecimalScale(quantity, unit, min_scale=0, max_scale=None): """Get the scaled value and decimal prefixed unit in a tupple. DecimalScale(576012, 'bps') -> (576.012, 'kbps') DecimalScale(1574215, 'bps') -> (1.574215, 'Mbps') Args: quantity: A number. unit: A string. min_scale: minimum power of 1000 to normalize to (None = unbounded) max_scale: maximum power of 1000 to normalize to (None = unbounded) Returns: A tuple of a scaled quantity (float) and BinaryPrefix for the units (string). """ if min_scale is None or min_scale < DECIMAL_MIN_SCALE: min_scale = DECIMAL_MIN_SCALE if max_scale is None or max_scale > DECIMAL_MAX_SCALE: max_scale = DECIMAL_MAX_SCALE powers = DECIMAL_PREFIXES[ min_scale - DECIMAL_MIN_SCALE:max_scale - DECIMAL_MIN_SCALE + 1] return _Scale(quantity, unit, 1000, powers, min_scale) def BinaryScale(quantity, unit): """Get the scaled value and binary prefixed unit in a tupple. BinaryPrefix(576012, 'B') -> (562.51171875, 'KiB') Args: quantity: A number. unit: A string. Returns: A tuple of a scaled quantity (float) and BinaryPrefix for the units (string). """ return _Scale(quantity, unit, 1024, ('Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')) def _Scale(quantity, unit, multiplier, prefixes=None, min_scale=None): """Returns the formatted quantity and unit into a tuple. Args: quantity: A number. unit: A string multiplier: An integer, the ratio between prefixes. prefixes: A sequence of strings. If empty or None, no scaling is done. min_scale: minimum power of multiplier corresponding to the first prefix. If None assumes prefixes are for positive powers only. Returns: A tuple containing the raw scaled quantity (float) and the prefixed unit. """ if (not prefixes or not quantity or math.isnan(quantity) or quantity in [float('inf'), float('-inf')]): return float(quantity), unit if min_scale is None: min_scale = 0 prefixes = ('',) + tuple(prefixes) value, prefix = quantity, '' for power, prefix in enumerate(prefixes, min_scale): # This is more numerically accurate than '/ multiplier ** power'. 
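      # Rescale from the original quantity at each step and stop at the first
      # prefix for which the scaled value drops below one multiplier unit.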
value = float(quantity) * multiplier ** -power if abs(value) < multiplier: break return value, prefix + unit # Contains the fractions where the full range [1/n ... (n - 1) / n] # is defined in Unicode. FRACTIONS = { 3: (None, u'⅓', u'⅔', None), 5: (None, u'⅕', u'⅖', u'⅗', u'⅘', None), 8: (None, u'⅛', u'¼', u'⅜', u'½', u'⅝', u'¾', u'⅞', None), } FRACTION_ROUND_DOWN = 1.0 / (max(FRACTIONS.keys()) * 2.0) FRACTION_ROUND_UP = 1.0 - FRACTION_ROUND_DOWN def PrettyFraction(number, spacer=''): """Convert a number into a string that might include a unicode fraction. This method returns the integer representation followed by the closest fraction of a denominator 2, 3, 4, 5 or 8. For instance, 0.33 will be converted to 1/3. The resulting representation should be less than 1/16 off. Args: number: a python number spacer: an optional string to insert between the integer and the fraction default is an empty string. Returns: a unicode string representing the number. """ # We do not want small negative numbers to display as -0. if number < -FRACTION_ROUND_DOWN: return u'-%s' % PrettyFraction(-number) number = abs(number) rounded = int(number) fract = number - rounded if fract >= FRACTION_ROUND_UP: return str(rounded + 1) errors_fractions = [] for denominator, fraction_elements in FRACTIONS.items(): numerator = int(round(denominator * fract)) error = abs(fract - (float(numerator) / float(denominator))) errors_fractions.append((error, fraction_elements[numerator])) unused_error, fraction_text = min(errors_fractions) if rounded and fraction_text: return u'%d%s%s' % (rounded, spacer, fraction_text) if rounded: return str(rounded) if fraction_text: return fraction_text return u'0' def Duration(duration, separator=' '): """Formats a nonnegative number of seconds into a human-readable string. Args: duration: A float duration in seconds. separator: A string separator between days, hours, minutes and seconds. Returns: Formatted string like '5d 12h 30m 45s'. """ try: delta = datetime.timedelta(seconds=duration) except OverflowError: return '>=' + TimeDelta(datetime.timedelta.max) return TimeDelta(delta, separator=separator) def TimeDelta(delta, separator=' '): """Format a datetime.timedelta into a human-readable string. Args: delta: The datetime.timedelta to format. separator: A string separator between days, hours, minutes and seconds. Returns: Formatted string like '5d 12h 30m 45s'. """ parts = [] seconds = delta.seconds if delta.days: parts.append('%dd' % delta.days) if seconds >= 3600: parts.append('%dh' % (seconds // 3600)) seconds %= 3600 if seconds >= 60: parts.append('%dm' % (seconds // 60)) seconds %= 60 seconds += delta.microseconds / 1e6 if seconds or not parts: parts.append('%gs' % seconds) return separator.join(parts) def NaturalSortKey(data): """Key function for "natural sort" ordering. This key function results in a lexigraph sort. For example: - ['1, '3', '20'] (not ['1', '20', '3']). - ['Model 9', 'Model 70 SE', 'Model 70 SE2'] (not ['Model 70 SE', 'Model 70 SE2', 'Model 9']). Usage: new_list = sorted(old_list, key=humanize.NaturalSortKey) or list_sort_in_place.sort(key=humanize.NaturalSortKey) Based on code by Steven Bazyl . Args: data: str, The key being compared in a sort. Returns: A list which is comparable to other lists for the purpose of sorting. """ segments = DIGIT_SPLITTER(data) for i, value in enumerate(segments): if value.isdigit(): segments[i] = int(value) return segments def UnixTimestamp(unix_ts, tz): """Format a UNIX timestamp into a human-readable string. 
Args: unix_ts: UNIX timestamp (number of seconds since epoch). May be a floating point number. tz: datetime.tzinfo object, timezone to use when formatting. Typical uses might want to rely on datelib or pytz to provide the tzinfo object, e.g. use datelib.UTC, datelib.US_PACIFIC, or pytz.timezone('Europe/Dublin'). Returns: Formatted string like '2013-11-17 11:08:27.720000 PST'. """ date_time = datetime.datetime.fromtimestamp(unix_ts, tz) return date_time.strftime('%Y-%m-%d %H:%M:%S.%f %Z') def AddOrdinalSuffix(value): """Adds an ordinal suffix to a non-negative integer (e.g. 1 -> '1st'). Args: value: A non-negative integer. Returns: A string containing the integer with a two-letter ordinal suffix. """ if value < 0 or value != int(value): raise ValueError('argument must be a non-negative integer: %s' % value) if value % 100 in (11, 12, 13): suffix = 'th' else: rem = value % 10 if rem == 1: suffix = 'st' elif rem == 2: suffix = 'nd' elif rem == 3: suffix = 'rd' else: suffix = 'th' return str(value) + suffix google-apputils-0.4.1/google/apputils/run_script_module.py0000640033465300116100000001453502263613200025001 0ustar craigcitroeng00000000000000#!/usr/bin/env python # Copyright 2010 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Script for running Google-style applications. Unlike normal scripts run through setuptools console_script entry points, Google-style applications must be run as top-level scripts. Given an already-imported module, users can use the RunScriptModule function to set up the appropriate executable environment to spawn a new Python process to run the module as a script. To use this technique in your project, first create a module called e.g. stubs.py with contents like: from google.apputils import run_script_module def RunMyScript(): import my.script run_script_module.RunScriptModule(my.script) def RunMyOtherScript(): import my.other_script run_script_module.RunScriptModule(my.other_script) Then, set up entry points in your setup.py that point to the functions in your stubs module: setup( ... entry_points = { 'console_scripts': [ 'my_script = my.stubs:RunMyScript', 'my_other_script = my.stubs.RunMyOtherScript', ], }, ) When your project is installed, setuptools will generate minimal wrapper scripts to call your stub functions, which in turn execv your script modules. That's it! """ __author__ = 'dborowitz@google.com (Dave Borowitz)' import os import re import sys def FindEnv(progname): """Find the program in the system path. Args: progname: The name of the program. Returns: The full pathname of the program. Raises: AssertionError: if the program was not found. """ for path in os.environ['PATH'].split(':'): fullname = os.path.join(path, progname) if os.access(fullname, os.X_OK): return fullname raise AssertionError( "Could not find an executable named '%s' in the system path" % progname) def GetPdbArgs(python): """Try to get the path to pdb.py and return it in a list. Args: python: The full path to a Python executable. Returns: A list of strings. 
If a relevant pdb.py was found, this will be ['/path/to/pdb.py']; if not, return ['-m', 'pdb'] and hope for the best. (This latter technique will fail for Python 2.2.) """ # Usually, python is /usr/bin/pythonxx and pdb is /usr/lib/pythonxx/pdb.py components = python.split('/') if len(components) >= 2: pdb_path = '/'.join(components[0:-2] + ['lib'] + components[-1:] + ['pdb.py']) if os.access(pdb_path, os.R_OK): return [pdb_path] # No pdb module found in the python path, default to -m pdb return ['-m', 'pdb'] def StripDelimiters(s, beg, end): if s[0] == beg: assert s[-1] == end return (s[1:-1], True) else: return (s, False) def StripQuotes(s): (s, stripped) = StripDelimiters(s, '"', '"') if not stripped: (s, stripped) = StripDelimiters(s, "'", "'") return s def PrintOurUsage(): """Print usage for the stub script.""" print 'Stub script %s (auto-generated). Options:' % sys.argv[0] print ('--helpstub ' 'Show help for stub script.') print ('--debug_binary ' 'Run python under debugger specified by --debugger.') print ('--debugger= ' "Debugger for --debug_binary. Default: 'gdb --args'.") print ('--debug_script ' 'Run wrapped script with python debugger module (pdb).') print ('--show_command_and_exit ' 'Print command which would be executed and exit.') print ('These options must appear first in the command line, all others will ' 'be passed to the wrapped script.') def RunScriptModule(module): """Run a module as a script. Locates the module's file and runs it in the current interpreter, or optionally a debugger. Args: module: The module object to run. """ args = sys.argv[1:] debug_binary = False debugger = 'gdb --args' debug_script = False show_command_and_exit = False while args: if args[0] == '--helpstub': PrintOurUsage() sys.exit(0) if args[0] == '--debug_binary': debug_binary = True args = args[1:] continue if args[0] == '--debug_script': debug_script = True args = args[1:] continue if args[0] == '--show_command_and_exit': show_command_and_exit = True args = args[1:] continue matchobj = re.match('--debugger=(.+)', args[0]) if matchobj is not None: debugger = StripQuotes(matchobj.group(1)) args = args[1:] continue break # Now look for my main python source file # TODO(dborowitz): This will fail if the module was zipimported, which means # no egg depending on this script runner can be zip_safe. main_filename = module.__file__ assert os.path.exists(main_filename), ('Cannot exec() %r: file not found.' % main_filename) assert os.access(main_filename, os.R_OK), ('Cannot exec() %r: file not' ' readable.' % main_filename) args = [main_filename] + args if debug_binary: debugger_args = debugger.split() program = debugger_args[0] # If pathname is not absolute, determine full path using PATH if not os.path.isabs(program): program = FindEnv(program) python_path = sys.executable command_vec = [python_path] if debug_script: command_vec.extend(GetPdbArgs(python_path)) args = [program] + debugger_args[1:] + command_vec + args elif debug_script: args = [sys.executable] + GetPdbArgs(program) + args else: program = sys.executable args = [sys.executable] + args if show_command_and_exit: print 'program: "%s"' % program print 'args:', args sys.exit(0) try: sys.stdout.flush() os.execv(program, args) except EnvironmentError as e: if not getattr(e, 'filename', None): e.filename = program # Add info to error message raise google-apputils-0.4.1/google/apputils/basetest.py0000750033465300116100000016321602263613200023061 0ustar craigcitroeng00000000000000#!/usr/bin/env python # Copyright 2010 Google Inc. 
All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Base functionality for google tests. This module contains base classes and high-level functions for Google-style tests. """ __author__ = 'dborowitz@google.com (Dave Borowitz)' import collections import difflib import getpass import itertools import json import os import re import signal import subprocess import sys import tempfile import types import unittest import urlparse try: import faulthandler # pylint: disable=g-import-not-at-top except ImportError: # //testing/pybase:pybase can't have deps on any extension modules as it # is used by code that is executed in such a way it cannot import them. :( # We use faulthandler if it is available (either via a user declared dep # or from the Python 3.3+ standard library). faulthandler = None from google.apputils import app # pylint: disable=g-import-not-at-top import gflags as flags from google.apputils import shellutil FLAGS = flags.FLAGS # ---------------------------------------------------------------------- # Internal functions to extract default flag values from environment. # ---------------------------------------------------------------------- def _GetDefaultTestRandomSeed(): random_seed = 301 value = os.environ.get('TEST_RANDOM_SEED', '') try: random_seed = int(value) except ValueError: pass return random_seed def _GetDefaultTestTmpdir(): """Get default test temp dir.""" tmpdir = os.environ.get('TEST_TMPDIR', '') if not tmpdir: tmpdir = os.path.join(tempfile.gettempdir(), 'google_apputils_basetest') return tmpdir flags.DEFINE_integer('test_random_seed', _GetDefaultTestRandomSeed(), 'Random seed for testing. Some test frameworks may ' 'change the default value of this flag between runs, so ' 'it is not appropriate for seeding probabilistic tests.', allow_override=1) flags.DEFINE_string('test_srcdir', os.environ.get('TEST_SRCDIR', ''), 'Root of directory tree where source files live', allow_override=1) flags.DEFINE_string('test_tmpdir', _GetDefaultTestTmpdir(), 'Directory for temporary testing files', allow_override=1) # We might need to monkey-patch TestResult so that it stops considering an # unexpected pass as a as a "successful result". For details, see # http://bugs.python.org/issue20165 def _MonkeyPatchTestResultForUnexpectedPasses(): """Workaround for .""" # pylint: disable=g-doc-return-or-yield,g-doc-args,g-wrong-blank-lines def wasSuccessful(self): """Tells whether or not this result was a success. Any unexpected pass is to be counted as a non-success. """ return (len(self.failures) == len(self.errors) == len(self.unexpectedSuccesses) == 0) # pylint: enable=g-doc-return-or-yield,g-doc-args,g-wrong-blank-lines test_result = unittest.result.TestResult() test_result.addUnexpectedSuccess('test') if test_result.wasSuccessful(): # The bug is present. unittest.result.TestResult.wasSuccessful = wasSuccessful if test_result.wasSuccessful(): # Warn the user if our hot-fix failed. 
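    # (wasSuccessful was replaced above, so if it still reports success for a
    # result containing an unexpected pass, the patch did not take effect.)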
sys.stderr.write('unittest.result.TestResult monkey patch to report' ' unexpected passes as failures did not work.\n') _MonkeyPatchTestResultForUnexpectedPasses() class TestCase(unittest.TestCase): """Extension of unittest.TestCase providing more powerful assertions.""" maxDiff = 80 * 20 def __init__(self, methodName='runTest'): super(TestCase, self).__init__(methodName) self.__recorded_properties = {} def shortDescription(self): """Format both the test method name and the first line of its docstring. If no docstring is given, only returns the method name. This method overrides unittest.TestCase.shortDescription(), which only returns the first line of the docstring, obscuring the name of the test upon failure. Returns: desc: A short description of a test method. """ desc = str(self) # NOTE: super() is used here instead of directly invoking # unittest.TestCase.shortDescription(self), because of the # following line that occurs later on: # unittest.TestCase = TestCase # Because of this, direct invocation of what we think is the # superclass will actually cause infinite recursion. doc_first_line = super(TestCase, self).shortDescription() if doc_first_line is not None: desc = '\n'.join((desc, doc_first_line)) return desc def assertStartsWith(self, actual, expected_start): """Assert that actual.startswith(expected_start) is True. Args: actual: str expected_start: str """ if not actual.startswith(expected_start): self.fail('%r does not start with %r' % (actual, expected_start)) def assertNotStartsWith(self, actual, unexpected_start): """Assert that actual.startswith(unexpected_start) is False. Args: actual: str unexpected_start: str """ if actual.startswith(unexpected_start): self.fail('%r does start with %r' % (actual, unexpected_start)) def assertEndsWith(self, actual, expected_end): """Assert that actual.endswith(expected_end) is True. Args: actual: str expected_end: str """ if not actual.endswith(expected_end): self.fail('%r does not end with %r' % (actual, expected_end)) def assertNotEndsWith(self, actual, unexpected_end): """Assert that actual.endswith(unexpected_end) is False. Args: actual: str unexpected_end: str """ if actual.endswith(unexpected_end): self.fail('%r does end with %r' % (actual, unexpected_end)) def assertSequenceStartsWith(self, prefix, whole, msg=None): """An equality assertion for the beginning of ordered sequences. If prefix is an empty sequence, it will raise an error unless whole is also an empty sequence. If prefix is not a sequence, it will raise an error if the first element of whole does not match. Args: prefix: A sequence expected at the beginning of the whole parameter. whole: The sequence in which to look for prefix. msg: Optional message to append on failure. """ try: prefix_len = len(prefix) except (TypeError, NotImplementedError): prefix = [prefix] prefix_len = 1 try: whole_len = len(whole) except (TypeError, NotImplementedError): self.fail('For whole: len(%s) is not supported, it appears to be type: ' '%s' % (whole, type(whole))) assert prefix_len <= whole_len, ( 'Prefix length (%d) is longer than whole length (%d).' % (prefix_len, whole_len)) if not prefix_len and whole_len: self.fail('Prefix length is 0 but whole length is %d: %s' % (len(whole), whole)) try: self.assertSequenceEqual(prefix, whole[:prefix_len], msg) except AssertionError: self.fail(msg or 'prefix: %s not found at start of whole: %s.' 
% (prefix, whole)) def assertContainsSubset(self, expected_subset, actual_set, msg=None): """Checks whether actual iterable is a superset of expected iterable.""" missing = set(expected_subset) - set(actual_set) if not missing: return missing_msg = 'Missing elements %s\nExpected: %s\nActual: %s' % ( missing, expected_subset, actual_set) if msg: msg += ': %s' % missing_msg else: msg = missing_msg self.fail(msg) def assertNoCommonElements(self, expected_seq, actual_seq, msg=None): """Checks whether actual iterable and expected iterable are disjoint.""" common = set(expected_seq) & set(actual_seq) if not common: return common_msg = 'Common elements %s\nExpected: %s\nActual: %s' % ( common, expected_seq, actual_seq) if msg: msg += ': %s' % common_msg else: msg = common_msg self.fail(msg) # TODO(user): Provide an assertItemsEqual method when our super class # does not provide one. That method went away in Python 3.2 (renamed # to assertCountEqual, or is that different? investigate). def assertItemsEqual(self, *args, **kwargs): # pylint: disable=g-doc-args """An unordered sequence specific comparison. It asserts that actual_seq and expected_seq have the same element counts. Equivalent to:: self.assertEqual(Counter(iter(actual_seq)), Counter(iter(expected_seq))) Asserts that each element has the same count in both sequences. Example: - [0, 1, 1] and [1, 0, 1] compare equal. - [0, 0, 1] and [0, 1] compare unequal. Args: expected_seq: A sequence containing elements we are expecting. actual_seq: The sequence that we are testing. msg: The message to be printed if the test fails. """ # pylint: enable=g-doc-args # In Python 3k this method is called assertCountEqual() if sys.version_info.major > 2: self.assertItemsEqual = super(TestCase, self).assertCountEqual self.assertItemsEqual(*args, **kwargs) return # For Python 2.x we must check for the issue below super_assert_items_equal = super(TestCase, self).assertItemsEqual try: super_assert_items_equal([23], []) # Force a fail to check behavior. except self.failureException as error_to_introspect: if 'First has 0, Second has 1: 23' in str(error_to_introspect): # It exhibits http://bugs.python.org/issue14832 # Always use our repaired method that swaps the arguments. self.assertItemsEqual = self._FixedAssertItemsEqual else: # It exhibits correct behavior. Always use the super's method. self.assertItemsEqual = super_assert_items_equal # Delegate this call to the correct method. All future calls will skip # this error patching code. self.assertItemsEqual(*args, **kwargs) assert 'Impossible: TestCase assertItemsEqual is broken.' def _FixedAssertItemsEqual(self, expected_seq, actual_seq, msg=None): """A version of assertItemsEqual that works around issue14832.""" super(TestCase, self).assertItemsEqual(actual_seq, expected_seq, msg=msg) def assertCountEqual(self, *args, **kwargs): # pylint: disable=g-doc-args """An unordered sequence specific comparison. Equivalent to assertItemsEqual(). This method is a compatibility layer for Python 3k, since 2to3 does not convert assertItemsEqual() calls into assertCountEqual() calls. Args: expected_seq: A sequence containing elements we are expecting. actual_seq: The sequence that we are testing. msg: The message to be printed if the test fails. """ # pylint: enable=g-doc-args self.assertItemsEqual(*args, **kwargs) def assertSameElements(self, expected_seq, actual_seq, msg=None): """Assert that two sequences have the same elements (in any order). 
This method, unlike assertItemsEqual, doesn't care about any duplicates in the expected and actual sequences. >> assertSameElements([1, 1, 1, 0, 0, 0], [0, 1]) # Doesn't raise an AssertionError If possible, you should use assertItemsEqual instead of assertSameElements. Args: expected_seq: A sequence containing elements we are expecting. actual_seq: The sequence that we are testing. msg: The message to be printed if the test fails. """ # `unittest2.TestCase` used to have assertSameElements, but it was # removed in favor of assertItemsEqual. As there's a unit test # that explicitly checks this behavior, I am leaving this method # alone. # Fail on strings: empirically, passing strings to this test method # is almost always a bug. If comparing the character sets of two strings # is desired, cast the inputs to sets or lists explicitly. if (isinstance(expected_seq, basestring) or isinstance(actual_seq, basestring)): self.fail('Passing a string to assertSameElements is usually a bug. ' 'Did you mean to use assertEqual?\n' 'Expected: %s\nActual: %s' % (expected_seq, actual_seq)) try: expected = dict([(element, None) for element in expected_seq]) actual = dict([(element, None) for element in actual_seq]) missing = [element for element in expected if element not in actual] unexpected = [element for element in actual if element not in expected] missing.sort() unexpected.sort() except TypeError: # Fall back to slower list-compare if any of the objects are # not hashable. expected = list(expected_seq) actual = list(actual_seq) expected.sort() actual.sort() missing, unexpected = _SortedListDifference(expected, actual) errors = [] if missing: errors.append('Expected, but missing:\n %r\n' % missing) if unexpected: errors.append('Unexpected, but present:\n %r\n' % unexpected) if errors: self.fail(msg or ''.join(errors)) # unittest2.TestCase.assertMulitilineEqual works very similarly, but it # has a different error format. However, I find this slightly more readable. def assertMultiLineEqual(self, first, second, msg=None): """Assert that two multi-line strings are equal.""" assert isinstance(first, types.StringTypes), ( 'First argument is not a string: %r' % (first,)) assert isinstance(second, types.StringTypes), ( 'Second argument is not a string: %r' % (second,)) if first == second: return if msg: failure_message = [msg, ':\n'] else: failure_message = ['\n'] for line in difflib.ndiff(first.splitlines(True), second.splitlines(True)): failure_message.append(line) if not line.endswith('\n'): failure_message.append('\n') raise self.failureException(''.join(failure_message)) def assertBetween(self, value, minv, maxv, msg=None): """Asserts that value is between minv and maxv (inclusive).""" if msg is None: msg = '"%r" unexpectedly not between "%r" and "%r"' % (value, minv, maxv) self.assert_(minv <= value, msg) self.assert_(maxv >= value, msg) def assertRegexMatch(self, actual_str, regexes, message=None): # pylint: disable=g-doc-bad-indent """Asserts that at least one regex in regexes matches str. If possible you should use assertRegexpMatches, which is a simpler version of this method. assertRegexpMatches takes a single regular expression (a string or re compiled object) instead of a list. Notes: 1. This function uses substring matching, i.e. the matching succeeds if *any* substring of the error message matches *any* regex in the list. This is more convenient for the user than full-string matching. 2. If regexes is the empty list, the matching will always fail. 3. 
Use regexes=[''] for a regex that will always pass. 4. '.' matches any single character *except* the newline. To match any character, use '(.|\n)'. 5. '^' matches the beginning of each line, not just the beginning of the string. Similarly, '$' matches the end of each line. 6. An exception will be thrown if regexes contains an invalid regex. Args: actual_str: The string we try to match with the items in regexes. regexes: The regular expressions we want to match against str. See "Notes" above for detailed notes on how this is interpreted. message: The message to be printed if the test fails. """ # pylint: enable=g-doc-bad-indent if isinstance(regexes, basestring): self.fail('regexes is a string; use assertRegexpMatches instead.') if not regexes: self.fail('No regexes specified.') regex_type = type(regexes[0]) for regex in regexes[1:]: if type(regex) is not regex_type: self.fail('regexes list must all be the same type.') if regex_type is bytes and isinstance(actual_str, unicode): regexes = [regex.decode('utf-8') for regex in regexes] regex_type = unicode elif regex_type is unicode and isinstance(actual_str, bytes): regexes = [regex.encode('utf-8') for regex in regexes] regex_type = bytes if regex_type is unicode: regex = u'(?:%s)' % u')|(?:'.join(regexes) elif regex_type is bytes: regex = b'(?:' + (b')|(?:'.join(regexes)) + b')' else: self.fail('Only know how to deal with unicode str or bytes regexes.') if not re.search(regex, actual_str, re.MULTILINE): self.fail(message or ('"%s" does not contain any of these ' 'regexes: %s.' % (actual_str, regexes))) def assertCommandSucceeds(self, command, regexes=(b'',), env=None, close_fds=True): """Asserts that a shell command succeeds (i.e. exits with code 0). Args: command: List or string representing the command to run. regexes: List of regular expression byte strings that match success. env: Dictionary of environment variable settings. close_fds: Whether or not to close all open fd's in the child after forking. """ (ret_code, err) = GetCommandStderr(command, env, close_fds) # Accommodate code which listed their output regexes w/o the b'' prefix by # converting them to bytes for the user. if isinstance(regexes[0], unicode): regexes = [regex.encode('utf-8') for regex in regexes] command_string = GetCommandString(command) self.assertEqual( ret_code, 0, 'Running command\n' '%s failed with error code %s and message\n' '%s' % ( _QuoteLongString(command_string), ret_code, _QuoteLongString(err))) self.assertRegexMatch( err, regexes, message=( 'Running command\n' '%s failed with error code %s and message\n' '%s which matches no regex in %s' % ( _QuoteLongString(command_string), ret_code, _QuoteLongString(err), regexes))) def assertCommandFails(self, command, regexes, env=None, close_fds=True): """Asserts a shell command fails and the error matches a regex in a list. Args: command: List or string representing the command to run. regexes: the list of regular expression strings. env: Dictionary of environment variable settings. close_fds: Whether or not to close all open fd's in the child after forking. """ (ret_code, err) = GetCommandStderr(command, env, close_fds) # Accommodate code which listed their output regexes w/o the b'' prefix by # converting them to bytes for the user. 
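    # (This mirrors the same normalization performed in assertCommandSucceeds
    # above.)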
if isinstance(regexes[0], unicode): regexes = [regex.encode('utf-8') for regex in regexes] command_string = GetCommandString(command) self.assertNotEqual( ret_code, 0, 'The following command succeeded while expected to fail:\n%s' % _QuoteLongString(command_string)) self.assertRegexMatch( err, regexes, message=( 'Running command\n' '%s failed with error code %s and message\n' '%s which matches no regex in %s' % ( _QuoteLongString(command_string), ret_code, _QuoteLongString(err), regexes))) class _AssertRaisesContext(object): def __init__(self, expected_exception, test_case, test_func): self.expected_exception = expected_exception self.test_case = test_case self.test_func = test_func def __enter__(self): return self def __exit__(self, exc_type, exc_value, tb): if exc_type is None: self.test_case.fail(self.expected_exception.__name__ + ' not raised') if not issubclass(exc_type, self.expected_exception): return False self.test_func(exc_value) return True def assertRaisesWithPredicateMatch(self, expected_exception, predicate, callable_obj=None, *args, **kwargs): # pylint: disable=g-doc-args """Asserts that exception is thrown and predicate(exception) is true. Args: expected_exception: Exception class expected to be raised. predicate: Function of one argument that inspects the passed-in exception and returns True (success) or False (please fail the test). callable_obj: Function to be called. args: Extra args. kwargs: Extra keyword args. Returns: A context manager if callable_obj is None. Otherwise, None. Raises: self.failureException if callable_obj does not raise a macthing exception. """ # pylint: enable=g-doc-args def Check(err): self.assert_(predicate(err), '%r does not match predicate %r' % (err, predicate)) context = self._AssertRaisesContext(expected_exception, self, Check) if callable_obj is None: return context with context: callable_obj(*args, **kwargs) def assertRaisesWithLiteralMatch(self, expected_exception, expected_exception_message, callable_obj=None, *args, **kwargs): # pylint: disable=g-doc-args """Asserts that the message in a raised exception equals the given string. Unlike assertRaisesRegexp, this method takes a literal string, not a regular expression. with self.assertRaisesWithLiteralMatch(ExType, 'message'): DoSomething() Args: expected_exception: Exception class expected to be raised. expected_exception_message: String message expected in the raised exception. For a raise exception e, expected_exception_message must equal str(e). callable_obj: Function to be called, or None to return a context. args: Extra args. kwargs: Extra kwargs. Returns: A context manager if callable_obj is None. Otherwise, None. Raises: self.failureException if callable_obj does not raise a macthing exception. """ # pylint: enable=g-doc-args def Check(err): actual_exception_message = str(err) self.assert_(expected_exception_message == actual_exception_message, 'Exception message does not match.\n' 'Expected: %r\n' 'Actual: %r' % (expected_exception_message, actual_exception_message)) context = self._AssertRaisesContext(expected_exception, self, Check) if callable_obj is None: return context with context: callable_obj(*args, **kwargs) def assertRaisesWithRegexpMatch(self, expected_exception, expected_regexp, callable_obj=None, *args, **kwargs): # pylint: disable=g-doc-args """Asserts that the message in a raised exception matches the given regexp. This is just a wrapper around assertRaisesRegexp. Please use assertRaisesRegexp instead of assertRaisesWithRegexpMatch. 
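    For example (DoSomething here is a hypothetical callable that is expected
    to raise):

      with self.assertRaisesWithRegexpMatch(ValueError, r'bad value \d+'):
        DoSomething()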
Args: expected_exception: Exception class expected to be raised. expected_regexp: Regexp (re pattern object or string) expected to be found in error message. callable_obj: Function to be called, or None to return a context. args: Extra args. kwargs: Extra keyword args. Returns: A context manager if callable_obj is None. Otherwise, None. Raises: self.failureException if callable_obj does not raise a macthing exception. """ # pylint: enable=g-doc-args # TODO(user): this is a good candidate for a global search-and-replace. return self.assertRaisesRegexp(expected_exception, expected_regexp, callable_obj, *args, **kwargs) def assertContainsInOrder(self, strings, target): """Asserts that the strings provided are found in the target in order. This may be useful for checking HTML output. Args: strings: A list of strings, such as [ 'fox', 'dog' ] target: A target string in which to look for the strings, such as 'The quick brown fox jumped over the lazy dog'. """ if not isinstance(strings, list): strings = [strings] current_index = 0 last_string = None for string in strings: index = target.find(str(string), current_index) if index == -1 and current_index == 0: self.fail("Did not find '%s' in '%s'" % (string, target)) elif index == -1: self.fail("Did not find '%s' after '%s' in '%s'" % (string, last_string, target)) last_string = string current_index = index def assertContainsSubsequence(self, container, subsequence): """Assert that "container" contains "subsequence" as a subsequence. Asserts that big_list contains all the elements of small_list, in order, but possibly with other elements interspersed. For example, [1, 2, 3] is a subsequence of [0, 0, 1, 2, 0, 3, 0] but not of [0, 0, 1, 3, 0, 2, 0]. Args: container: the list we're testing for subsequence inclusion. subsequence: the list we hope will be a subsequence of container. """ first_nonmatching = None reversed_container = list(reversed(container)) subsequence = list(subsequence) for e in subsequence: if e not in reversed_container: first_nonmatching = e break while e != reversed_container.pop(): pass if first_nonmatching is not None: self.fail('%s not a subsequence of %s. First non-matching element: %s' % (subsequence, container, first_nonmatching)) def assertTotallyOrdered(self, *groups): # pylint: disable=g-doc-args """Asserts that total ordering has been implemented correctly. For example, say you have a class A that compares only on its attribute x. Comparators other than __lt__ are omitted for brevity. class A(object): def __init__(self, x, y): self.x = xio self.y = y def __hash__(self): return hash(self.x) def __lt__(self, other): try: return self.x < other.x except AttributeError: return NotImplemented assertTotallyOrdered will check that instances can be ordered correctly. For example, self.assertTotallyOrdered( [None], # None should come before everything else. [1], # Integers sort earlier. [A(1, 'a')], [A(2, 'b')], # 2 is after 1. [A(3, 'c'), A(3, 'd')], # The second argument is irrelevant. [A(4, 'z')], ['foo']) # Strings sort last. Args: groups: A list of groups of elements. Each group of elements is a list of objects that are equal. The elements in each group must be less than the elements in the group after it. For example, these groups are totally ordered: [None], [1], [2, 2], [3]. 
""" # pylint: enable=g-doc-args def CheckOrder(small, big): """Ensures small is ordered before big.""" self.assertFalse(small == big, '%r unexpectedly equals %r' % (small, big)) self.assertTrue(small != big, '%r unexpectedly equals %r' % (small, big)) self.assertLess(small, big) self.assertFalse(big < small, '%r unexpectedly less than %r' % (big, small)) self.assertLessEqual(small, big) self.assertFalse(big <= small, '%r unexpectedly less than or equal to %r' % (big, small)) self.assertGreater(big, small) self.assertFalse(small > big, '%r unexpectedly greater than %r' % (small, big)) self.assertGreaterEqual(big, small) self.assertFalse(small >= big, '%r unexpectedly greater than or equal to %r' % (small, big)) def CheckEqual(a, b): """Ensures that a and b are equal.""" self.assertEqual(a, b) self.assertFalse(a != b, '%r unexpectedly equals %r' % (a, b)) self.assertEqual(hash(a), hash(b), 'hash %d of %r unexpectedly not equal to hash %d of %r' % (hash(a), a, hash(b), b)) self.assertFalse(a < b, '%r unexpectedly less than %r' % (a, b)) self.assertFalse(b < a, '%r unexpectedly less than %r' % (b, a)) self.assertLessEqual(a, b) self.assertLessEqual(b, a) self.assertFalse(a > b, '%r unexpectedly greater than %r' % (a, b)) self.assertFalse(b > a, '%r unexpectedly greater than %r' % (b, a)) self.assertGreaterEqual(a, b) self.assertGreaterEqual(b, a) # For every combination of elements, check the order of every pair of # elements. for elements in itertools.product(*groups): elements = list(elements) for index, small in enumerate(elements[:-1]): for big in elements[index + 1:]: CheckOrder(small, big) # Check that every element in each group is equal. for group in groups: for a in group: CheckEqual(a, a) for a, b in itertools.product(group, group): CheckEqual(a, b) def assertDictEqual(self, a, b, msg=None): """Raises AssertionError if a and b are not equal dictionaries. Args: a: A dict, the expected value. b: A dict, the actual value. msg: An optional str, the associated message. Raises: AssertionError: if the dictionaries are not equal. """ self.assertIsInstance(a, dict, 'First argument is not a dictionary') self.assertIsInstance(b, dict, 'Second argument is not a dictionary') def Sorted(list_of_items): try: return sorted(list_of_items) # In 3.3, unordered are possible. except TypeError: return list_of_items if a == b: return a_items = Sorted(list(a.iteritems())) b_items = Sorted(list(b.iteritems())) unexpected = [] missing = [] different = [] safe_repr = unittest.util.safe_repr def Repr(dikt): """Deterministic repr for dict.""" # Sort the entries based on their repr, not based on their sort order, # which will be non-deterministic across executions, for many types. entries = sorted((safe_repr(k), safe_repr(v)) for k, v in dikt.iteritems()) return '{%s}' % (', '.join('%s: %s' % pair for pair in entries)) message = ['%s != %s%s' % (Repr(a), Repr(b), ' (%s)' % msg if msg else '')] # The standard library default output confounds lexical difference with # value difference; treat them separately. 
for a_key, a_value in a_items: if a_key not in b: missing.append((a_key, a_value)) elif a_value != b[a_key]: different.append((a_key, a_value, b[a_key])) for b_key, b_value in b_items: if b_key not in a: unexpected.append((b_key, b_value)) if unexpected: message.append( 'Unexpected, but present entries:\n%s' % ''.join( '%s: %s\n' % (safe_repr(k), safe_repr(v)) for k, v in unexpected)) if different: message.append( 'repr() of differing entries:\n%s' % ''.join( '%s: %s != %s\n' % (safe_repr(k), safe_repr(a_value), safe_repr(b_value)) for k, a_value, b_value in different)) if missing: message.append( 'Missing entries:\n%s' % ''.join( ('%s: %s\n' % (safe_repr(k), safe_repr(v)) for k, v in missing))) raise self.failureException('\n'.join(message)) def assertUrlEqual(self, a, b): """Asserts that urls are equal, ignoring ordering of query params.""" parsed_a = urlparse.urlparse(a) parsed_b = urlparse.urlparse(b) self.assertEqual(parsed_a.scheme, parsed_b.scheme) self.assertEqual(parsed_a.netloc, parsed_b.netloc) self.assertEqual(parsed_a.path, parsed_b.path) self.assertEqual(parsed_a.fragment, parsed_b.fragment) self.assertEqual(sorted(parsed_a.params.split(';')), sorted(parsed_b.params.split(';'))) self.assertDictEqual( urlparse.parse_qs(parsed_a.query, keep_blank_values=True), urlparse.parse_qs(parsed_b.query, keep_blank_values=True)) def assertSameStructure(self, a, b, aname='a', bname='b', msg=None): """Asserts that two values contain the same structural content. The two arguments should be data trees consisting of trees of dicts and lists. They will be deeply compared by walking into the contents of dicts and lists; other items will be compared using the == operator. If the two structures differ in content, the failure message will indicate the location within the structures where the first difference is found. This may be helpful when comparing large structures. Args: a: The first structure to compare. b: The second structure to compare. aname: Variable name to use for the first structure in assertion messages. bname: Variable name to use for the second structure. msg: Additional text to include in the failure message. """ # Accumulate all the problems found so we can report all of them at once # rather than just stopping at the first problems = [] _WalkStructureForProblems(a, b, aname, bname, problems) # Avoid spamming the user toooo much max_problems_to_show = self.maxDiff // 80 if len(problems) > max_problems_to_show: problems = problems[0:max_problems_to_show-1] + ['...'] if problems: failure_message = '; '.join(problems) if msg: failure_message += (': ' + msg) self.fail(failure_message) def assertJsonEqual(self, first, second, msg=None): """Asserts that the JSON objects defined in two strings are equal. A summary of the differences will be included in the failure message using assertSameStructure. Args: first: A string contining JSON to decode and compare to second. second: A string contining JSON to decode and compare to first. msg: Additional text to include in the failure message. 
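    Raises:
      ValueError: if first or second cannot be decoded as JSON.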
""" try: first_structured = json.loads(first) except ValueError as e: raise ValueError('could not decode first JSON value %s: %s' % (first, e)) try: second_structured = json.loads(second) except ValueError as e: raise ValueError('could not decode second JSON value %s: %s' % (second, e)) self.assertSameStructure(first_structured, second_structured, aname='first', bname='second', msg=msg) def getRecordedProperties(self): """Return any properties that the user has recorded.""" return self.__recorded_properties def recordProperty(self, property_name, property_value): """Record an arbitrary property for later use. Args: property_name: str, name of property to record; must be a valid XML attribute name property_value: value of property; must be valid XML attribute value """ self.__recorded_properties[property_name] = property_value def _getAssertEqualityFunc(self, first, second): try: return super(TestCase, self)._getAssertEqualityFunc(first, second) except AttributeError: # This happens if unittest2.TestCase.__init__ was never run. It # usually means that somebody created a subclass just for the # assertions and has overriden __init__. "assertTrue" is a safe # value that will not make __init__ raise a ValueError (this is # a bit hacky). test_method = getattr(self, '_testMethodName', 'assertTrue') super(TestCase, self).__init__(test_method) return super(TestCase, self)._getAssertEqualityFunc(first, second) # This is not really needed here, but some unrelated code calls this # function. # TODO(user): sort it out. def _SortedListDifference(expected, actual): """Finds elements in only one or the other of two, sorted input lists. Returns a two-element tuple of lists. The first list contains those elements in the "expected" list but not in the "actual" list, and the second contains those elements in the "actual" list but not in the "expected" list. Duplicate elements in either input list are ignored. Args: expected: The list we expected. actual: The list we actualy got. Returns: (missing, unexpected) missing: items in expected that are not in actual. unexpected: items in actual that are not in expected. """ i = j = 0 missing = [] unexpected = [] while True: try: e = expected[i] a = actual[j] if e < a: missing.append(e) i += 1 while expected[i] == e: i += 1 elif e > a: unexpected.append(a) j += 1 while actual[j] == a: j += 1 else: i += 1 try: while expected[i] == e: i += 1 finally: j += 1 while actual[j] == a: j += 1 except IndexError: missing.extend(expected[i:]) unexpected.extend(actual[j:]) break return missing, unexpected # ---------------------------------------------------------------------- # Functions to compare the actual output of a test to the expected # (golden) output. # # Note: We could just replace the sys.stdout and sys.stderr objects, # but we actually redirect the underlying file objects so that if the # Python script execs any subprocess, their output will also be # redirected. # # Usage: # basetest.CaptureTestStdout() # ... do something ... # basetest.DiffTestStdout("... 
path to golden file ...") # ---------------------------------------------------------------------- class CapturedStream(object): """A temporarily redirected output stream.""" def __init__(self, stream, filename): self._stream = stream self._fd = stream.fileno() self._filename = filename # Keep original stream for later self._uncaptured_fd = os.dup(self._fd) # Open file to save stream to cap_fd = os.open(self._filename, os.O_CREAT | os.O_TRUNC | os.O_WRONLY, 0600) # Send stream to this file self._stream.flush() os.dup2(cap_fd, self._fd) os.close(cap_fd) def RestartCapture(self): """Resume capturing output to a file (after calling StopCapture).""" # Original stream fd assert self._uncaptured_fd # Append stream to file cap_fd = os.open(self._filename, os.O_CREAT | os.O_APPEND | os.O_WRONLY, 0600) # Send stream to this file self._stream.flush() os.dup2(cap_fd, self._fd) os.close(cap_fd) def StopCapture(self): """Remove output redirection.""" self._stream.flush() os.dup2(self._uncaptured_fd, self._fd) def filename(self): return self._filename def __del__(self): self.StopCapture() os.close(self._uncaptured_fd) del self._uncaptured_fd _captured_streams = {} def _CaptureTestOutput(stream, filename): """Redirect an output stream to a file. Args: stream: Should be sys.stdout or sys.stderr. filename: File where output should be stored. """ assert not _captured_streams.has_key(stream) _captured_streams[stream] = CapturedStream(stream, filename) def _StopCapturingStream(stream): """Stops capturing the given output stream. Args: stream: Should be sys.stdout or sys.stderr. """ assert _captured_streams.has_key(stream) for cap_stream in _captured_streams.itervalues(): cap_stream.StopCapture() def _DiffTestOutput(stream, golden_filename): """Compare ouput of redirected stream to contents of golden file. Args: stream: Should be sys.stdout or sys.stderr. golden_filename: Absolute path to golden file. """ _StopCapturingStream(stream) cap = _captured_streams[stream] try: _Diff(cap.filename(), golden_filename) finally: # remove the current stream del _captured_streams[stream] # restore other stream capture for cap_stream in _captured_streams.itervalues(): cap_stream.RestartCapture() # We want to emit exactly one notice to stderr telling the user where to look # for their stdout or stderr that may have been consumed to aid debugging. _notified_test_output_path = '' def _MaybeNotifyAboutTestOutput(outdir): global _notified_test_output_path if _notified_test_output_path != outdir: _notified_test_output_path = outdir sys.stderr.write('\nNOTE: Some tests capturing output into: %s\n' % outdir) class _DiffingTestOutputContext(object): def __init__(self, diff_fn): self._diff_fn = diff_fn def __enter__(self): return self def __exit__(self, exc_type, exc_value, tb): self._diff_fn() return True # Public interface def CaptureTestStdout(outfile='', expected_output_filepath=None): """Capture the stdout stream to a file. If expected_output_filepath, then this function returns a context manager that stops capturing and performs a diff when the context is exited. with basetest.CaptureTestStdout(expected_output_filepath=some_filepath): sys.stdout.print(....) Otherwise, StopCapturing() must be called to stop capturing stdout, and then DiffTestStdout() must be called to do the comparison. Args: outfile: The path to the local filesystem file to which to capture output; if omitted, a standard filepath in --test_tmpdir will be used. 
expected_output_filepath: The path to the local filesystem file containing the expected output to be diffed against when the context is exited. Returns: A context manager if expected_output_filepath is specified, otherwise None. """ if not outfile: outfile = os.path.join(FLAGS.test_tmpdir, 'captured.out') outdir = FLAGS.test_tmpdir else: outdir = os.path.dirname(outfile) _MaybeNotifyAboutTestOutput(outdir) _CaptureTestOutput(sys.stdout, outfile) if expected_output_filepath is not None: return _DiffingTestOutputContext( lambda: DiffTestStdout(expected_output_filepath)) def CaptureTestStderr(outfile='', expected_output_filepath=None): """Capture the stderr stream to a file. If expected_output_filepath, then this function returns a context manager that stops capturing and performs a diff when the context is exited. with basetest.CaptureTestStderr(expected_output_filepath=some_filepath): sys.stderr.print(....) Otherwise, StopCapturing() must be called to stop capturing stderr, and then DiffTestStderr() must be called to do the comparison. Args: outfile: The path to the local filesystem file to which to capture output; if omitted, a standard filepath in --test_tmpdir will be used. expected_output_filepath: The path to the local filesystem file containing the expected output, to be diffed against when the context is exited. Returns: A context manager if expected_output_filepath is specified, otherwise None. """ if not outfile: outfile = os.path.join(FLAGS.test_tmpdir, 'captured.err') outdir = FLAGS.test_tmpdir else: outdir = os.path.dirname(outfile) _MaybeNotifyAboutTestOutput(outdir) _CaptureTestOutput(sys.stderr, outfile) if expected_output_filepath is not None: return _DiffingTestOutputContext( lambda: DiffTestStderr(expected_output_filepath)) def DiffTestStdout(golden): _DiffTestOutput(sys.stdout, golden) def DiffTestStderr(golden): _DiffTestOutput(sys.stderr, golden) def StopCapturing(): """Stop capturing redirected output. Debugging sucks if you forget!""" while _captured_streams: _, cap_stream = _captured_streams.popitem() cap_stream.StopCapture() del cap_stream def _WriteTestData(data, filename): """Write data into file named filename.""" fd = os.open(filename, os.O_CREAT | os.O_TRUNC | os.O_WRONLY, 0o600) if not isinstance(data, (bytes, bytearray)): data = data.encode('utf-8') os.write(fd, data) os.close(fd) _INT_TYPES = (int, long) # Sadly there is no types.IntTypes defined for us. def _WalkStructureForProblems(a, b, aname, bname, problem_list): """The recursive comparison behind assertSameStructure.""" if type(a) != type(b) and not ( isinstance(a, _INT_TYPES) and isinstance(b, _INT_TYPES)): # We do not distinguish between int and long types as 99.99% of Python 2 # code should never care. They collapse into a single type in Python 3. 
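  # Illustrative example, not from the original source: with a=1 (an int) and
  # b=1.0 (a float), the branch below records
  #   "a is a <type 'int'> but b is a <type 'float'>"
  # and stops descending into this pair of values.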
problem_list.append('%s is a %r but %s is a %r' % (aname, type(a), bname, type(b))) # If they have different types there's no point continuing return if isinstance(a, collections.Mapping): for k in a: if k in b: _WalkStructureForProblems(a[k], b[k], '%s[%r]' % (aname, k), '%s[%r]' % (bname, k), problem_list) else: problem_list.append('%s has [%r] but %s does not' % (aname, k, bname)) for k in b: if k not in a: problem_list.append('%s lacks [%r] but %s has it' % (aname, k, bname)) # Strings are Sequences but we'll just do those with regular != elif isinstance(a, collections.Sequence) and not isinstance(a, basestring): minlen = min(len(a), len(b)) for i in xrange(minlen): _WalkStructureForProblems(a[i], b[i], '%s[%d]' % (aname, i), '%s[%d]' % (bname, i), problem_list) for i in xrange(minlen, len(a)): problem_list.append('%s has [%i] but %s does not' % (aname, i, bname)) for i in xrange(minlen, len(b)): problem_list.append('%s lacks [%i] but %s has it' % (aname, i, bname)) else: if a != b: problem_list.append('%s is %r but %s is %r' % (aname, a, bname, b)) class OutputDifferedError(AssertionError): pass class DiffFailureError(Exception): pass def _DiffViaExternalProgram(lhs, rhs, external_diff): """Compare two files using an external program; raise if it reports error.""" # The behavior of this function matches the old _Diff() method behavior # when a TEST_DIFF environment variable was set. A few old things at # Google depended on that functionality. command = [external_diff, lhs, rhs] try: subprocess.check_output(command, close_fds=True, stderr=subprocess.STDOUT) return True # No diffs. except subprocess.CalledProcessError as error: failure_output = error.output if error.returncode == 1: raise OutputDifferedError('\nRunning %s\n%s\nTest output differed from' ' golden file\n' % (command, failure_output)) except EnvironmentError as error: failure_output = str(error) # Running the program failed in some way that wasn't a diff. raise DiffFailureError('\nRunning %s\n%s\nFailure diffing test output' ' with golden file\n' % (command, failure_output)) def _Diff(lhs, rhs): """Given two pathnames, compare two files. Raise if they differ.""" # Some people rely on being able to specify TEST_DIFF in the environment to # have tests use their own diff wrapper for use when updating golden data. external_diff = os.environ.get('TEST_DIFF') if external_diff: return _DiffViaExternalProgram(lhs, rhs, external_diff) try: with open(lhs, 'r') as lhs_f: with open(rhs, 'r') as rhs_f: diff_text = ''.join( difflib.unified_diff(lhs_f.readlines(), rhs_f.readlines())) if not diff_text: return True raise OutputDifferedError('\nComparing %s and %s\nTest output differed ' 'from golden file:\n%s' % (lhs, rhs, diff_text)) except EnvironmentError as error: # Unable to read the files. raise DiffFailureError('\nComparing %s and %s\nFailure diffing test output ' 'with golden file: %s\n' % (lhs, rhs, error)) def DiffTestStringFile(data, golden): """Diff data agains a golden file.""" data_file = os.path.join(FLAGS.test_tmpdir, 'provided.dat') _WriteTestData(data, data_file) _Diff(data_file, golden) def DiffTestStrings(data1, data2): """Diff two strings.""" diff_text = ''.join( difflib.unified_diff(data1.splitlines(True), data2.splitlines(True))) if not diff_text: return raise OutputDifferedError('\nTest strings differed:\n%s' % diff_text) def DiffTestFiles(testgen, golden): _Diff(testgen, golden) def GetCommandString(command): """Returns an escaped string that can be used as a shell command. 
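  For example (illustrative): a list such as ['ls', '-l', 'my dir'] is escaped
  and joined into a single shell-safe string via shellutil.ShellEscapeList,
  while a plain string such as 'ls -l' is returned unchanged.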
Args: command: List or string representing the command to run. Returns: A string suitable for use as a shell command. """ if isinstance(command, types.StringTypes): return command else: return shellutil.ShellEscapeList(command) def GetCommandStderr(command, env=None, close_fds=True): """Runs the given shell command and returns a tuple. Args: command: List or string representing the command to run. env: Dictionary of environment variable settings. close_fds: Whether or not to close all open fd's in the child after forking. Returns: Tuple of (exit status, text printed to stdout and stderr by the command). """ if env is None: env = {} # Forge needs PYTHON_RUNFILES in order to find the runfiles directory when a # Python executable is run by a Python test. Pass this through from the # parent environment if not explicitly defined. if os.environ.get('PYTHON_RUNFILES') and not env.get('PYTHON_RUNFILES'): env['PYTHON_RUNFILES'] = os.environ['PYTHON_RUNFILES'] use_shell = isinstance(command, types.StringTypes) process = subprocess.Popen( command, close_fds=close_fds, env=env, shell=use_shell, stderr=subprocess.STDOUT, stdout=subprocess.PIPE) output = process.communicate()[0] exit_status = process.wait() return (exit_status, output) def _QuoteLongString(s): """Quotes a potentially multi-line string to make the start and end obvious. Args: s: A string. Returns: The quoted string. """ if isinstance(s, (bytes, bytearray)): try: s = s.decode('utf-8') except UnicodeDecodeError: s = str(s) return ('8<-----------\n' + s + '\n' + '----------->8\n') class TestProgramManualRun(unittest.TestProgram): """A TestProgram which runs the tests manually.""" def runTests(self, do_run=False): """Run the tests.""" if do_run: unittest.TestProgram.runTests(self) def main(*args, **kwargs): # pylint: disable=g-doc-args """Executes a set of Python unit tests. Usually this function is called without arguments, so the unittest.TestProgram instance will get created with the default settings, so it will run all test methods of all TestCase classes in the __main__ module. Args: args: Positional arguments passed through to unittest.TestProgram.__init__. kwargs: Keyword arguments passed through to unittest.TestProgram.__init__. """ # pylint: enable=g-doc-args _RunInApp(RunTests, args, kwargs) def _IsInAppMain(): """Returns True iff app.main or app.really_start is active.""" f = sys._getframe().f_back # pylint: disable=protected-access app_dict = app.__dict__ while f: if f.f_globals is app_dict and f.f_code.co_name in ('run', 'really_start'): return True f = f.f_back return False class SavedFlag(object): """Helper class for saving and restoring a flag value.""" def __init__(self, flag): self.flag = flag self.value = flag.value self.present = flag.present def RestoreFlag(self): self.flag.value = self.value self.flag.present = self.present def _RunInApp(function, args, kwargs): """Executes a set of Python unit tests, ensuring app.really_start. Most users should call basetest.main() instead of _RunInApp. _RunInApp calculates argv to be the command-line arguments of this program (without the flags), sets the default of FLAGS.alsologtostderr to True, then it calls function(argv, args, kwargs), making sure that `function' will get called within app.run() or app.really_start(). _RunInApp does this by checking whether it is called by either app.run() or app.really_start(), or by calling app.really_start() explicitly. 
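  A sketch of the first case (illustrative, not from the original text):

    def main(unused_argv):
      basetest.main()  # ends up in _RunInApp, which sees app.run() is active

    if __name__ == '__main__':
      app.run()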
The reason why app.really_start has to be ensured is to make sure that flags are parsed and stripped properly, and other initializations done by the app module are also carried out, no matter if basetest.run() is called from within or outside app.run(). If _RunInApp is called from within app.run(), then it will reparse sys.argv and pass the result without command-line flags into the argv argument of `function'. The reason why this parsing is needed is that __main__.main() calls basetest.main() without passing its argv. So the only way _RunInApp could get to know the argv without the flags is that it reparses sys.argv. _RunInApp changes the default of FLAGS.alsologtostderr to True so that the test program's stderr will contain all the log messages unless otherwise specified on the command-line. This overrides any explicit assignment to FLAGS.alsologtostderr by the test program prior to the call to _RunInApp() (e.g. in __main__.main). Please note that _RunInApp (and the function it calls) is allowed to make changes to kwargs. Args: function: basetest.RunTests or a similar function. It will be called as function(argv, args, kwargs) where argv is a list containing the elements of sys.argv without the command-line flags. args: Positional arguments passed through to unittest.TestProgram.__init__. kwargs: Keyword arguments passed through to unittest.TestProgram.__init__. """ if faulthandler: try: faulthandler.enable() except Exception as e: # pylint: disable=broad-except sys.stderr.write('faulthandler.enable() failed %r; ignoring.\n' % e) else: faulthandler.register(signal.SIGTERM) if _IsInAppMain(): # Save command-line flags so the side effects of FLAGS(sys.argv) can be # undone. saved_flags = dict((f.name, SavedFlag(f)) for f in FLAGS.FlagDict().itervalues()) # Here we'd like to change the default of alsologtostderr from False to # True, so the test programs's stderr will contain all the log messages. # The desired effect is that if --alsologtostderr is not specified in # the command-line, and __main__.main doesn't set FLAGS.logtostderr # before calling us (basetest.main), then our changed default takes # effect and alsologtostderr becomes True. # # However, we cannot achive this exact effect, because here we cannot # distinguish these situations: # # A. main.__main__ has changed it to False, it hasn't been specified on # the command-line, and the default was kept as False. We should keep # it as False. # # B. main.__main__ hasn't changed it, it hasn't been specified on the # command-line, and the default was kept as False. We should change # it to True here. # # As a workaround, we assume that main.__main__ never changes # FLAGS.alsologstderr to False, thus the value of the flag is determined # by its default unless the command-line overrides it. We want to change # the default to True, and we do it by setting the flag value to True, and # letting the command-line override it in FLAGS(sys.argv) below by not # restoring it in saved_flag.RestoreFlag(). if 'alsologtostderr' in saved_flags: FLAGS.alsologtostderr = True del saved_flags['alsologtostderr'] # The call FLAGS(sys.argv) parses sys.argv, returns the arguments # without the flags, and -- as a side effect -- modifies flag values in # FLAGS. We don't want the side effect, because we don't want to # override flag changes the program did (e.g. in __main__.main) # after the command-line has been parsed. So we have the for loop below # to change back flags to their old values. 
argv = FLAGS(sys.argv) for saved_flag in saved_flags.itervalues(): saved_flag.RestoreFlag() function(argv, args, kwargs) else: # Send logging to stderr. Use --alsologtostderr instead of --logtostderr # in case tests are reading their own logs. if 'alsologtostderr' in FLAGS: FLAGS.SetDefault('alsologtostderr', True) def Main(argv): function(argv, args, kwargs) app.really_start(main=Main) def RunTests(argv, args, kwargs): """Executes a set of Python unit tests within app.really_start. Most users should call basetest.main() instead of RunTests. Please note that RunTests should be called from app.really_start (which is called from app.run()). Calling basetest.main() would ensure that. Please note that RunTests is allowed to make changes to kwargs. Args: argv: sys.argv with the command-line flags removed from the front, i.e. the argv with which app.run() has called __main__.main. args: Positional arguments passed through to unittest.TestProgram.__init__. kwargs: Keyword arguments passed through to unittest.TestProgram.__init__. """ test_runner = kwargs.get('testRunner') # Make sure tmpdir exists if not os.path.isdir(FLAGS.test_tmpdir): os.makedirs(FLAGS.test_tmpdir) # Run main module setup, if it exists main_mod = sys.modules['__main__'] if hasattr(main_mod, 'setUp') and callable(main_mod.setUp): main_mod.setUp() # Let unittest.TestProgram.__init__ called by # TestProgramManualRun.__init__ do its own argv parsing, e.g. for '-v', # on argv, which is sys.argv without the command-line flags. kwargs.setdefault('argv', argv) try: result = None test_program = TestProgramManualRun(*args, **kwargs) if test_runner: test_program.testRunner = test_runner else: test_program.testRunner = unittest.TextTestRunner( verbosity=test_program.verbosity) result = test_program.testRunner.run(test_program.test) finally: # Run main module teardown, if it exists if hasattr(main_mod, 'tearDown') and callable(main_mod.tearDown): main_mod.tearDown() sys.exit(not result.wasSuccessful()) google-apputils-0.4.1/google/apputils/datelib.py0000640033465300116100000003623302263613200022647 0ustar craigcitroeng00000000000000#!/usr/bin/env python # Copyright 2002 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Set of classes and functions for dealing with dates and timestamps. The BaseTimestamp and Timestamp are timezone-aware wrappers around Python datetime.datetime class. """ import calendar import copy import datetime import re import sys import time import types import warnings import dateutil.parser import pytz _MICROSECONDS_PER_SECOND = 1000000 _MICROSECONDS_PER_SECOND_F = float(_MICROSECONDS_PER_SECOND) def SecondsToMicroseconds(seconds): """Convert seconds to microseconds. Args: seconds: number Returns: microseconds """ return seconds * _MICROSECONDS_PER_SECOND def MicrosecondsToSeconds(microseconds): """Convert microseconds to seconds. Args: microseconds: A number representing some duration of time measured in microseconds. Returns: A number representing the same duration of time measured in seconds. 
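  Example (illustrative): MicrosecondsToSeconds(1500000) returns 1.5; the
  division is done in floating point so sub-second precision is kept.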
""" return microseconds / _MICROSECONDS_PER_SECOND_F def _GetCurrentTimeMicros(): """Get the current time in microseconds, in UTC. Returns: The number of microseconds since the epoch. """ return int(SecondsToMicroseconds(time.time())) def GetSecondsSinceEpoch(time_tuple): """Convert time_tuple (in UTC) to seconds (also in UTC). Args: time_tuple: tuple with at least 6 items. Returns: seconds. """ return calendar.timegm(time_tuple[:6] + (0, 0, 0)) def GetTimeMicros(time_tuple): """Get a time in microseconds. Arguments: time_tuple: A (year, month, day, hour, minute, second) tuple (the python time tuple format) in the UTC time zone. Returns: The number of microseconds since the epoch represented by the input tuple. """ return int(SecondsToMicroseconds(GetSecondsSinceEpoch(time_tuple))) def DatetimeToUTCMicros(date): """Converts a datetime object to microseconds since the epoch in UTC. Args: date: A datetime to convert. Returns: The number of microseconds since the epoch, in UTC, represented by the input datetime. """ # Using this guide: http://wiki.python.org/moin/WorkingWithTime # And this conversion guide: http://docs.python.org/library/time.html # Turn the date parameter into a tuple (struct_time) that can then be # manipulated into a long value of seconds. During the conversion from # struct_time to long, the source date in UTC, and so it follows that the # correct transformation is calendar.timegm() micros = calendar.timegm(date.utctimetuple()) * _MICROSECONDS_PER_SECOND return micros + date.microsecond def DatetimeToUTCMillis(date): """Converts a datetime object to milliseconds since the epoch in UTC. Args: date: A datetime to convert. Returns: The number of milliseconds since the epoch, in UTC, represented by the input datetime. """ return DatetimeToUTCMicros(date) / 1000 def UTCMicrosToDatetime(micros, tz=None): """Converts a microsecond epoch time to a datetime object. Args: micros: A UTC time, expressed in microseconds since the epoch. tz: The desired tzinfo for the datetime object. If None, the datetime will be naive. Returns: The datetime represented by the input value. """ # The conversion from micros to seconds for input into the # utcfromtimestamp function needs to be done as a float to make sure # we dont lose the sub-second resolution of the input time. dt = datetime.datetime.utcfromtimestamp( micros / _MICROSECONDS_PER_SECOND_F) if tz is not None: dt = tz.fromutc(dt) return dt def UTCMillisToDatetime(millis, tz=None): """Converts a millisecond epoch time to a datetime object. Args: millis: A UTC time, expressed in milliseconds since the epoch. tz: The desired tzinfo for the datetime object. If None, the datetime will be naive. Returns: The datetime represented by the input value. 
""" return UTCMicrosToDatetime(millis * 1000, tz) UTC = pytz.UTC US_PACIFIC = pytz.timezone('US/Pacific') class TimestampError(ValueError): """Generic timestamp-related error.""" pass class TimezoneNotSpecifiedError(TimestampError): """This error is raised when timezone is not specified.""" pass class TimeParseError(TimestampError): """This error is raised when we can't parse the input.""" pass # TODO(user): this class needs to handle daylight better class LocalTimezoneClass(datetime.tzinfo): """This class defines local timezone.""" ZERO = datetime.timedelta(0) HOUR = datetime.timedelta(hours=1) STDOFFSET = datetime.timedelta(seconds=-time.timezone) if time.daylight: DSTOFFSET = datetime.timedelta(seconds=-time.altzone) else: DSTOFFSET = STDOFFSET DSTDIFF = DSTOFFSET - STDOFFSET def utcoffset(self, dt): """datetime -> minutes east of UTC (negative for west of UTC).""" if self._isdst(dt): return self.DSTOFFSET else: return self.STDOFFSET def dst(self, dt): """datetime -> DST offset in minutes east of UTC.""" if self._isdst(dt): return self.DSTDIFF else: return self.ZERO def tzname(self, dt): """datetime -> string name of time zone.""" return time.tzname[self._isdst(dt)] def _isdst(self, dt): """Return true if given datetime is within local DST.""" tt = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.weekday(), 0, -1) stamp = time.mktime(tt) tt = time.localtime(stamp) return tt.tm_isdst > 0 def __repr__(self): """Return string ''.""" return '' def localize(self, dt, unused_is_dst=False): """Convert naive time to local time.""" if dt.tzinfo is not None: raise ValueError('Not naive datetime (tzinfo is already set)') return dt.replace(tzinfo=self) def normalize(self, dt, unused_is_dst=False): """Correct the timezone information on the given datetime.""" if dt.tzinfo is None: raise ValueError('Naive time - no tzinfo set') return dt.replace(tzinfo=self) LocalTimezone = LocalTimezoneClass() class BaseTimestamp(datetime.datetime): """Our kind of wrapper over datetime.datetime. The objects produced by methods now, today, fromtimestamp, utcnow, utcfromtimestamp are timezone-aware (with correct timezone). We also overload __add__ and __sub__ method, to fix the result of arithmetic operations. """ LocalTimezone = LocalTimezone @classmethod def AddLocalTimezone(cls, obj): """If obj is naive, add local timezone to it.""" if not obj.tzinfo: return obj.replace(tzinfo=cls.LocalTimezone) return obj @classmethod def Localize(cls, obj): """If obj is naive, localize it to cls.LocalTimezone.""" if not obj.tzinfo: return cls.LocalTimezone.localize(obj) return obj def __add__(self, *args, **kwargs): """x.__add__(y) <==> x+y.""" r = super(BaseTimestamp, self).__add__(*args, **kwargs) return type(self)(r.year, r.month, r.day, r.hour, r.minute, r.second, r.microsecond, r.tzinfo) def __sub__(self, *args, **kwargs): """x.__add__(y) <==> x-y.""" r = super(BaseTimestamp, self).__sub__(*args, **kwargs) if isinstance(r, datetime.datetime): return type(self)(r.year, r.month, r.day, r.hour, r.minute, r.second, r.microsecond, r.tzinfo) return r @classmethod def now(cls, *args, **kwargs): """Get a timestamp corresponding to right now. Args: args: Positional arguments to pass to datetime.datetime.now(). kwargs: Keyword arguments to pass to datetime.datetime.now(). If tz is not specified, local timezone is assumed. Returns: A new BaseTimestamp with tz's local day and time. """ return cls.AddLocalTimezone( super(BaseTimestamp, cls).now(*args, **kwargs)) @classmethod def today(cls): """Current BaseTimestamp. 
Same as self.__class__.fromtimestamp(time.time()). Returns: New self.__class__. """ return cls.AddLocalTimezone(super(BaseTimestamp, cls).today()) @classmethod def fromtimestamp(cls, *args, **kwargs): """Get a new localized timestamp from a POSIX timestamp. Args: args: Positional arguments to pass to datetime.datetime.fromtimestamp(). kwargs: Keyword arguments to pass to datetime.datetime.fromtimestamp(). If tz is not specified, local timezone is assumed. Returns: A new BaseTimestamp with tz's local day and time. """ return cls.Localize( super(BaseTimestamp, cls).fromtimestamp(*args, **kwargs)) @classmethod def utcnow(cls): """Return a new BaseTimestamp representing UTC day and time.""" return super(BaseTimestamp, cls).utcnow().replace(tzinfo=pytz.utc) @classmethod def utcfromtimestamp(cls, *args, **kwargs): """timestamp -> UTC datetime from a POSIX timestamp (like time.time()).""" return super(BaseTimestamp, cls).utcfromtimestamp( *args, **kwargs).replace(tzinfo=pytz.utc) @classmethod def strptime(cls, date_string, format, tz=None): """Parse date_string according to format and construct BaseTimestamp. Args: date_string: string passed to time.strptime. format: format string passed to time.strptime. tz: if not specified, local timezone assumed. Returns: New BaseTimestamp. """ date_time = super(BaseTimestamp, cls).strptime(date_string, format) return (tz.localize if tz else cls.Localize)(date_time) def astimezone(self, *args, **kwargs): """tz -> convert to time in new timezone tz.""" r = super(BaseTimestamp, self).astimezone(*args, **kwargs) return type(self)(r.year, r.month, r.day, r.hour, r.minute, r.second, r.microsecond, r.tzinfo) @classmethod def FromMicroTimestamp(cls, ts): """Create new Timestamp object from microsecond UTC timestamp value. Args: ts: integer microsecond UTC timestamp Returns: New cls() """ return cls.utcfromtimestamp(ts/_MICROSECONDS_PER_SECOND_F) def AsSecondsSinceEpoch(self): """Return number of seconds since epoch (timestamp in seconds).""" return GetSecondsSinceEpoch(self.utctimetuple()) def AsMicroTimestamp(self): """Return microsecond timestamp constructed from this object.""" return (SecondsToMicroseconds(self.AsSecondsSinceEpoch()) + self.microsecond) @classmethod def combine(cls, datepart, timepart, tz=None): """Combine date and time into timestamp, timezone-aware. Args: datepart: datetime.date timepart: datetime.time tz: timezone or None Returns: timestamp object """ result = super(BaseTimestamp, cls).combine(datepart, timepart) if tz: result = tz.localize(result) return result # Conversions from interval suffixes to number of seconds. # (m => 60s, d => 86400s, etc) _INTERVAL_CONV_DICT = {'s': 1} _INTERVAL_CONV_DICT['m'] = 60 * _INTERVAL_CONV_DICT['s'] _INTERVAL_CONV_DICT['h'] = 60 * _INTERVAL_CONV_DICT['m'] _INTERVAL_CONV_DICT['d'] = 24 * _INTERVAL_CONV_DICT['h'] _INTERVAL_CONV_DICT['D'] = _INTERVAL_CONV_DICT['d'] _INTERVAL_CONV_DICT['w'] = 7 * _INTERVAL_CONV_DICT['d'] _INTERVAL_CONV_DICT['W'] = _INTERVAL_CONV_DICT['w'] _INTERVAL_CONV_DICT['M'] = 30 * _INTERVAL_CONV_DICT['d'] _INTERVAL_CONV_DICT['Y'] = 365 * _INTERVAL_CONV_DICT['d'] _INTERVAL_REGEXP = re.compile('^([0-9]+)([%s])?' % ''.join(_INTERVAL_CONV_DICT)) def ConvertIntervalToSeconds(interval): """Convert a formatted string representing an interval into seconds. Args: interval: String to interpret as an interval. A basic interval looks like "". Complex intervals consisting of a chain of basic intervals are also allowed. 
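      For example (illustrative): '30s' and '30' are both 30 seconds, '2m' is
      120, and a chained interval such as '1h30m' is 5400.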
Returns: An integer representing the number of seconds represented by the interval string, or None if the interval string could not be decoded. """ total = 0 while interval: match = _INTERVAL_REGEXP.match(interval) if not match: return None try: num = int(match.group(1)) except ValueError: return None suffix = match.group(2) if suffix: multiplier = _INTERVAL_CONV_DICT.get(suffix) if not multiplier: return None num *= multiplier total += num interval = interval[match.end(0):] return total class Timestamp(BaseTimestamp): """This subclass contains methods to parse W3C and interval date spec. The interval date specification is in the form "1D", where "D" can be "s"econds "m"inutes "h"ours "D"ays "W"eeks "M"onths "Y"ears. """ INTERVAL_CONV_DICT = _INTERVAL_CONV_DICT INTERVAL_REGEXP = _INTERVAL_REGEXP @classmethod def _StringToTime(cls, timestring, tz=None): """Use dateutil.parser to convert string into timestamp. dateutil.parser understands ISO8601 which is really handy. Args: timestring: string with datetime tz: optional timezone, if timezone is omitted from timestring. Returns: New Timestamp or None if unable to parse the timestring. """ try: r = dateutil.parser.parse(timestring) # dateutil will raise ValueError if it's an unknown format -- or # TypeError in some cases, due to bugs. except (TypeError, ValueError): return None if not r.tzinfo: r = (tz or cls.LocalTimezone).localize(r) result = cls(r.year, r.month, r.day, r.hour, r.minute, r.second, r.microsecond, r.tzinfo) return result @classmethod def _IntStringToInterval(cls, timestring): """Parse interval date specification and create a timedelta object. Args: timestring: string interval. Returns: A datetime.timedelta representing the specified interval or None if unable to parse the timestring. """ seconds = ConvertIntervalToSeconds(timestring) return datetime.timedelta(seconds=seconds) if seconds else None @classmethod def FromString(cls, value, tz=None): """Create a Timestamp from a string. Args: value: String interval or datetime. e.g. "2013-01-05 13:00:00" or "1d" tz: optional timezone, if timezone is omitted from timestring. Returns: A new Timestamp. Raises: TimeParseError if unable to parse value. """ result = cls._StringToTime(value, tz=tz) if result: return result result = cls._IntStringToInterval(value) if result: return cls.utcnow() - result raise TimeParseError(value) # What's written below is a clear python bug. I mean, okay, I can apply # negative timezone to it and end result will be inconversible. MAXIMUM_PYTHON_TIMESTAMP = Timestamp( 9999, 12, 31, 23, 59, 59, 999999, UTC) # This is also a bug. It is called 32bit time_t. I hate it. # This is fixed in 2.5, btw. MAXIMUM_MICROSECOND_TIMESTAMP = 0x80000000 * _MICROSECONDS_PER_SECOND - 1 MAXIMUM_MICROSECOND_TIMESTAMP_AS_TS = Timestamp(2038, 1, 19, 3, 14, 7, 999999) google-apputils-0.4.1/google/apputils/__init__.py0000640033465300116100000000030102263613200022765 0ustar craigcitroeng00000000000000#!/usr/bin/env python try: import pkg_resources pkg_resources.declare_namespace(__name__) except ImportError: from pkgutil import extend_path __path__ = extend_path(__path__, __name__) google-apputils-0.4.1/google/apputils/file_util.py0000640033465300116100000001773302263613200023223 0ustar craigcitroeng00000000000000#!/usr/bin/env python # Copyright 2007 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Simple file system utilities.""" __author__ = ('elaforge@google.com (Evan LaForge)', 'matthewb@google.com (Matthew Blecker)') import contextlib import errno import os import pwd import shutil import stat import tempfile class PasswdError(Exception): """Exception class for errors loading a password from a file.""" def ListDirPath(dir_name): """Like os.listdir with prepended dir_name, which is often more convenient.""" return [os.path.join(dir_name, fn) for fn in os.listdir(dir_name)] def Read(filename): """Read entire contents of file with name 'filename'.""" with open(filename) as fp: return fp.read() def Write(filename, contents, overwrite_existing=True, mode=0666, gid=None): """Create a file 'filename' with 'contents', with the mode given in 'mode'. The 'mode' is modified by the umask, as in open(2). If 'overwrite_existing' is False, the file will be opened in O_EXCL mode. An optional gid can be specified. Args: filename: str; the name of the file contents: str; the data to write to the file overwrite_existing: bool; whether or not to allow the write if the file already exists mode: int; permissions with which to create the file (default is 0666 octal) gid: int; group id with which to create the file """ flags = os.O_WRONLY | os.O_TRUNC | os.O_CREAT if not overwrite_existing: flags |= os.O_EXCL fd = os.open(filename, flags, mode) try: os.write(fd, contents) finally: os.close(fd) if gid is not None: os.chown(filename, -1, gid) def AtomicWrite(filename, contents, mode=0666, gid=None): """Create a file 'filename' with 'contents' atomically. As in Write, 'mode' is modified by the umask. This creates and moves a temporary file, and errors doing the above will be propagated normally, though it will try to clean up the temporary file in that case. This is very similar to the prodlib function with the same name. An optional gid can be specified. Args: filename: str; the name of the file contents: str; the data to write to the file mode: int; permissions with which to create the file (default is 0666 octal) gid: int; group id with which to create the file """ fd, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename)) try: os.write(fd, contents) finally: os.close(fd) try: os.chmod(tmp_filename, mode) if gid is not None: os.chown(tmp_filename, -1, gid) os.rename(tmp_filename, filename) except OSError, exc: try: os.remove(tmp_filename) except OSError, e: exc = OSError('%s. Additional errors cleaning up: %s' % (exc, e)) raise exc @contextlib.contextmanager def TemporaryFileWithContents(contents, **kw): """A contextmanager that writes out a string to a file on disk. This is useful whenever you need to call a function or command that expects a file on disk with some contents that you have in memory. The context manager abstracts the writing, flushing, and deletion of the temporary file. This is a common idiom that boils down to a single with statement. Note: if you need a temporary file-like object for calling an internal function, you should use a StringIO as a file-like object and not this. 
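  A minimal sketch of that idiom (illustrative; the command is made up and the
  caller is assumed to have imported subprocess):

    with TemporaryFileWithContents('line one\nline two\n') as temp_file:
      subprocess.check_call(['wc', '-l', temp_file.name])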
Temporary files should be avoided unless you need a file name or contents in a file on disk to be read by some other function or program. Args: contents: a string with the contents to write to the file. **kw: Optional arguments passed on to tempfile.NamedTemporaryFile. Yields: The temporary file object, opened in 'w' mode. """ temporary_file = tempfile.NamedTemporaryFile(**kw) temporary_file.write(contents) temporary_file.flush() yield temporary_file temporary_file.close() # TODO(user): remove after migration to Python 3.2. # This context manager can be replaced with tempfile.TemporaryDirectory in # Python 3.2 (http://bugs.python.org/issue5178, # http://docs.python.org/dev/library/tempfile.html#tempfile.TemporaryDirectory). @contextlib.contextmanager def TemporaryDirectory(suffix='', prefix='tmp', base_path=None): """A context manager to create a temporary directory and clean up on exit. The parameters are the same ones expected by tempfile.mkdtemp. The directory will be securely and atomically created. Everything under it will be removed when exiting the context. Args: suffix: optional suffix. prefix: options prefix. base_path: the base path under which to create the temporary directory. Yields: The absolute path of the new temporary directory. """ temp_dir_path = tempfile.mkdtemp(suffix, prefix, base_path) try: yield temp_dir_path finally: try: shutil.rmtree(temp_dir_path) except OSError, e: if e.message == 'Cannot call rmtree on a symbolic link': # Interesting synthetic exception made up by shutil.rmtree. # Means we received a symlink from mkdtemp. # Also means must clean up the symlink instead. os.unlink(temp_dir_path) else: raise def MkDirs(directory, force_mode=None): """Makes a directory including its parent directories. This function is equivalent to os.makedirs() but it avoids a race condition that os.makedirs() has. The race is between os.mkdir() and os.path.exists() which fail with errors when run in parallel. Args: directory: str; the directory to make force_mode: optional octal, chmod dir to get rid of umask interaction Raises: Whatever os.mkdir() raises when it fails for any reason EXCLUDING "dir already exists". If a directory already exists, it does not raise anything. This behaviour is different than os.makedirs() """ name = os.path.normpath(directory) dirs = name.split(os.path.sep) for i in range(0, len(dirs)): path = os.path.sep.join(dirs[:i+1]) try: if path: os.mkdir(path) # only chmod if we created if force_mode is not None: os.chmod(path, force_mode) except OSError, exc: if not (exc.errno == errno.EEXIST and os.path.isdir(path)): raise def RmDirs(dir_name): """Removes dir_name and every subsequently empty directory above it. Unlike os.removedirs and shutil.rmtree, this function doesn't raise an error if the directory does not exist. Args: dir_name: Directory to be removed. """ try: shutil.rmtree(dir_name) except OSError, err: if err.errno != errno.ENOENT: raise try: parent_directory = os.path.dirname(dir_name) while parent_directory: try: os.rmdir(parent_directory) except OSError, err: if err.errno != errno.ENOENT: raise parent_directory = os.path.dirname(parent_directory) except OSError, err: if err.errno not in (errno.EACCES, errno.ENOTEMPTY, errno.EPERM): raise def HomeDir(user=None): """Find the home directory of a user. Args: user: int, str, or None - the uid or login of the user to query for, or None (the default) to query for the current process' effective user Returns: str - the user's home directory Raises: TypeError: if user is not int, str, or None. 
""" if user is None: pw_struct = pwd.getpwuid(os.geteuid()) elif isinstance(user, int): pw_struct = pwd.getpwuid(user) elif isinstance(user, str): pw_struct = pwd.getpwnam(user) else: raise TypeError('user must be None or an instance of int or str') return pw_struct.pw_dir google-apputils-0.4.1/google/apputils/appcommands.py0000750033465300116100000007077502263613200023560 0ustar craigcitroeng00000000000000#!/usr/bin/env python # Copyright 2007 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module is the base for programs that provide multiple commands. This provides command line tools that have a few shared global flags, followed by a command name, followed by command specific flags, then by arguments. That is: tool [--global_flags] command [--command_flags] [args] The module is built on top of app.py and 'overrides' a bit of it. However the interface is mostly the same. The main difference is that your main is supposed to register commands and return without further execution of the commands; pre checking is of course welcome! Also your global initialization should call appcommands.Run() rather than app.run(). To register commands use AddCmd() or AddCmdFunc(). AddCmd() is used for commands that derive from class Cmd and the AddCmdFunc() is used to wrap simple functions. This module itself registers the command 'help' that allows users to retrieve help for all or specific commands. 'help' is the default command executed if no command is expressed, unless a different default command is set with SetDefaultCommand. Example: from mx import DateTime class CmdDate(appcommands.Cmd): \"\"\"This docstring contains the help for the date command.\"\"\" def Run(self, argv): print DateTime.now() def main(argv): appcommands.AddCmd('date', CmdDate, command_aliases=['data_now']) if __name__ == '__main__': appcommands.Run() In the above example the name of the registered command on the command line is 'date'. Thus, to get the date you would execute: tool date The above example also added the command alias 'data_now' which allows to replace 'tool date' with 'tool data_now'. To get a list of available commands run: tool help For help with a specific command, you would execute: tool help date For help on flags run one of the following: tool --help Note that 'tool --help' gives you information on global flags, just like for applications that do not use appcommand. Likewise 'tool --helpshort' and the other help-flags from app.py are also available. The above example also demonstrates that you only have to call appcommands.Run() and register your commands in main() to initialize your program with appcommands (and app). Handling of flags: Flags can be registered just as with any other google tool using flags.py. In addition you can also provide command specific flags. To do so simply add flags registering code into the __init__ function of your Cmd classes passing parameter flag_values to any flags registering calls. 
These flags will get copied to the global flag list, so that once the command is detected they behave just like any other flag. That means these flags won't be available for other commands. Note that it is possible to register flags with more than one command. Getting help: This module activates formatting and wrapping to help output. That is the main difference to help created from app.py. So just as with app.py, appcommands.py will create help from the main modules main __doc__. But it adds the new 'help' command that allows you to get a list of all available commands. Each command's help will be followed by the registered command specific flags along with their defaults and help. After help for all commands there will also be a list of all registered global flags with their defaults and help. The text for the command's help can best be supplied by overwriting the __doc__ property of the Cmd classes for commands registered with AddCmd() or the __doc__ property of command functions registered AddCmdFunc(). Inner working: This module interacts with app.py by replacing its inner start dispatcher. The replacement version basically does the same, registering help flags, checking whether help flags were present, and calling the main module's main function. However unlike app.py, this module epxpects main() to only register commands and then to return. After having all commands registered appcommands.py will then parse the remaining arguments for any registered command. If one is found it will get executed. Otherwise a short usage info will be displayed. Each provided command must be an instance of Cmd. If commands get registered from global functions using AddCmdFunc() then the helper class _FunctionalCmd will be used in the registering process. """ import os import pdb import sys import traceback from google.apputils import app import gflags as flags FLAGS = flags.FLAGS # module exceptions: class AppCommandsError(Exception): """The base class for all flags errors.""" pass _cmd_argv = None # remaining arguments with index 0 = sys.argv[0] _cmd_list = {} # list of commands index by name (_Cmd instances) _cmd_alias_list = {} # list of command_names index by command_alias _cmd_default = 'help' # command to execute if none explicitly given def GetAppBasename(): """Returns the friendly basename of this application.""" return os.path.basename(sys.argv[0]) def ShortHelpAndExit(message=None): """Display optional message, followed by a note on how to get help, then exit. Args: message: optional message to display """ sys.stdout.flush() if message is not None: sys.stderr.write('%s\n' % message) sys.stderr.write("Run '%s help' to get help\n" % GetAppBasename()) sys.exit(1) def GetCommandList(): """Return list of registered commands.""" # pylint: disable=global-variable-not-assigned global _cmd_list return _cmd_list def GetCommandAliasList(): """Return list of registered command aliases.""" # pylint: disable=global-variable-not-assigned global _cmd_alias_list return _cmd_alias_list def GetFullCommandList(): """Return list of registered commands, including aliases.""" all_cmds = dict(GetCommandList()) for cmd_alias, cmd_name in GetCommandAliasList().iteritems(): all_cmds[cmd_alias] = all_cmds.get(cmd_name) return all_cmds def GetCommandByName(name): """Get the command or None if name is not a registered command. 
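  For example (illustrative, reusing the module-level example above): after
  AddCmd('date', CmdDate, command_aliases=['data_now']), both
  GetCommandByName('date') and GetCommandByName('data_now') return the same
  CmdDate instance.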
Args: name: name of command to look for Returns: Cmd instance holding the command or None """ return GetCommandList().get(GetCommandAliasList().get(name)) def GetCommandArgv(): """Return list of remaining args.""" return _cmd_argv def GetMaxCommandLength(): """Returns the length of the longest registered command.""" return max([len(cmd_name) for cmd_name in GetCommandList()]) class Cmd(object): """Abstract class describing and implementing a command. When creating code for a command, at least you have to derive this class and override method Run(). The other methods of this class might be overridden as well. Check their documentation for details. If the command needs any specific flags, use __init__ for registration. """ def __init__(self, name, flag_values, command_aliases=None, all_commands_help=None, help_full=None): """Initialize and check whether self is actually a Cmd instance. This can be used to register command specific flags. If you do so remember that you have to provide the 'flag_values=flag_values' parameter to any flags.DEFINE_*() call. Args: name: Name of the command flag_values: FlagValues() instance that needs to be passed as flag_values parameter to any flags registering call. command_aliases: A list of aliases that the command can be run as. all_commands_help: A short description of the command that is shown when the user requests help for all commands at once. help_full: A long description of the command and usage that is shown when the user requests help for just this command. If unspecified, the command's docstring is used instead. Raises: AppCommandsError: if self is Cmd (Cmd is abstract) """ self._command_name = name self._command_aliases = command_aliases self._command_flags = flag_values self._all_commands_help = all_commands_help self._help_full = help_full if type(self) is Cmd: raise AppCommandsError('Cmd is abstract and cannot be instantiated') def Run(self, unused_argv): """Execute the command. Must be provided by the implementing class. Args: unused_argv: Remaining command line arguments after parsing flags and command (in other words, a copy of sys.argv at the time of the function call with all parsed flags removed). Returns: 0 for success, anything else for failure (must return with integer). Alternatively you may return None (or not use a return statement at all). Raises: AppCommandsError: Always, as in must be overwritten """ raise AppCommandsError('%s.%s.Run() is not implemented' % ( type(self).__module__, type(self).__name__)) def CommandRun(self, argv): """Execute the command with given arguments. First register and parse additional flags. Then run the command. Returns: Command return value. Args: argv: Remaining command line arguments after parsing command and flags (that is a copy of sys.argv at the time of the function call with all parsed flags removed). 
""" # Register flags global when run normally FLAGS.AppendFlagValues(self._command_flags) # Prepare flags parsing, to redirect help, to show help for command orig_app_usage = app.usage def ReplacementAppUsage(shorthelp=0, writeto_stdout=1, detailed_error=None, exitcode=None): AppcommandsUsage(shorthelp, writeto_stdout, detailed_error, exitcode=exitcode, show_cmd=self._command_name, show_global_flags=True) app.usage = ReplacementAppUsage # Parse flags and restore app.usage afterwards try: try: argv = ParseFlagsWithUsage(argv) # Run command ret = self.Run(argv) if ret is None: ret = 0 else: assert isinstance(ret, int) return ret except app.UsageError, error: app.usage(shorthelp=1, detailed_error=error, exitcode=error.exitcode) except: if FLAGS.pdb_post_mortem: traceback.print_exc() pdb.post_mortem() raise finally: # Restore app.usage and remove this command's flags from the global flags. app.usage = orig_app_usage for flag_name in self._command_flags.FlagDict(): delattr(FLAGS, flag_name) def CommandGetHelp(self, unused_argv, cmd_names=None): """Get help string for command. Args: unused_argv: Remaining command line flags and arguments after parsing command (that is a copy of sys.argv at the time of the function call with all parsed flags removed); unused in this default implementation, but may be used in subclasses. cmd_names: Complete list of commands for which help is being shown at the same time. This is used to determine whether to return _all_commands_help, or the command's docstring. (_all_commands_help is used, if not None, when help is being shown for more than one command, otherwise the command's docstring is used.) Returns: Help string, one of the following (by order): - Result of the registered 'help' function (if any) - Doc string of the Cmd class (if any) - Default fallback string """ if (type(cmd_names) is list and len(cmd_names) > 1 and self._all_commands_help is not None): return flags.DocToHelp(self._all_commands_help) elif self._help_full is not None: return flags.DocToHelp(self._help_full) elif self.__doc__: return flags.DocToHelp(self.__doc__) else: return 'No help available' def CommandGetAliases(self): """Get aliases for command. Returns: aliases: list of aliases for the command. """ return self._command_aliases def CommandGetName(self): """Get name of command. Returns: Command name. """ return self._command_name class _FunctionalCmd(Cmd): """Class to wrap functions as CMD instances. Args: cmd_func: command function """ def __init__(self, name, flag_values, cmd_func, **kargs): """Create a functional command. Args: name: Name of command flag_values: FlagValues() instance that needs to be passed as flag_values parameter to any flags registering call. cmd_func: Function to call when command is to be executed. **kargs: Additional keyword arguments to be passed to the superclass constructor. """ if 'help_full' not in kargs: kargs['help_full'] = cmd_func.__doc__ super(_FunctionalCmd, self).__init__(name, flag_values, **kargs) self._cmd_func = cmd_func def Run(self, argv): """Execute the command with given arguments. Args: argv: Remaining command line flags and arguments after parsing command (that is a copy of sys.argv at the time of the function call with all parsed flags removed). Returns: Command return value. """ return self._cmd_func(argv) def _AddCmdInstance(command_name, cmd, command_aliases=None, **_): """Add a command from a Cmd instance. 
Args: command_name: name of the command which will be used in argument parsing cmd: Cmd instance to register command_aliases: A list of command aliases that the command can be run as. Raises: AppCommandsError: If cmd is not a subclass of Cmd. AppCommandsError: If name is already registered OR name is not a string OR name is too short OR name does not start with a letter OR name contains any non alphanumeric characters besides '_'. """ # Update global command list. # pylint: disable=global-variable-not-assigned global _cmd_list global _cmd_alias_list if not issubclass(cmd.__class__, Cmd): raise AppCommandsError('Command must be an instance of commands.Cmd') for name in [command_name] + (command_aliases or []): _CheckCmdName(name) _cmd_alias_list[name] = command_name _cmd_list[command_name] = cmd def _CheckCmdName(name_or_alias): """Only allow strings for command names and aliases (reject unicode as well). Args: name_or_alias: properly formatted string name or alias. Raises: AppCommandsError: If name is already registered OR name is not a string OR name is too short OR name does not start with a letter OR name contains any non alphanumeric characters besides '_'. """ if name_or_alias in GetCommandAliasList(): raise AppCommandsError("Command or Alias '%s' already defined" % name_or_alias) if not isinstance(name_or_alias, str) or len(name_or_alias) <= 1: raise AppCommandsError("Command '%s' not a string or too short" % str(name_or_alias)) if not name_or_alias[0].isalpha(): raise AppCommandsError("Command '%s' does not start with a letter" % name_or_alias) if [c for c in name_or_alias if not (c.isalnum() or c == '_')]: raise AppCommandsError("Command '%s' contains non alphanumeric characters" % name_or_alias) def AddCmd(command_name, cmd_factory, **kwargs): """Add a command from a Cmd subclass or factory. Args: command_name: name of the command which will be used in argument parsing cmd_factory: A callable whose arguments match those of Cmd.__init__ and returns a Cmd. In the simplest case this is just a subclass of Cmd. **kwargs: Additional keyword arguments to be passed to the cmd_factory at initialization. Also passed to _AddCmdInstance to catch command_aliases. Raises: AppCommandsError: if calling cmd_factory does not return an instance of Cmd. """ cmd = cmd_factory(command_name, flags.FlagValues(), **kwargs) if not isinstance(cmd, Cmd): raise AppCommandsError('Command must be an instance of commands.Cmd') _AddCmdInstance(command_name, cmd, **kwargs) def AddCmdFunc(command_name, cmd_func, command_aliases=None, all_commands_help=None): """Add a new command to the list of registered commands. Args: command_name: name of the command which will be used in argument parsing cmd_func: command function, this function received the remaining arguments as its only parameter. It is supposed to do the command work and then return with the command result that is being used as the shell exit code. command_aliases: A list of command aliases that the command can be run as. all_commands_help: Help message to be displayed in place of func.__doc__ when all commands are displayed. """ _AddCmdInstance(command_name, _FunctionalCmd(command_name, flags.FlagValues(), cmd_func, command_aliases=command_aliases, all_commands_help=all_commands_help), command_aliases=command_aliases) class _CmdHelp(Cmd): """Standard help command. Allows to provide help for all or specific commands. 
""" def __init__(self, *args, **kwargs): if 'help_full' not in kwargs: kwargs['help_full'] = ( 'Help for all or selected command:\n' '\t%(prog)s help []\n\n' 'To retrieve help with global flags:\n' '\t%(prog)s --help\n\n' 'To retrieve help with flags only from the main module:\n' '\t%(prog)s --helpshort []\n\n' % {'prog': GetAppBasename()}) super(_CmdHelp, self).__init__(*args, **kwargs) def Run(self, argv): """Execute help command. If an argument is given and that argument is a registered command name, then help specific to that command is being displayed. If the command is unknown then a fatal error will be displayed. If no argument is present then help for all commands will be presented. If a specific command help is being generated, the list of commands is temporarily replaced with one containing only that command. Thus the call to usage() will only show help for that command. Otherwise call usage() will show help for all registered commands as it sees all commands. Args: argv: Remaining command line flags and arguments after parsing command (that is a copy of sys.argv at the time of the function call with all parsed flags removed). So argv[0] is the program and argv[1] will be the first argument to the call. For instance 'tool.py help command' will result in argv containing ('tool.py', 'command'). In this case the list of commands is searched for 'command'. Returns: 1 for failure """ if len(argv) > 1 and argv[1] in GetFullCommandList(): show_cmd = argv[1] else: show_cmd = None AppcommandsUsage(shorthelp=0, writeto_stdout=1, detailed_error=None, exitcode=1, show_cmd=show_cmd, show_global_flags=False) def GetSynopsis(): """Get synopsis for program. Returns: Synopsis including program basename. """ return '%s [--global_flags] [--command_flags] [args]' % ( GetAppBasename()) def _UsageFooter(detailed_error, cmd_names): """Output a footer at the end of usage or help output. Args: detailed_error: additional detail about why usage info was presented. cmd_names: list of command names for which help was shown or None. Returns: Generated footer that contains 'Run..' messages if appropriate. """ footer = [] if not cmd_names or len(cmd_names) == 1: footer.append("Run '%s help' to see the list of available commands." % GetAppBasename()) if not cmd_names or len(cmd_names) == len(GetCommandList()): footer.append("Run '%s help ' to get help for ." % GetAppBasename()) if detailed_error is not None: if footer: footer.append('') footer.append('%s' % detailed_error) return '\n'.join(footer) def AppcommandsUsage(shorthelp=0, writeto_stdout=0, detailed_error=None, exitcode=None, show_cmd=None, show_global_flags=False): """Output usage or help information. Extracts the __doc__ string from the __main__ module and writes it to stderr. If that string contains a '%s' then that is replaced by the command pathname. Otherwise a default usage string is being generated. The output varies depending on the following: - FLAGS.help - FLAGS.helpshort - show_cmd - show_global_flags Args: shorthelp: print only command and main module flags, rather than all. writeto_stdout: write help message to stdout, rather than to stderr. detailed_error: additional details about why usage info was presented. exitcode: if set, exit with this status code after writing help. show_cmd: show help for this command only (name of command). show_global_flags: show help for global flags. 
""" if writeto_stdout: stdfile = sys.stdout else: stdfile = sys.stderr prefix = ''.rjust(GetMaxCommandLength() + 2) # Deal with header, containing general tool documentation doc = sys.modules['__main__'].__doc__ if doc: help_msg = flags.DocToHelp(doc.replace('%s', sys.argv[0])) stdfile.write(flags.TextWrap(help_msg, flags.GetHelpWidth())) stdfile.write('\n\n\n') if not doc or doc.find('%s') == -1: synopsis = 'USAGE: ' + GetSynopsis() stdfile.write(flags.TextWrap(synopsis, flags.GetHelpWidth(), ' ', '')) stdfile.write('\n\n\n') # Special case just 'help' registered, that means run as 'tool --help'. if len(GetCommandList()) == 1: cmd_names = [] else: # Show list of commands if show_cmd is None or show_cmd == 'help': cmd_names = GetCommandList().keys() cmd_names.sort() stdfile.write('Any of the following commands:\n') doc = ', '.join(cmd_names) stdfile.write(flags.TextWrap(doc, flags.GetHelpWidth(), ' ')) stdfile.write('\n\n\n') # Prepare list of commands to show help for if show_cmd is not None: cmd_names = [show_cmd] # show only one command elif FLAGS.help or FLAGS.helpshort or shorthelp: cmd_names = [] else: cmd_names = GetCommandList().keys() # show all commands cmd_names.sort() # Show the command help (none, one specific, or all) for name in cmd_names: command = GetCommandByName(name) try: cmd_help = command.CommandGetHelp(GetCommandArgv(), cmd_names=cmd_names) except Exception as error: # pylint: disable=broad-except cmd_help = "Internal error for command '%s': %s." % (name, str(error)) cmd_help = cmd_help.strip() all_names = ', '.join( [command.CommandGetName()] + (command.CommandGetAliases() or [])) if len(all_names) + 1 >= len(prefix) or not cmd_help: # If command/alias list would reach over help block-indent # start the help block on a new line. stdfile.write(flags.TextWrap(all_names, flags.GetHelpWidth())) stdfile.write('\n') prefix1 = prefix else: prefix1 = all_names.ljust(GetMaxCommandLength() + 2) if cmd_help: stdfile.write(flags.TextWrap(cmd_help, flags.GetHelpWidth(), prefix, prefix1)) stdfile.write('\n\n') else: stdfile.write('\n') # When showing help for exactly one command we show its flags if len(cmd_names) == 1: # Need to register flags for command prior to be able to use them. # We do not register them globally so that they do not reappear. # pylint: disable=protected-access cmd_flags = command._command_flags if cmd_flags.RegisteredFlags(): stdfile.write('%sFlags for %s:\n' % (prefix, name)) stdfile.write(cmd_flags.GetHelp(prefix+' ')) stdfile.write('\n\n') stdfile.write('\n') # Now show global flags as asked for if show_global_flags: stdfile.write('Global flags:\n') if shorthelp: stdfile.write(FLAGS.MainModuleHelp()) else: stdfile.write(FLAGS.GetHelp()) stdfile.write('\n') else: stdfile.write("Run '%s --help' to get help for global flags." % GetAppBasename()) stdfile.write('\n%s\n' % _UsageFooter(detailed_error, cmd_names)) if exitcode is not None: sys.exit(exitcode) def ParseFlagsWithUsage(argv): """Parse the flags, exiting (after printing usage) if they are unparseable. Args: argv: command line arguments Returns: remaining command line arguments after parsing flags """ # Update the global commands. # pylint: disable=global-statement global _cmd_argv try: _cmd_argv = FLAGS(argv) return _cmd_argv except flags.FlagsError, error: ShortHelpAndExit('FATAL Flags parsing error: %s' % error) def GetCommand(command_required): """Get the command or return None (or issue an error) if there is none. 
Args: command_required: whether to issue an error if no command is present Returns: command or None, if command_required is True then return value is a valid command or the program will exit. The program also exits if a command was specified but that command does not exist. """ # Update the global commands. # pylint: disable=global-statement global _cmd_argv _cmd_argv = ParseFlagsWithUsage(_cmd_argv) if len(_cmd_argv) < 2: if command_required: ShortHelpAndExit('FATAL Command expected but none given') return None command = GetCommandByName(_cmd_argv[1]) if command is None: ShortHelpAndExit("FATAL Command '%s' unknown" % _cmd_argv[1]) del _cmd_argv[1] return command def SetDefaultCommand(default_command): """Change the default command to execute if none is explicitly given. Args: default_command: str, the name of the command to execute by default. """ # pylint: disable=global-statement,g-bad-name global _cmd_default _cmd_default = default_command def _CommandsStart(unused_argv): """Main initialization. Calls __main__.main(), and then the command indicated by the first non-flag argument, or 'help' if no argument was given. (The command to execute if no flag is given can be changed via SetDefaultCommand). Only non-flag arguments are passed to main(). If main does not call sys.exit, the return value of the command is used as the exit status. """ # The following is supposed to return after registering additional commands try: sys.modules['__main__'].main(GetCommandArgv()) # If sys.exit was called, return with error code. except SystemExit, e: sys.exit(e.code) except Exception, error: traceback.print_exc() # Print a backtrace to stderr. ShortHelpAndExit('\nFATAL error in main: %s' % error) if len(GetCommandArgv()) > 1: command = GetCommand(command_required=True) else: command = GetCommandByName(_cmd_default) if command is None: ShortHelpAndExit("FATAL Command '%s' unknown" % _cmd_default) sys.exit(command.CommandRun(GetCommandArgv())) def Run(): """This must be called from __main__ modules main, instead of app.run(). app.run will base its actions on its stacktrace. Returns: app.run() """ app.parse_flags_with_usage = ParseFlagsWithUsage original_really_start = app.really_start def InterceptReallyStart(): original_really_start(main=_CommandsStart) app.really_start = InterceptReallyStart app.usage = _ReplacementAppUsage return app.run() # Always register 'help' command AddCmd('help', _CmdHelp) def _ReplacementAppUsage(shorthelp=0, writeto_stdout=0, detailed_error=None, exitcode=None): AppcommandsUsage(shorthelp, writeto_stdout, detailed_error, exitcode=exitcode, show_cmd=None, show_global_flags=True) if __name__ == '__main__': Run() google-apputils-0.4.1/google/apputils/app.py0000640033465300116100000002640402263613200022022 0ustar craigcitroeng00000000000000#!/usr/bin/env python # Copyright 2003 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Generic entry point for Google applications. 
To use this module, simply define a 'main' function with a single 'argv' argument and add the following to the end of your source file: if __name__ == '__main__': app.run() TODO(user): Remove silly main-detection logic, and force all clients of this module to check __name__ explicitly. Fix all current clients that don't check __name__. """ import errno import os import pdb import socket import stat import struct import sys import time import traceback import gflags as flags FLAGS = flags.FLAGS flags.DEFINE_boolean('run_with_pdb', 0, 'Set to true for PDB debug mode') flags.DEFINE_boolean('pdb_post_mortem', 0, 'Set to true to handle uncaught exceptions with PDB ' 'post mortem.') flags.DEFINE_boolean('run_with_profiling', 0, 'Set to true for profiling the script. ' 'Execution will be slower, and the output format might ' 'change over time.') flags.DEFINE_string('profile_file', None, 'Dump profile information to a file (for python -m ' 'pstats). Implies --run_with_profiling.') flags.DEFINE_boolean('use_cprofile_for_profiling', True, 'Use cProfile instead of the profile module for ' 'profiling. This has no effect unless ' '--run_with_profiling is set.') # If main() exits via an abnormal exception, call into these # handlers before exiting. EXCEPTION_HANDLERS = [] help_text_wrap = False # Whether to enable TextWrap in help output class Error(Exception): pass class UsageError(Error): """The arguments supplied by the user are invalid. Raise this when the arguments supplied are invalid from the point of view of the application. For example when two mutually exclusive flags have been supplied or when there are not enough non-flag arguments. It is distinct from flags.FlagsError which covers the lower level of parsing and validating individual flags. """ def __init__(self, message, exitcode=1): Error.__init__(self, message) self.exitcode = exitcode class HelpFlag(flags.BooleanFlag): """Special boolean flag that displays usage and raises SystemExit.""" NAME = 'help' def __init__(self): flags.BooleanFlag.__init__(self, self.NAME, 0, 'show this help', short_name='?', allow_override=1) def Parse(self, arg): if arg: usage(shorthelp=1, writeto_stdout=1) # Advertise --helpfull on stdout, since usage() was on stdout. print print 'Try --helpfull to get a list of all flags.' 
sys.exit(1) class HelpshortFlag(HelpFlag): """--helpshort is an alias for --help.""" NAME = 'helpshort' class HelpfullFlag(flags.BooleanFlag): """Display help for flags in this module and all dependent modules.""" def __init__(self): flags.BooleanFlag.__init__(self, 'helpfull', 0, 'show full help', allow_override=1) def Parse(self, arg): if arg: usage(writeto_stdout=1) sys.exit(1) class HelpXMLFlag(flags.BooleanFlag): """Similar to HelpfullFlag, but generates output in XML format.""" def __init__(self): flags.BooleanFlag.__init__(self, 'helpxml', False, 'like --helpfull, but generates XML output', allow_override=1) def Parse(self, arg): if arg: flags.FLAGS.WriteHelpInXMLFormat(sys.stdout) sys.exit(1) class BuildDataFlag(flags.BooleanFlag): """Boolean flag that writes build data to stdout and exits.""" def __init__(self): flags.BooleanFlag.__init__(self, 'show_build_data', 0, 'show build data and exit') def Parse(self, arg): if arg: sys.stdout.write(build_data.BuildData()) sys.exit(0) def parse_flags_with_usage(args): """Try parsing the flags, printing usage and exiting if unparseable.""" try: argv = FLAGS(args) return argv except flags.FlagsError, error: sys.stderr.write('FATAL Flags parsing error: %s\n' % error) sys.stderr.write('Pass --helpshort or --helpfull to see help on flags.\n') sys.exit(1) _define_help_flags_called = False def DefineHelpFlags(): """Register help flags. Idempotent.""" # Use a global to ensure idempotence. # pylint: disable=global-statement global _define_help_flags_called if not _define_help_flags_called: flags.DEFINE_flag(HelpFlag()) flags.DEFINE_flag(HelpshortFlag()) # alias for --help flags.DEFINE_flag(HelpfullFlag()) flags.DEFINE_flag(HelpXMLFlag()) flags.DEFINE_flag(BuildDataFlag()) _define_help_flags_called = True def RegisterAndParseFlagsWithUsage(): """Register help flags, parse arguments and show usage if appropriate. Returns: remaining arguments after flags parsing """ DefineHelpFlags() argv = parse_flags_with_usage(sys.argv) return argv def really_start(main=None): """Initializes flag values, and calls main with non-flag arguments. Only non-flag arguments are passed to main(). The return value of main() is used as the exit status. Args: main: Main function to run with the list of non-flag arguments, or None so that sys.modules['__main__'].main is to be used. """ argv = RegisterAndParseFlagsWithUsage() if main is None: main = sys.modules['__main__'].main try: if FLAGS.run_with_pdb: sys.exit(pdb.runcall(main, argv)) else: if FLAGS.run_with_profiling or FLAGS.profile_file: # Avoid import overhead since most apps (including performance-sensitive # ones) won't be run with profiling. import atexit if FLAGS.use_cprofile_for_profiling: import cProfile as profile else: import profile profiler = profile.Profile() if FLAGS.profile_file: atexit.register(profiler.dump_stats, FLAGS.profile_file) else: atexit.register(profiler.print_stats) retval = profiler.runcall(main, argv) sys.exit(retval) else: sys.exit(main(argv)) except UsageError, error: usage(shorthelp=1, detailed_error=error, exitcode=error.exitcode) except: if FLAGS.pdb_post_mortem: traceback.print_exc() pdb.post_mortem() raise def run(): """Begin executing the program. - Parses command line flags with the flag module. - If there are any errors, print usage(). - Calls main() with the remaining arguments. - If main() raises a UsageError, print usage and the error message. 
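A minimal sketch of a calling module (the main() below is illustrative, not part of this module):

    from google.apputils import app

    def main(argv):
      # argv holds the non-flag arguments; argv[0] is the program name.
      if len(argv) < 2:
        raise app.UsageError('Too few non-flag arguments.', exitcode=2)

    if __name__ == '__main__':
      app.run()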
""" return _actual_start() def _actual_start(): """Another layer in the starting stack.""" # Get raw traceback tb = None try: raise ZeroDivisionError('') except ZeroDivisionError: tb = sys.exc_info()[2] assert tb # Look at previous stack frame's previous stack frame (previous # frame is run() or start(); the frame before that should be the # frame of the original caller, which should be __main__ or appcommands prev_prev_frame = tb.tb_frame.f_back.f_back if not prev_prev_frame: return prev_prev_name = prev_prev_frame.f_globals.get('__name__', None) if (prev_prev_name != '__main__' and not prev_prev_name.endswith('.appcommands')): return # just in case there's non-trivial stuff happening in __main__ del tb if hasattr(sys, 'exc_clear'): sys.exc_clear() # This functionality is gone in Python 3. try: really_start() except SystemExit, e: raise except Exception, e: # Call any installed exception handlers which may, for example, # log to a file or send email. for handler in EXCEPTION_HANDLERS: try: if handler.Wants(e): handler.Handle(e) except: # We don't want to stop for exceptions in the exception handlers but # we shouldn't hide them either. sys.stderr.write(traceback.format_exc()) raise # All handlers have had their chance, now die as we would have normally. raise def usage(shorthelp=0, writeto_stdout=0, detailed_error=None, exitcode=None): """Write __main__'s docstring to stderr with some help text. Args: shorthelp: print only flags from this module, rather than all flags. writeto_stdout: write help message to stdout, rather than to stderr. detailed_error: additional detail about why usage info was presented. exitcode: if set, exit with this status code after writing help. """ if writeto_stdout: stdfile = sys.stdout else: stdfile = sys.stderr doc = sys.modules['__main__'].__doc__ if not doc: doc = '\nUSAGE: %s [flags]\n' % sys.argv[0] doc = flags.TextWrap(doc, indent=' ', firstline_indent='') else: # Replace all '%s' with sys.argv[0], and all '%%' with '%'. num_specifiers = doc.count('%') - 2 * doc.count('%%') try: doc %= (sys.argv[0],) * num_specifiers except (OverflowError, TypeError, ValueError): # Just display the docstring as-is. pass if help_text_wrap: doc = flags.TextWrap(flags.DocToHelp(doc)) if shorthelp: flag_str = FLAGS.MainModuleHelp() else: flag_str = str(FLAGS) try: stdfile.write(doc) if flag_str: stdfile.write('\nflags:\n') stdfile.write(flag_str) stdfile.write('\n') if detailed_error is not None: stdfile.write('\n%s\n' % detailed_error) except IOError, e: # We avoid printing a huge backtrace if we get EPIPE, because # "foo.par --help | less" is a frequent use case. if e.errno != errno.EPIPE: raise if exitcode is not None: sys.exit(exitcode) class ExceptionHandler(object): """Base exception handler from which other may inherit.""" def Wants(self, unused_exc): """Check if this exception handler want to handle this exception. Args: unused_exc: Exception, the current exception Returns: boolean This base handler wants to handle all exceptions, override this method if you want to be more selective. """ return True def Handle(self, exc): """Do something with the current exception. Args: exc: Exception, the current exception This method must be overridden. """ raise NotImplementedError() def InstallExceptionHandler(handler): """Install an exception handler. Args: handler: an object conforming to the interface defined in ExceptionHandler Raises: TypeError: handler was not of the correct type All installed exception handlers will be called if main() exits via an abnormal exception, i.e. 
not one of SystemExit, KeyboardInterrupt, FlagsError or UsageError. """ if not isinstance(handler, ExceptionHandler): raise TypeError('handler of type %s does not inherit from ExceptionHandler' % type(handler)) EXCEPTION_HANDLERS.append(handler) google-apputils-0.4.1/google/apputils/resources.py0000640033465300116100000000412102263613200023244 0ustar craigcitroeng00000000000000#!/usr/bin/env python # Copyright 2010 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Wrapper around setuptools' pkg_resources with more Google-like names. This module is not very useful on its own, but many Google open-source projects are used to a different naming scheme, and this module makes the transition easier. """ __author__ = 'dborowitz@google.com (Dave Borowitz)' import atexit import pkg_resources def _Call(func, name): """Call a pkg_resources function. Args: func: A function from pkg_resources that takes the arguments (package_or_requirement, resource_name); for more info, see http://peak.telecommunity.com/DevCenter/PkgResources name: A name of the form 'module.name:path/to/resource'; this should generally be built from __name__ in the calling module. Returns: The result of calling the function on the split resource name. """ pkg_name, resource_name = name.split(':', 1) return func(pkg_name, resource_name) def GetResource(name): """Get a resource as a string; see _Call.""" return _Call(pkg_resources.resource_string, name) def GetResourceAsFile(name): """Get a resource as a file-like object; see _Call.""" return _Call(pkg_resources.resource_stream, name) _extracted_files = False def GetResourceFilename(name): """Get a filename for a resource; see _Call.""" global _extracted_files # pylint: disable=global-statement if not _extracted_files: atexit.register(pkg_resources.cleanup_resources) _extracted_files = True return _Call(pkg_resources.resource_filename, name) google-apputils-0.4.1/google/apputils/debug.py0000640033465300116100000000352602263613200022330 0ustar craigcitroeng00000000000000#!/usr/bin/env python # Copyright 2004 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This code must be source compatible with Python 2.6 through 3.3. """Import this module to add a hook to call pdb on uncaught exceptions. To enable this, do the following in your top-level application: import google.apputils.debug and then in your main(): google.apputils.debug.Init() Then run your program with --pdb. 
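Putting the pieces together, a minimal sketch (the main() below is illustrative):

    from google.apputils import app
    import google.apputils.debug

    def main(argv):
      google.apputils.debug.Init()
      # From this point on, an uncaught exception starts pdb post-mortem,
      # provided the program was run with --pdb and stderr is a terminal.

    if __name__ == '__main__':
      app.run()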
""" import sys import gflags as flags flags.DEFINE_boolean('pdb', 0, 'Drop into pdb on uncaught exceptions') old_excepthook = None def _DebugHandler(exc_class, value, tb): if not flags.FLAGS.pdb or hasattr(sys, 'ps1') or not sys.stderr.isatty(): # we aren't in interactive mode or we don't have a tty-like # device, so we call the default hook old_excepthook(exc_class, value, tb) else: # Don't impose import overhead on apps that never raise an exception. import traceback import pdb # we are in interactive mode, print the exception... traceback.print_exception(exc_class, value, tb) sys.stdout.write('\n') # ...then start the debugger in post-mortem mode. pdb.pm() def Init(): # Must back up old excepthook. global old_excepthook # pylint: disable=global-statement if old_excepthook is None: old_excepthook = sys.excepthook sys.excepthook = _DebugHandler google-apputils-0.4.1/google/apputils/shellutil.py0000640033465300116100000000235202263613200023243 0ustar craigcitroeng00000000000000#!/usr/bin/env python # This code must be source compatible with Python 2.4 through 3.3. # # Copyright 2003 Google Inc. All Rights Reserved. """Utility functions for dealing with command interpreters.""" import os # Running windows? win32 = (os.name == 'nt') def ShellEscapeList(words): """Turn a list of words into a shell-safe string. Args: words: A list of words, e.g. for a command. Returns: A string of shell-quoted and space-separated words. """ if win32: return ' '.join(words) s = '' for word in words: # Single quote word, and replace each ' in word with '"'"' s += "'" + word.replace("'", "'\"'\"'") + "' " return s[:-1] def ShellifyStatus(status): """Translate from a wait() exit status to a command shell exit status.""" if not win32: if os.WIFEXITED(status): # decode and return exit status status = os.WEXITSTATUS(status) else: # On Unix, the wait() produces a 16 bit return code. Unix shells # lossily compress this to an 8 bit value, using the formula below. # Shell status code < 128 means the process exited normally, status # code >= 128 means the process died because of a signal. status = 128 + os.WTERMSIG(status) return status google-apputils-0.4.1/google/apputils/stopwatch.py0000640033465300116100000001430502263613200023253 0ustar craigcitroeng00000000000000#!/usr/bin/env python # Copyright 2005 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A useful class for digesting, on a high-level, where time in a program goes. Usage: sw = StopWatch() sw.start() sw.start('foo') foo() sw.stop('foo') args = overhead_code() sw.start('bar') bar(args) sw.stop('bar') sw.dump() If you start a new timer when one is already running, then the other one will stop running, and restart when you stop this timer. This behavior is very useful for when you want to try timing for a subcall without remembering what is already running. 
For instance: sw.start('all_this') do_some_stuff() sw.start('just_that') small_but_expensive_function() sw.stop('just_that') cleanup_code() sw.stop('all_this') In this case, the output will be what you want: the time spent in small_but_expensive function will show up in the timer for just_that and not all_this. """ import StringIO import time __owner__ = 'dbentley@google.com (Dan Bentley)' class StopWatch(object): """Class encapsulating a timer; see above for example usage. Instance variables: timers: map of stopwatch name -> time for each currently running stopwatch, where time is seconds from the epoch of when this stopwatch was started. accum: map of stopwatch name -> accumulated time, in seconds, it has already been run for. stopped: map of timer name -> list of timer names that are blocking it. counters: map of timer name -> number of times it has been started. """ def __init__(self): self.timers = {} self.accum = {} self.stopped = {} self.counters = {} def start(self, timer='total', stop_others=True): """Start a timer. Args: timer: str; name of the timer to start, defaults to the overall timer. stop_others: bool; if True, stop all other running timers. If False, then you can have time that is spent inside more than one timer and there's a good chance that the overhead measured will be negative. """ if stop_others: stopped = [] for other in list(self.timers): if not other == 'total': self.stop(other) stopped.append(other) self.stopped[timer] = stopped self.counters[timer] = self.counters.get(timer, 0) + 1 self.timers[timer] = time.time() def stop(self, timer='total'): """Stop a running timer. This includes restarting anything that was stopped on behalf of this timer. Args: timer: str; name of the timer to stop, defaults to the overall timer. Raises: RuntimeError: if timer refers to a timer that was never started. """ if timer not in self.timers: raise RuntimeError( 'Tried to stop timer that was never started: %s' % timer) self.accum[timer] = self.timervalue(timer) del self.timers[timer] for stopped in self.stopped.get(timer, []): self.start(stopped, stop_others=0) def timervalue(self, timer='total', now=None): """Return the value seen by this timer so far. If the timer is stopped, this will be the accumulated time it has seen. If the timer is running, this will be the time it has seen up to now. If the timer has never been started, this will be zero. Args: timer: str; the name of the timer to report on. now: long; if provided, the time to use for 'now' for running timers. """ if not now: now = time.time() if timer in self.timers: # Timer is running now. return self.accum.get(timer, 0.0) + (now - self.timers[timer]) elif timer in self.accum: # Timer is stopped. return self.accum[timer] else: # Timer is never started. return 0.0 def overhead(self, now=None): """Calculate the overhead. Args: now: (optional) time to use as the current time. Returns: The overhead, that is, time spent in total but not in any sub timer. This may be negative if time was counted in two sub timers. Avoid this by always using stop_others. """ total = self.timervalue('total', now) if total == 0.0: return 0.0 all_timers = sum(self.accum.itervalues()) return total - (all_timers - total) def results(self, verbose=False): """Get the results of this stopwatch. Args: verbose: bool; if True, show all times; otherwise, show only the total. Returns: A list of tuples showing the output of this stopwatch, of the form (name, value, num_starts) for each timer. 
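For example, a run that only ever started the default timer might return something like [('total', 3.42, 1)].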
Note that if the total timer is not used, non-verbose results will be the empty list. """ now = time.time() all_names = self.accum.keys() names = [] if 'total' in all_names: all_names.remove('total') all_names.sort() if verbose: names = all_names results = [(name, self.timervalue(name, now=now), self.counters[name]) for name in names] if verbose: results.append(('overhead', self.overhead(now=now), 1)) if 'total' in self.accum or 'total' in self.timers: results.append(('total', self.timervalue('total', now=now), self.counters['total'])) return results def dump(self, verbose=False): """Describes where time in this stopwatch was spent. Args: verbose: bool; if True, show all timers; otherwise, show only the total. Returns: A string describing the stopwatch. """ output = StringIO.StringIO() results = self.results(verbose=verbose) maxlength = max([len(result[0]) for result in results]) for result in results: output.write('%*s: %6.2fs\n' % (maxlength, result[0], result[1])) return output.getvalue() # Create a stopwatch to be publicly used. sw = StopWatch() google-apputils-0.4.1/google/__init__.py0000640033465300116100000000030102263613200021124 0ustar craigcitroeng00000000000000#!/usr/bin/env python try: import pkg_resources pkg_resources.declare_namespace(__name__) except ImportError: from pkgutil import extend_path __path__ = extend_path(__path__, __name__) google-apputils-0.4.1/google_apputils.egg-info/0000750033465300116100000000000012417377553022457 5ustar craigcitroeng00000000000000google-apputils-0.4.1/google_apputils.egg-info/requires.txt0000640033465300116100000000006212417377553025056 0ustar craigcitroeng00000000000000python-dateutil>=1.4 python-gflags>=1.4 pytz>=2010google-apputils-0.4.1/google_apputils.egg-info/top_level.txt0000640033465300116100000000000712417377553025207 0ustar craigcitroeng00000000000000google google-apputils-0.4.1/google_apputils.egg-info/entry_points.txt0000640033465300116100000000025712417377553025762 0ustar craigcitroeng00000000000000[distutils.setup_keywords] google_test_dir = google.apputils.setup_command:ValidateGoogleTestDir [distutils.commands] google_test = google.apputils.setup_command:GoogleTest google-apputils-0.4.1/google_apputils.egg-info/PKG-INFO0000640033465300116100000000036712417377553023563 0ustar craigcitroeng00000000000000Metadata-Version: 1.0 Name: google-apputils Version: 0.4.1 Summary: UNKNOWN Home-page: http://code.google.com/p/google-apputils-python Author: Google Inc. 
Author-email: opensource@google.com License: UNKNOWN Description: UNKNOWN Platform: UNKNOWN google-apputils-0.4.1/google_apputils.egg-info/namespace_packages.txt0000640033465300116100000000000712417377553027010 0ustar craigcitroeng00000000000000google google-apputils-0.4.1/google_apputils.egg-info/SOURCES.txt0000640033465300116100000000126612417377553024351 0ustar craigcitroeng00000000000000README setup.py google/__init__.py google/apputils/__init__.py google/apputils/app.py google/apputils/appcommands.py google/apputils/basetest.py google/apputils/datelib.py google/apputils/debug.py google/apputils/file_util.py google/apputils/humanize.py google/apputils/resources.py google/apputils/run_script_module.py google/apputils/setup_command.py google/apputils/shellutil.py google/apputils/stopwatch.py google_apputils.egg-info/PKG-INFO google_apputils.egg-info/SOURCES.txt google_apputils.egg-info/dependency_links.txt google_apputils.egg-info/entry_points.txt google_apputils.egg-info/namespace_packages.txt google_apputils.egg-info/requires.txt google_apputils.egg-info/top_level.txtgoogle-apputils-0.4.1/google_apputils.egg-info/dependency_links.txt0000640033465300116100000000000112417377553026526 0ustar craigcitroeng00000000000000 google-apputils-0.4.1/PKG-INFO0000640033465300116100000000036712417377553016674 0ustar craigcitroeng00000000000000Metadata-Version: 1.0 Name: google-apputils Version: 0.4.1 Summary: UNKNOWN Home-page: http://code.google.com/p/google-apputils-python Author: Google Inc. Author-email: opensource@google.com License: UNKNOWN Description: UNKNOWN Platform: UNKNOWN google-apputils-0.4.1/setup.cfg0000640033465300116100000000007312417377553017412 0ustar craigcitroeng00000000000000[egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 google-apputils-0.4.1/setup.py0000640033465300116100000000541202263613200017261 0ustar craigcitroeng00000000000000#!/usr/bin/env python # Copyright 2010 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import ez_setup ez_setup.use_setuptools() from setuptools import setup, find_packages from setuptools.command import test REQUIRE = [ "python-dateutil>=1.4", "python-gflags>=1.4", "pytz>=2010", ] TEST_REQUIRE = ["mox>=0.5"] if sys.version_info[:2] < (2, 7): # unittest2 is a backport of Python 2.7's unittest. TEST_REQUIRE.append("unittest2>=0.5.1") # Mild hackery to get around the fact that we want to include a # GoogleTest as one of the cmdclasses for our package, but we # can't reference it until our package is installed. We simply # make a wrapper class that actually creates objects of the # appropriate class at runtime. 
class GoogleTestWrapper(test.test, object): test_dir = None def __new__(cls, *args, **kwds): from google.apputils import setup_command dist = setup_command.GoogleTest(*args, **kwds) dist.test_dir = GoogleTestWrapper.test_dir return dist setup( name="google-apputils", version="0.4.1", packages=find_packages(exclude=["tests"]), namespace_packages=["google"], entry_points={ "distutils.commands": [ "google_test = google.apputils.setup_command:GoogleTest", ], "distutils.setup_keywords": [ ("google_test_dir = google.apputils.setup_command" ":ValidateGoogleTestDir"), ], }, install_requires=REQUIRE, tests_require=REQUIRE + TEST_REQUIRE, # The entry_points above allow other projects to understand the # google_test command and test_dir option by specifying # setup_requires("google-apputils"). However, those entry_points only get # registered when this project is installed, and we need to run Google-style # tests for this project before it is installed. So we need to manually set # up the command and option mappings, for this project only, and we use # a wrapper class that exists before the install happens. cmdclass={"google_test": GoogleTestWrapper}, command_options={"google_test": {"test_dir": ("setup.py", "tests")}}, author="Google Inc.", author_email="opensource@google.com", url="http://code.google.com/p/google-apputils-python", )