)"""
        self.write(self._indent('<para>'))
        self.format_children(layout)
        self.writeln('</para>')
def visit_span(self, layout):
"""display links (using )"""
#TODO: translate in docbook
self.write('' % self.handle_attrs(layout))
self.format_children(layout)
self.write('')
def visit_link(self, layout):
"""display links (using )"""
self.write('%s' % (layout.url,
self.handle_attrs(layout),
layout.label))
def visit_verbatimtext(self, layout):
"""display verbatim text (using )"""
self.writeln(self._indent(' '))
self.write(layout.data.replace('&', '&').replace('<', '<'))
self.writeln(self._indent(' '))
def visit_text(self, layout):
"""add some text"""
        self.write(layout.data.replace('&', '&amp;').replace('<', '&lt;'))
def _indent(self, string):
"""correctly indent string according to section"""
return ' ' * 2*(self.section) + string
logilab-common-0.61.0/ureports/__init__.py
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Universal report objects and some formatting drivers.
A way to create simple reports using python objects, primarily designed to be
formatted as text and html.
"""
from __future__ import generators
__docformat__ = "restructuredtext en"
import sys
from cStringIO import StringIO
from StringIO import StringIO as UStringIO
from logilab.common.textutils import linesep
def get_nodes(node, klass):
"""return an iterator on all children node of the given klass"""
for child in node.children:
if isinstance(child, klass):
yield child
        # recurse (FIXME: recursion controlled by an option)
for grandchild in get_nodes(child, klass):
yield grandchild
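# A minimal usage sketch (names are illustrative): collect every Text node
# below a layout, whatever its depth; `sect` stands for any Section built
# with the nodes defined in logilab.common.ureports.nodes.
#
#   >>> from logilab.common.ureports.nodes import Text
#   >>> [text.data for text in get_nodes(sect, Text)]
#   ['first cell', 'second cell']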
def layout_title(layout):
"""try to return the layout's title as string, return None if not found
"""
for child in layout.children:
if isinstance(child, Title):
return ' '.join([node.data for node in get_nodes(child, Text)])
def build_summary(layout, level=1):
"""make a summary for the report, including X level"""
assert level > 0
level -= 1
summary = List(klass='summary')
for child in layout.children:
if not isinstance(child, Section):
continue
label = layout_title(child)
if not label and not child.id:
continue
if not child.id:
child.id = label.replace(' ', '-')
node = Link('#'+child.id, label=label or child.id)
        # FIXME: The three following lines produce not very compliant
        # docbook: there are some useless <para> elements. They might be
# replaced by the three commented lines but this then produces
# a bug in html display...
if level and [n for n in child.children if isinstance(n, Section)]:
node = Paragraph([node, build_summary(child, level)])
summary.append(node)
# summary.append(node)
# if level and [n for n in child.children if isinstance(n, Section)]:
# summary.append(build_summary(child, level))
return summary
class BaseWriter(object):
"""base class for ureport writers"""
def format(self, layout, stream=None, encoding=None):
"""format and write the given layout into the stream object
unicode policy: unicode strings may be found in the layout;
try to call stream.write with it, but give it back encoded using
the given encoding if it fails
"""
if stream is None:
stream = sys.stdout
if not encoding:
encoding = getattr(stream, 'encoding', 'UTF-8')
self.encoding = encoding or 'UTF-8'
self.__compute_funcs = []
self.out = stream
self.begin_format(layout)
layout.accept(self)
self.end_format(layout)
def format_children(self, layout):
"""recurse on the layout children and call their accept method
(see the Visitor pattern)
"""
for child in getattr(layout, 'children', ()):
child.accept(self)
def writeln(self, string=''):
"""write a line in the output buffer"""
self.write(string + linesep)
def write(self, string):
"""write a string in the output buffer"""
try:
self.out.write(string)
except UnicodeEncodeError:
self.out.write(string.encode(self.encoding))
def begin_format(self, layout):
"""begin to format a layout"""
self.section = 0
def end_format(self, layout):
"""finished to format a layout"""
def get_table_content(self, table):
"""trick to get table content without actually writing it
        return an aligned list of lists containing table cell values as strings
"""
result = [[]]
cols = table.cols
for cell in self.compute_content(table):
if cols == 0:
result.append([])
cols = table.cols
cols -= 1
result[-1].append(cell)
# fill missing cells
while len(result[-1]) < cols:
result[-1].append('')
return result
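    # Illustrative sketch: for a table with table.cols == 2 whose cells
    # format to 'a', 'b', 'c' and 'd', the cells are grouped row by row
    # (`writer` and `table` are assumed to exist):
    #
    #   >>> writer.get_table_content(table)
    #   [['a', 'b'], ['c', 'd']]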
def compute_content(self, layout):
"""trick to compute the formatting of children layout before actually
writing it
return an iterator on strings (one for each child element)
"""
# use cells !
def write(data):
try:
stream.write(data)
except UnicodeEncodeError:
stream.write(data.encode(self.encoding))
def writeln(data=''):
try:
stream.write(data+linesep)
except UnicodeEncodeError:
stream.write(data.encode(self.encoding)+linesep)
self.write = write
self.writeln = writeln
self.__compute_funcs.append((write, writeln))
for child in layout.children:
stream = UStringIO()
child.accept(self)
yield stream.getvalue()
self.__compute_funcs.pop()
try:
self.write, self.writeln = self.__compute_funcs[-1]
except IndexError:
del self.write
del self.writeln
from logilab.common.ureports.nodes import *
from logilab.common.ureports.text_writer import TextWriter
from logilab.common.ureports.html_writer import HTMLWriter
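# Minimal end-to-end sketch (constructor signatures are assumed from the
# nodes module and may differ slightly): build a small layout and render it
# with one of the writers imported above.
#
#   >>> import sys
#   >>> layout = Section('My report', 'optional description')
#   >>> layout.append(Paragraph([Text('hello world')]))
#   >>> TextWriter().format(layout, sys.stdout)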
logilab-common-0.61.0/ureports/text_writer.py
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Text formatting drivers for ureports"""
__docformat__ = "restructuredtext en"
from logilab.common.textutils import linesep
from logilab.common.ureports import BaseWriter
TITLE_UNDERLINES = ['', '=', '-', '`', '.', '~', '^']
BULLETS = ['*', '-']
class TextWriter(BaseWriter):
"""format layouts as text
    (inspired by ReStructuredText, but not fully compliant yet)
"""
def begin_format(self, layout):
super(TextWriter, self).begin_format(layout)
self.list_level = 0
self.pending_urls = []
def visit_section(self, layout):
"""display a section as text
"""
self.section += 1
self.writeln()
self.format_children(layout)
if self.pending_urls:
self.writeln()
for label, url in self.pending_urls:
self.writeln('.. _`%s`: %s' % (label, url))
self.pending_urls = []
self.section -= 1
self.writeln()
def visit_title(self, layout):
title = ''.join(list(self.compute_content(layout)))
self.writeln(title)
try:
self.writeln(TITLE_UNDERLINES[self.section] * len(title))
except IndexError:
print "FIXME TITLE TOO DEEP. TURNING TITLE INTO TEXT"
def visit_paragraph(self, layout):
"""enter a paragraph"""
self.format_children(layout)
self.writeln()
def visit_span(self, layout):
"""enter a span"""
self.format_children(layout)
def visit_table(self, layout):
"""display a table as text"""
table_content = self.get_table_content(layout)
# get columns width
cols_width = [0]*len(table_content[0])
for row in table_content:
for index in range(len(row)):
col = row[index]
cols_width[index] = max(cols_width[index], len(col))
if layout.klass == 'field':
self.field_table(layout, table_content, cols_width)
else:
self.default_table(layout, table_content, cols_width)
self.writeln()
def default_table(self, layout, table_content, cols_width):
"""format a table"""
cols_width = [size+1 for size in cols_width]
format_strings = ' '.join(['%%-%ss'] * len(cols_width))
format_strings = format_strings % tuple(cols_width)
format_strings = format_strings.split(' ')
table_linesep = '\n+' + '+'.join(['-'*w for w in cols_width]) + '+\n'
headsep = '\n+' + '+'.join(['='*w for w in cols_width]) + '+\n'
# FIXME: layout.cheaders
self.write(table_linesep)
for i in range(len(table_content)):
self.write('|')
line = table_content[i]
for j in range(len(line)):
self.write(format_strings[j] % line[j])
self.write('|')
if i == 0 and layout.rheaders:
self.write(headsep)
else:
self.write(table_linesep)
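    # Rendered sketch for a 2x2 table with cells 'a', 'b', 'c', 'd'
    # (illustrative; actual spacing depends on the computed column widths):
    #
    #   +--+--+
    #   |a |b |
    #   +--+--+
    #   |c |d |
    #   +--+--+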
def field_table(self, layout, table_content, cols_width):
"""special case for field table"""
assert layout.cols == 2
format_string = '%s%%-%ss: %%s' % (linesep, cols_width[0])
for field, value in table_content:
self.write(format_string % (field, value))
def visit_list(self, layout):
"""display a list layout as text"""
bullet = BULLETS[self.list_level % len(BULLETS)]
indent = ' ' * self.list_level
self.list_level += 1
for child in layout.children:
self.write('%s%s%s ' % (linesep, indent, bullet))
child.accept(self)
self.list_level -= 1
def visit_link(self, layout):
"""add a hyperlink"""
if layout.label != layout.url:
self.write('`%s`_' % layout.label)
self.pending_urls.append( (layout.label, layout.url) )
else:
self.write(layout.url)
def visit_verbatimtext(self, layout):
"""display a verbatim layout as text (so difficult ;)
"""
self.writeln('::\n')
for line in layout.data.splitlines():
self.writeln(' ' + line)
self.writeln()
def visit_text(self, layout):
"""add some text"""
self.write(layout.data)
logilab-common-0.61.0/ureports/html_writer.py
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""HTML formatting drivers for ureports"""
__docformat__ = "restructuredtext en"
from cgi import escape
from logilab.common.ureports import BaseWriter
class HTMLWriter(BaseWriter):
"""format layouts as HTML"""
def __init__(self, snippet=None):
super(HTMLWriter, self).__init__()
self.snippet = snippet
def handle_attrs(self, layout):
"""get an attribute string from layout member attributes"""
attrs = ''
klass = getattr(layout, 'klass', None)
if klass:
attrs += ' class="%s"' % klass
nid = getattr(layout, 'id', None)
if nid:
attrs += ' id="%s"' % nid
return attrs
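    # Illustrative sketch: any object exposing `klass` and/or `id` gets an
    # attribute string with a leading space, otherwise an empty string
    # (`Dummy` is a hypothetical stand-in for a layout node):
    #
    #   >>> class Dummy(object): klass = 'summary'; id = 's1'
    #   >>> HTMLWriter().handle_attrs(Dummy())
    #   ' class="summary" id="s1"'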
def begin_format(self, layout):
"""begin to format a layout"""
super(HTMLWriter, self).begin_format(layout)
if self.snippet is None:
            self.writeln('<html>')
            self.writeln('<body>')
def end_format(self, layout):
"""finished to format a layout"""
if self.snippet is None:
            self.writeln('</body>')
            self.writeln('</html>')
def visit_section(self, layout):
"""display a section as html, using div + h[section level]"""
self.section += 1
        self.writeln('<div%s>' % self.handle_attrs(layout))
self.format_children(layout)
        self.writeln('</div>')
self.section -= 1
def visit_title(self, layout):
"""display a title using """
self.write('' % (self.section, self.handle_attrs(layout)))
self.format_children(layout)
self.writeln('' % self.section)
def visit_table(self, layout):
"""display a table as html"""
        self.writeln('<table%s>' % self.handle_attrs(layout))
table_content = self.get_table_content(layout)
for i in range(len(table_content)):
row = table_content[i]
            if i == 0 and layout.rheaders:
                self.writeln('<tr class="header">')
            else:
                self.writeln('<tr class="%s">' % (i % 2 and 'even' or 'odd'))
            for j in range(len(row)):
                cell = row[j] or '&#160;'
                if (layout.rheaders and i == 0) or \
                   (layout.cheaders and j == 0) or \
                   (layout.rrheaders and i+1 == len(table_content)) or \
                   (layout.rcheaders and j+1 == len(row)):
                    self.writeln('<th>%s</th>' % cell)
                else:
                    self.writeln('<td>%s</td>' % cell)
            self.writeln('</tr>')
        self.writeln('</table>')
def visit_list(self, layout):
"""display a list as html"""
        self.writeln('<ul%s>' % self.handle_attrs(layout))
for row in list(self.compute_content(layout)):
            self.writeln('<li>%s</li>' % row)
        self.writeln('</ul>')
def visit_paragraph(self, layout):
"""display links (using )"""
self.write('
')
self.format_children(layout)
self.write('
')
def visit_span(self, layout):
"""display links (using )"""
self.write('' % self.handle_attrs(layout))
self.format_children(layout)
self.write('')
def visit_link(self, layout):
"""display links (using )"""
self.write(' %s' % (layout.url,
self.handle_attrs(layout),
layout.label))
def visit_verbatimtext(self, layout):
"""display verbatim text (using
)"""
self.write('')
self.write(layout.data.replace('&', '&').replace('<', '<'))
self.write('
')
def visit_text(self, layout):
"""add some text"""
data = layout.data
if layout.escaped:
            data = data.replace('&', '&amp;').replace('<', '&lt;')
self.write(data)
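# Usage sketch (assuming a `layout` built from logilab.common.ureports.nodes):
# pass any non-None value as `snippet` to skip the enclosing <html>/<body>
# wrapper and produce an HTML fragment instead of a full document.
#
#   >>> import sys
#   >>> HTMLWriter(snippet=True).format(layout, sys.stdout)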
logilab-common-0.61.0/fileutils.py
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""File and file-path manipulation utilities.
:group path manipulation: first_level_directory, relative_path, is_binary,\
get_by_ext, remove_dead_links
:group file manipulation: norm_read, norm_open, lines, stream_lines,\
write_open_mode, ensure_fs_mode, export
:sort: path manipulation, file manipulation
"""
__docformat__ = "restructuredtext en"
import sys
import shutil
import mimetypes
from os.path import isabs, isdir, islink, split, exists, normpath, join
from os.path import abspath
from os import sep, mkdir, remove, listdir, stat, chmod, walk
from stat import ST_MODE, S_IWRITE
from cStringIO import StringIO
from logilab.common import STD_BLACKLIST as BASE_BLACKLIST, IGNORED_EXTENSIONS
from logilab.common.shellutils import find
from logilab.common.deprecation import deprecated
from logilab.common.compat import FileIO, any
def first_level_directory(path):
"""Return the first level directory of a path.
>>> first_level_directory('home/syt/work')
'home'
>>> first_level_directory('/home/syt/work')
'/'
>>> first_level_directory('work')
'work'
>>>
:type path: str
:param path: the path for which we want the first level directory
:rtype: str
:return: the first level directory appearing in `path`
"""
head, tail = split(path)
while head and tail:
head, tail = split(head)
if tail:
return tail
# path was absolute, head is the fs root
return head
def abspath_listdir(path):
"""Lists path's content using absolute paths.
>>> os.listdir('/home')
['adim', 'alf', 'arthur', 'auc']
>>> abspath_listdir('/home')
['/home/adim', '/home/alf', '/home/arthur', '/home/auc']
"""
path = abspath(path)
return [join(path, filename) for filename in listdir(path)]
def is_binary(filename):
"""Return true if filename may be a binary file, according to it's
extension.
:type filename: str
:param filename: the name of the file
:rtype: bool
:return:
      true if the file is a binary file (actually if its mime type
      doesn't start with text/)
"""
try:
return not mimetypes.guess_type(filename)[0].startswith('text')
except AttributeError:
return 1
def write_open_mode(filename):
"""Return the write mode that should used to open file.
:type filename: str
:param filename: the name of the file
:rtype: str
    :return: the mode that should be used to open the file ('w' or 'wb')
"""
if is_binary(filename):
return 'wb'
return 'w'
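# Illustrative sketch (exact results depend on the platform's mimetypes
# database; the file names are hypothetical):
#
#   >>> write_open_mode('logo.png')
#   'wb'
#   >>> write_open_mode('notes.txt')
#   'w'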
def ensure_fs_mode(filepath, desired_mode=S_IWRITE):
"""Check that the given file has the given mode(s) set, else try to
set it.
:type filepath: str
:param filepath: path of the file
:type desired_mode: int
:param desired_mode:
ORed flags describing the desired mode. Use constants from the
      `stat` module for file permission modes
"""
mode = stat(filepath)[ST_MODE]
if not mode & desired_mode:
chmod(filepath, mode | desired_mode)
# XXX (syt) unused? kill?
class ProtectedFile(FileIO):
"""A special file-object class that automatically does a 'chmod +w' when
needed.
XXX: for now, the way it is done allows 'normal file-objects' to be
created during the ProtectedFile object lifetime.
One way to circumvent this would be to chmod / unchmod on each
write operation.
One other way would be to :
- catch the IOError in the __init__
- if IOError, then create a StringIO object
- each write operation writes in this StringIO object
- on close()/del(), write/append the StringIO content to the file and
do the chmod only once
"""
def __init__(self, filepath, mode):
self.original_mode = stat(filepath)[ST_MODE]
self.mode_changed = False
if mode in ('w', 'a', 'wb', 'ab'):
if not self.original_mode & S_IWRITE:
chmod(filepath, self.original_mode | S_IWRITE)
self.mode_changed = True
FileIO.__init__(self, filepath, mode)
def _restore_mode(self):
"""restores the original mode if needed"""
if self.mode_changed:
chmod(self.name, self.original_mode)
# Don't re-chmod in case of several restore
self.mode_changed = False
def close(self):
"""restore mode before closing"""
self._restore_mode()
FileIO.close(self)
def __del__(self):
if not self.closed:
self.close()
class UnresolvableError(Exception):
"""Exception raised by relative path when it's unable to compute relative
path between two paths.
"""
def relative_path(from_file, to_file):
"""Try to get a relative path from `from_file` to `to_file`
    (the returned path will be absolute if `to_file` is absolute). This
    function is useful to create links in `from_file` pointing to `to_file`;
    that typical use case is illustrated in the examples below.
If both files are relative, they're expected to be relative to the same
directory.
>>> relative_path( from_file='toto/index.html', to_file='index.html')
'../index.html'
>>> relative_path( from_file='index.html', to_file='toto/index.html')
'toto/index.html'
>>> relative_path( from_file='tutu/index.html', to_file='toto/index.html')
'../toto/index.html'
>>> relative_path( from_file='toto/index.html', to_file='/index.html')
'/index.html'
>>> relative_path( from_file='/toto/index.html', to_file='/index.html')
'../index.html'
>>> relative_path( from_file='/toto/index.html', to_file='/toto/summary.html')
'summary.html'
>>> relative_path( from_file='index.html', to_file='index.html')
''
>>> relative_path( from_file='/index.html', to_file='toto/index.html')
Traceback (most recent call last):
File "", line 1, in ?
File "", line 37, in relative_path
UnresolvableError
>>> relative_path( from_file='/index.html', to_file='/index.html')
''
>>>
:type from_file: str
:param from_file: source file (where links will be inserted)
:type to_file: str
:param to_file: target file (on which links point)
:raise UnresolvableError: if it has been unable to guess a correct path
:rtype: str
:return: the relative path of `to_file` from `from_file`
"""
from_file = normpath(from_file)
to_file = normpath(to_file)
if from_file == to_file:
return ''
if isabs(to_file):
if not isabs(from_file):
return to_file
elif isabs(from_file):
raise UnresolvableError()
from_parts = from_file.split(sep)
to_parts = to_file.split(sep)
idem = 1
result = []
while len(from_parts) > 1:
dirname = from_parts.pop(0)
if idem and len(to_parts) > 1 and dirname == to_parts[0]:
to_parts.pop(0)
else:
idem = 0
result.append('..')
result += to_parts
return sep.join(result)
def norm_read(path):
"""Return the content of the file with normalized line feeds.
:type path: str
:param path: path to the file to read
:rtype: str
:return: the content of the file with normalized line feeds
"""
return open(path, 'U').read()
norm_read = deprecated("use \"open(path, 'U').read()\"")(norm_read)
def norm_open(path):
"""Return a stream for a file with content with normalized line feeds.
:type path: str
:param path: path to the file to open
:rtype: file or StringIO
:return: the opened file with normalized line feeds
"""
return open(path, 'U')
norm_open = deprecated("use \"open(path, 'U')\"")(norm_open)
def lines(path, comments=None):
"""Return a list of non empty lines in the file located at `path`.
:type path: str
:param path: path to the file
:type comments: str or None
:param comments:
optional string which can be used to comment a line in the file
(i.e. lines starting with this string won't be returned)
:rtype: list
:return:
      a list of stripped lines in the file, without empty and commented
lines
:warning: at some point this function will probably return an iterator
"""
stream = open(path, 'U')
result = stream_lines(stream, comments)
stream.close()
return result
def stream_lines(stream, comments=None):
"""Return a list of non empty lines in the given `stream`.
:type stream: object implementing 'xreadlines' or 'readlines'
:param stream: file like object
:type comments: str or None
:param comments:
optional string which can be used to comment a line in the file
(i.e. lines starting with this string won't be returned)
:rtype: list
:return:
      a list of stripped lines in the file, without empty and commented
lines
:warning: at some point this function will probably return an iterator
"""
try:
readlines = stream.xreadlines
except AttributeError:
readlines = stream.readlines
result = []
for line in readlines():
line = line.strip()
if line and (comments is None or not line.startswith(comments)):
result.append(line)
return result
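# Illustrative sketch: keep only meaningful lines from a configuration-like
# stream (the content is hypothetical):
#
#   >>> from StringIO import StringIO
#   >>> stream_lines(StringIO('# comment\n\nfoo = 1\nbar = 2\n'), comments='#')
#   ['foo = 1', 'bar = 2']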
def export(from_dir, to_dir,
blacklist=BASE_BLACKLIST, ignore_ext=IGNORED_EXTENSIONS,
verbose=0):
"""Make a mirror of `from_dir` in `to_dir`, omitting directories and
files listed in the black list or ending with one of the given
extensions.
:type from_dir: str
:param from_dir: directory to export
:type to_dir: str
:param to_dir: destination directory
:type blacklist: list or tuple
:param blacklist:
list of files or directories to ignore, default to the content of
`BASE_BLACKLIST`
:type ignore_ext: list or tuple
:param ignore_ext:
list of extensions to ignore, default to the content of
`IGNORED_EXTENSIONS`
:type verbose: bool
:param verbose:
flag indicating whether information about exported files should be
printed to stderr, default to False
"""
try:
mkdir(to_dir)
except OSError:
pass # FIXME we should use "exists" if the point is about existing dir
# else (permission problems?) shouldn't return / raise ?
for directory, dirnames, filenames in walk(from_dir):
for norecurs in blacklist:
try:
dirnames.remove(norecurs)
except ValueError:
continue
for dirname in dirnames:
src = join(directory, dirname)
dest = to_dir + src[len(from_dir):]
if isdir(src):
if not exists(dest):
mkdir(dest)
for filename in filenames:
# don't include binary files
# endswith does not accept tuple in 2.4
if any([filename.endswith(ext) for ext in ignore_ext]):
continue
src = join(directory, filename)
dest = to_dir + src[len(from_dir):]
if verbose:
print >> sys.stderr, src, '->', dest
if exists(dest):
remove(dest)
shutil.copy2(src, dest)
def remove_dead_links(directory, verbose=0):
"""Recursively traverse directory and remove all dead links.
:type directory: str
:param directory: directory to cleanup
:type verbose: bool
:param verbose:
flag indicating whether information about deleted links should be
printed to stderr, default to False
"""
    for dirpath, dirnames, filenames in walk(directory):
        for filename in dirnames + filenames:
src = join(dirpath, filename)
if islink(src) and not exists(src):
if verbose:
print 'remove dead link', src
remove(src)
logilab-common-0.61.0/modutils.py
# -*- coding: utf-8 -*-
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Python modules manipulation utility functions.
:type PY_SOURCE_EXTS: tuple(str)
:var PY_SOURCE_EXTS: list of possible python source file extensions
:type STD_LIB_DIR: str
:var STD_LIB_DIR: directory where standard modules are located
:type BUILTIN_MODULES: dict
:var BUILTIN_MODULES: dictionary with builtin module names as key
"""
from __future__ import with_statement
__docformat__ = "restructuredtext en"
import sys
import os
from os.path import splitext, join, abspath, isdir, dirname, exists, basename
from imp import find_module, load_module, C_BUILTIN, PY_COMPILED, PKG_DIRECTORY
from distutils.sysconfig import get_config_var, get_python_lib, get_python_version
from distutils.errors import DistutilsPlatformError
try:
import zipimport
except ImportError:
zipimport = None
ZIPFILE = object()
from logilab.common import STD_BLACKLIST, _handle_blacklist
# Notes about STD_LIB_DIR
# Consider arch-specific installation for STD_LIB_DIR definition
# :mod:`distutils.sysconfig` contains too many hardcoded values to rely on
#
# :see: `Problems with /usr/lib64 builds `_
# :see: `FHS `_
if sys.platform.startswith('win'):
PY_SOURCE_EXTS = ('py', 'pyw')
PY_COMPILED_EXTS = ('dll', 'pyd')
else:
PY_SOURCE_EXTS = ('py',)
PY_COMPILED_EXTS = ('so',)
try:
STD_LIB_DIR = get_python_lib(standard_lib=1)
# get_python_lib(standard_lib=1) is not available on pypy, set STD_LIB_DIR to
# non-valid path, see https://bugs.pypy.org/issue1164
except DistutilsPlatformError:
STD_LIB_DIR = '//'
EXT_LIB_DIR = get_python_lib()
BUILTIN_MODULES = dict(zip(sys.builtin_module_names,
[1]*len(sys.builtin_module_names)))
class NoSourceFile(Exception):
"""exception raised when we are not able to get a python
source file for a precompiled file
"""
class LazyObject(object):
def __init__(self, module, obj):
self.module = module
self.obj = obj
self._imported = None
def _getobj(self):
if self._imported is None:
self._imported = getattr(load_module_from_name(self.module),
self.obj)
return self._imported
def __getattribute__(self, attr):
try:
return super(LazyObject, self).__getattribute__(attr)
except AttributeError, ex:
return getattr(self._getobj(), attr)
def __call__(self, *args, **kwargs):
return self._getobj()(*args, **kwargs)
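# Usage sketch: defer an import until the wrapped object is actually used.
# The module is only loaded at the first attribute access or call (the
# wrapped name below is illustrative).
#
#   >>> dumps = LazyObject('json', 'dumps')   # nothing imported yet
#   >>> dumps([1, 2])                         # 'json' is imported here
#   '[1, 2]'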
def load_module_from_name(dotted_name, path=None, use_sys=1):
"""Load a Python module from its name.
:type dotted_name: str
:param dotted_name: python name of a module or package
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be
used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
return load_module_from_modpath(dotted_name.split('.'), path, use_sys)
def load_module_from_modpath(parts, path=None, use_sys=1):
"""Load a python module from its splitted name.
:type parts: list(str) or tuple(str)
:param parts:
python name of a module or package splitted on '.'
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
if use_sys:
try:
return sys.modules['.'.join(parts)]
except KeyError:
pass
modpath = []
prevmodule = None
for part in parts:
modpath.append(part)
curname = '.'.join(modpath)
module = None
if len(modpath) != len(parts):
# even with use_sys=False, should try to get outer packages from sys.modules
module = sys.modules.get(curname)
elif use_sys:
# because it may have been indirectly loaded through a parent
module = sys.modules.get(curname)
if module is None:
mp_file, mp_filename, mp_desc = find_module(part, path)
module = load_module(curname, mp_file, mp_filename, mp_desc)
if prevmodule:
setattr(prevmodule, part, module)
_file = getattr(module, '__file__', '')
if not _file and len(modpath) != len(parts):
raise ImportError('no module in %s' % '.'.join(parts[len(modpath):]) )
path = [dirname( _file )]
prevmodule = module
return module
def load_module_from_file(filepath, path=None, use_sys=1, extrapath=None):
"""Load a Python module from it's path.
:type filepath: str
:param filepath: path to the python module or package
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be
used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
modpath = modpath_from_file(filepath, extrapath)
return load_module_from_modpath(modpath, path, use_sys)
def _check_init(path, mod_path):
"""check there are some __init__.py all along the way"""
for part in mod_path:
path = join(path, part)
if not _has_init(path):
return False
return True
def modpath_from_file(filename, extrapath=None):
"""given a file path return the corresponding splitted module's name
(i.e name of a module or package splitted on '.')
:type filename: str
:param filename: file's path for which we want the module's name
:type extrapath: dict
:param extrapath:
optional extra search path, with path as key and package name for the path
as value. This is usually useful to handle package splitted in multiple
directories using __path__ trick.
:raise ImportError:
if the corresponding module's name has not been found
:rtype: list(str)
:return: the corresponding splitted module's name
"""
base = splitext(abspath(filename))[0]
if extrapath is not None:
for path_ in extrapath:
path = abspath(path_)
if path and base[:len(path)] == path:
submodpath = [pkg for pkg in base[len(path):].split(os.sep)
if pkg]
if _check_init(path, submodpath[:-1]):
return extrapath[path_].split('.') + submodpath
for path in sys.path:
path = abspath(path)
if path and base.startswith(path):
modpath = [pkg for pkg in base[len(path):].split(os.sep) if pkg]
if _check_init(path, modpath[:-1]):
return modpath
raise ImportError('Unable to find module for %s in %s' % (
filename, ', \n'.join(sys.path)))
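# Illustrative sketch (the path is hypothetical and assumed to live under a
# directory listed in sys.path):
#
#   >>> modpath_from_file('/usr/lib/python2.7/site-packages/logilab/common/modutils.py')
#   ['logilab', 'common', 'modutils']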
def file_from_modpath(modpath, path=None, context_file=None):
"""given a mod path (i.e. splitted module / package name), return the
corresponding file, giving priority to source file over precompiled
file if it exists
:type modpath: list or tuple
:param modpath:
splitted module's name (i.e name of a module or package splitted
on '.')
(this means explicit relative imports that start with dots have
empty strings in this list!)
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type context_file: str or None
:param context_file:
context file to consider, necessary if the identifier has been
introduced using a relative import unresolvable in the actual
context (i.e. modutils)
:raise ImportError: if there is no such module in the directory
:rtype: str or None
:return:
the path to the module's file or None if it's an integrated
builtin module such as 'sys'
"""
if context_file is not None:
context = dirname(context_file)
else:
context = context_file
if modpath[0] == 'xml':
# handle _xmlplus
try:
return _file_from_modpath(['_xmlplus'] + modpath[1:], path, context)
except ImportError:
return _file_from_modpath(modpath, path, context)
elif modpath == ['os', 'path']:
# FIXME: currently ignoring search_path...
return os.path.__file__
return _file_from_modpath(modpath, path, context)
def get_module_part(dotted_name, context_file=None):
"""given a dotted name return the module part of the name :
>>> get_module_part('logilab.common.modutils.get_module_part')
'logilab.common.modutils'
:type dotted_name: str
:param dotted_name: full name of the identifier we are interested in
:type context_file: str or None
:param context_file:
context file to consider, necessary if the identifier has been
introduced using a relative import unresolvable in the actual
context (i.e. modutils)
:raise ImportError: if there is no such module in the directory
:rtype: str or None
:return:
the module part of the name or None if we have not been able at
all to import the given name
XXX: deprecated, since it doesn't handle package precedence over module
(see #10066)
"""
# os.path trick
if dotted_name.startswith('os.path'):
return 'os.path'
parts = dotted_name.split('.')
if context_file is not None:
        # first check for builtin module which won't be considered later
# in that case (path != None)
if parts[0] in BUILTIN_MODULES:
if len(parts) > 2:
raise ImportError(dotted_name)
return parts[0]
# don't use += or insert, we want a new list to be created !
path = None
starti = 0
if parts[0] == '':
assert context_file is not None, \
'explicit relative import, but no context_file?'
path = [] # prevent resolving the import non-relatively
starti = 1
while parts[starti] == '': # for all further dots: change context
starti += 1
context_file = dirname(context_file)
for i in range(starti, len(parts)):
try:
file_from_modpath(parts[starti:i+1],
path=path, context_file=context_file)
except ImportError:
if not i >= max(1, len(parts) - 2):
raise
return '.'.join(parts[:i])
return dotted_name
def get_modules(package, src_directory, blacklist=STD_BLACKLIST):
"""given a package directory return a list of all available python
modules in the package and its subpackages
:type package: str
:param package: the python name for the package
:type src_directory: str
:param src_directory:
path of the directory corresponding to the package
:type blacklist: list or tuple
:param blacklist:
optional list of files or directory to ignore, default to
the value of `logilab.common.STD_BLACKLIST`
:rtype: list
:return:
the list of all available python modules in the package and its
subpackages
"""
modules = []
for directory, dirnames, filenames in os.walk(src_directory):
_handle_blacklist(blacklist, dirnames, filenames)
# check for __init__.py
if not '__init__.py' in filenames:
dirnames[:] = ()
continue
if directory != src_directory:
dir_package = directory[len(src_directory):].replace(os.sep, '.')
modules.append(package + dir_package)
for filename in filenames:
if _is_python_file(filename) and filename != '__init__.py':
src = join(directory, filename)
module = package + src[len(src_directory):-3]
modules.append(module.replace(os.sep, '.'))
return modules
def get_module_files(src_directory, blacklist=STD_BLACKLIST):
"""given a package directory return a list of all available python
module's files in the package and its subpackages
:type src_directory: str
:param src_directory:
path of the directory corresponding to the package
:type blacklist: list or tuple
:param blacklist:
optional list of files or directory to ignore, default to the value of
`logilab.common.STD_BLACKLIST`
:rtype: list
:return:
the list of all available python module's files in the package and
its subpackages
"""
files = []
for directory, dirnames, filenames in os.walk(src_directory):
_handle_blacklist(blacklist, dirnames, filenames)
# check for __init__.py
if not '__init__.py' in filenames:
dirnames[:] = ()
continue
for filename in filenames:
if _is_python_file(filename):
src = join(directory, filename)
files.append(src)
return files
def get_source_file(filename, include_no_ext=False):
"""given a python module's file name return the matching source file
name (the filename will be returned identically if it's a already an
absolute path to a python source file...)
:type filename: str
:param filename: python module's file name
:raise NoSourceFile: if no source file exists on the file system
:rtype: str
:return: the absolute path of the source file if it exists
"""
base, orig_ext = splitext(abspath(filename))
for ext in PY_SOURCE_EXTS:
source_path = '%s.%s' % (base, ext)
if exists(source_path):
return source_path
if include_no_ext and not orig_ext and exists(base):
return base
raise NoSourceFile(filename)
def cleanup_sys_modules(directories):
"""remove submodules of `directories` from `sys.modules`"""
for modname, module in sys.modules.items():
modfile = getattr(module, '__file__', None)
if modfile:
for directory in directories:
if modfile.startswith(directory):
del sys.modules[modname]
break
def is_python_source(filename):
"""
    :rtype: bool
    :return: True if the filename is a python source file
"""
return splitext(filename)[1][1:] in PY_SOURCE_EXTS
def is_standard_module(modname, std_path=(STD_LIB_DIR,)):
"""try to guess if a module is a standard python module (by default,
see `std_path` parameter's description)
:type modname: str
:param modname: name of the module we are interested in
:type std_path: list(str) or tuple(str)
    :param std_path: list of paths considered as standard
:rtype: bool
:return:
true if the module:
- is located on the path listed in one of the directory in `std_path`
- is a built-in module
"""
modname = modname.split('.')[0]
try:
filename = file_from_modpath([modname])
except ImportError, ex:
# import failed, i'm probably not so wrong by supposing it's
# not standard...
return 0
# modules which are not living in a file are considered standard
# (sys and __builtin__ for instance)
if filename is None:
return 1
filename = abspath(filename)
if filename.startswith(EXT_LIB_DIR):
return 0
for path in std_path:
if filename.startswith(abspath(path)):
return 1
return False
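# Illustrative sketch (results depend on the local installation; note that
# the function returns ints or booleans depending on the branch taken):
#
#   >>> is_standard_module('sys')
#   1
#   >>> is_standard_module('logilab.common')
#   0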
def is_relative(modname, from_file):
"""return true if the given module name is relative to the given
file name
:type modname: str
:param modname: name of the module we are interested in
:type from_file: str
:param from_file:
path of the module from which modname has been imported
:rtype: bool
:return:
true if the module has been imported relatively to `from_file`
"""
if not isdir(from_file):
from_file = dirname(from_file)
if from_file in sys.path:
return False
try:
find_module(modname.split('.')[0], [from_file])
return True
except ImportError:
return False
# internal only functions #####################################################
def _file_from_modpath(modpath, path=None, context=None):
"""given a mod path (i.e. splitted module / package name), return the
corresponding file
this function is used internally, see `file_from_modpath`'s
documentation for more information
"""
assert len(modpath) > 0
if context is not None:
try:
mtype, mp_filename = _module_file(modpath, [context])
except ImportError:
mtype, mp_filename = _module_file(modpath, path)
else:
mtype, mp_filename = _module_file(modpath, path)
if mtype == PY_COMPILED:
try:
return get_source_file(mp_filename)
except NoSourceFile:
return mp_filename
elif mtype == C_BUILTIN:
# integrated builtin module
return None
elif mtype == PKG_DIRECTORY:
mp_filename = _has_init(mp_filename)
return mp_filename
def _search_zip(modpath, pic):
for filepath, importer in pic.items():
if importer is not None:
if importer.find_module(modpath[0]):
if not importer.find_module('/'.join(modpath)):
raise ImportError('No module named %s in %s/%s' % (
'.'.join(modpath[1:]), filepath, modpath))
return ZIPFILE, abspath(filepath) + '/' + '/'.join(modpath), filepath
raise ImportError('No module named %s' % '.'.join(modpath))
try:
import pkg_resources
except ImportError:
pkg_resources = None
def _module_file(modpath, path=None):
"""get a module type / file path
:type modpath: list or tuple
:param modpath:
splitted module's name (i.e name of a module or package splitted
on '.'), with leading empty strings for explicit relative import
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:rtype: tuple(int, str)
:return: the module type flag and the file path for a module
"""
# egg support compat
try:
pic = sys.path_importer_cache
_path = (path is None and sys.path or path)
for __path in _path:
if not __path in pic:
try:
pic[__path] = zipimport.zipimporter(__path)
except zipimport.ZipImportError:
pic[__path] = None
checkeggs = True
except AttributeError:
checkeggs = False
# pkg_resources support (aka setuptools namespace packages)
if pkg_resources is not None and modpath[0] in pkg_resources._namespace_packages and len(modpath) > 1:
# setuptools has added into sys.modules a module object with proper
# __path__, get back information from there
module = sys.modules[modpath.pop(0)]
path = module.__path__
imported = []
while modpath:
modname = modpath[0]
        # beware of changes in find_module implementation wrt builtin modules
#
# Python 2.6.6 (r266:84292, Sep 11 2012, 08:34:23)
# >>> imp.find_module('posix')
# (None, 'posix', ('', '', 6))
#
# Python 3.3.1 (default, Apr 26 2013, 12:08:46)
# >>> imp.find_module('posix')
# (None, None, ('', '', 6))
try:
_, mp_filename, mp_desc = find_module(modname, path)
except ImportError:
if checkeggs:
return _search_zip(modpath, pic)[:2]
raise
else:
if checkeggs and mp_filename:
fullabspath = [abspath(x) for x in _path]
try:
pathindex = fullabspath.index(dirname(abspath(mp_filename)))
emtype, emp_filename, zippath = _search_zip(modpath, pic)
if pathindex > _path.index(zippath):
# an egg takes priority
return emtype, emp_filename
except ValueError:
# XXX not in _path
pass
except ImportError:
pass
checkeggs = False
imported.append(modpath.pop(0))
mtype = mp_desc[2]
if modpath:
if mtype != PKG_DIRECTORY:
raise ImportError('No module %s in %s' % ('.'.join(modpath),
'.'.join(imported)))
# XXX guess if package is using pkgutil.extend_path by looking for
# those keywords in the first four Kbytes
try:
with open(join(mp_filename, '__init__.py')) as stream:
data = stream.read(4096)
except IOError:
path = [mp_filename]
else:
if 'pkgutil' in data and 'extend_path' in data:
# extend_path is called, search sys.path for module/packages
# of this name see pkgutil.extend_path documentation
path = [join(p, *imported) for p in sys.path
if isdir(join(p, *imported))]
else:
path = [mp_filename]
return mtype, mp_filename
def _is_python_file(filename):
"""return true if the given filename should be considered as a python file
.pyc and .pyo are ignored
"""
for ext in ('.py', '.so', '.pyd', '.pyw'):
if filename.endswith(ext):
return True
return False
def _has_init(directory):
"""if the given directory has a valid __init__ file, return its path,
else return None
"""
mod_or_pack = join(directory, '__init__')
for ext in PY_SOURCE_EXTS + ('pyc', 'pyo'):
if exists(mod_or_pack + '.' + ext):
return mod_or_pack + '.' + ext
return None
logilab-common-0.61.0/PKG-INFO
Metadata-Version: 1.0
Name: logilab-common
Version: 0.61.0
Summary: collection of low-level Python packages and modules used by Logilab projects
Home-page: http://www.logilab.org/project/logilab-common
Author: Logilab
Author-email: contact@logilab.fr
License: LGPL
Description: Logilab's common library
========================
What's this ?
-------------
This package contains some modules used by different Logilab projects.
It is released under the GNU Lesser General Public License.
There is no documentation available yet but the source code should be clean and
well documented.
Designed to ease:
* handling command line options and configuration files
* writing interactive command line tools
* manipulation of files and character strings
* manipulation of common structures such as graph, tree, and pattern such as visitor
* generating text and HTML reports
* accessing some external libraries such as OmniORB_, Pyro_...
* more...
Installation
------------
Extract the tarball, jump into the created directory and run ::
python setup.py install
For installation options, see ::
python setup.py install --help
Provided modules
----------------
Here is a brief description of the available modules.
Modules providing high-level features
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* `cache`, a cache implementation with a least recently used algorithm.
* `changelog`, a tiny library to manipulate our simplified ChangeLog file format.
* `clcommands`, high-level classes to define command line programs handling
different subcommands. It is based on `configuration` to get easy command line
/ configuration file handling.
* `cli`, a base class for interactive programs using the command line.
* `configuration`, some classes to handle unified configuration from both
command line (using optparse) and configuration file (using ConfigParser).
* `dbf`, read Visual Fox Pro DBF files.
* `proc`, interface to Linux /proc.
* `umessage`, unicode email support.
* `ureports`, micro-reports, a way to create simple reports using python objects
without care of the final formatting. ReST and html formatters are provided.
Modules providing low-level functions and structures
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* `compat`, provides a transparent compatibility layer between different python
versions.
* `date`, a set of date manipulation functions.
* `daemon`, a daemon function and mix-in class to properly start an Unix daemon
process.
* `decorators`, function decorators such as cached, timed...
* `deprecation`, decorator, metaclass & all to mark functions / classes as
deprecated or moved
* `fileutils`, some file / file path manipulation utilities.
* `graph`, graph manipulation functions such as cycle detection, bases for dot
file generation.
* `modutils`, python module manipulation functions.
* `shellutils`, some powerful shell like functions to replace shell scripts with
python scripts.
* `tasksqueue`, a prioritized tasks queue implementation.
* `textutils`, some text manipulation functions (ansi colorization, line wrapping,
rest support...).
* `tree`, base class to represent tree structure, and some others to make it
works with the visitor implementation (see below).
* `visitor`, a generic visitor pattern implementation.
Modules extending some standard modules
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* `debugger`, `pdb` customization.
* `logging_ext`, extensions to `logging` module such as a colorized formatter
and an easier initialization function.
* `optik_ext`, defines some new option types (regexp, csv, color, date, etc.)
for `optik` / `optparse`
* `xmlrpcutils`, auth support for XML-RPC
Modules extending some external modules
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* `corbautils`, useful functions for use with the OmniORB_ CORBA library.
* `hg`, some Mercurial_ utility functions.
* `pyro_ext`, some Pyro_ utility functions.
* `sphinx_ext`, Sphinx_ plugin defining a `autodocstring` directive.
* `vcgutils`, utility functions to generate files readable with Georg Sander's
vcg tool (Visualization of Compiler Graphs).
To be deprecated modules
~~~~~~~~~~~~~~~~~~~~~~~~
Those `logilab.common` modules will much probably be deprecated in future
versions:
* `testlib`: use `unittest2`_ instead
* `pytest`: use `discover`_ instead
* `interface`: use `zope.interface`_ if you really want this
* `table`, `xmlutils`: is that used?
* `sphinxutils`: we won't go that way imo (i == syt)
Deprecated modules
~~~~~~~~~~~~~~~~~~
Those `logilab.common` modules are only there for backward compatibility. They
can go away at anytime.
* `optparser`: use `clcommands` instead
* `adbh`, `db`, `sqlgen`: see `logilab.database`_ instead
* `contexts`: content move to `shellutils`
* `html`: deprecated without replacement
Comments, support, bug reports
------------------------------
Project page http://www.logilab.org/project/logilab-common
Use the python-projects@lists.logilab.org mailing list. Since we do not have
a publicly available bug tracker yet, bug reports should be emailed
there too.
You can subscribe to this mailing list at
http://lists.logilab.org/mailman/listinfo/python-projects
Archives are available at
http://lists.logilab.org/pipermail/python-projects/
.. _Pyro: http://pyro.sourceforge.net/
.. _OmniORB: http://omniorb.sourceforge.net/
.. _Mercurial: http://mercurial.selenic.com
.. _Sphinx: http://sphinx.pocoo.org/
.. _`logilab.database`: http://www.logilab.org/project/logilab-database/
.. _`unittest2`: http://pypi.python.org/pypi/unittest2
.. _`discover`: http://pypi.python.org/pypi/discover
.. _`zope.interface`: http://pypi.python.org/pypi/zope.interface
Platform: UNKNOWN
Classifier: Topic :: Utilities
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 3
logilab-common-0.61.0/cache.py
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Cache module, with a least recently used algorithm for the management of the
deletion of entries.
"""
__docformat__ = "restructuredtext en"
from threading import Lock
from logilab.common.decorators import locked
_marker = object()
class Cache(dict):
"""A dictionary like cache.
inv:
len(self._usage) <= self.size
len(self.data) <= self.size
"""
def __init__(self, size=100):
""" Warning : Cache.__init__() != dict.__init__().
        Constructor does not take any arguments besides size.
"""
assert size >= 0, 'cache size must be >= 0 (0 meaning no caching)'
self.size = size
self._usage = []
self._lock = Lock()
super(Cache, self).__init__()
def _acquire(self):
self._lock.acquire()
def _release(self):
self._lock.release()
def _update_usage(self, key):
if not self._usage:
self._usage.append(key)
elif self._usage[-1] != key:
try:
self._usage.remove(key)
except ValueError:
# we are inserting a new key
# check the size of the dictionary
# and remove the oldest item in the cache
if self.size and len(self._usage) >= self.size:
super(Cache, self).__delitem__(self._usage[0])
del self._usage[0]
self._usage.append(key)
else:
pass # key is already the most recently used key
def __getitem__(self, key):
value = super(Cache, self).__getitem__(key)
self._update_usage(key)
return value
__getitem__ = locked(_acquire, _release)(__getitem__)
def __setitem__(self, key, item):
# Just make sure that size > 0 before inserting a new item in the cache
if self.size > 0:
super(Cache, self).__setitem__(key, item)
self._update_usage(key)
__setitem__ = locked(_acquire, _release)(__setitem__)
def __delitem__(self, key):
super(Cache, self).__delitem__(key)
self._usage.remove(key)
__delitem__ = locked(_acquire, _release)(__delitem__)
def clear(self):
super(Cache, self).clear()
self._usage = []
clear = locked(_acquire, _release)(clear)
def pop(self, key, default=_marker):
if key in self:
self._usage.remove(key)
#if default is _marker:
# return super(Cache, self).pop(key)
return super(Cache, self).pop(key, default)
pop = locked(_acquire, _release)(pop)
def popitem(self):
raise NotImplementedError()
def setdefault(self, key, default=None):
raise NotImplementedError()
def update(self, other):
raise NotImplementedError()
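# Usage sketch: a bounded dict-like cache dropping the least recently used
# key once `size` entries are reached (the values below are illustrative).
#
#   >>> cache = Cache(size=2)
#   >>> cache['a'] = 1
#   >>> cache['b'] = 2
#   >>> cache['a']          # touch 'a' so that 'b' becomes the oldest entry
#   1
#   >>> cache['c'] = 3      # evicts 'b'
#   >>> sorted(cache.keys())
#   ['a', 'c']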
logilab-common-0.61.0/changelog.py
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Manipulation of upstream change log files.
The upstream change log file format handled here is simpler than the formats
often used, such as those generated by the default Emacs changelog mode.
Sample ChangeLog format::
Change log for project Yoo
==========================
--
* add a new functionality
2002-02-01 -- 0.1.1
* fix bug #435454
* fix bug #434356
2002-01-01 -- 0.1
* initial release
There are 3 entries in this change log, one for each released version and one
for the next version (i.e. the current entry).
Each entry contains a set of messages corresponding to changes done in this
release.
All the non empty lines before the first entry are considered as the change
log title.
"""
__docformat__ = "restructuredtext en"
import sys
from stat import S_IWRITE
BULLET = '*'
SUBBULLET = '-'
INDENT = ' ' * 4
class NoEntry(Exception):
"""raised when we are unable to find an entry"""
class EntryNotFound(Exception):
"""raised when we are unable to find a given entry"""
class Version(tuple):
"""simple class to handle soft version number has a tuple while
correctly printing it as X.Y.Z
"""
def __new__(cls, versionstr):
if isinstance(versionstr, basestring):
versionstr = versionstr.strip(' :') # XXX (syt) duh?
parsed = cls.parse(versionstr)
else:
parsed = versionstr
return tuple.__new__(cls, parsed)
@classmethod
def parse(cls, versionstr):
versionstr = versionstr.strip(' :')
try:
return [int(i) for i in versionstr.split('.')]
except ValueError, ex:
raise ValueError("invalid literal for version '%s' (%s)"%(versionstr, ex))
def __str__(self):
return '.'.join([str(i) for i in self])
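# Illustrative sketch: Version parses a dotted version string into an int
# tuple, so comparisons are numeric, while str() gives back the dotted form.
#
#   >>> Version('1.10') > Version('1.9')
#   True
#   >>> str(Version('1.2.3'))
#   '1.2.3'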
# upstream change log #########################################################
class ChangeLogEntry(object):
"""a change log entry, i.e. a set of messages associated to a version and
its release date
"""
version_class = Version
def __init__(self, date=None, version=None, **kwargs):
self.__dict__.update(kwargs)
if version:
self.version = self.version_class(version)
else:
self.version = None
self.date = date
self.messages = []
def add_message(self, msg):
"""add a new message"""
self.messages.append(([msg], []))
def complete_latest_message(self, msg_suite):
"""complete the latest added message
"""
if not self.messages:
            raise ValueError('unable to complete last message as there is no previous message')
if self.messages[-1][1]: # sub messages
self.messages[-1][1][-1].append(msg_suite)
else: # message
self.messages[-1][0].append(msg_suite)
def add_sub_message(self, sub_msg, key=None):
if not self.messages:
            raise ValueError('unable to add a sub message as there is no previous message')
if key is None:
self.messages[-1][1].append([sub_msg])
else:
raise NotImplementedError("sub message to specific key are not implemented yet")
def write(self, stream=sys.stdout):
"""write the entry to file """
stream.write('%s -- %s\n' % (self.date or '', self.version or ''))
for msg, sub_msgs in self.messages:
stream.write('%s%s %s\n' % (INDENT, BULLET, msg[0]))
stream.write(''.join(msg[1:]))
if sub_msgs:
stream.write('\n')
for sub_msg in sub_msgs:
stream.write('%s%s %s\n' % (INDENT * 2, SUBBULLET, sub_msg[0]))
stream.write(''.join(sub_msg[1:]))
stream.write('\n')
stream.write('\n\n')
class ChangeLog(object):
"""object representation of a whole ChangeLog file"""
entry_class = ChangeLogEntry
def __init__(self, changelog_file, title=''):
self.file = changelog_file
self.title = title
self.additional_content = ''
self.entries = []
self.load()
def __repr__(self):
        return '<ChangeLog %s at %s (%s entries)>' % (self.file, id(self),
                                                      len(self.entries))
def add_entry(self, entry):
"""add a new entry to the change log"""
self.entries.append(entry)
def get_entry(self, version='', create=None):
""" return a given changelog entry
if version is omitted, return the current entry
"""
if not self.entries:
if version or not create:
raise NoEntry()
self.entries.append(self.entry_class())
if not version:
if self.entries[0].version and create is not None:
self.entries.insert(0, self.entry_class())
return self.entries[0]
version = self.version_class(version)
for entry in self.entries:
if entry.version == version:
return entry
raise EntryNotFound()
def add(self, msg, create=None):
"""add a new message to the latest opened entry"""
entry = self.get_entry(create=create)
entry.add_message(msg)
def load(self):
""" read a logilab's ChangeLog from file """
try:
stream = open(self.file)
except IOError:
return
last = None
expect_sub = False
for line in stream.readlines():
sline = line.strip()
words = sline.split()
# if new entry
if len(words) == 1 and words[0] == '--':
expect_sub = False
last = self.entry_class()
self.add_entry(last)
# if old entry
elif len(words) == 3 and words[1] == '--':
expect_sub = False
last = self.entry_class(words[0], words[2])
self.add_entry(last)
# if title
elif sline and last is None:
self.title = '%s%s' % (self.title, line)
# if new entry
elif sline and sline[0] == BULLET:
expect_sub = False
last.add_message(sline[1:].strip())
# if new sub_entry
elif expect_sub and sline and sline[0] == SUBBULLET:
last.add_sub_message(sline[1:].strip())
# if new line for current entry
elif sline and last.messages:
last.complete_latest_message(line)
else:
expect_sub = True
self.additional_content += line
stream.close()
def format_title(self):
return '%s\n\n' % self.title.strip()
def save(self):
"""write back change log"""
        # fileutils isn't importable in appengine, so import locally
from logilab.common.fileutils import ensure_fs_mode
ensure_fs_mode(self.file, S_IWRITE)
self.write(open(self.file, 'w'))
def write(self, stream=sys.stdout):
"""write changelog to stream"""
stream.write(self.format_title())
for entry in self.entries:
entry.write(stream)
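# Minimal usage sketch (illustrative only; the helper and file name below are
# not part of the module API): build the current entry in memory and dump the
# change log. If the file does not exist yet, load() silently starts empty.
def _changelog_example(path='ChangeLog'):
    """add a message plus a sub message to the current entry and print it"""
    log = ChangeLog(path, title='Change log for project Yoo')
    log.add('fix bug #435454', create=True)  # creates the current ("--") entry
    log.get_entry(create=True).add_sub_message('also covers #434356')
    log.write(sys.stdout)  # use log.save() to write back to an existing file
    return log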
logilab-common-0.61.0/logging_ext.py
# -*- coding: utf-8 -*-
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Extends the logging module from the standard library."""
__docformat__ = "restructuredtext en"
import os
import sys
import logging
from logilab.common.textutils import colorize_ansi
def set_log_methods(cls, logger):
"""bind standard logger's methods as methods on the class"""
cls.__logger = logger
for attr in ('debug', 'info', 'warning', 'error', 'critical', 'exception'):
setattr(cls, attr, getattr(logger, attr))
def xxx_cyan(record):
if 'XXX' in record.message:
return 'cyan'
class ColorFormatter(logging.Formatter):
"""
A color Formatter for the logging standard module.
    By default, colorizes CRITICAL and ERROR in red, WARNING in magenta,
    INFO in green and DEBUG in yellow.
self.colors is customizable via the 'color' constructor argument (dictionary).
self.colorfilters is a list of functions that get the LogRecord
and return a color name or None.
"""
def __init__(self, fmt=None, datefmt=None, colors=None):
logging.Formatter.__init__(self, fmt, datefmt)
self.colorfilters = []
self.colors = {'CRITICAL': 'red',
'ERROR': 'red',
'WARNING': 'magenta',
'INFO': 'green',
'DEBUG': 'yellow',
}
if colors is not None:
assert isinstance(colors, dict)
self.colors.update(colors)
def format(self, record):
msg = logging.Formatter.format(self, record)
if record.levelname in self.colors:
color = self.colors[record.levelname]
return colorize_ansi(msg, color)
else:
for cf in self.colorfilters:
color = cf(record)
if color:
return colorize_ansi(msg, color)
return msg
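# Minimal usage sketch (illustrative only; the helper and the filter rule are
# not part of the module API): wire a ColorFormatter onto a stream handler
# with a custom color map and an extra color filter.
def _install_color_formatter():
    """attach a ColorFormatter to a fresh handler on the root logger"""
    handler = logging.StreamHandler()
    fmt = ColorFormatter('%(levelname)s %(message)s',
                         colors={'WARNING': 'yellow'})
    def _deprecation_filter(record):
        # colorize any record mentioning deprecation in cyan
        if 'deprecat' in record.getMessage():
            return 'cyan'
    fmt.colorfilters.append(_deprecation_filter)
    handler.setFormatter(fmt)
    logging.getLogger().addHandler(handler)
    return handler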
def set_color_formatter(logger=None, **kw):
"""
    Install a color formatter on 'logger'. If not given, it defaults to the
    root logger.
Any additional keyword will be passed as-is to the ColorFormatter
constructor.
"""
if logger is None:
logger = logging.getLogger()
if not logger.handlers:
logging.basicConfig()
format_msg = logger.handlers[0].formatter._fmt
fmt = ColorFormatter(format_msg, **kw)
fmt.colorfilters.append(xxx_cyan)
logger.handlers[0].setFormatter(fmt)
LOG_FORMAT = '%(asctime)s - (%(name)s) %(levelname)s: %(message)s'
LOG_DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
def get_handler(debug=False, syslog=False, logfile=None, rotation_parameters=None):
"""get an apropriate handler according to given parameters"""
if os.environ.get('APYCOT_ROOT'):
handler = logging.StreamHandler(sys.stdout)
if debug:
handler = logging.StreamHandler()
elif logfile is None:
if syslog:
from logging import handlers
handler = handlers.SysLogHandler()
else:
handler = logging.StreamHandler()
else:
try:
if rotation_parameters is None:
if os.name == 'posix' and sys.version_info >= (2, 6):
from logging.handlers import WatchedFileHandler
handler = WatchedFileHandler(logfile)
else:
handler = logging.FileHandler(logfile)
else:
from logging.handlers import TimedRotatingFileHandler
handler = TimedRotatingFileHandler(
logfile, **rotation_parameters)
except IOError:
handler = logging.StreamHandler()
return handler
def get_threshold(debug=False, logthreshold=None):
if logthreshold is None:
if debug:
logthreshold = logging.DEBUG
else:
logthreshold = logging.ERROR
elif isinstance(logthreshold, basestring):
logthreshold = getattr(logging, THRESHOLD_MAP.get(logthreshold,
logthreshold))
return logthreshold
def _colorable_terminal():
isatty = hasattr(sys.__stdout__, 'isatty') and sys.__stdout__.isatty()
if not isatty:
return False
if os.name == 'nt':
try:
from colorama import init as init_win32_colors
except ImportError:
return False
init_win32_colors()
return True
def get_formatter(logformat=LOG_FORMAT, logdateformat=LOG_DATE_FORMAT):
if _colorable_terminal():
fmt = ColorFormatter(logformat, logdateformat)
def col_fact(record):
if 'XXX' in record.message:
return 'cyan'
if 'kick' in record.message:
return 'red'
fmt.colorfilters.append(col_fact)
else:
fmt = logging.Formatter(logformat, logdateformat)
return fmt
def init_log(debug=False, syslog=False, logthreshold=None, logfile=None,
logformat=LOG_FORMAT, logdateformat=LOG_DATE_FORMAT, fmt=None,
rotation_parameters=None, handler=None):
"""init the log service"""
logger = logging.getLogger()
if handler is None:
handler = get_handler(debug, syslog, logfile, rotation_parameters)
    # the logging module only provides addHandler and removeHandler, not a
    # setHandler method, so replace the handlers list directly
logger.handlers = [handler]
logthreshold = get_threshold(debug, logthreshold)
logger.setLevel(logthreshold)
if fmt is None:
if debug:
fmt = get_formatter(logformat=logformat, logdateformat=logdateformat)
else:
fmt = logging.Formatter(logformat, logdateformat)
handler.setFormatter(fmt)
return handler
# map logilab.common.logger thresholds to logging thresholds
THRESHOLD_MAP = {'LOG_DEBUG': 'DEBUG',
'LOG_INFO': 'INFO',
'LOG_NOTICE': 'INFO',
'LOG_WARN': 'WARNING',
'LOG_WARNING': 'WARNING',
'LOG_ERR': 'ERROR',
'LOG_ERROR': 'ERROR',
'LOG_CRIT': 'CRITICAL',
}
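# Minimal usage sketch (illustrative only; the helper, path and rotation
# parameters below are not part of the module API): a typical init_log call
# setting up a daily-rotated log file.
def _setup_logging(debug=False):
    """configure the root logger with a rotated file handler"""
    return init_log(debug=debug,
                    logthreshold='LOG_INFO',  # mapped through THRESHOLD_MAP
                    logfile='/tmp/myapp.log',
                    rotation_parameters={'when': 'midnight', 'backupCount': 7})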
logilab-common-0.61.0/date.py
# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Date manipulation helper functions."""
from __future__ import division
__docformat__ = "restructuredtext en"
import math
import re
import sys
from locale import getlocale, LC_TIME
from datetime import date, time, datetime, timedelta
from time import strptime as time_strptime
from calendar import monthrange, timegm
try:
from mx.DateTime import RelativeDateTime, Date, DateTimeType
except ImportError:
endOfMonth = None
DateTimeType = datetime
else:
endOfMonth = RelativeDateTime(months=1, day=-1)
# NOTE: should we implement a compatibility layer between date representations
# as we have in lgc.db ?
FRENCH_FIXED_HOLIDAYS = {
'jour_an': '%s-01-01',
'fete_travail': '%s-05-01',
'armistice1945': '%s-05-08',
'fete_nat': '%s-07-14',
'assomption': '%s-08-15',
'toussaint': '%s-11-01',
'armistice1918': '%s-11-11',
'noel': '%s-12-25',
}
FRENCH_MOBILE_HOLIDAYS = {
'paques2004': '2004-04-12',
'ascension2004': '2004-05-20',
'pentecote2004': '2004-05-31',
'paques2005': '2005-03-28',
'ascension2005': '2005-05-05',
'pentecote2005': '2005-05-16',
'paques2006': '2006-04-17',
'ascension2006': '2006-05-25',
'pentecote2006': '2006-06-05',
'paques2007': '2007-04-09',
'ascension2007': '2007-05-17',
'pentecote2007': '2007-05-28',
'paques2008': '2008-03-24',
'ascension2008': '2008-05-01',
'pentecote2008': '2008-05-12',
'paques2009': '2009-04-13',
'ascension2009': '2009-05-21',
'pentecote2009': '2009-06-01',
'paques2010': '2010-04-05',
'ascension2010': '2010-05-13',
'pentecote2010': '2010-05-24',
'paques2011': '2011-04-25',
'ascension2011': '2011-06-02',
'pentecote2011': '2011-06-13',
'paques2012': '2012-04-09',
'ascension2012': '2012-05-17',
'pentecote2012': '2012-05-28',
}
# XXX this implementation cries for multimethod dispatching
def get_step(dateobj, nbdays=1):
# assume date is either a python datetime or a mx.DateTime object
if isinstance(dateobj, date):
return ONEDAY * nbdays
return nbdays # mx.DateTime is ok with integers
def datefactory(year, month, day, sampledate):
# assume date is either a python datetime or a mx.DateTime object
if isinstance(sampledate, datetime):
return datetime(year, month, day)
if isinstance(sampledate, date):
return date(year, month, day)
return Date(year, month, day)
def weekday(dateobj):
# assume date is either a python datetime or a mx.DateTime object
if isinstance(dateobj, date):
return dateobj.weekday()
return dateobj.day_of_week
def str2date(datestr, sampledate):
# NOTE: datetime.strptime is not an option until we drop py2.4 compat
year, month, day = [int(chunk) for chunk in datestr.split('-')]
return datefactory(year, month, day, sampledate)
def days_between(start, end):
if isinstance(start, date):
delta = end - start
# datetime.timedelta.days is always an integer (floored)
if delta.seconds:
return delta.days + 1
return delta.days
else:
return int(math.ceil((end - start).days))
def get_national_holidays(begin, end):
"""return french national days off between begin and end"""
begin = datefactory(begin.year, begin.month, begin.day, begin)
end = datefactory(end.year, end.month, end.day, end)
holidays = [str2date(datestr, begin)
for datestr in FRENCH_MOBILE_HOLIDAYS.values()]
for year in xrange(begin.year, end.year+1):
for datestr in FRENCH_FIXED_HOLIDAYS.values():
date = str2date(datestr % year, begin)
if date not in holidays:
holidays.append(date)
return [day for day in holidays if begin <= day < end]
def add_days_worked(start, days):
"""adds date but try to only take days worked into account"""
step = get_step(start)
weeks, plus = divmod(days, 5)
end = start + ((weeks * 7) + plus) * step
if weekday(end) >= 5: # saturday or sunday
end += (2 * step)
end += len([x for x in get_national_holidays(start, end + step)
if weekday(x) < 5]) * step
if weekday(end) >= 5: # saturday or sunday
end += (2 * step)
return end
def nb_open_days(start, end):
assert start <= end
step = get_step(start)
days = days_between(start, end)
weeks, plus = divmod(days, 7)
if weekday(start) > weekday(end):
plus -= 2
elif weekday(end) == 6:
plus -= 1
open_days = weeks * 5 + plus
nb_week_holidays = len([x for x in get_national_holidays(start, end+step)
if weekday(x) < 5 and x < end])
open_days -= nb_week_holidays
if open_days < 0:
return 0
return open_days
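# Minimal usage sketch (illustrative only; the helper below is not part of the
# module API): the two functions above count worked (open) days, skipping
# week-ends and the French national holidays defined earlier. The dates below
# fall in a holiday-free week of 2014.
def _open_days_example():
    """check the week-end handling of add_days_worked and nb_open_days"""
    monday = date(2014, 2, 3)
    # two worked days after a Monday land on the following Wednesday
    assert add_days_worked(monday, 2) == date(2014, 2, 5)
    # a Monday-to-Monday range (end excluded) contains five open days
    assert nb_open_days(monday, date(2014, 2, 10)) == 5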
def date_range(begin, end, incday=None, incmonth=None):
"""yields each date between begin and end
:param begin: the start date
:param end: the end date
:param incr: the step to use to iterate over dates. Default is
one day.
:param include: None (means no exclusion) or a function taking a
date as parameter, and returning True if the date
should be included.
When using mx datetime, you should *NOT* use incmonth argument, use instead
oneDay, oneHour, oneMinute, oneSecond, oneWeek or endOfMonth (to enumerate
months) as `incday` argument
"""
assert not (incday and incmonth)
begin = todate(begin)
end = todate(end)
if incmonth:
while begin < end:
yield begin
begin = next_month(begin, incmonth)
else:
incr = get_step(begin, incday or 1)
while begin < end:
yield begin
begin += incr
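# Minimal usage sketch (illustrative only; the helper below is not part of the
# module API): date_range is end-exclusive, and with incmonth it jumps to the
# first day of each following month.
def _date_range_example():
    """show the day-wise and month-wise iteration of date_range"""
    days = list(date_range(date(2014, 1, 1), date(2014, 1, 4)))
    assert days == [date(2014, 1, 1), date(2014, 1, 2), date(2014, 1, 3)]
    months = list(date_range(date(2014, 1, 15), date(2014, 4, 1), incmonth=1))
    assert months == [date(2014, 1, 15), date(2014, 2, 1), date(2014, 3, 1)]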
# makes py datetime usable #####################################################
ONEDAY = timedelta(days=1)
ONEWEEK = timedelta(days=7)
try:
strptime = datetime.strptime
except AttributeError: # py < 2.5
from time import strptime as time_strptime
def strptime(value, format):
return datetime(*time_strptime(value, format)[:6])
def strptime_time(value, format='%H:%M'):
return time(*time_strptime(value, format)[3:6])
def todate(somedate):
"""return a date from a date (leaving unchanged) or a datetime"""
if isinstance(somedate, datetime):
return date(somedate.year, somedate.month, somedate.day)
assert isinstance(somedate, (date, DateTimeType)), repr(somedate)
return somedate
def totime(somedate):
"""return a time from a time (leaving unchanged), date or datetime"""
# XXX mx compat
if not isinstance(somedate, time):
return time(somedate.hour, somedate.minute, somedate.second)
assert isinstance(somedate, (time)), repr(somedate)
return somedate
def todatetime(somedate):
"""return a date from a date (leaving unchanged) or a datetime"""
# take care, datetime is a subclass of date
if isinstance(somedate, datetime):
return somedate
assert isinstance(somedate, (date, DateTimeType)), repr(somedate)
return datetime(somedate.year, somedate.month, somedate.day)
def datetime2ticks(somedate):
return timegm(somedate.timetuple()) * 1000
def ticks2datetime(ticks):
miliseconds, microseconds = divmod(ticks, 1000)
try:
return datetime.fromtimestamp(miliseconds)
except (ValueError, OverflowError):
epoch = datetime.fromtimestamp(0)
nb_days, seconds = divmod(int(miliseconds), 86400)
delta = timedelta(nb_days, seconds=seconds, microseconds=microseconds)
try:
return epoch + delta
except (ValueError, OverflowError):
raise
def days_in_month(somedate):
return monthrange(somedate.year, somedate.month)[1]
def days_in_year(somedate):
feb = date(somedate.year, 2, 1)
if days_in_month(feb) == 29:
return 366
else:
return 365
def previous_month(somedate, nbmonth=1):
while nbmonth:
somedate = first_day(somedate) - ONEDAY
nbmonth -= 1
return somedate
def next_month(somedate, nbmonth=1):
while nbmonth:
somedate = last_day(somedate) + ONEDAY
nbmonth -= 1
return somedate
def first_day(somedate):
return date(somedate.year, somedate.month, 1)
def last_day(somedate):
return date(somedate.year, somedate.month, days_in_month(somedate))
def ustrftime(somedate, fmt='%Y-%m-%d'):
"""like strftime, but returns a unicode string instead of an encoded
string which may be problematic with localized date.
"""
if sys.version_info >= (3, 3):
# datetime.date.strftime() supports dates since year 1 in Python >=3.3.
return somedate.strftime(fmt)
else:
try:
if sys.version_info < (3, 0):
encoding = getlocale(LC_TIME)[1] or 'ascii'
return unicode(somedate.strftime(str(fmt)), encoding)
else:
return somedate.strftime(fmt)
except ValueError, exc:
if somedate.year >= 1900:
raise
# datetime is not happy with dates before 1900
# we try to work around this, assuming a simple
# format string
fields = {'Y': somedate.year,
'm': somedate.month,
'd': somedate.day,
}
if isinstance(somedate, datetime):
fields.update({'H': somedate.hour,
'M': somedate.minute,
'S': somedate.second})
fmt = re.sub('%([YmdHMS])', r'%(\1)02d', fmt)
return unicode(fmt) % fields
def utcdatetime(dt):
if dt.tzinfo is None:
return dt
return datetime(*dt.utctimetuple()[:7])
def utctime(dt):
if dt.tzinfo is None:
return dt
return (dt + dt.utcoffset() + dt.dst()).replace(tzinfo=None)
def datetime_to_seconds(date):
"""return the number of seconds since the begining of the day for that date
"""
return date.second+60*date.minute + 3600*date.hour
def timedelta_to_days(delta):
"""return the time delta as a number of seconds"""
return delta.days + delta.seconds / (3600*24)
def timedelta_to_seconds(delta):
"""return the time delta as a fraction of days"""
return delta.days*(3600*24) + delta.seconds
logilab-common-0.61.0/visitor.py
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""A generic visitor abstract implementation.
"""
__docformat__ = "restructuredtext en"
def no_filter(_):
return 1
# Iterators ###################################################################
class FilteredIterator(object):
def __init__(self, node, list_func, filter_func=None):
self._next = [(node, 0)]
if filter_func is None:
filter_func = no_filter
self._list = list_func(node, filter_func)
def next(self):
try:
return self._list.pop(0)
        except IndexError:
return None
# Base Visitor ################################################################
class Visitor(object):
def __init__(self, iterator_class, filter_func=None):
self._iter_class = iterator_class
self.filter = filter_func
def visit(self, node, *args, **kargs):
"""
        launch the visit on a given node.
        'open_visit' is called before the beginning of the visit, with the
        extra arguments given; when all nodes have been visited, the
        'close_visit' method is called and its result is returned
"""
self.open_visit(node, *args, **kargs)
return self.close_visit(self._visit(node))
def _visit(self, node):
iterator = self._get_iterator(node)
n = iterator.next()
while n:
result = n.accept(self)
n = iterator.next()
return result
def _get_iterator(self, node):
return self._iter_class(node, self.filter)
def open_visit(self, *args, **kargs):
"""
method called at the beginning of the visit
"""
pass
def close_visit(self, result):
"""
method called at the end of the visit
"""
return result
# standard visited mixin ######################################################
class VisitedMixIn(object):
"""
    Visited interface allowing node visitors to use the node
"""
def get_visit_name(self):
"""
return the visit name for the mixed class. When calling 'accept', the
method <'visit_' + name returned by this method> will be called on the
visitor
"""
try:
return self.TYPE.replace('-', '_')
        except AttributeError:
return self.__class__.__name__.lower()
def accept(self, visitor, *args, **kwargs):
func = getattr(visitor, 'visit_%s' % self.get_visit_name())
return func(self, *args, **kwargs)
def leave(self, visitor, *args, **kwargs):
func = getattr(visitor, 'leave_%s' % self.get_visit_name())
return func(self, *args, **kwargs)
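# Minimal usage sketch (illustrative only; _ExampleNode, _depth_first and
# _CountingVisitor are not part of the module API): a concrete visitor built
# on the classes above, counting the nodes of a small tree.
class _ExampleNode(VisitedMixIn):
    """a node whose TYPE maps to the visit_example_node method below"""
    TYPE = 'example-node'
    def __init__(self, children=()):
        self.children = list(children)
def _depth_first(node, filter_func):
    """flatten a node tree into the list consumed by FilteredIterator"""
    nodes = []
    if filter_func(node):
        nodes.append(node)
    for child in node.children:
        nodes.extend(_depth_first(child, filter_func))
    return nodes
class _CountingVisitor(Visitor):
    """count the visited nodes; visit() returns the total"""
    def __init__(self, filter_func=None):
        Visitor.__init__(self, FilteredIterator, filter_func)
        self.count = 0
    def _get_iterator(self, node):
        return FilteredIterator(node, _depth_first, self.filter)
    def visit_example_node(self, node):
        self.count += 1
        return self.count
# e.g. _CountingVisitor().visit(_ExampleNode([_ExampleNode()])) returns 2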
logilab-common-0.61.0/optparser.py
# -*- coding: utf-8 -*-
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Extend OptionParser with commands.
Example:
>>> parser = OptionParser()
>>> parser.usage = '%prog COMMAND [options] ...'
>>> parser.add_command('build', 'mymod.build')
>>> parser.add_command('clean', (run_clean, add_opt_clean))
>>> run, options, args = parser.parse_command(sys.argv[1:])
>>> run(options, args[1:])
With mymod.build being a module that defines the two functions run and add_options
"""
__docformat__ = "restructuredtext en"
from warnings import warn
warn('lgc.optparser module is deprecated, use lgc.clcommands instead', DeprecationWarning,
stacklevel=2)
import sys
import optparse
class OptionParser(optparse.OptionParser):
def __init__(self, *args, **kwargs):
optparse.OptionParser.__init__(self, *args, **kwargs)
self._commands = {}
self.min_args, self.max_args = 0, 1
def add_command(self, name, mod_or_funcs, help=''):
"""name of the command, name of module or tuple of functions
(run, add_options)
"""
assert isinstance(mod_or_funcs, str) or isinstance(mod_or_funcs, tuple), \
"mod_or_funcs has to be a module name or a tuple of functions"
self._commands[name] = (mod_or_funcs, help)
def print_main_help(self):
optparse.OptionParser.print_help(self)
print '\ncommands:'
for cmdname, (_, help) in self._commands.items():
print '% 10s - %s' % (cmdname, help)
def parse_command(self, args):
if len(args) == 0:
self.print_main_help()
sys.exit(1)
cmd = args[0]
args = args[1:]
if cmd not in self._commands:
if cmd in ('-h', '--help'):
self.print_main_help()
sys.exit(0)
elif self.version is not None and cmd == "--version":
self.print_version()
sys.exit(0)
self.error('unknown command')
self.prog = '%s %s' % (self.prog, cmd)
mod_or_f, help = self._commands[cmd]
# optparse inserts self.description between usage and options help
self.description = help
if isinstance(mod_or_f, str):
exec 'from %s import run, add_options' % mod_or_f
else:
run, add_options = mod_or_f
add_options(self)
(options, args) = self.parse_args(args)
if not (self.min_args <= len(args) <= self.max_args):
self.error('incorrect number of arguments')
return run, options, args
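# Minimal usage sketch (illustrative only; the functions below are not part of
# the module API, and the module itself is deprecated in favour of
# logilab.common.clcommands): register a command as a (run, add_options) tuple.
def _add_clean_options(parser):
    """add the options understood by the 'clean' command"""
    parser.add_option('--force', action='store_true', default=False,
                      help='remove read-only files as well')
def _run_clean(options, args):
    """placeholder implementation of the 'clean' command"""
    return 0
def _example_parser():
    """build a parser knowing a single 'clean' command"""
    parser = OptionParser(usage='%prog COMMAND [options] ...')
    parser.add_command('clean', (_run_clean, _add_clean_options),
                       help='remove generated files')
    return parser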
logilab-common-0.61.0/COPYING.LESSER
GNU LESSER GENERAL PUBLIC LICENSE
Version 2.1, February 1999
Copyright (C) 1991, 1999 Free Software Foundation, Inc.
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
[This is the first released version of the Lesser GPL. It also counts
as the successor of the GNU Library Public License, version 2, hence
the version number 2.1.]
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
Licenses are intended to guarantee your freedom to share and change
free software--to make sure the software is free for all its users.
This license, the Lesser General Public License, applies to some
specially designated software packages--typically libraries--of the
Free Software Foundation and other authors who decide to use it. You
can use it too, but we suggest you first think carefully about whether
this license or the ordinary General Public License is the better
strategy to use in any particular case, based on the explanations
below.
When we speak of free software, we are referring to freedom of use,
not price. Our General Public Licenses are designed to make sure that
you have the freedom to distribute copies of free software (and charge
for this service if you wish); that you receive source code or can get
it if you want it; that you can change the software and use pieces of
it in new free programs; and that you are informed that you can do
these things.
To protect your rights, we need to make restrictions that forbid
distributors to deny you these rights or to ask you to surrender these
rights. These restrictions translate to certain responsibilities for
you if you distribute copies of the library or if you modify it.
For example, if you distribute copies of the library, whether gratis
or for a fee, you must give the recipients all the rights that we gave
you. You must make sure that they, too, receive or can get the source
code. If you link other code with the library, you must provide
complete object files to the recipients, so that they can relink them
with the library after making changes to the library and recompiling
it. And you must show them these terms so they know their rights.
We protect your rights with a two-step method: (1) we copyright the
library, and (2) we offer you this license, which gives you legal
permission to copy, distribute and/or modify the library.
To protect each distributor, we want to make it very clear that
there is no warranty for the free library. Also, if the library is
modified by someone else and passed on, the recipients should know
that what they have is not the original version, so that the original
author's reputation will not be affected by problems that might be
introduced by others.
Finally, software patents pose a constant threat to the existence of
any free program. We wish to make sure that a company cannot
effectively restrict the users of a free program by obtaining a
restrictive license from a patent holder. Therefore, we insist that
any patent license obtained for a version of the library must be
consistent with the full freedom of use specified in this license.
Most GNU software, including some libraries, is covered by the
ordinary GNU General Public License. This license, the GNU Lesser
General Public License, applies to certain designated libraries, and
is quite different from the ordinary General Public License. We use
this license for certain libraries in order to permit linking those
libraries into non-free programs.
When a program is linked with a library, whether statically or using
a shared library, the combination of the two is legally speaking a
combined work, a derivative of the original library. The ordinary
General Public License therefore permits such linking only if the
entire combination fits its criteria of freedom. The Lesser General
Public License permits more lax criteria for linking other code with
the library.
We call this license the "Lesser" General Public License because it
does Less to protect the user's freedom than the ordinary General
Public License. It also provides other free software developers Less
of an advantage over competing non-free programs. These disadvantages
are the reason we use the ordinary General Public License for many
libraries. However, the Lesser license provides advantages in certain
special circumstances.
For example, on rare occasions, there may be a special need to
encourage the widest possible use of a certain library, so that it
becomes a de-facto standard. To achieve this, non-free programs must
be allowed to use the library. A more frequent case is that a free
library does the same job as widely used non-free libraries. In this
case, there is little to gain by limiting the free library to free
software only, so we use the Lesser General Public License.
In other cases, permission to use a particular library in non-free
programs enables a greater number of people to use a large body of
free software. For example, permission to use the GNU C Library in
non-free programs enables many more people to use the whole GNU
operating system, as well as its variant, the GNU/Linux operating
system.
Although the Lesser General Public License is Less protective of the
users' freedom, it does ensure that the user of a program that is
linked with the Library has the freedom and the wherewithal to run
that program using a modified version of the Library.
The precise terms and conditions for copying, distribution and
modification follow. Pay close attention to the difference between a
"work based on the library" and a "work that uses the library". The
former contains code derived from the library, whereas the latter must
be combined with the library in order to run.
GNU LESSER GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License Agreement applies to any software library or other
program which contains a notice placed by the copyright holder or
other authorized party saying it may be distributed under the terms of
this Lesser General Public License (also called "this License").
Each licensee is addressed as "you".
A "library" means a collection of software functions and/or data
prepared so as to be conveniently linked with application programs
(which use some of those functions and data) to form executables.
The "Library", below, refers to any such software library or work
which has been distributed under these terms. A "work based on the
Library" means either the Library or any derivative work under
copyright law: that is to say, a work containing the Library or a
portion of it, either verbatim or with modifications and/or translated
straightforwardly into another language. (Hereinafter, translation is
included without limitation in the term "modification".)
"Source code" for a work means the preferred form of the work for
making modifications to it. For a library, complete source code means
all the source code for all modules it contains, plus any associated
interface definition files, plus the scripts used to control
compilation and installation of the library.
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running a program using the Library is not restricted, and output from
such a program is covered only if its contents constitute a work based
on the Library (independent of the use of the Library in a tool for
writing it). Whether that is true depends on what the Library does
and what the program that uses the Library does.
1. You may copy and distribute verbatim copies of the Library's
complete source code as you receive it, in any medium, provided that
you conspicuously and appropriately publish on each copy an
appropriate copyright notice and disclaimer of warranty; keep intact
all the notices that refer to this License and to the absence of any
warranty; and distribute a copy of this License along with the
Library.
You may charge a fee for the physical act of transferring a copy,
and you may at your option offer warranty protection in exchange for a
fee.
2. You may modify your copy or copies of the Library or any portion
of it, thus forming a work based on the Library, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) The modified work must itself be a software library.
b) You must cause the files modified to carry prominent notices
stating that you changed the files and the date of any change.
c) You must cause the whole of the work to be licensed at no
charge to all third parties under the terms of this License.
d) If a facility in the modified Library refers to a function or a
table of data to be supplied by an application program that uses
the facility, other than as an argument passed when the facility
is invoked, then you must make a good faith effort to ensure that,
in the event an application does not supply such function or
table, the facility still operates, and performs whatever part of
its purpose remains meaningful.
(For example, a function in a library to compute square roots has
a purpose that is entirely well-defined independent of the
application. Therefore, Subsection 2d requires that any
application-supplied function or table used by this function must
be optional: if the application does not supply it, the square
root function must still compute square roots.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Library,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Library, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote
it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Library.
In addition, mere aggregation of another work not based on the Library
with the Library (or with a work based on the Library) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may opt to apply the terms of the ordinary GNU General Public
License instead of this License to a given copy of the Library. To do
this, you must alter all the notices that refer to this License, so
that they refer to the ordinary GNU General Public License, version 2,
instead of to this License. (If a newer version than version 2 of the
ordinary GNU General Public License has appeared, then you can specify
that version instead if you wish.) Do not make any other change in
these notices.
Once this change is made in a given copy, it is irreversible for
that copy, so the ordinary GNU General Public License applies to all
subsequent copies and derivative works made from that copy.
This option is useful when you wish to copy part of the code of
the Library into a program that is not a library.
4. You may copy and distribute the Library (or a portion or
derivative of it, under Section 2) in object code or executable form
under the terms of Sections 1 and 2 above provided that you accompany
it with the complete corresponding machine-readable source code, which
must be distributed under the terms of Sections 1 and 2 above on a
medium customarily used for software interchange.
If distribution of object code is made by offering access to copy
from a designated place, then offering equivalent access to copy the
source code from the same place satisfies the requirement to
distribute the source code, even though third parties are not
compelled to copy the source along with the object code.
5. A program that contains no derivative of any portion of the
Library, but is designed to work with the Library by being compiled or
linked with it, is called a "work that uses the Library". Such a
work, in isolation, is not a derivative work of the Library, and
therefore falls outside the scope of this License.
However, linking a "work that uses the Library" with the Library
creates an executable that is a derivative of the Library (because it
contains portions of the Library), rather than a "work that uses the
library". The executable is therefore covered by this License.
Section 6 states terms for distribution of such executables.
When a "work that uses the Library" uses material from a header file
that is part of the Library, the object code for the work may be a
derivative work of the Library even though the source code is not.
Whether this is true is especially significant if the work can be
linked without the Library, or if the work is itself a library. The
threshold for this to be true is not precisely defined by law.
If such an object file uses only numerical parameters, data
structure layouts and accessors, and small macros and small inline
functions (ten lines or less in length), then the use of the object
file is unrestricted, regardless of whether it is legally a derivative
work. (Executables containing this object code plus portions of the
Library will still fall under Section 6.)
Otherwise, if the work is a derivative of the Library, you may
distribute the object code for the work under the terms of Section 6.
Any executables containing that work also fall under Section 6,
whether or not they are linked directly with the Library itself.
6. As an exception to the Sections above, you may also combine or
link a "work that uses the Library" with the Library to produce a
work containing portions of the Library, and distribute that work
under terms of your choice, provided that the terms permit
modification of the work for the customer's own use and reverse
engineering for debugging such modifications.
You must give prominent notice with each copy of the work that the
Library is used in it and that the Library and its use are covered by
this License. You must supply a copy of this License. If the work
during execution displays copyright notices, you must include the
copyright notice for the Library among them, as well as a reference
directing the user to the copy of this License. Also, you must do one
of these things:
a) Accompany the work with the complete corresponding
machine-readable source code for the Library including whatever
changes were used in the work (which must be distributed under
Sections 1 and 2 above); and, if the work is an executable linked
with the Library, with the complete machine-readable "work that
uses the Library", as object code and/or source code, so that the
user can modify the Library and then relink to produce a modified
executable containing the modified Library. (It is understood
that the user who changes the contents of definitions files in the
Library will not necessarily be able to recompile the application
to use the modified definitions.)
b) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (1) uses at run time a
copy of the library already present on the user's computer system,
rather than copying library functions into the executable, and (2)
will operate properly with a modified version of the library, if
the user installs one, as long as the modified version is
interface-compatible with the version that the work was made with.
c) Accompany the work with a written offer, valid for at least
three years, to give the same user the materials specified in
Subsection 6a, above, for a charge no more than the cost of
performing this distribution.
d) If distribution of the work is made by offering access to copy
from a designated place, offer equivalent access to copy the above
specified materials from the same place.
e) Verify that the user has already received a copy of these
materials or that you have already sent this user a copy.
For an executable, the required form of the "work that uses the
Library" must include any data and utility programs needed for
reproducing the executable from it. However, as a special exception,
the materials to be distributed need not include anything that is
normally distributed (in either source or binary form) with the major
components (compiler, kernel, and so on) of the operating system on
which the executable runs, unless that component itself accompanies
the executable.
It may happen that this requirement contradicts the license
restrictions of other proprietary libraries that do not normally
accompany the operating system. Such a contradiction means you cannot
use both them and the Library together in an executable that you
distribute.
7. You may place library facilities that are a work based on the
Library side-by-side in a single library together with other library
facilities not covered by this License, and distribute such a combined
library, provided that the separate distribution of the work based on
the Library and of the other library facilities is otherwise
permitted, and provided that you do these two things:
a) Accompany the combined library with a copy of the same work
based on the Library, uncombined with any other library
facilities. This must be distributed under the terms of the
Sections above.
b) Give prominent notice with the combined library of the fact
that part of it is a work based on the Library, and explaining
where to find the accompanying uncombined form of the same work.
8. You may not copy, modify, sublicense, link with, or distribute
the Library except as expressly provided under this License. Any
attempt otherwise to copy, modify, sublicense, link with, or
distribute the Library is void, and will automatically terminate your
rights under this License. However, parties who have received copies,
or rights, from you under this License will not have their licenses
terminated so long as such parties remain in full compliance.
9. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Library or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Library (or any work based on the
Library), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Library or works based on it.
10. Each time you redistribute the Library (or any work based on the
Library), the recipient automatically receives a license from the
original licensor to copy, distribute, link with or modify the Library
subject to these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties with
this License.
11. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Library at all. For example, if a patent
license would not permit royalty-free redistribution of the Library by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Library.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply, and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
12. If the distribution and/or use of the Library is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Library under this License
may add an explicit geographical distribution limitation excluding those
countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
13. The Free Software Foundation may publish revised and/or new
versions of the Lesser General Public License from time to time.
Such new versions will be similar in spirit to the present version,
but may differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the Library
specifies a version number of this License which applies to it and
"any later version", you have the option of following the terms and
conditions either of that version or of any later version published by
the Free Software Foundation. If the Library does not specify a
license version number, you may choose any version ever published by
the Free Software Foundation.
14. If you wish to incorporate parts of the Library into other free
programs whose distribution conditions are incompatible with these,
write to the author to ask for permission. For software which is
copyrighted by the Free Software Foundation, write to the Free
Software Foundation; we sometimes make exceptions for this. Our
decision will be guided by the two goals of preserving the free status
of all derivatives of our free software and of promoting the sharing
and reuse of software generally.
NO WARRANTY
15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Libraries
If you develop a new library, and you want it to be of the greatest
possible use to the public, we recommend making it free software that
everyone can redistribute and change. You can do so by permitting
redistribution under these terms (or, alternatively, under the terms
of the ordinary General Public License).
To apply these terms, attach the following notices to the library.
It is safest to attach them to the start of each source file to most
effectively convey the exclusion of warranty; and each file should
have at least the "copyright" line and a pointer to where the full
notice is found.
    <one line to give the library's name and a brief idea of what it does.>
    Copyright (C) <year>  <name of author>
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Also add information on how to contact you by electronic and paper mail.
You should also get your employer (if you work as a programmer) or
your school, if any, to sign a "copyright disclaimer" for the library,
if necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the
library `Frob' (a library for tweaking knobs) written by James
Random Hacker.
  <signature of Ty Coon>, 1 April 1990
Ty Coon, President of Vice
That's all there is to it!
logilab-common-0.61.0/testlib.py
# -*- coding: utf-8 -*-
# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Run tests.
This will find all modules whose name match a given prefix in the test
directory, and run them. Various command line options provide
additional facilities.
Command line options:
-v verbose -- run tests in verbose mode with output to stdout
-q quiet -- don't print anything except if a test fails
-t testdir -- directory where the tests will be found
-x exclude -- add a test to exclude
-p profile -- profiled execution
-d dbc -- enable design-by-contract
-m match -- only run test matching the tag pattern which follow
If no non-option arguments are present, prefixes used are 'test',
'regrtest', 'smoketest' and 'unittest'.
"""
__docformat__ = "restructuredtext en"
# modified copy of some functions from test/regrtest.py from PyXml
# disable camel case warning
# pylint: disable=C0103
import sys
import os, os.path as osp
import re
import traceback
import inspect
import difflib
import tempfile
import math
import warnings
from shutil import rmtree
from operator import itemgetter
from ConfigParser import ConfigParser
from itertools import dropwhile
from logilab.common.deprecation import deprecated
from logilab.common.compat import builtins
import unittest as unittest_legacy
if not getattr(unittest_legacy, "__package__", None):
try:
import unittest2 as unittest
from unittest2 import SkipTest
except ImportError:
raise ImportError("You have to install python-unittest2 to use %s" % __name__)
else:
import unittest
from unittest import SkipTest
try:
from functools import wraps
except ImportError:
def wraps(wrapped):
def proxy(callable):
callable.__name__ = wrapped.__name__
return callable
return proxy
try:
from test import test_support
except ImportError:
# not always available
class TestSupport:
def unload(self, test):
pass
test_support = TestSupport()
# pylint: disable=W0622
from logilab.common.compat import any, InheritableSet, callable
# pylint: enable=W0622
from logilab.common.debugger import Debugger, colorize_source
from logilab.common.decorators import cached, classproperty
from logilab.common import textutils
__all__ = ['main', 'unittest_main', 'find_tests', 'run_test', 'spawn']
DEFAULT_PREFIXES = ('test', 'regrtest', 'smoketest', 'unittest',
'func', 'validation')
if sys.version_info >= (2, 6):
# FIXME : this does not work as expected / breaks tests on testlib
# however testlib does not work on py3k for many reasons ...
from inspect import CO_GENERATOR
else:
from compiler.consts import CO_GENERATOR
if sys.version_info >= (3, 0):
def is_generator(function):
flags = function.__code__.co_flags
return flags & CO_GENERATOR
else:
def is_generator(function):
flags = function.func_code.co_flags
return flags & CO_GENERATOR
# used by unittest to count the number of relevant levels in the traceback
__unittest = 1
def with_tempdir(callable):
"""A decorator ensuring no temporary file left when the function return
Work only for temporary file create with the tempfile module"""
if is_generator(callable):
def proxy(*args, **kwargs):
old_tmpdir = tempfile.gettempdir()
new_tmpdir = tempfile.mkdtemp(prefix="temp-lgc-")
tempfile.tempdir = new_tmpdir
try:
for x in callable(*args, **kwargs):
yield x
finally:
try:
rmtree(new_tmpdir, ignore_errors=True)
finally:
tempfile.tempdir = old_tmpdir
return proxy
@wraps(callable)
def proxy(*args, **kargs):
old_tmpdir = tempfile.gettempdir()
new_tmpdir = tempfile.mkdtemp(prefix="temp-lgc-")
tempfile.tempdir = new_tmpdir
try:
return callable(*args, **kargs)
finally:
try:
rmtree(new_tmpdir, ignore_errors=True)
finally:
tempfile.tempdir = old_tmpdir
return proxy
def in_tempdir(callable):
"""A decorator moving the enclosed function inside the tempfile.tempfdir
"""
@wraps(callable)
def proxy(*args, **kargs):
old_cwd = os.getcwd()
os.chdir(tempfile.tempdir)
try:
return callable(*args, **kargs)
finally:
os.chdir(old_cwd)
return proxy
def within_tempdir(callable):
"""A decorator run the enclosed function inside a tmpdir removed after execution
"""
proxy = with_tempdir(in_tempdir(callable))
proxy.__name__ = callable.__name__
return proxy
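# Minimal usage sketch (illustrative only; the demo function below is not part
# of the module API): within_tempdir runs the decorated callable inside a
# fresh temporary directory which is removed once the call returns.
@within_tempdir
def _scratch_file_demo():
    """create a throw-away file in the per-call temporary directory"""
    fd, path = tempfile.mkstemp(suffix='.txt')
    os.close(fd)
    return osp.basename(path)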
def find_tests(testdir,
prefixes=DEFAULT_PREFIXES, suffix=".py",
excludes=(),
remove_suffix=True):
"""
Return a list of all applicable test modules.
"""
tests = []
for name in os.listdir(testdir):
if not suffix or name.endswith(suffix):
for prefix in prefixes:
if name.startswith(prefix):
if remove_suffix and name.endswith(suffix):
name = name[:-len(suffix)]
if name not in excludes:
tests.append(name)
tests.sort()
return tests
## PostMortem Debug facilities #####
def start_interactive_mode(result):
"""starts an interactive shell so that the user can inspect errors
"""
debuggers = result.debuggers
descrs = result.error_descrs + result.fail_descrs
if len(debuggers) == 1:
# don't ask for test name if there's only one failure
debuggers[0].start()
else:
while True:
testindex = 0
print "Choose a test to debug:"
# order debuggers in the same way than errors were printed
print "\n".join(['\t%s : %s' % (i, descr) for i, (_, descr)
in enumerate(descrs)])
print "Type 'exit' (or ^D) to quit"
print
try:
todebug = raw_input('Enter a test name: ')
if todebug.strip().lower() == 'exit':
print
break
else:
try:
testindex = int(todebug)
debugger = debuggers[descrs[testindex][0]]
except (ValueError, IndexError):
print "ERROR: invalid test number %r" % (todebug, )
else:
debugger.start()
except (EOFError, KeyboardInterrupt):
print
break
# test utils ##################################################################
class SkipAwareTestResult(unittest._TextTestResult):
def __init__(self, stream, descriptions, verbosity,
exitfirst=False, pdbmode=False, cvg=None, colorize=False):
super(SkipAwareTestResult, self).__init__(stream,
descriptions, verbosity)
self.skipped = []
self.debuggers = []
self.fail_descrs = []
self.error_descrs = []
self.exitfirst = exitfirst
self.pdbmode = pdbmode
self.cvg = cvg
self.colorize = colorize
self.pdbclass = Debugger
self.verbose = verbosity > 1
def descrs_for(self, flavour):
return getattr(self, '%s_descrs' % flavour.lower())
def _create_pdb(self, test_descr, flavour):
self.descrs_for(flavour).append( (len(self.debuggers), test_descr) )
if self.pdbmode:
self.debuggers.append(self.pdbclass(sys.exc_info()[2]))
def _iter_valid_frames(self, frames):
"""only consider non-testlib frames when formatting traceback"""
lgc_testlib = osp.abspath(__file__)
std_testlib = osp.abspath(unittest.__file__)
invalid = lambda fi: osp.abspath(fi[1]) in (lgc_testlib, std_testlib)
for frameinfo in dropwhile(invalid, frames):
yield frameinfo
def _exc_info_to_string(self, err, test):
"""Converts a sys.exc_info()-style tuple of values into a string.
This method is overridden here because we want to colorize
lines if --color is passed, and display local variables if
--verbose is passed
"""
exctype, exc, tb = err
output = ['Traceback (most recent call last)']
frames = inspect.getinnerframes(tb)
colorize = self.colorize
frames = enumerate(self._iter_valid_frames(frames))
for index, (frame, filename, lineno, funcname, ctx, ctxindex) in frames:
filename = osp.abspath(filename)
if ctx is None: # pyc files or C extensions for instance
source = ''
else:
source = ''.join(ctx)
if colorize:
filename = textutils.colorize_ansi(filename, 'magenta')
source = colorize_source(source)
output.append(' File "%s", line %s, in %s' % (filename, lineno, funcname))
output.append(' %s' % source.strip())
if self.verbose:
output.append('%r == %r' % (dir(frame), test.__module__))
output.append('')
output.append(' ' + ' local variables '.center(66, '-'))
for varname, value in sorted(frame.f_locals.items()):
output.append(' %s: %r' % (varname, value))
if varname == 'self': # special handy processing for self
for varname, value in sorted(vars(value).items()):
output.append(' self.%s: %r' % (varname, value))
output.append(' ' + '-' * 66)
output.append('')
output.append(''.join(traceback.format_exception_only(exctype, exc)))
return '\n'.join(output)
def addError(self, test, err):
"""err -> (exc_type, exc, tcbk)"""
exc_type, exc, _ = err
if isinstance(exc, SkipTest):
assert exc_type == SkipTest
self.addSkip(test, exc)
else:
if self.exitfirst:
self.shouldStop = True
descr = self.getDescription(test)
super(SkipAwareTestResult, self).addError(test, err)
self._create_pdb(descr, 'error')
def addFailure(self, test, err):
if self.exitfirst:
self.shouldStop = True
descr = self.getDescription(test)
super(SkipAwareTestResult, self).addFailure(test, err)
self._create_pdb(descr, 'fail')
def addSkip(self, test, reason):
self.skipped.append((test, reason))
if self.showAll:
self.stream.writeln("SKIPPED")
elif self.dots:
self.stream.write('S')
def printErrors(self):
super(SkipAwareTestResult, self).printErrors()
self.printSkippedList()
def printSkippedList(self):
# format (test, err) compatible with unittest2
for test, err in self.skipped:
descr = self.getDescription(test)
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % ('SKIPPED', descr))
self.stream.writeln("\t%s" % err)
def printErrorList(self, flavour, errors):
for (_, descr), (test, err) in zip(self.descrs_for(flavour), errors):
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour, descr))
self.stream.writeln(self.separator2)
self.stream.writeln(err)
self.stream.writeln('no stdout'.center(len(self.separator2)))
self.stream.writeln('no stderr'.center(len(self.separator2)))
# Add deprecation warnings about new api used by module level fixtures in unittest2
# http://www.voidspace.org.uk/python/articles/unittest2.shtml#setupmodule-and-teardownmodule
class _DebugResult(object): # simplify import statement among unittest flavors..
"Used by the TestSuite to hold previous class when running in debug."
_previousTestClass = None
_moduleSetUpFailed = False
shouldStop = False
from logilab.common.decorators import monkeypatch
@monkeypatch(unittest.TestSuite)
def _handleModuleTearDown(self, result):
previousModule = self._get_previous_module(result)
if previousModule is None:
return
if result._moduleSetUpFailed:
return
try:
module = sys.modules[previousModule]
except KeyError:
return
# add testlib specific deprecation warning and switch to new api
if hasattr(module, 'teardown_module'):
warnings.warn('Please rename teardown_module() to tearDownModule() instead.',
DeprecationWarning)
setattr(module, 'tearDownModule', module.teardown_module)
# end of monkey-patching
tearDownModule = getattr(module, 'tearDownModule', None)
if tearDownModule is not None:
try:
tearDownModule()
except Exception, e:
if isinstance(result, _DebugResult):
raise
errorName = 'tearDownModule (%s)' % previousModule
self._addClassOrModuleLevelException(result, e, errorName)
@monkeypatch(unittest.TestSuite)
def _handleModuleFixture(self, test, result):
previousModule = self._get_previous_module(result)
currentModule = test.__class__.__module__
if currentModule == previousModule:
return
self._handleModuleTearDown(result)
result._moduleSetUpFailed = False
try:
module = sys.modules[currentModule]
except KeyError:
return
# add testlib specific deprecation warning and switch to new api
if hasattr(module, 'setup_module'):
warnings.warn('Please rename setup_module() to setUpModule() instead.',
DeprecationWarning)
setattr(module, 'setUpModule', module.setup_module)
# end of monkey-patching
setUpModule = getattr(module, 'setUpModule', None)
if setUpModule is not None:
try:
setUpModule()
except Exception, e:
if isinstance(result, _DebugResult):
raise
result._moduleSetUpFailed = True
errorName = 'setUpModule (%s)' % currentModule
self._addClassOrModuleLevelException(result, e, errorName)
# backward compatibility: TestSuite might be imported from lgc.testlib
TestSuite = unittest.TestSuite
class keywords(dict):
"""Keyword args (**kwargs) support for generative tests."""
class starargs(tuple):
"""Variable arguments (*args) for generative tests."""
def __new__(cls, *args):
return tuple.__new__(cls, args)
unittest_main = unittest.main
class InnerTestSkipped(SkipTest):
"""raised when a test is skipped"""
pass
def parse_generative_args(params):
args = []
varargs = ()
kwargs = {}
flags = 0 # 2 <=> starargs, 4 <=> kwargs
for param in params:
if isinstance(param, starargs):
varargs = param
if flags:
raise TypeError('found starargs after keywords !')
flags |= 2
args += list(varargs)
elif isinstance(param, keywords):
kwargs = param
if flags & 4:
raise TypeError('got multiple keywords parameters')
flags |= 4
elif flags & 2 or flags & 4:
raise TypeError('found parameters after kwargs or args')
else:
args.append(param)
return args, kwargs
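# Illustrative sketch (the parameter values are made up): how plain
# parameters, starargs and keywords markers yielded by a generative test
# are split by parse_generative_args.
def _example_split_params():
    params = [1, 2, starargs(3, 4), keywords(option='x')]
    args, kwargs = parse_generative_args(params)
    return args, kwargs   # -> [1, 2, 3, 4], {'option': 'x'}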
class InnerTest(tuple):
def __new__(cls, name, *data):
instance = tuple.__new__(cls, data)
instance.name = name
return instance
class Tags(InheritableSet): # 2.4 compat
"""A set of tag able validate an expression"""
def __init__(self, *tags, **kwargs):
self.inherit = kwargs.pop('inherit', True)
if kwargs:
raise TypeError("%s are an invalid keyword argument for this function" % kwargs.keys())
if len(tags) == 1 and not isinstance(tags[0], basestring):
tags = tags[0]
super(Tags, self).__init__(tags, **kwargs)
def __getitem__(self, key):
return key in self
def match(self, exp):
return eval(exp, {}, self)
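# Illustrative sketch (tag names are made up): item access on a Tags
# instance tests membership, and match() evaluates a boolean expression
# against the stored tags.
def _example_tags():
    tags = Tags('web', 'slow')
    return tags['web'], tags.match('web and not fast')   # -> True, True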
# duplicate definition from unittest2 of the _deprecate decorator
def _deprecate(original_func):
def deprecated_func(*args, **kwargs):
warnings.warn(
('Please use %s instead.' % original_func.__name__),
DeprecationWarning, 2)
return original_func(*args, **kwargs)
return deprecated_func
class TestCase(unittest.TestCase):
"""A unittest.TestCase extension with some additional methods."""
maxDiff = None
pdbclass = Debugger
tags = Tags()
def __init__(self, methodName='runTest'):
super(TestCase, self).__init__(methodName)
# internal API changed in python2.4 and needed by DocTestCase
if sys.version_info >= (2, 4):
self.__exc_info = sys.exc_info
self.__testMethodName = self._testMethodName
else:
# let's give easier access to _testMethodName to every subclasses
if hasattr(self, "__testMethodName"):
self._testMethodName = self.__testMethodName
self._current_test_descr = None
self._options_ = None
@classproperty
@cached
def datadir(cls): # pylint: disable=E0213
"""helper attribute holding the standard test's data directory
NOTE: this is a logilab's standard
"""
mod = __import__(cls.__module__)
return osp.join(osp.dirname(osp.abspath(mod.__file__)), 'data')
# cache it (use a class method to cache on class since TestCase is
# instantiated for each test run)
@classmethod
def datapath(cls, *fname):
"""joins the object's datadir and `fname`"""
return osp.join(cls.datadir, *fname)
def set_description(self, descr):
"""sets the current test's description.
This can be useful for generative tests because it allows specifying
a description per yield
"""
self._current_test_descr = descr
# override default's unittest.py feature
def shortDescription(self):
"""override default unittest shortDescription to handle correctly
generative tests
"""
if self._current_test_descr is not None:
return self._current_test_descr
return super(TestCase, self).shortDescription()
def quiet_run(self, result, func, *args, **kwargs):
try:
func(*args, **kwargs)
except (KeyboardInterrupt, SystemExit):
raise
except unittest.SkipTest, e:
self._addSkip(result, str(e))
return False
except:
result.addError(self, self.__exc_info())
return False
return True
def _get_test_method(self):
"""return the test method"""
return getattr(self, self._testMethodName)
def optval(self, option, default=None):
"""return the option value or default if the option is not define"""
return getattr(self._options_, option, default)
def __call__(self, result=None, runcondition=None, options=None):
"""rewrite TestCase.__call__ to support generative tests
This is mostly a copy/paste from unittest.py (i.e. same
variable names, same logic, except for the generative tests part)
"""
from logilab.common.pytest import FILE_RESTART
if result is None:
result = self.defaultTestResult()
result.pdbclass = self.pdbclass
self._options_ = options
# if result.cvg:
# result.cvg.start()
testMethod = self._get_test_method()
if runcondition and not runcondition(testMethod):
return # test is skipped
result.startTest(self)
try:
if not self.quiet_run(result, self.setUp):
return
generative = is_generator(testMethod.im_func)
# generative tests
if generative:
self._proceed_generative(result, testMethod,
runcondition)
else:
status = self._proceed(result, testMethod)
success = (status == 0)
if not self.quiet_run(result, self.tearDown):
return
if not generative and success:
if hasattr(options, "exitfirst") and options.exitfirst:
# add this test to restart file
try:
restartfile = open(FILE_RESTART, 'a')
try:
descr = '.'.join((self.__class__.__module__,
self.__class__.__name__,
self._testMethodName))
restartfile.write(descr+os.linesep)
finally:
restartfile.close()
except Exception, ex:
print >> sys.__stderr__, "Error while saving \
succeeded test into", osp.join(os.getcwd(), FILE_RESTART)
raise ex
result.addSuccess(self)
finally:
# if result.cvg:
# result.cvg.stop()
result.stopTest(self)
def _proceed_generative(self, result, testfunc, runcondition=None):
# cancel startTest()'s increment
result.testsRun -= 1
success = True
try:
for params in testfunc():
if runcondition and not runcondition(testfunc,
skipgenerator=False):
if not (isinstance(params, InnerTest)
and runcondition(params)):
continue
if not isinstance(params, (tuple, list)):
params = (params, )
func = params[0]
args, kwargs = parse_generative_args(params[1:])
# increment test counter manually
result.testsRun += 1
status = self._proceed(result, func, args, kwargs)
if status == 0:
result.addSuccess(self)
success = True
else:
success = False
# XXX Don't stop anymore if an error occurred
#if status == 2:
# result.shouldStop = True
if result.shouldStop: # either on error or on exitfirst + error
break
except:
# if an error occurs between two yields
result.addError(self, self.__exc_info())
success = False
return success
def _proceed(self, result, testfunc, args=(), kwargs=None):
"""proceed the actual test
returns 0 on success, 1 on failure, 2 on error
Note: addSuccess can't be called here because we have to wait
for tearDown to be successfully executed to declare the test as
successful
"""
kwargs = kwargs or {}
try:
testfunc(*args, **kwargs)
except self.failureException:
result.addFailure(self, self.__exc_info())
return 1
except KeyboardInterrupt:
raise
except InnerTestSkipped, e:
result.addSkip(self, e)
return 1
except SkipTest, e:
result.addSkip(self, e)
return 0
except:
result.addError(self, self.__exc_info())
return 2
return 0
def defaultTestResult(self):
"""return a new instance of the defaultTestResult"""
return SkipAwareTestResult()
skip = _deprecate(unittest.TestCase.skipTest)
assertEquals = _deprecate(unittest.TestCase.assertEqual)
assertNotEquals = _deprecate(unittest.TestCase.assertNotEqual)
assertAlmostEquals = _deprecate(unittest.TestCase.assertAlmostEqual)
assertNotAlmostEquals = _deprecate(unittest.TestCase.assertNotAlmostEqual)
def innerSkip(self, msg=None):
"""mark a generative test as skipped for the reason"""
msg = msg or 'test was skipped'
raise InnerTestSkipped(msg)
@deprecated('Please use assertDictEqual instead.')
def assertDictEquals(self, dict1, dict2, msg=None, context=None):
"""compares two dicts
If the two dicts differ, the differences are shown in the error
message
:param dict1: a Python Dictionary
:param dict2: a Python Dictionary
:param msg: custom message (String) in case of failure
"""
dict1 = dict(dict1)
msgs = []
for key, value in dict2.items():
try:
if dict1[key] != value:
msgs.append('%r != %r for key %r' % (dict1[key], value,
key))
del dict1[key]
except KeyError:
msgs.append('missing %r key' % key)
if dict1:
msgs.append('dict2 is lacking %r' % dict1)
if msgs:
if msg:
self.fail(msg)
if context is not None:
base = '%s\n' % context
else:
base = ''
self.fail(base + '\n'.join(msgs))
@deprecated('Please use assertCountEqual instead.')
def assertUnorderedIterableEquals(self, got, expected, msg=None):
"""compares two iterable and shows difference between both
:param got: the unordered Iterable that we found
:param expected: the expected unordered Iterable
:param msg: custom message (String) in case of failure
"""
got, expected = list(got), list(expected)
self.assertSetEqual(set(got), set(expected), msg)
if len(got) != len(expected):
if msg is None:
msgs = ['Iterables have the same elements but not the same number',
'\telement\texpected\tgot']
got_count = {}
expected_count = {}
for element in got:
got_count[element] = got_count.get(element, 0) + 1
for element in expected:
expected_count[element] = expected_count.get(element, 0) + 1
# we know that got_count.keys() == expected_count.keys()
# because of assertSetEqual
for element, count in got_count.iteritems():
other_count = expected_count[element]
if other_count != count:
msgs.append('\t%s\t%s\t%s' % (element, other_count, count))
msg = '\n'.join(msgs)
self.fail(msg)
assertUnorderedIterableEqual = assertUnorderedIterableEquals
assertUnordIterEquals = assertUnordIterEqual = assertUnorderedIterableEqual
@deprecated('Please use assertSetEqual instead.')
def assertSetEquals(self,got,expected, msg=None):
"""compares two sets and shows difference between both
Don't use it for iterables other than sets.
:param got: the Set that we found
:param expected: the second Set to be compared to the first one
:param msg: custom message (String) in case of failure
"""
if not(isinstance(got, set) and isinstance(expected, set)):
warnings.warn("the assertSetEquals function if now intended for set only."\
"use assertUnorderedIterableEquals instead.",
DeprecationWarning, 2)
return self.assertUnorderedIterableEquals(got, expected, msg)
items={}
items['missing'] = expected - got
items['unexpected'] = got - expected
if any(items.itervalues()):
if msg is None:
msg = '\n'.join('%s:\n\t%s' % (key, "\n\t".join(str(value) for value in values))
for key, values in items.iteritems() if values)
self.fail(msg)
@deprecated('Please use assertListEqual instead.')
def assertListEquals(self, list_1, list_2, msg=None):
"""compares two lists
If the two lists differ, the first difference is shown in the error
message
:param list_1: a Python List
:param list_2: a second Python List
:param msg: custom message (String) in case of failure
"""
_l1 = list_1[:]
for i, value in enumerate(list_2):
try:
if _l1[0] != value:
from pprint import pprint
pprint(list_1)
pprint(list_2)
self.fail('%r != %r for index %d' % (_l1[0], value, i))
del _l1[0]
except IndexError:
if msg is None:
msg = 'list_1 has only %d elements, not %s '\
'(at least %r missing)'% (i, len(list_2), value)
self.fail(msg)
if _l1:
if msg is None:
msg = 'list_2 is lacking %r' % _l1
self.fail(msg)
@deprecated('Non-standard. Please use assertMultiLineEqual instead.')
def assertLinesEquals(self, string1, string2, msg=None, striplines=False):
"""compare two strings and assert that the text lines of the strings
are equal.
:param string1: a String
:param string2: a String
:param msg: custom message (String) in case of failure
:param striplines: Boolean to trigger line stripping before comparing
"""
lines1 = string1.splitlines()
lines2 = string2.splitlines()
if striplines:
lines1 = [l.strip() for l in lines1]
lines2 = [l.strip() for l in lines2]
self.assertListEqual(lines1, lines2, msg)
assertLineEqual = assertLinesEquals
@deprecated('Non-standard: please copy test method to your TestCase class')
def assertXMLWellFormed(self, stream, msg=None, context=2):
"""asserts the XML stream is well-formed (no DTD conformance check)
:param context: number of context lines in standard message
(show all data if negative).
Only available with element tree
"""
try:
from xml.etree.ElementTree import parse
self._assertETXMLWellFormed(stream, parse, msg)
except ImportError:
from xml.sax import make_parser, SAXParseException
parser = make_parser()
try:
parser.parse(stream)
except SAXParseException, ex:
if msg is None:
stream.seek(0)
for _ in xrange(ex.getLineNumber()):
line = stream.readline()
pointer = (' ' * (ex.getColumnNumber() - 1)) + '^'
msg = 'XML stream not well formed: %s\n%s%s' % (ex, line, pointer)
self.fail(msg)
@deprecated('Non-standard: please copy test method to your TestCase class')
def assertXMLStringWellFormed(self, xml_string, msg=None, context=2):
"""asserts the XML string is well-formed (no DTD conformance check)
:param context: number of context lines in standard message
(show all data if negative).
Only available with element tree
"""
try:
from xml.etree.ElementTree import fromstring
except ImportError:
from elementtree.ElementTree import fromstring
self._assertETXMLWellFormed(xml_string, fromstring, msg)
def _assertETXMLWellFormed(self, data, parse, msg=None, context=2):
"""internal function used by /assertXML(String)?WellFormed/ functions
:param data: xml_data
:param parse: appropriate parser function for this data
:param msg: error message
:param context: number of context lines in standard message
(show all data if negative).
Only available with element tree
"""
from xml.parsers.expat import ExpatError
try:
from xml.etree.ElementTree import ParseError
except ImportError:
# compatibility for python < 2.7
ParseError = ExpatError
try:
parse(data)
except (ExpatError, ParseError), ex:
if msg is None:
msg = 'XML stream not well formed: %s' % ex
self.fail(msg)
def assertXMLEqualsTuple(self, element, tup):
"""compare an ElementTree Element to a tuple of the form
(tagname, [attrib[, children[, text[, tail]]]])"""
# check tag
self.assertTextEquals(element.tag, tup[0])
# check attrib
if len(element.attrib) or len(tup) > 1:
if len(tup)<=1:
self.fail( "tuple %s has no attributes (%s expected)"%(tup,
dict(element.attrib)))
self.assertDictEqual(element.attrib, tup[1])
# check children
if len(element) or len(tup)>2:
if len(tup)<=2:
self.fail( "tuple %s has no children (%i expected)"%(tup,
len(element)))
if len(element) != len(tup[2]):
self.fail( "tuple %s has %i children%s (%i expected)"%(tup,
len(tup[2]),
('', 's')[len(tup[2])>1], len(element)))
for index in xrange(len(tup[2])):
self.assertXMLEqualsTuple(element[index], tup[2][index])
#check text
if element.text or len(tup)>3:
if len(tup)<=3:
self.fail( "tuple %s has no text value (%r expected)"%(tup,
element.text))
self.assertTextEquals(element.text, tup[3])
#check tail
if element.tail or len(tup)>4:
if len(tup)<=4:
self.fail( "tuple %s has no tail value (%r expected)"%(tup,
element.tail))
self.assertTextEquals(element.tail, tup[4])
def _difftext(self, lines1, lines2, junk=None, msg_prefix='Texts differ'):
junk = junk or (' ', '\t')
# result is a generator
result = difflib.ndiff(lines1, lines2, charjunk=lambda x: x in junk)
read = []
for line in result:
read.append(line)
# lines that don't start with a ' ' are diff ones
if not line.startswith(' '):
self.fail('\n'.join(['%s\n'%msg_prefix]+read + list(result)))
@deprecated('Non-standard. Please use assertMultiLineEqual instead.')
def assertTextEquals(self, text1, text2, junk=None,
msg_prefix='Text differ', striplines=False):
"""compare two multiline strings (using difflib and splitlines())
:param text1: a Python basestring
:param text2: a second Python basestring
:param junk: a list of characters considered as junk
:param msg_prefix: String (message prefix)
:param striplines: Boolean to trigger line stripping before comparing
"""
msg = []
if not isinstance(text1, basestring):
msg.append('text1 is not a string (%s)'%(type(text1)))
if not isinstance(text2, basestring):
msg.append('text2 is not a string (%s)'%(type(text2)))
if msg:
self.fail('\n'.join(msg))
lines1 = text1.strip().splitlines(True)
lines2 = text2.strip().splitlines(True)
if striplines:
lines1 = [line.strip() for line in lines1]
lines2 = [line.strip() for line in lines2]
self._difftext(lines1, lines2, junk, msg_prefix)
assertTextEqual = assertTextEquals
@deprecated('Non-standard: please copy test method to your TestCase class')
def assertStreamEquals(self, stream1, stream2, junk=None,
msg_prefix='Stream differ'):
"""compare two streams (using difflib and readlines())"""
# if stream1 is stream2, readlines() on stream1 will also read lines
# in stream2, so they'll appear different, although they're not
if stream1 is stream2:
return
# make sure we compare from the beginning of the stream
stream1.seek(0)
stream2.seek(0)
# compare
self._difftext(stream1.readlines(), stream2.readlines(), junk,
msg_prefix)
assertStreamEqual = assertStreamEquals
@deprecated('Non-standard: please copy test method to your TestCase class')
def assertFileEquals(self, fname1, fname2, junk=(' ', '\t')):
"""compares two files using difflib"""
self.assertStreamEqual(open(fname1), open(fname2), junk,
msg_prefix='Files differs\n-:%s\n+:%s\n'%(fname1, fname2))
assertFileEqual = assertFileEquals
@deprecated('Non-standard: please copy test method to your TestCase class')
def assertDirEquals(self, path_a, path_b):
"""compares two files using difflib"""
assert osp.exists(path_a), "%s doesn't exists" % path_a
assert osp.exists(path_b), "%s doesn't exists" % path_b
all_a = [ (ipath[len(path_a):].lstrip('/'), idirs, ifiles)
for ipath, idirs, ifiles in os.walk(path_a)]
all_a.sort(key=itemgetter(0))
all_b = [ (ipath[len(path_b):].lstrip('/'), idirs, ifiles)
for ipath, idirs, ifiles in os.walk(path_b)]
all_b.sort(key=itemgetter(0))
iter_a, iter_b = iter(all_a), iter(all_b)
partial_iter = True
ipath_a, idirs_a, ifiles_a = data_a = None, None, None
while True:
try:
ipath_a, idirs_a, ifiles_a = datas_a = iter_a.next()
partial_iter = False
ipath_b, idirs_b, ifiles_b = datas_b = iter_b.next()
partial_iter = True
self.assertTrue(ipath_a == ipath_b,
"unexpected %s in %s while looking %s from %s" %
(ipath_a, path_a, ipath_b, path_b))
errors = {}
sdirs_a = set(idirs_a)
sdirs_b = set(idirs_b)
errors["unexpected directories"] = sdirs_a - sdirs_b
errors["missing directories"] = sdirs_b - sdirs_a
sfiles_a = set(ifiles_a)
sfiles_b = set(ifiles_b)
errors["unexpected files"] = sfiles_a - sfiles_b
errors["missing files"] = sfiles_b - sfiles_a
msgs = [ "%s: %s"% (name, items)
for name, items in errors.iteritems() if items]
if msgs:
msgs.insert(0, "%s and %s differ :" % (
osp.join(path_a, ipath_a),
osp.join(path_b, ipath_b),
))
self.fail("\n".join(msgs))
for files in (ifiles_a, ifiles_b):
files.sort()
for index, path in enumerate(ifiles_a):
self.assertFileEquals(osp.join(path_a, ipath_a, path),
osp.join(path_b, ipath_b, ifiles_b[index]))
except StopIteration:
break
assertDirEqual = assertDirEquals
def assertIsInstance(self, obj, klass, msg=None, strict=False):
"""check if an object is an instance of a class
:param obj: the Python Object to be checked
:param klass: the target class
:param msg: a String for a custom message
:param strict: if True, check that the class of `obj` is `klass`;
else check with 'isinstance'
"""
if strict:
warnings.warn('[API] Non-standard. Strict parameter has vanished',
DeprecationWarning, stacklevel=2)
if msg is None:
if strict:
msg = '%r is not of class %s but of %s'
else:
msg = '%r is not an instance of %s but of %s'
msg = msg % (obj, klass, type(obj))
if strict:
self.assertTrue(obj.__class__ is klass, msg)
else:
self.assertTrue(isinstance(obj, klass), msg)
@deprecated('Please use assertIsNone instead.')
def assertNone(self, obj, msg=None):
"""assert obj is None
:param obj: Python Object to be tested
"""
if msg is None:
msg = "reference to %r when None expected"%(obj,)
self.assertTrue( obj is None, msg )
@deprecated('Please use assertIsNotNone instead.')
def assertNotNone(self, obj, msg=None):
"""assert obj is not None"""
if msg is None:
msg = "unexpected reference to None"
self.assertTrue( obj is not None, msg )
@deprecated('Non-standard. Please use assertAlmostEqual instead.')
def assertFloatAlmostEquals(self, obj, other, prec=1e-5,
relative=False, msg=None):
"""compares if two floats have a distance smaller than expected
precision.
:param obj: a Float
:param other: another Float to be compared to
:param prec: a Float describing the precision
:param relative: boolean switching to relative/absolute precision
:param msg: a String for a custom message
"""
if msg is None:
msg = "%r != %r" % (obj, other)
if relative:
prec = prec*math.fabs(obj)
self.assertTrue(math.fabs(obj - other) < prec, msg)
def failUnlessRaises(self, excClass, callableObj=None, *args, **kwargs):
"""override default failUnlessRaises method to return the raised
exception instance.
Fail unless an exception of class excClass is thrown
by callableObj when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
thrown, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
CAUTION! There are subtle differences between Logilab and unittest2
- exc is not returned in standard version
- context capabilities in standard version
- try/except/else construction (minor)
:param excClass: the Exception to be raised
:param callableObj: a callable Object which should raise excClass
:param args: a list of positional arguments for callableObj
:param kwargs: a dict of keyword arguments for callableObj
"""
# XXX cube vcslib : test_branches_from_app
if callableObj is None:
_assert = super(TestCase, self).assertRaises
return _assert(excClass, callableObj, *args, **kwargs)
try:
callableObj(*args, **kwargs)
except excClass, exc:
class ProxyException:
def __init__(self, obj):
self._obj = obj
def __getattr__(self, attr):
warn_msg = ("This exception was retrieved with the old testlib way "
"`exc = self.assertRaises(Exc, callable)`, please use "
"the context manager instead'")
warnings.warn(warn_msg, DeprecationWarning, 2)
return self._obj.__getattribute__(attr)
return ProxyException(exc)
else:
if hasattr(excClass, '__name__'):
excName = excClass.__name__
else:
excName = str(excClass)
raise self.failureException("%s not raised" % excName)
assertRaises = failUnlessRaises
if sys.version_info >= (3,2):
assertItemsEqual = unittest.TestCase.assertCountEqual
else:
assertCountEqual = unittest.TestCase.assertItemsEqual
if sys.version_info < (2,7):
def assertIsNotNone(self, value, *args, **kwargs):
self.assertNotEqual(None, value, *args, **kwargs)
TestCase.assertItemsEqual = deprecated('assertItemsEqual is deprecated, use assertCountEqual')(
TestCase.assertItemsEqual)
import doctest
class SkippedSuite(unittest.TestSuite):
def test(self):
"""just there to trigger test execution"""
self.skipped_test('doctest module has no DocTestSuite class')
class DocTestFinder(doctest.DocTestFinder):
def __init__(self, *args, **kwargs):
self.skipped = kwargs.pop('skipped', ())
doctest.DocTestFinder.__init__(self, *args, **kwargs)
def _get_test(self, obj, name, module, globs, source_lines):
"""override default _get_test method to be able to skip tests
according to skipped attribute's value
Note: Python (<=2.4) uses a _name_filter which could be used for that
purpose but it's no longer available in 2.5
Python 2.5 seems to have a [SKIP] flag
"""
if getattr(obj, '__name__', '') in self.skipped:
return None
return doctest.DocTestFinder._get_test(self, obj, name, module,
globs, source_lines)
class DocTest(TestCase):
"""trigger module doctest
I don't know how to make unittest.main consider the DocTestSuite instance
without this hack
"""
skipped = ()
def __call__(self, result=None, runcondition=None, options=None):\
# pylint: disable=W0613
try:
finder = DocTestFinder(skipped=self.skipped)
if sys.version_info >= (2, 4):
suite = doctest.DocTestSuite(self.module, test_finder=finder)
if sys.version_info >= (2, 5):
# XXX iirk
doctest.DocTestCase._TestCase__exc_info = sys.exc_info
else:
suite = doctest.DocTestSuite(self.module)
except AttributeError:
suite = SkippedSuite()
# doctest may clobber the builtins dictionary
# This happens to the "_" entry used by gettext
old_builtins = builtins.__dict__.copy()
try:
return suite.run(result)
finally:
builtins.__dict__.clear()
builtins.__dict__.update(old_builtins)
run = __call__
def test(self):
"""just there to trigger test execution"""
MAILBOX = None
class MockSMTP:
"""fake smtplib.SMTP"""
def __init__(self, host, port):
self.host = host
self.port = port
global MAILBOX
self.reveived = MAILBOX = []
def set_debuglevel(self, debuglevel):
"""ignore debug level"""
def sendmail(self, fromaddr, toaddres, body):
"""push sent mail in the mailbox"""
self.reveived.append((fromaddr, toaddres, body))
def quit(self):
"""ignore quit"""
class MockConfigParser(ConfigParser):
"""fake ConfigParser.ConfigParser"""
def __init__(self, options):
ConfigParser.__init__(self)
for section, pairs in options.iteritems():
self.add_section(section)
for key, value in pairs.iteritems():
self.set(section, key, value)
def write(self, _):
raise NotImplementedError()
class MockConnection:
"""fake DB-API 2.0 connexion AND cursor (i.e. cursor() return self)"""
def __init__(self, results):
self.received = []
self.states = []
self.results = results
def cursor(self):
"""Mock cursor method"""
return self
def execute(self, query, args=None):
"""Mock execute method"""
self.received.append( (query, args) )
def fetchone(self):
"""Mock fetchone method"""
return self.results[0]
def fetchall(self):
"""Mock fetchall method"""
return self.results
def commit(self):
"""Mock commiy method"""
self.states.append( ('commit', len(self.received)) )
def rollback(self):
"""Mock rollback method"""
self.states.append( ('rollback', len(self.received)) )
def close(self):
"""Mock close method"""
pass
def mock_object(**params):
"""creates an object using params to set attributes
>>> option = mock_object(verbose=False, index=range(5))
>>> option.verbose
False
>>> option.index
[0, 1, 2, 3, 4]
"""
return type('Mock', (), params)()
def create_files(paths, chroot):
"""Creates directories and files found in .
:param paths: list of relative paths to files or directories
:param chroot: the root directory in which paths will be created
>>> from os.path import isdir, isfile
>>> isdir('/tmp/a')
False
>>> create_files(['a/b/foo.py', 'a/b/c/', 'a/b/c/d/e.py'], '/tmp')
>>> isdir('/tmp/a')
True
>>> isdir('/tmp/a/b/c')
True
>>> isfile('/tmp/a/b/c/d/e.py')
True
>>> isfile('/tmp/a/b/foo.py')
True
"""
dirs, files = set(), set()
for path in paths:
path = osp.join(chroot, path)
filename = osp.basename(path)
# path is a directory path
if filename == '':
dirs.add(path)
# path is a filename path
else:
dirs.add(osp.dirname(path))
files.add(path)
for dirpath in dirs:
if not osp.isdir(dirpath):
os.makedirs(dirpath)
for filepath in files:
open(filepath, 'w').close()
class AttrObject: # XXX cf mock_object
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def tag(*args, **kwargs):
"""descriptor adding tag to a function"""
def desc(func):
assert not hasattr(func, 'tags')
func.tags = Tags(*args, **kwargs)
return func
return desc
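# Illustrative sketch (the function name is made up): attaching tags to a
# test callable with the tag() decorator; the resulting Tags instance can
# then be matched against a tag expression.
@tag('web', 'slow')
def _example_tagged_test():
    """hypothetical tagged callable"""
# _example_tagged_test.tags.match('web and not slow')  ->  False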
def require_version(version):
""" Compare version of python interpreter to the given one. Skip the test
if older.
"""
def check_require_version(f):
version_elements = version.split('.')
try:
compare = tuple([int(v) for v in version_elements])
except ValueError:
raise ValueError('%s is not a correct version : should be X.Y[.Z].' % version)
current = sys.version_info[:3]
if current < compare:
def new_f(self, *args, **kwargs):
self.skipTest('Need at least %s version of python. Current version is %s.' % (version, '.'.join([str(element) for element in current])))
new_f.__name__ = f.__name__
return new_f
else:
return f
return check_require_version
def require_module(module):
""" Check if the given module is loaded. Skip the test if not.
"""
def check_require_module(f):
try:
__import__(module)
return f
except ImportError:
def new_f(self, *args, **kwargs):
self.skipTest('%s can not be imported.' % module)
new_f.__name__ = f.__name__
return new_f
return check_require_module
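# Illustrative sketch (class and method names are made up): skipping tests
# depending on the interpreter version or on an importable module.
class _ExampleRequirementsTC(TestCase):
    @require_version('2.5')
    def test_needs_recent_python(self):
        pass
    @require_module('sqlite3')
    def test_needs_sqlite3(self):
        pass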
logilab-common-0.61.0/shellutils.py
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""shell/term utilities, useful to write some python scripts instead of shell
scripts.
"""
__docformat__ = "restructuredtext en"
import os
import glob
import shutil
import stat
import sys
import tempfile
import time
import fnmatch
import errno
import string
import random
import subprocess
from os.path import exists, isdir, islink, basename, join
from logilab.common import STD_BLACKLIST, _handle_blacklist
from logilab.common.compat import raw_input
from logilab.common.compat import str_to_bytes
from logilab.common.deprecation import deprecated
try:
from logilab.common.proc import ProcInfo, NoSuchProcess
except ImportError:
# windows platform
class NoSuchProcess(Exception): pass
def ProcInfo(pid):
raise NoSuchProcess()
class tempdir(object):
def __enter__(self):
self.path = tempfile.mkdtemp()
return self.path
def __exit__(self, exctype, value, traceback):
# rmtree in all cases
shutil.rmtree(self.path)
return traceback is None
class pushd(object):
def __init__(self, directory):
self.directory = directory
def __enter__(self):
self.cwd = os.getcwd()
os.chdir(self.directory)
return self.directory
def __exit__(self, exctype, value, traceback):
os.chdir(self.cwd)
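# Illustrative sketch (the file name is made up): combining the tempdir and
# pushd context managers to work inside a scratch directory that is removed
# on exit.
def _example_scratch_work():
    with tempdir() as tmp:
        with pushd(tmp):
            open('scratch.txt', 'w').close()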
def chown(path, login=None, group=None):
"""Same as `os.chown` function but accepting user login or group name as
argument. If login or group is omitted, it's left unchanged.
Note: you must own the file to chown it (or be root). Otherwise OSError is raised.
"""
if login is None:
uid = -1
else:
try:
uid = int(login)
except ValueError:
import pwd # Platforms: Unix
uid = pwd.getpwnam(login).pw_uid
if group is None:
gid = -1
else:
try:
gid = int(group)
except ValueError:
import grp
gid = grp.getgrnam(group).gr_gid
os.chown(path, uid, gid)
def mv(source, destination, _action=shutil.move):
"""A shell-like mv, supporting wildcards.
"""
sources = glob.glob(source)
if len(sources) > 1:
assert isdir(destination)
for filename in sources:
_action(filename, join(destination, basename(filename)))
else:
try:
source = sources[0]
except IndexError:
raise OSError('No file matching %s' % source)
if isdir(destination) and exists(destination):
destination = join(destination, basename(source))
try:
_action(source, destination)
except OSError, ex:
raise OSError('Unable to move %r to %r (%s)' % (
source, destination, ex))
def rm(*files):
"""A shell-like rm, supporting wildcards.
"""
for wfile in files:
for filename in glob.glob(wfile):
if islink(filename):
os.remove(filename)
elif isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
def cp(source, destination):
"""A shell-like cp, supporting wildcards.
"""
mv(source, destination, _action=shutil.copy)
def find(directory, exts, exclude=False, blacklist=STD_BLACKLIST):
"""Recursively find files ending with the given extensions from the directory.
:type directory: str
:param directory:
directory where the search should start
:type exts: basestring or list or tuple
:param exts:
extension or list of extensions to search for
:type exclude: boolean
:param exclude:
if this argument is True, return files NOT ending with the given
extensions
:type blacklist: list or tuple
:param blacklist:
optional list of files or directory to ignore, default to the value of
`logilab.common.STD_BLACKLIST`
:rtype: list
:return:
the list of all matching files
"""
if isinstance(exts, basestring):
exts = (exts,)
if exclude:
def match(filename, exts):
for ext in exts:
if filename.endswith(ext):
return False
return True
else:
def match(filename, exts):
for ext in exts:
if filename.endswith(ext):
return True
return False
files = []
for dirpath, dirnames, filenames in os.walk(directory):
_handle_blacklist(blacklist, dirnames, filenames)
# don't append files if the directory is blacklisted
dirname = basename(dirpath)
if dirname in blacklist:
continue
files.extend([join(dirpath, f) for f in filenames if match(f, exts)])
return files
def globfind(directory, pattern, blacklist=STD_BLACKLIST):
"""Recursively finds files matching glob `pattern` under `directory`.
This is an alternative to `logilab.common.shellutils.find`.
:type directory: str
:param directory:
directory where the search should start
:type pattern: basestring
:param pattern:
the glob pattern (e.g *.py, foo*.py, etc.)
:type blacklist: list or tuple
:param blacklist:
optional list of files or directory to ignore, default to the value of
`logilab.common.STD_BLACKLIST`
:rtype: iterator
:return:
iterator over the list of all matching files
"""
for curdir, dirnames, filenames in os.walk(directory):
_handle_blacklist(blacklist, dirnames, filenames)
for fname in fnmatch.filter(filenames, pattern):
yield join(curdir, fname)
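# Illustrative sketch (the directory argument is hypothetical): collecting
# python sources with find() and test modules with globfind().
def _example_list_sources(directory):
    py_files = find(directory, '.py')
    test_files = list(globfind(directory, 'test_*.py'))
    return py_files, test_files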
def unzip(archive, destdir):
import zipfile
if not exists(destdir):
os.mkdir(destdir)
zfobj = zipfile.ZipFile(archive)
for name in zfobj.namelist():
if name.endswith('/'):
os.mkdir(join(destdir, name))
else:
outfile = open(join(destdir, name), 'wb')
outfile.write(zfobj.read(name))
outfile.close()
@deprecated('Use subprocess.Popen instead')
class Execute:
"""This is a deadlock safe version of popen2 (no stdin), that returns
an object with status, out and err attributes.
"""
def __init__(self, command):
cmd = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.out, self.err = cmd.communicate()
self.status = os.WEXITSTATUS(cmd.returncode)
def acquire_lock(lock_file, max_try=10, delay=10, max_delay=3600):
"""Acquire a lock represented by a file on the file system
If the process written in lock file doesn't exist anymore, we remove the
lock file immediately
If age of the lock_file is greater than max_delay, then we raise a UserWarning
"""
count = abs(max_try)
while count:
try:
fd = os.open(lock_file, os.O_EXCL | os.O_RDWR | os.O_CREAT)
os.write(fd, str_to_bytes(str(os.getpid())) )
os.close(fd)
return True
except OSError, e:
if e.errno == errno.EEXIST:
try:
fd = open(lock_file, "r")
pid = int(fd.readline())
pi = ProcInfo(pid)
age = (time.time() - os.stat(lock_file)[stat.ST_MTIME])
if age / max_delay > 1 :
raise UserWarning("Command '%s' (pid %s) has locked the "
"file '%s' for %s minutes"
% (pi.name(), pid, lock_file, age/60))
except UserWarning:
raise
except NoSuchProcess:
os.remove(lock_file)
except Exception:
# The try block above is not essential and can be skipped:
# the ProcInfo object is only available on linux,
# process information may not be accessible,
# or the lock_file may already have been removed.
pass
else:
raise
count -= 1
time.sleep(delay)
else:
raise Exception('Unable to acquire %s' % lock_file)
def release_lock(lock_file):
"""Release a lock represented by a file on the file system."""
os.remove(lock_file)
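# Illustrative sketch (the lock path is made up): guarding a critical
# section with a lock file and always releasing it afterwards.
def _example_locked_section(lock_file='/tmp/example.lock'):
    acquire_lock(lock_file, max_try=3, delay=1)
    try:
        pass  # do the work requiring exclusive access here
    finally:
        release_lock(lock_file)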
class ProgressBar(object):
"""A simple text progression bar."""
def __init__(self, nbops, size=20, stream=sys.stdout, title=''):
if title:
self._fstr = '\r%s [%%-%ss]' % (title, int(size))
else:
self._fstr = '\r[%%-%ss]' % int(size)
self._stream = stream
self._total = nbops
self._size = size
self._current = 0
self._progress = 0
self._current_text = None
self._last_text_write_size = 0
def _get_text(self):
return self._current_text
def _set_text(self, text=None):
if text != self._current_text:
self._current_text = text
self.refresh()
def _del_text(self):
self.text = None
text = property(_get_text, _set_text, _del_text)
def update(self, offset=1, exact=False):
"""Move FORWARD to new cursor position (cursor will never go backward).
:offset: fraction of ``size``
:exact:
- False: offset relative to current cursor position if True
- True: offset as an asbsolute position
"""
if exact:
self._current = offset
else:
self._current += offset
progress = int((float(self._current)/float(self._total))*self._size)
if progress > self._progress:
self._progress = progress
self.refresh()
def refresh(self):
"""Refresh the progression bar display."""
self._stream.write(self._fstr % ('=' * min(self._progress, self._size)) )
if self._last_text_write_size or self._current_text:
template = ' %%-%is' % (self._last_text_write_size)
text = self._current_text
if text is None:
text = ''
self._stream.write(template % text)
self._last_text_write_size = len(text.rstrip())
self._stream.flush()
def finish(self):
self._stream.write('\n')
self._stream.flush()
class DummyProgressBar(object):
__slots__ = ('text',)
def refresh(self):
pass
def update(self, offset=1, exact=False):
pass
def finish(self):
pass
_MARKER = object()
class progress(object):
def __init__(self, nbops=_MARKER, size=_MARKER, stream=_MARKER, title=_MARKER, enabled=True):
self.nbops = nbops
self.size = size
self.stream = stream
self.title = title
self.enabled = enabled
def __enter__(self):
if self.enabled:
kwargs = {}
for attr in ('nbops', 'size', 'stream', 'title'):
value = getattr(self, attr)
if value is not _MARKER:
kwargs[attr] = value
self.pb = ProgressBar(**kwargs)
else:
self.pb = DummyProgressBar()
return self.pb
def __exit__(self, exc_type, exc_val, exc_tb):
self.pb.finish()
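# Illustrative sketch (the title is made up): wrapping a loop with the
# progress() context manager, which yields a ProgressBar (or a
# DummyProgressBar when enabled=False).
def _example_progress(nb_items=100):
    with progress(nbops=nb_items, size=20, title='work') as pbar:
        for _ in xrange(nb_items):
            pbar.update()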
class RawInput(object):
def __init__(self, input=None, printer=None):
self._input = input or raw_input
self._print = printer
def ask(self, question, options, default):
assert default in options
choices = []
for option in options:
if option == default:
label = option[0].upper()
else:
label = option[0].lower()
if len(option) > 1:
label += '(%s)' % option[1:].lower()
choices.append((option, label))
prompt = "%s [%s]: " % (question,
'/'.join([opt[1] for opt in choices]))
tries = 3
while tries > 0:
answer = self._input(prompt).strip().lower()
if not answer:
return default
possible = [option for option, label in choices
if option.lower().startswith(answer)]
if len(possible) == 1:
return possible[0]
elif len(possible) == 0:
msg = '%s is not an option.' % answer
else:
msg = ('%s is an ambiguous answer, do you mean %s ?' % (
answer, ' or '.join(possible)))
if self._print:
self._print(msg)
else:
print msg
tries -= 1
raise Exception('unable to get a sensible answer')
def confirm(self, question, default_is_yes=True):
default = default_is_yes and 'y' or 'n'
answer = self.ask(question, ('y', 'n'), default)
return answer == 'y'
ASK = RawInput()
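# Illustrative sketch (the question text is made up): the module-level ASK
# helper prompts on stdin and returns one of the proposed options.
def _example_confirm_removal():
    if ASK.confirm('Remove generated files?', default_is_yes=False):
        return 'remove'
    return 'keep'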
def getlogin():
"""avoid using os.getlogin() because of strange tty / stdin problems
(man 3 getlogin)
Another solution would be to use $LOGNAME, $USER or $USERNAME
"""
if sys.platform != 'win32':
import pwd # Platforms: Unix
return pwd.getpwuid(os.getuid())[0]
else:
return os.environ['USERNAME']
def generate_password(length=8, vocab=string.ascii_letters + string.digits):
"""dumb password generation function"""
pwd = ''
for i in xrange(length):
pwd += random.choice(vocab)
return pwd
logilab-common-0.61.0/README.Python3
Python3
=======
Approach
--------
We maintain a Python 2 base and use 2to3 to generate Python 3 code.
2to3 is integrated into the distutils installation process and will be run as a
build step when invoked by the python3 interpreter::
python3 setup.py install
Tests
-----
Set your PYTHONPATH and run pytest3 against the test directory.
Debian
------
For the Debian packaging of python3-logilab-common, you can use the debian.sid/
content against the debian/ folder::
cp debian.sid/* debian/
Resources
---------
http://wiki.python.org/moin/PortingPythonToPy3k
logilab-common-0.61.0/__init__.py
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Logilab common library (aka Logilab's extension to the standard library).
:type STD_BLACKLIST: tuple
:var STD_BLACKLIST: directories ignored by default by the functions in
this package which have to recurse into directories
:type IGNORED_EXTENSIONS: tuple
:var IGNORED_EXTENSIONS: file extensions that may usually be ignored
"""
__docformat__ = "restructuredtext en"
from logilab.common.__pkginfo__ import version as __version__
STD_BLACKLIST = ('CVS', '.svn', '.hg', 'debian', 'dist', 'build')
IGNORED_EXTENSIONS = ('.pyc', '.pyo', '.elc', '~', '.swp', '.orig')
# set this to False if you've mx DateTime installed but you don't want your db
# adapter to use it (should be set before you got a connection)
USE_MX_DATETIME = True
class attrdict(dict):
"""A dictionary for which keys are also accessible as attributes."""
def __getattr__(self, attr):
try:
return self[attr]
except KeyError:
raise AttributeError(attr)
class dictattr(dict):
def __init__(self, proxy):
self.__proxy = proxy
def __getitem__(self, attr):
try:
return getattr(self.__proxy, attr)
except AttributeError:
raise KeyError(attr)
class nullobject(object):
def __repr__(self):
return '<nullobject>'
def __nonzero__(self):
return False
class tempattr(object):
def __init__(self, obj, attr, value):
self.obj = obj
self.attr = attr
self.value = value
def __enter__(self):
self.oldvalue = getattr(self.obj, self.attr)
setattr(self.obj, self.attr, self.value)
return self.obj
def __exit__(self, exctype, value, traceback):
setattr(self.obj, self.attr, self.oldvalue)
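# Illustrative sketch (attribute names are made up): attribute-style access
# with attrdict, and temporarily overriding an attribute with tempattr.
def _example_attrdict_tempattr():
    config = attrdict(debug=False)
    with tempattr(config, 'debug', True) as cfg:
        assert cfg.debug is True
    return config.debug   # -> False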
# flatten -----
# XXX move in a specific module and use yield instead
# do not mix flatten and translate
#
# def iterable(obj):
# try: iter(obj)
# except: return False
# return True
#
# def is_string_like(obj):
# try: obj +''
# except (TypeError, ValueError): return False
# return True
#
#def is_scalar(obj):
# return is_string_like(obj) or not iterable(obj)
#
#def flatten(seq):
# for item in seq:
# if is_scalar(item):
# yield item
# else:
# for subitem in flatten(item):
# yield subitem
def flatten(iterable, tr_func=None, results=None):
"""Flatten a list of list with any level.
If tr_func is not None, it should be a one argument function that'll be called
on each final element.
:rtype: list
>>> flatten([1, [2, 3]])
[1, 2, 3]
"""
if results is None:
results = []
for val in iterable:
if isinstance(val, (list, tuple)):
flatten(val, tr_func, results)
elif tr_func is None:
results.append(val)
else:
results.append(tr_func(val))
return results
# XXX is function below still used ?
def make_domains(lists):
"""
Given a list of lists, return a list of domains, one for each list, producing
all combinations of possible values.
:rtype: list
Example:
>>> make_domains([['a', 'b'], ['c', 'd', 'e']])
[['a', 'b', 'a', 'b', 'a', 'b'], ['c', 'c', 'd', 'd', 'e', 'e']]
"""
domains = []
for iterable in lists:
new_domain = iterable[:]
for i in range(len(domains)):
domains[i] = domains[i]*len(iterable)
if domains:
missing = (len(domains[0]) - len(iterable)) / len(iterable)
i = 0
for j in range(len(iterable)):
value = iterable[j]
for dummy in range(missing):
new_domain.insert(i, value)
i += 1
i += 1
domains.append(new_domain)
return domains
# private stuff ################################################################
def _handle_blacklist(blacklist, dirnames, filenames):
"""remove files/directories in the black list
dirnames/filenames are usually from os.walk
"""
for norecurs in blacklist:
if norecurs in dirnames:
dirnames.remove(norecurs)
elif norecurs in filenames:
filenames.remove(norecurs)
logilab-common-0.61.0/tasksqueue.py
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Prioritized tasks queue"""
__docformat__ = "restructuredtext en"
from bisect import insort_left
from Queue import Queue
LOW = 0
MEDIUM = 10
HIGH = 100
PRIORITY = {
'LOW': LOW,
'MEDIUM': MEDIUM,
'HIGH': HIGH,
}
REVERSE_PRIORITY = dict((values, key) for key, values in PRIORITY.iteritems())
class PrioritizedTasksQueue(Queue):
def _init(self, maxsize):
"""Initialize the queue representation"""
self.maxsize = maxsize
# ordered list of task, from the lowest to the highest priority
self.queue = []
def _put(self, item):
"""Put a new item in the queue"""
for i, task in enumerate(self.queue):
# equivalent task
if task == item:
# if new task has a higher priority, remove the one already
# queued so the new priority will be considered
if task < item:
item.merge(task)
del self.queue[i]
break
# else keep it so current order is kept
task.merge(item)
return
insort_left(self.queue, item)
def _get(self):
"""Get an item from the queue"""
return self.queue.pop()
def __iter__(self):
return iter(self.queue)
def remove(self, tid):
"""remove a specific task from the queue"""
# XXX acquire lock
for i, task in enumerate(self):
if task.id == tid:
self.queue.pop(i)
return
raise ValueError('no task of id %s in queue' % tid)
class Task(object):
def __init__(self, tid, priority=LOW):
# task id
self.id = tid
# task priority
self.priority = priority
def __repr__(self):
return '<Task %s @%#x>' % (self.id, id(self))
def __cmp__(self, other):
return cmp(self.priority, other.priority)
def __lt__(self, other):
return self.priority < other.priority
def __eq__(self, other):
return self.id == other.id
__hash__ = object.__hash__
def merge(self, other):
pass
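# Illustrative sketch (task ids are made up): higher priority tasks are
# popped from the queue first.
def _example_task_queue():
    queue = PrioritizedTasksQueue()
    for tid, priority in (('cleanup', LOW), ('backup', HIGH), ('reindex', MEDIUM)):
        queue.put(Task(tid, priority))
    return [queue.get().id for _ in range(3)]   # -> ['backup', 'reindex', 'cleanup']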
logilab-common-0.61.0/sphinxutils.py
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Sphinx utils
ModuleGenerator: Generate a file that lists all the modules of a list of
packages in order to pull all the docstrings.
This should not be used in a makefile to systematically generate sphinx
documentation!
Typical usage:
>>> from logilab.common.sphinxutils import ModuleGenerator
>>> mgen = ModuleGenerator('logilab common', '/home/adim/src/logilab/common')
>>> mgen.generate('api_logilab_common.rst', exclude_dirs=('test',))
"""
import os, sys
import os.path as osp
import inspect
from logilab.common import STD_BLACKLIST
from logilab.common.shellutils import globfind
from logilab.common.modutils import load_module_from_file, modpath_from_file
def module_members(module):
members = []
for name, value in inspect.getmembers(module):
if getattr(value, '__module__', None) == module.__name__:
members.append( (name, value) )
return sorted(members)
def class_members(klass):
return sorted([name for name in vars(klass)
if name not in ('__doc__', '__module__',
'__dict__', '__weakref__')])
class ModuleGenerator:
file_header = """.. -*- coding: utf-8 -*-\n\n%s\n"""
module_def = """
:mod:`%s`
=======%s
.. automodule:: %s
:members: %s
"""
class_def = """
.. autoclass:: %s
:members: %s
"""
def __init__(self, project_title, code_dir):
self.title = project_title
self.code_dir = osp.abspath(code_dir)
def generate(self, dest_file, exclude_dirs=STD_BLACKLIST):
"""make the module file"""
self.fn = open(dest_file, 'w')
num = len(self.title) + 6
title = "=" * num + "\n %s API\n" % self.title + "=" * num
self.fn.write(self.file_header % title)
self.gen_modules(exclude_dirs=exclude_dirs)
self.fn.close()
def gen_modules(self, exclude_dirs):
"""generate all modules"""
for module in self.find_modules(exclude_dirs):
modname = module.__name__
classes = []
modmembers = []
for objname, obj in module_members(module):
if inspect.isclass(obj):
classmembers = class_members(obj)
classes.append( (objname, classmembers) )
else:
modmembers.append(objname)
self.fn.write(self.module_def % (modname, '=' * len(modname),
modname,
', '.join(modmembers)))
for klass, members in classes:
self.fn.write(self.class_def % (klass, ', '.join(members)))
def find_modules(self, exclude_dirs):
basepath = osp.dirname(self.code_dir)
basedir = osp.basename(basepath) + osp.sep
if basedir not in sys.path:
sys.path.insert(1, basedir)
for filepath in globfind(self.code_dir, '*.py', exclude_dirs):
if osp.basename(filepath) in ('setup.py', '__pkginfo__.py'):
continue
try:
module = load_module_from_file(filepath)
except: # module might be broken or magic
dotted_path = modpath_from_file(filepath)
module = type('.'.join(dotted_path), (), {}) # mock it
yield module
if __name__ == '__main__':
# example :
title, code_dir, outfile = sys.argv[1:]
generator = ModuleGenerator(title, code_dir)
# XXX modnames = ['logilab']
generator.generate(outfile, ('test', 'tests', 'examples',
'data', 'doc', '.hg', 'migration'))
logilab-common-0.61.0/compat.py
# pylint: disable=E0601,W0622,W0611
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Wrappers around some builtins introduced in python 2.3, 2.4 and
2.5, making them available for earlier versions of python.
See other compatibility snippets from other projects:
:mod:`lib2to3.fixes`
:mod:`coverage.backward`
:mod:`unittest2.compatibility`
"""
from __future__ import generators
__docformat__ = "restructuredtext en"
import os
import sys
import types
from warnings import warn
import __builtin__ as builtins # 2to3 will transform '__builtin__' to 'builtins'
if sys.version_info < (3, 0):
str_to_bytes = str
def str_encode(string, encoding):
if isinstance(string, unicode):
return string.encode(encoding)
return str(string)
else:
def str_to_bytes(string):
return str.encode(string)
# we have to ignore the encoding in py3k to be able to write a string into a
# TextIOWrapper or similar object (which expects a unicode string)
def str_encode(string, encoding):
return str(string)
# XXX callable built-in seems back in all python versions
try:
callable = builtins.callable
except AttributeError:
from collections import Callable
def callable(something):
return isinstance(something, Callable)
del Callable
# See also http://bugs.python.org/issue11776
if sys.version_info[0] == 3:
def method_type(callable, instance, klass):
# API change: klass is no longer taken into account
return types.MethodType(callable, instance)
else:
# alias types otherwise
method_type = types.MethodType
if sys.version_info < (3, 0):
raw_input = raw_input
else:
raw_input = input
# Pythons 2 and 3 differ on where to get StringIO
if sys.version_info < (3, 0):
from cStringIO import StringIO
FileIO = file
BytesIO = StringIO
reload = reload
else:
from io import FileIO, BytesIO, StringIO
from imp import reload
# Where do pickles come from?
try:
import cPickle as pickle
except ImportError:
import pickle
from logilab.common.deprecation import deprecated
from itertools import izip, chain, imap
if sys.version_info < (3, 0):# 2to3 will remove the imports
izip = deprecated('izip exists in itertools since py2.3')(izip)
imap = deprecated('imap exists in itertools since py2.3')(imap)
chain = deprecated('chain exists in itertools since py2.3')(chain)
sum = deprecated('sum exists in builtins since py2.3')(sum)
enumerate = deprecated('enumerate exists in builtins since py2.3')(enumerate)
frozenset = deprecated('frozenset exists in builtins since py2.4')(frozenset)
reversed = deprecated('reversed exists in builtins since py2.4')(reversed)
sorted = deprecated('sorted exists in builtins since py2.4')(sorted)
max = deprecated('max exists in builtins since py2.4')(max)
# Python2.5 builtins
try:
any = any
all = all
except NameError:
def any(iterable):
"""any(iterable) -> bool
Return True if bool(x) is True for any x in the iterable.
"""
for elt in iterable:
if elt:
return True
return False
def all(iterable):
"""all(iterable) -> bool
Return True if bool(x) is True for all values x in the iterable.
"""
for elt in iterable:
if not elt:
return False
return True
# Python2.5 subprocess added functions and exceptions
try:
from subprocess import Popen
except ImportError:
# gae or python < 2.3
class CalledProcessError(Exception):
"""This exception is raised when a process run by check_call() returns
a non-zero exit status. The exit status will be stored in the
returncode attribute."""
def __init__(self, returncode, cmd):
self.returncode = returncode
self.cmd = cmd
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd,
self.returncode)
def call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
"""
# workaround: subprocess.Popen(cmd, stdout=sys.stdout) fails
# see http://bugs.python.org/issue1531862
if "stdout" in kwargs:
fileno = kwargs.get("stdout").fileno()
del kwargs['stdout']
return Popen(stdout=os.dup(fileno), *popenargs, **kwargs).wait()
return Popen(*popenargs, **kwargs).wait()
def check_call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
"""
retcode = call(*popenargs, **kwargs)
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
if retcode:
raise CalledProcessError(retcode, cmd)
return retcode
try:
from os.path import relpath
except ImportError: # python < 2.6
from os.path import curdir, abspath, sep, commonprefix, pardir, join
def relpath(path, start=curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
# Work out how much of the filepath is shared by start and path.
i = len(commonprefix([start_list, path_list]))
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
# XXX don't know why tests don't pass if I don't do that :
_real_set, set = set, deprecated('set exists in builtins since py2.4')(set)
if (2, 5) <= sys.version_info[:2]:
InheritableSet = _real_set
else:
class InheritableSet(_real_set):
"""hacked resolving inheritancy issue from old style class in 2.4"""
def __new__(cls, *args, **kwargs):
if args:
new_args = (args[0], )
else:
new_args = ()
obj = _real_set.__new__(cls, *new_args)
obj.__init__(*args, **kwargs)
return obj
# XXX shouldn't we remove this and just let 2to3 do its job ?
# range or xrange?
try:
range = xrange
except NameError:
range = range
# ConfigParser was renamed to the more-standard configparser
try:
import configparser
except ImportError:
import ConfigParser as configparser
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
json = None
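# Minimal usage sketch for the wrappers above; it only uses names defined in
# this module (StringIO, any, all, callable, json), so no extra assumptions
# beyond the module itself being importable.
if __name__ == '__main__':
    buf = StringIO()
    buf.write('spam')
    assert any(['', 0, 'spam'])        # one truthy element is enough
    assert not all(['spam', 0])        # one falsy element fails the check
    assert callable(StringIO)          # shim kicks in where the builtin is gone
    if json is not None:               # json is None when neither json nor simplejson exists
        assert json.loads(json.dumps({'data': buf.getvalue()})) == {'data': 'spam'}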
logilab-common-0.61.0/debugger.py 0000644 0000151 0000155 00000015600 12276435602 016164 0 ustar narval narval # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Customized version of pdb's default debugger.
- sets up a history file
- uses ipython if available to colorize lines of code
- overrides list command to search for current block instead
of using 5 lines of context
"""
__docformat__ = "restructuredtext en"
try:
import readline
except ImportError:
readline = None
import os
import os.path as osp
import sys
from pdb import Pdb
from cStringIO import StringIO
import inspect
try:
from IPython import PyColorize
except ImportError:
def colorize(source, *args):
"""fallback colorize function"""
return source
def colorize_source(source, *args):
return source
else:
def colorize(source, start_lineno, curlineno):
"""colorize and annotate source with linenos
(as in pdb's list command)
"""
parser = PyColorize.Parser()
output = StringIO()
parser.format(source, output)
annotated = []
for index, line in enumerate(output.getvalue().splitlines()):
lineno = index + start_lineno
if lineno == curlineno:
annotated.append('%4s\t->\t%s' % (lineno, line))
else:
annotated.append('%4s\t\t%s' % (lineno, line))
return '\n'.join(annotated)
def colorize_source(source):
"""colorize given source"""
parser = PyColorize.Parser()
output = StringIO()
parser.format(source, output)
return output.getvalue()
def getsource(obj):
"""Return the text of the source code for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a single string. An
IOError is raised if the source code cannot be retrieved."""
lines, lnum = inspect.getsourcelines(obj)
return ''.join(lines), lnum
################################################################
class Debugger(Pdb):
"""custom debugger
- sets up a history file
- uses ipython if available to colorize lines of code
- overrides list command to search for current block instead
of using 5 lines of context
"""
def __init__(self, tcbk=None):
Pdb.__init__(self)
self.reset()
if tcbk:
while tcbk.tb_next is not None:
tcbk = tcbk.tb_next
self._tcbk = tcbk
self._histfile = os.path.expanduser("~/.pdbhist")
def setup_history_file(self):
"""if readline is available, read pdb history file
"""
if readline is not None:
try:
# XXX try..except shouldn't be necessary
# read_history_file() can accept None
readline.read_history_file(self._histfile)
except IOError:
pass
def start(self):
"""starts the interactive mode"""
self.interaction(self._tcbk.tb_frame, self._tcbk)
def setup(self, frame, tcbk):
"""setup hook: set up history file"""
self.setup_history_file()
Pdb.setup(self, frame, tcbk)
def set_quit(self):
"""quit hook: save commands in the history file"""
if readline is not None:
readline.write_history_file(self._histfile)
Pdb.set_quit(self)
def complete_p(self, text, line, begin_idx, end_idx):
"""provide variable names completion for the ``p`` command"""
namespace = dict(self.curframe.f_globals)
namespace.update(self.curframe.f_locals)
if '.' in text:
return self.attr_matches(text, namespace)
return [varname for varname in namespace if varname.startswith(text)]
def attr_matches(self, text, namespace):
"""implementation coming from rlcompleter.Completer.attr_matches
Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluatable in self.namespace, it will be evaluated and its attributes
(as revealed by dir()) are used as possible completions. (For class
instances, class members are also considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
import re
m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
if not m:
return
expr, attr = m.group(1, 3)
object = eval(expr, namespace)
words = dir(object)
if hasattr(object, '__class__'):
words.append('__class__')
words = words + self.get_class_members(object.__class__)
matches = []
n = len(attr)
for word in words:
if word[:n] == attr and word != "__builtins__":
matches.append("%s.%s" % (expr, word))
return matches
def get_class_members(self, klass):
"""implementation coming from rlcompleter.get_class_members"""
ret = dir(klass)
if hasattr(klass, '__bases__'):
for base in klass.__bases__:
ret = ret + self.get_class_members(base)
return ret
## specific / overridden commands
def do_list(self, arg):
"""overrides default list command to display the surrounding block
instead of 5 lines of context
"""
self.lastcmd = 'list'
if not arg:
try:
source, start_lineno = getsource(self.curframe)
print colorize(''.join(source), start_lineno,
self.curframe.f_lineno)
except KeyboardInterrupt:
pass
except IOError:
Pdb.do_list(self, arg)
else:
Pdb.do_list(self, arg)
do_l = do_list
def do_open(self, arg):
"""opens source file corresponding to the current stack level"""
filename = self.curframe.f_code.co_filename
lineno = self.curframe.f_lineno
cmd = 'emacsclient --no-wait +%s %s' % (lineno, filename)
os.system(cmd)
do_o = do_open
def pm():
"""use our custom debugger"""
dbg = Debugger(sys.last_traceback)
dbg.start()
def set_trace():
Debugger().set_trace(sys._getframe().f_back)
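# Usage sketch for the post-mortem helper above; failing() is a hypothetical
# placeholder, and the sys.last_traceback assignment mimics what the
# interactive interpreter does after an unhandled exception.
if __name__ == '__main__':
    def failing():
        return 1 / 0
    try:
        failing()
    except ZeroDivisionError:
        # pm() reads sys.last_traceback, which only the interactive interpreter
        # sets automatically, so store the current traceback explicitly first.
        sys.last_traceback = sys.exc_info()[2]
        pm()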
logilab-common-0.61.0/corbautils.py 0000644 0000151 0000155 00000007511 12276435602 016551 0 ustar narval narval # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""A set of utility function to ease the use of OmniORBpy.
"""
__docformat__ = "restructuredtext en"
from omniORB import CORBA, PortableServer
import CosNaming
orb = None
def get_orb():
"""
returns a reference to the ORB.
The first call to this method initializes the ORB.
This method is mainly used internally in the module.
"""
global orb
if orb is None:
import sys
orb = CORBA.ORB_init(sys.argv, CORBA.ORB_ID)
return orb
def get_root_context():
"""
returns a reference to the NameService object.
This method is mainly used internally in the module.
"""
orb = get_orb()
nss = orb.resolve_initial_references("NameService")
rootContext = nss._narrow(CosNaming.NamingContext)
assert rootContext is not None, "Failed to narrow root naming context"
return rootContext
def register_object_name(object, namepath):
"""
Registers an object in the NamingService.
The name path is a list of 2-tuples (id, kind) giving the path.
For instance if the path of an object is [('foo',''),('bar','')],
it is possible to get a reference to the object using the URL
'corbaname::hostname#foo/bar'.
[('logilab','rootmodule'),('chatbot','application'),('chatter','server')]
is mapped to
'corbaname::hostname#logilab.rootmodule/chatbot.application/chatter.server'
The get_object_reference() function can be used to resolve such a URL.
"""
context = get_root_context()
for id, kind in namepath[:-1]:
name = [CosNaming.NameComponent(id, kind)]
try:
context = context.bind_new_context(name)
except CosNaming.NamingContext.AlreadyBound, ex:
context = context.resolve(name)._narrow(CosNaming.NamingContext)
assert context is not None, \
'test context exists but is not a NamingContext'
id, kind = namepath[-1]
name = [CosNaming.NameComponent(id, kind)]
try:
context.bind(name, object._this())
except CosNaming.NamingContext.AlreadyBound, ex:
context.rebind(name, object._this())
def activate_POA():
"""
This method activates the Portable Object Adapter.
You need to call it to enable the reception of messages in your code,
on both the client and the server.
"""
orb = get_orb()
poa = orb.resolve_initial_references('RootPOA')
poaManager = poa._get_the_POAManager()
poaManager.activate()
def run_orb():
"""
Enters the ORB mainloop on the server.
You should not call this method on the client.
"""
get_orb().run()
def get_object_reference(url):
"""
Resolves a corbaname URL to an object proxy.
See register_object_name() for examples URLs
"""
return get_orb().string_to_object(url)
def get_object_string(host, namepath):
"""given an host name and a name path as described in register_object_name,
return a corba string identifier
"""
strname = '/'.join(['.'.join(path_elt) for path_elt in namepath])
return 'corbaname::%s#%s' % (host, strname)
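# Usage sketch for the helpers above; the host name, name path and 'chatter'
# servant are made up. omniORB is already required by the module-level import,
# and the commented calls additionally assume a reachable CORBA name service.
if __name__ == '__main__':
    namepath = [('logilab', 'rootmodule'), ('chatbot', 'application'),
                ('chatter', 'server')]
    url = get_object_string('localhost', namepath)
    assert url == ('corbaname::localhost#'
                   'logilab.rootmodule/chatbot.application/chatter.server')
    # server side (sketch):
    #     activate_POA()
    #     register_object_name(chatter, namepath)
    #     run_orb()
    # client side (sketch):
    #     proxy = get_object_reference(url)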
logilab-common-0.61.0/setup.cfg 0000644 0000151 0000155 00000000140 12276435602 015640 0 ustar narval narval [bdist_rpm]
packager = Sylvain Thenault
provides = logilab.common
logilab-common-0.61.0/__pkginfo__.py 0000644 0000151 0000155 00000003537 12276435602 016637 0 ustar narval narval # copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""logilab.common packaging information"""
__docformat__ = "restructuredtext en"
import sys
import os
distname = 'logilab-common'
modname = 'common'
subpackage_of = 'logilab'
subpackage_master = True
numversion = (0, 61, 0)
version = '.'.join([str(num) for num in numversion])
license = 'LGPL' # 2.1 or later
description = "collection of low-level Python packages and modules used by Logilab projects"
web = "http://www.logilab.org/project/%s" % distname
mailinglist = "mailto://python-projects@lists.logilab.org"
author = "Logilab"
author_email = "contact@logilab.fr"
from os.path import join
scripts = [join('bin', 'pytest')]
include_dirs = [join('test', 'data')]
install_requires = []
if sys.version_info < (2, 7):
install_requires.append('unittest2 >= 0.5.1')
if os.name == 'nt':
install_requires.append('colorama')
classifiers = ["Topic :: Utilities",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
]
logilab-common-0.61.0/urllib2ext.py 0000644 0000151 0000155 00000006456 12276435602 016505 0 ustar narval narval import logging
import urllib2
import kerberos as krb
class GssapiAuthError(Exception):
"""raised on error during authentication process"""
import re
RGX = re.compile('(?:.*,)*\s*Negotiate\s*([^,]*),?', re.I)
def get_negociate_value(headers):
for authreq in headers.getheaders('www-authenticate'):
match = RGX.search(authreq)
if match:
return match.group(1)
class HTTPGssapiAuthHandler(urllib2.BaseHandler):
"""Negotiate HTTP authentication using context from GSSAPI"""
handler_order = 400 # before Digest Auth
def __init__(self):
self._reset()
def _reset(self):
self._retried = 0
self._context = None
def clean_context(self):
if self._context is not None:
krb.authGSSClientClean(self._context)
def http_error_401(self, req, fp, code, msg, headers):
try:
if self._retried > 5:
raise urllib2.HTTPError(req.get_full_url(), 401,
"negotiate auth failed", headers, None)
self._retried += 1
logging.debug('gssapi handler, try %s' % self._retried)
negotiate = get_negociate_value(headers)
if negotiate is None:
logging.debug('no negociate found in a www-authenticate header')
return None
logging.debug('HTTPGssapiAuthHandler: negotiate 1 is %r' % negotiate)
result, self._context = krb.authGSSClientInit("HTTP@%s" % req.get_host())
if result < 1:
raise GssapiAuthError("HTTPGssapiAuthHandler: init failed with %d" % result)
result = krb.authGSSClientStep(self._context, negotiate)
if result < 0:
raise GssapiAuthError("HTTPGssapiAuthHandler: step 1 failed with %d" % result)
client_response = krb.authGSSClientResponse(self._context)
logging.debug('HTTPGssapiAuthHandler: client response is %s...' % client_response[:10])
req.add_unredirected_header('Authorization', "Negotiate %s" % client_response)
server_response = self.parent.open(req)
negotiate = get_negociate_value(server_response.info())
if negotiate is None:
logging.warning('HTTPGssapiAuthHandler: failed to authenticate server')
else:
logging.debug('HTTPGssapiAuthHandler negotiate 2: %s' % negotiate)
result = krb.authGSSClientStep(self._context, negotiate)
if result < 1:
raise GssapiAuthError("HTTPGssapiAuthHandler: step 2 failed with %d" % result)
return server_response
except GssapiAuthError, exc:
logging.error(repr(exc))
finally:
self.clean_context()
self._reset()
if __name__ == '__main__':
import sys
# debug
import httplib
httplib.HTTPConnection.debuglevel = 1
httplib.HTTPSConnection.debuglevel = 1
# debug
import logging
logging.basicConfig(level=logging.DEBUG)
# handle cookies
import cookielib
cj = cookielib.CookieJar()
ch = urllib2.HTTPCookieProcessor(cj)
# test with url sys.argv[1]
h = HTTPGssapiAuthHandler()
response = urllib2.build_opener(h, ch).open(sys.argv[1])
print '\nresponse: %s\n--------------\n' % response.code, response.info()
logilab-common-0.61.0/pytest.py 0000644 0000151 0000155 00000131653 12276435602 015737 0 ustar narval narval # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""pytest is a tool that eases test running and debugging.
To be able to use pytest, you should either write tests using
logilab.common.testlib's framework or the unittest module from
Python's standard library.
You can customize pytest's behaviour by defining a ``pytestconf.py`` file
somewhere in your test directory. In this file, you can add options or
change the way tests are run.
To add command line options, you must define an ``update_parser`` function in
your ``pytestconf.py`` file. The function must accept a single parameter
that will be the OptionParser's instance to customize.
If you wish to customize the tester, you'll have to define a class named
``CustomPyTester``. This class should extend the default `PyTester` class
defined in the pytest module. Take a look at the `PyTester` and `DjangoTester`
classes for more information about what can be done.
For instance, if you wish to add a custom -l option to specify a loglevel, you
could define the following ``pytestconf.py`` file ::
import logging
from logilab.common.pytest import PyTester
def update_parser(parser):
parser.add_option('-l', '--loglevel', dest='loglevel', action='store',
choices=('debug', 'info', 'warning', 'error', 'critical'),
default='critical', help="the default log level possible choices are "
"('debug', 'info', 'warning', 'error', 'critical')")
return parser
class CustomPyTester(PyTester):
def __init__(self, cvg, options):
super(CustomPyTester, self).__init__(cvg, options)
loglevel = options.loglevel.upper()
logger = logging.getLogger('erudi')
logger.setLevel(logging.getLevelName(loglevel))
In your TestCase class you can then get the value of a specific option with
the ``optval`` method::
class MyTestCase(TestCase):
def test_foo(self):
loglevel = self.optval('loglevel')
# ...
You can also tag your tests for fine filtering
With those tags::
from logilab.common.testlib import tag, TestCase
class Exemple(TestCase):
@tag('rouge', 'carre')
def toto(self):
pass
@tag('carre', 'vert')
def tata(self):
pass
@tag('rouge')
def titi(test):
pass
you can then filter the tests to run with a simple python expression
* ``toto`` and ``titi`` match ``rouge``
* ``toto``, ``tata`` and ``titi`` match ``rouge or carre``
* ``tata`` and ``titi`` match ``rouge ^ carre``
* ``titi`` matches ``rouge and not carre``
"""
__docformat__ = "restructuredtext en"
PYTEST_DOC = """%prog [OPTIONS] [testfile [testpattern]]
examples:
pytest path/to/mytests.py
pytest path/to/mytests.py TheseTests
pytest path/to/mytests.py TheseTests.test_thisone
pytest path/to/mytests.py -m '(not long and database) or regr'
pytest one (will run both test_thisone and test_thatone)
pytest path/to/mytests.py -s not (will skip test_notthisone)
pytest --coverage test_foo.py
(only if logilab.devtools is available)
"""
ENABLE_DBC = False
FILE_RESTART = ".pytest.restart"
import os, sys, re
import os.path as osp
from time import time, clock
import warnings
import types
from logilab.common.fileutils import abspath_listdir
from logilab.common import textutils
from logilab.common import testlib, STD_BLACKLIST
# use the same unittest module as testlib
from logilab.common.testlib import unittest, start_interactive_mode
from logilab.common.compat import any
import doctest
import unittest as unittest_legacy
if not getattr(unittest_legacy, "__package__", None):
try:
import unittest2.suite as unittest_suite
except ImportError:
sys.exit("You have to install python-unittest2 to use this module")
else:
import unittest.suite as unittest_suite
try:
import django
from logilab.common.modutils import modpath_from_file, load_module_from_modpath
DJANGO_FOUND = True
except ImportError:
DJANGO_FOUND = False
CONF_FILE = 'pytestconf.py'
## coverage hacks, do not read this, do not read this, do not read this
# hey, but this is an aspect, right ?!!!
class TraceController(object):
nesting = 0
def pause_tracing(cls):
if not cls.nesting:
cls.tracefunc = staticmethod(getattr(sys, '__settrace__', sys.settrace))
cls.oldtracer = getattr(sys, '__tracer__', None)
sys.__notrace__ = True
cls.tracefunc(None)
cls.nesting += 1
pause_tracing = classmethod(pause_tracing)
def resume_tracing(cls):
cls.nesting -= 1
assert cls.nesting >= 0
if not cls.nesting:
cls.tracefunc(cls.oldtracer)
delattr(sys, '__notrace__')
resume_tracing = classmethod(resume_tracing)
pause_tracing = TraceController.pause_tracing
resume_tracing = TraceController.resume_tracing
def nocoverage(func):
if hasattr(func, 'uncovered'):
return func
func.uncovered = True
def not_covered(*args, **kwargs):
pause_tracing()
try:
return func(*args, **kwargs)
finally:
resume_tracing()
not_covered.uncovered = True
return not_covered
## end of coverage hacks
TESTFILE_RE = re.compile("^((unit)?test.*|smoketest)\.py$")
def this_is_a_testfile(filename):
"""returns True if `filename` seems to be a test file"""
return TESTFILE_RE.match(osp.basename(filename))
TESTDIR_RE = re.compile("^(unit)?tests?$")
def this_is_a_testdir(dirpath):
"""returns True if `filename` seems to be a test directory"""
return TESTDIR_RE.match(osp.basename(dirpath))
def load_pytest_conf(path, parser):
"""loads a ``pytestconf.py`` file and update default parser
and / or tester.
"""
namespace = {}
execfile(path, namespace)
if 'update_parser' in namespace:
namespace['update_parser'](parser)
return namespace.get('CustomPyTester', PyTester)
def project_root(parser, projdir=os.getcwd()):
"""try to find project's root and add it to sys.path"""
previousdir = curdir = osp.abspath(projdir)
testercls = PyTester
conf_file_path = osp.join(curdir, CONF_FILE)
if osp.isfile(conf_file_path):
testercls = load_pytest_conf(conf_file_path, parser)
while this_is_a_testdir(curdir) or \
osp.isfile(osp.join(curdir, '__init__.py')):
newdir = osp.normpath(osp.join(curdir, os.pardir))
if newdir == curdir:
break
previousdir = curdir
curdir = newdir
conf_file_path = osp.join(curdir, CONF_FILE)
if osp.isfile(conf_file_path):
testercls = load_pytest_conf(conf_file_path, parser)
return previousdir, testercls
class GlobalTestReport(object):
"""this class holds global test statistics"""
def __init__(self):
self.ran = 0
self.skipped = 0
self.failures = 0
self.errors = 0
self.ttime = 0
self.ctime = 0
self.modulescount = 0
self.errmodules = []
def feed(self, filename, testresult, ttime, ctime):
"""integrates new test information into internal statistics"""
ran = testresult.testsRun
self.ran += ran
self.skipped += len(getattr(testresult, 'skipped', ()))
self.failures += len(testresult.failures)
self.errors += len(testresult.errors)
self.ttime += ttime
self.ctime += ctime
self.modulescount += 1
if not testresult.wasSuccessful():
problems = len(testresult.failures) + len(testresult.errors)
self.errmodules.append((filename[:-3], problems, ran))
def failed_to_test_module(self, filename):
"""called when the test module could not be imported by unittest
"""
self.errors += 1
self.modulescount += 1
self.ran += 1
self.errmodules.append((filename[:-3], 1, 1))
def skip_module(self, filename):
self.modulescount += 1
self.ran += 1
self.errmodules.append((filename[:-3], 0, 0))
def __str__(self):
"""this is just presentation stuff"""
line1 = ['Ran %s test cases in %.2fs (%.2fs CPU)'
% (self.ran, self.ttime, self.ctime)]
if self.errors:
line1.append('%s errors' % self.errors)
if self.failures:
line1.append('%s failures' % self.failures)
if self.skipped:
line1.append('%s skipped' % self.skipped)
modulesok = self.modulescount - len(self.errmodules)
if self.errors or self.failures:
line2 = '%s modules OK (%s failed)' % (modulesok,
len(self.errmodules))
descr = ', '.join(['%s [%s/%s]' % info for info in self.errmodules])
line3 = '\nfailures: %s' % descr
elif modulesok:
line2 = 'All %s modules OK' % modulesok
line3 = ''
else:
return ''
return '%s\n%s%s' % (', '.join(line1), line2, line3)
def remove_local_modules_from_sys(testdir):
"""remove all modules from cache that come from `testdir`
This is used to avoid strange side-effects when using the
testall() mode of pytest.
For instance, if we run pytest on this tree::
A/test/test_utils.py
B/test/test_utils.py
we **have** to clean sys.modules to make sure the correct test_utils
module is run in B
"""
for modname, mod in sys.modules.items():
if mod is None:
continue
if not hasattr(mod, '__file__'):
# this is the case of some built-in modules like sys, imp, marshal
continue
modfile = mod.__file__
# if modfile is not an absolute path, it was probably loaded locally
# during the tests
if not osp.isabs(modfile) or modfile.startswith(testdir):
del sys.modules[modname]
class PyTester(object):
"""encapsulates testrun logic"""
def __init__(self, cvg, options):
self.report = GlobalTestReport()
self.cvg = cvg
self.options = options
self.firstwrite = True
self._errcode = None
def show_report(self):
"""prints the report and returns appropriate exitcode"""
# everything has been run, print report
print "*" * 79
print self.report
def get_errcode(self):
# errcode set explicitly
if self._errcode is not None:
return self._errcode
return self.report.failures + self.report.errors
def set_errcode(self, errcode):
self._errcode = errcode
errcode = property(get_errcode, set_errcode)
def testall(self, exitfirst=False):
"""walks through current working directory, finds something
which can be considered as a testdir and runs every test there
"""
here = os.getcwd()
for dirname, dirs, _ in os.walk(here):
for skipped in STD_BLACKLIST:
if skipped in dirs:
dirs.remove(skipped)
basename = osp.basename(dirname)
if this_is_a_testdir(basename):
print "going into", dirname
# we found a testdir, let's explore it !
if not self.testonedir(dirname, exitfirst):
break
dirs[:] = []
if self.report.ran == 0:
print "no test dir found testing here:", here
# if no test was found during the visit, consider
# the local directory as a test directory even if
# it doesn't have a traditional test directory name
self.testonedir(here)
def testonedir(self, testdir, exitfirst=False):
"""finds each testfile in the `testdir` and runs it
return True when all tests have been executed, False if exitfirst and
some test has failed.
"""
for filename in abspath_listdir(testdir):
if this_is_a_testfile(filename):
if self.options.exitfirst and not self.options.restart:
# overwrite restart file
try:
restartfile = open(FILE_RESTART, "w")
restartfile.close()
except Exception, e:
print >> sys.__stderr__, "Error while overwriting \
succeeded test file :", osp.join(os.getcwd(), FILE_RESTART)
raise e
# run test and collect information
prog = self.testfile(filename, batchmode=True)
if exitfirst and (prog is None or not prog.result.wasSuccessful()):
return False
self.firstwrite = True
# clean local modules
remove_local_modules_from_sys(testdir)
return True
def testfile(self, filename, batchmode=False):
"""runs every test in `filename`
:param filename: an absolute path pointing to a unittest file
"""
here = os.getcwd()
dirname = osp.dirname(filename)
if dirname:
os.chdir(dirname)
# overwrite restart file if it has not been done already
if self.options.exitfirst and not self.options.restart and self.firstwrite:
try:
restartfile = open(FILE_RESTART, "w")
restartfile.close()
except Exception, e:
print >> sys.__stderr__, "Error while overwriting \
succeeded test file :", osp.join(os.getcwd(), FILE_RESTART)
raise e
modname = osp.basename(filename)[:-3]
try:
print >> sys.stderr, (' %s ' % osp.basename(filename)).center(70, '=')
except TypeError: # < py 2.4 bw compat
print >> sys.stderr, (' %s ' % osp.basename(filename)).center(70)
try:
tstart, cstart = time(), clock()
try:
testprog = SkipAwareTestProgram(modname, batchmode=batchmode, cvg=self.cvg,
options=self.options, outstream=sys.stderr)
except KeyboardInterrupt:
raise
except SystemExit, exc:
self.errcode = exc.code
raise
except testlib.SkipTest:
print "Module skipped:", filename
self.report.skip_module(filename)
return None
except Exception:
self.report.failed_to_test_module(filename)
print >> sys.stderr, 'unhandled exception occurred while testing', modname
import traceback
traceback.print_exc(file=sys.stderr)
return None
tend, cend = time(), clock()
ttime, ctime = (tend - tstart), (cend - cstart)
self.report.feed(filename, testprog.result, ttime, ctime)
return testprog
finally:
if dirname:
os.chdir(here)
class DjangoTester(PyTester):
def load_django_settings(self, dirname):
"""try to find project's setting and load it"""
curdir = osp.abspath(dirname)
previousdir = curdir
while not osp.isfile(osp.join(curdir, 'settings.py')) and \
osp.isfile(osp.join(curdir, '__init__.py')):
newdir = osp.normpath(osp.join(curdir, os.pardir))
if newdir == curdir:
raise AssertionError('could not find settings.py')
previousdir = curdir
curdir = newdir
# late django initialization
settings = load_module_from_modpath(modpath_from_file(osp.join(curdir, 'settings.py')))
from django.core.management import setup_environ
setup_environ(settings)
settings.DEBUG = False
self.settings = settings
# add settings dir to pythonpath since it's the project's root
if curdir not in sys.path:
sys.path.insert(1, curdir)
def before_testfile(self):
# Those imports must be done **after** setup_environ was called
from django.test.utils import setup_test_environment
from django.test.utils import create_test_db
setup_test_environment()
create_test_db(verbosity=0)
self.dbname = self.settings.TEST_DATABASE_NAME
def after_testfile(self):
# Those imports must be done **after** setup_environ was called
from django.test.utils import teardown_test_environment
from django.test.utils import destroy_test_db
teardown_test_environment()
print 'destroying', self.dbname
destroy_test_db(self.dbname, verbosity=0)
def testall(self, exitfirst=False):
"""walks through current working directory, finds something
which can be considered as a testdir and runs every test there
"""
for dirname, dirs, files in os.walk(os.getcwd()):
for skipped in ('CVS', '.svn', '.hg'):
if skipped in dirs:
dirs.remove(skipped)
if 'tests.py' in files:
if not self.testonedir(dirname, exitfirst):
break
dirs[:] = []
else:
basename = osp.basename(dirname)
if basename in ('test', 'tests'):
print "going into", dirname
# we found a testdir, let's explore it !
if not self.testonedir(dirname, exitfirst):
break
dirs[:] = []
def testonedir(self, testdir, exitfirst=False):
"""finds each testfile in the `testdir` and runs it
return True when all tests have been executed, False if exitfirst and
some test has failed.
"""
# special django behaviour: if tests are split into several files,
# remove the main tests.py file and test each test file separately
testfiles = [fpath for fpath in abspath_listdir(testdir)
if this_is_a_testfile(fpath)]
if len(testfiles) > 1:
try:
testfiles.remove(osp.join(testdir, 'tests.py'))
except ValueError:
pass
for filename in testfiles:
# run test and collect information
prog = self.testfile(filename, batchmode=True)
if exitfirst and (prog is None or not prog.result.wasSuccessful()):
return False
# clean local modules
remove_local_modules_from_sys(testdir)
return True
def testfile(self, filename, batchmode=False):
"""runs every test in `filename`
:param filename: an absolute path pointing to a unittest file
"""
here = os.getcwd()
dirname = osp.dirname(filename)
if dirname:
os.chdir(dirname)
self.load_django_settings(dirname)
modname = osp.basename(filename)[:-3]
print >>sys.stderr, (' %s ' % osp.basename(filename)).center(70, '=')
try:
try:
tstart, cstart = time(), clock()
self.before_testfile()
testprog = SkipAwareTestProgram(modname, batchmode=batchmode, cvg=self.cvg)
tend, cend = time(), clock()
ttime, ctime = (tend - tstart), (cend - cstart)
self.report.feed(filename, testprog.result, ttime, ctime)
return testprog
except SystemExit:
raise
except Exception, exc:
import traceback
traceback.print_exc()
self.report.failed_to_test_module(filename)
print 'unhandled exception occurred while testing', modname
print 'error: %s' % exc
return None
finally:
self.after_testfile()
if dirname:
os.chdir(here)
def make_parser():
"""creates the OptionParser instance
"""
from optparse import OptionParser
parser = OptionParser(usage=PYTEST_DOC)
parser.newargs = []
def rebuild_cmdline(option, opt, value, parser):
"""carry the option to unittest_main"""
parser.newargs.append(opt)
def rebuild_and_store(option, opt, value, parser):
"""carry the option to unittest_main and store
the value on current parser
"""
parser.newargs.append(opt)
setattr(parser.values, option.dest, True)
def capture_and_rebuild(option, opt, value, parser):
warnings.simplefilter('ignore', DeprecationWarning)
rebuild_cmdline(option, opt, value, parser)
# pytest options
parser.add_option('-t', dest='testdir', default=None,
help="directory where the tests will be found")
parser.add_option('-d', dest='dbc', default=False,
action="store_true", help="enable design-by-contract")
# unittest_main options provided and passed through pytest
parser.add_option('-v', '--verbose', callback=rebuild_cmdline,
action="callback", help="Verbose output")
parser.add_option('-i', '--pdb', callback=rebuild_and_store,
dest="pdb", action="callback",
help="Enable test failure inspection (conflicts with --coverage)")
parser.add_option('-x', '--exitfirst', callback=rebuild_and_store,
dest="exitfirst", default=False,
action="callback", help="Exit on first failure "
"(only make sense when pytest run one test file)")
parser.add_option('-R', '--restart', callback=rebuild_and_store,
dest="restart", default=False,
action="callback",
help="Restart tests from where it failed (implies exitfirst) "
"(only make sense if tests previously ran with exitfirst only)")
parser.add_option('--color', callback=rebuild_cmdline,
action="callback",
help="colorize tracebacks")
parser.add_option('-s', '--skip',
# XXX: I wish I could use the callback action but it
# doesn't seem to be able to get the value
# associated to the option
action="store", dest="skipped", default=None,
help="test names matching this name will be skipped "
"to skip several patterns, use commas")
parser.add_option('-q', '--quiet', callback=rebuild_cmdline,
action="callback", help="Minimal output")
parser.add_option('-P', '--profile', default=None, dest='profile',
help="Profile execution and store data in the given file")
parser.add_option('-m', '--match', default=None, dest='tags_pattern',
help="only execute test whose tag match the current pattern")
try:
from logilab.devtools.lib.coverage import Coverage
parser.add_option('--coverage', dest="coverage", default=False,
action="store_true",
help="run tests with pycoverage (conflicts with --pdb)")
except ImportError:
pass
if DJANGO_FOUND:
parser.add_option('-J', '--django', dest='django', default=False,
action="store_true",
help='use pytest for django test cases')
return parser
def parseargs(parser):
"""Parse the command line and return (options processed), (options to pass to
unittest_main()), (explicitfile or None).
"""
# parse the command line
options, args = parser.parse_args()
if options.pdb and getattr(options, 'coverage', False):
parser.error("'pdb' and 'coverage' options are exclusive")
filenames = [arg for arg in args if arg.endswith('.py')]
if filenames:
if len(filenames) > 1:
parser.error("only one filename is acceptable")
explicitfile = filenames[0]
args.remove(explicitfile)
else:
explicitfile = None
# someone wants DBC
testlib.ENABLE_DBC = options.dbc
newargs = parser.newargs
if options.skipped:
newargs.extend(['--skip', options.skipped])
# restart implies exitfirst
if options.restart:
options.exitfirst = True
# append additional args to the new sys.argv and let unittest_main
# do the rest
newargs += args
return options, explicitfile
def run():
parser = make_parser()
rootdir, testercls = project_root(parser)
options, explicitfile = parseargs(parser)
# mock a new command line
sys.argv[1:] = parser.newargs
covermode = getattr(options, 'coverage', None)
cvg = None
if not '' in sys.path:
sys.path.insert(0, '')
if covermode:
# control_import_coverage(rootdir)
from logilab.devtools.lib.coverage import Coverage
cvg = Coverage([rootdir])
cvg.erase()
cvg.start()
if DJANGO_FOUND and options.django:
tester = DjangoTester(cvg, options)
else:
tester = testercls(cvg, options)
if explicitfile:
cmd, args = tester.testfile, (explicitfile,)
elif options.testdir:
cmd, args = tester.testonedir, (options.testdir, options.exitfirst)
else:
cmd, args = tester.testall, (options.exitfirst,)
try:
try:
if options.profile:
import hotshot
prof = hotshot.Profile(options.profile)
prof.runcall(cmd, *args)
prof.close()
print 'profile data saved in', options.profile
else:
cmd(*args)
except SystemExit:
raise
except:
import traceback
traceback.print_exc()
finally:
if covermode:
cvg.stop()
cvg.save()
tester.show_report()
if covermode:
print 'coverage information stored, use it with pycoverage -ra'
sys.exit(tester.errcode)
class SkipAwareTestProgram(unittest.TestProgram):
# XXX: don't try to stay close to unittest.py, use optparse
USAGE = """\
Usage: %(progName)s [options] [test] [...]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-i, --pdb Enable test failure inspection
-x, --exitfirst Exit on first failure
-s, --skip skip test matching this pattern (no regexp for now)
-q, --quiet Minimal output
--color colorize tracebacks
-m, --match Run only test whose tag match this pattern
-P, --profile FILE: Run the tests using cProfile and saving results
in FILE
Examples:
%(progName)s - run default set of tests
%(progName)s MyTestSuite - run suite 'MyTestSuite'
%(progName)s MyTestCase.testSomething - run MyTestCase.testSomething
%(progName)s MyTestCase - run all 'test*' test methods
in MyTestCase
"""
def __init__(self, module='__main__', defaultTest=None, batchmode=False,
cvg=None, options=None, outstream=sys.stderr):
self.batchmode = batchmode
self.cvg = cvg
self.options = options
self.outstream = outstream
super(SkipAwareTestProgram, self).__init__(
module=module, defaultTest=defaultTest,
testLoader=NonStrictTestLoader())
def parseArgs(self, argv):
self.pdbmode = False
self.exitfirst = False
self.skipped_patterns = []
self.test_pattern = None
self.tags_pattern = None
self.colorize = False
self.profile_name = None
import getopt
try:
options, args = getopt.getopt(argv[1:], 'hHvixrqcp:s:m:P:',
['help', 'verbose', 'quiet', 'pdb',
'exitfirst', 'restart',
'skip=', 'color', 'match=', 'profile='])
for opt, value in options:
if opt in ('-h', '-H', '--help'):
self.usageExit()
if opt in ('-i', '--pdb'):
self.pdbmode = True
if opt in ('-x', '--exitfirst'):
self.exitfirst = True
if opt in ('-r', '--restart'):
self.restart = True
self.exitfirst = True
if opt in ('-q', '--quiet'):
self.verbosity = 0
if opt in ('-v', '--verbose'):
self.verbosity = 2
if opt in ('-s', '--skip'):
self.skipped_patterns = [pat.strip() for pat in
value.split(', ')]
if opt == '--color':
self.colorize = True
if opt in ('-m', '--match'):
#self.tags_pattern = value
self.options["tag_pattern"] = value
if opt in ('-P', '--profile'):
self.profile_name = value
self.testLoader.skipped_patterns = self.skipped_patterns
if len(args) == 0 and self.defaultTest is None:
suitefunc = getattr(self.module, 'suite', None)
if isinstance(suitefunc, (types.FunctionType,
types.MethodType)):
self.test = self.module.suite()
else:
self.test = self.testLoader.loadTestsFromModule(self.module)
return
if len(args) > 0:
self.test_pattern = args[0]
self.testNames = args
else:
self.testNames = (self.defaultTest, )
self.createTests()
except getopt.error, msg:
self.usageExit(msg)
def runTests(self):
if self.profile_name:
import cProfile
cProfile.runctx('self._runTests()', globals(), locals(), self.profile_name )
else:
return self._runTests()
def _runTests(self):
self.testRunner = SkipAwareTextTestRunner(verbosity=self.verbosity,
stream=self.outstream,
exitfirst=self.exitfirst,
pdbmode=self.pdbmode,
cvg=self.cvg,
test_pattern=self.test_pattern,
skipped_patterns=self.skipped_patterns,
colorize=self.colorize,
batchmode=self.batchmode,
options=self.options)
def removeSucceededTests(obj, succTests):
""" Recursive function that removes succTests from
a TestSuite or TestCase
"""
if isinstance(obj, unittest.TestSuite):
removeSucceededTests(obj._tests, succTests)
if isinstance(obj, list):
for el in obj[:]:
if isinstance(el, unittest.TestSuite):
removeSucceededTests(el, succTests)
elif isinstance(el, unittest.TestCase):
descr = '.'.join((el.__class__.__module__,
el.__class__.__name__,
el._testMethodName))
if descr in succTests:
obj.remove(el)
# take care, self.options may be None
if getattr(self.options, 'restart', False):
# retrieve succeeded tests from FILE_RESTART
try:
restartfile = open(FILE_RESTART, 'r')
try:
succeededtests = list(elem.rstrip('\n\r') for elem in
restartfile.readlines())
removeSucceededTests(self.test, succeededtests)
finally:
restartfile.close()
except Exception, ex:
raise Exception("Error while reading succeeded tests into %s: %s"
% (osp.join(os.getcwd(), FILE_RESTART), ex))
result = self.testRunner.run(self.test)
# help garbage collection: we want the TestSuite, which holds refs to every
# executed TestCase, to be gc'ed
del self.test
if getattr(result, "debuggers", None) and \
getattr(self, "pdbmode", None):
start_interactive_mode(result)
if not getattr(self, "batchmode", None):
sys.exit(not result.wasSuccessful())
self.result = result
class SkipAwareTextTestRunner(unittest.TextTestRunner):
def __init__(self, stream=sys.stderr, verbosity=1,
exitfirst=False, pdbmode=False, cvg=None, test_pattern=None,
skipped_patterns=(), colorize=False, batchmode=False,
options=None):
super(SkipAwareTextTestRunner, self).__init__(stream=stream,
verbosity=verbosity)
self.exitfirst = exitfirst
self.pdbmode = pdbmode
self.cvg = cvg
self.test_pattern = test_pattern
self.skipped_patterns = skipped_patterns
self.colorize = colorize
self.batchmode = batchmode
self.options = options
def _this_is_skipped(self, testedname):
return any([(pat in testedname) for pat in self.skipped_patterns])
def _runcondition(self, test, skipgenerator=True):
if isinstance(test, testlib.InnerTest):
testname = test.name
else:
if isinstance(test, testlib.TestCase):
meth = test._get_test_method()
func = meth.im_func
testname = '%s.%s' % (meth.im_class.__name__, func.__name__)
elif isinstance(test, types.FunctionType):
func = test
testname = func.__name__
elif isinstance(test, types.MethodType):
func = test.im_func
testname = '%s.%s' % (test.im_class.__name__, func.__name__)
else:
return True # Not sure when this happens
if testlib.is_generator(test) and skipgenerator:
return self.does_match_tags(test) # Let inner tests decide at run time
if self._this_is_skipped(testname):
return False # this was explicitly skipped
if self.test_pattern is not None:
try:
classpattern, testpattern = self.test_pattern.split('.')
klass, name = testname.split('.')
if classpattern not in klass or testpattern not in name:
return False
except ValueError:
if self.test_pattern not in testname:
return False
return self.does_match_tags(test)
def does_match_tags(self, test):
if self.options is not None:
tags_pattern = getattr(self.options, 'tags_pattern', None)
if tags_pattern is not None:
tags = getattr(test, 'tags', testlib.Tags())
if tags.inherit and isinstance(test, types.MethodType):
tags = tags | getattr(test.im_class, 'tags', testlib.Tags())
return tags.match(tags_pattern)
return True # no pattern
def _makeResult(self):
return testlib.SkipAwareTestResult(self.stream, self.descriptions,
self.verbosity, self.exitfirst,
self.pdbmode, self.cvg, self.colorize)
def run(self, test):
"Run the given test case or test suite."
result = self._makeResult()
startTime = time()
test(result, runcondition=self._runcondition, options=self.options)
stopTime = time()
timeTaken = stopTime - startTime
result.printErrors()
if not self.batchmode:
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
if not result.wasSuccessful():
if self.colorize:
self.stream.write(textutils.colorize_ansi("FAILED", color='red'))
else:
self.stream.write("FAILED")
else:
if self.colorize:
self.stream.write(textutils.colorize_ansi("OK", color='green'))
else:
self.stream.write("OK")
failed, errored, skipped = map(len, (result.failures,
result.errors,
result.skipped))
det_results = []
for name, value in (("failures", result.failures),
("errors",result.errors),
("skipped", result.skipped)):
if value:
det_results.append("%s=%i" % (name, len(value)))
if det_results:
self.stream.write(" (")
self.stream.write(', '.join(det_results))
self.stream.write(")")
self.stream.writeln("")
return result
class NonStrictTestLoader(unittest.TestLoader):
"""
Overrides default testloader to be able to omit classname when
specifying tests to run on command line.
For example, if the file test_foo.py contains ::
class FooTC(TestCase):
def test_foo1(self): # ...
def test_foo2(self): # ...
def test_bar1(self): # ...
class BarTC(TestCase):
def test_bar2(self): # ...
'python test_foo.py' will run all the tests in FooTC and BarTC
'python test_foo.py FooTC' will run the 3 tests in FooTC
'python test_foo.py test_foo' will run test_foo1 and test_foo2
'python test_foo.py test_foo1' will run test_foo1
'python test_foo.py test_bar' will run FooTC.test_bar1 and BarTC.test_bar2
"""
def __init__(self):
self.skipped_patterns = ()
# some magic here to accept empty list by extending
# and to provide callable capability
def loadTestsFromNames(self, names, module=None):
suites = []
for name in names:
suites.extend(self.loadTestsFromName(name, module))
return self.suiteClass(suites)
def _collect_tests(self, module):
tests = {}
for obj in vars(module).values():
if (issubclass(type(obj), (types.ClassType, type)) and
issubclass(obj, unittest.TestCase)):
classname = obj.__name__
if classname[0] == '_' or self._this_is_skipped(classname):
continue
methodnames = []
# obj is a TestCase class
for attrname in dir(obj):
if attrname.startswith(self.testMethodPrefix):
attr = getattr(obj, attrname)
if callable(attr):
methodnames.append(attrname)
# keep track of class (obj) for convenience
tests[classname] = (obj, methodnames)
return tests
def loadTestsFromSuite(self, module, suitename):
try:
suite = getattr(module, suitename)()
except AttributeError:
return []
assert hasattr(suite, '_tests'), \
"%s.%s is not a valid TestSuite" % (module.__name__, suitename)
# python2.3 does not implement __iter__ on suites, we need to return
# _tests explicitly
return suite._tests
def loadTestsFromName(self, name, module=None):
parts = name.split('.')
if module is None or len(parts) > 2:
# let the base class do its job here
return [super(NonStrictTestLoader, self).loadTestsFromName(name)]
tests = self._collect_tests(module)
collected = []
if len(parts) == 1:
pattern = parts[0]
if callable(getattr(module, pattern, None)
) and pattern not in tests:
# consider it as a suite
return self.loadTestsFromSuite(module, pattern)
if pattern in tests:
# case python unittest_foo.py MyTestTC
klass, methodnames = tests[pattern]
for methodname in methodnames:
collected = [klass(methodname)
for methodname in methodnames]
else:
# case python unittest_foo.py something
for klass, methodnames in tests.values():
# skip methodname if matched by skipped_patterns
for skip_pattern in self.skipped_patterns:
methodnames = [methodname
for methodname in methodnames
if skip_pattern not in methodname]
collected += [klass(methodname)
for methodname in methodnames
if pattern in methodname]
elif len(parts) == 2:
# case "MyClass.test_1"
classname, pattern = parts
klass, methodnames = tests.get(classname, (None, []))
for methodname in methodnames:
collected = [klass(methodname) for methodname in methodnames
if pattern in methodname]
return collected
def _this_is_skipped(self, testedname):
return any([(pat in testedname) for pat in self.skipped_patterns])
def getTestCaseNames(self, testCaseClass):
"""Return a sorted sequence of method names found within testCaseClass
"""
is_skipped = self._this_is_skipped
classname = testCaseClass.__name__
if classname[0] == '_' or is_skipped(classname):
return []
testnames = super(NonStrictTestLoader, self).getTestCaseNames(
testCaseClass)
return [testname for testname in testnames if not is_skipped(testname)]
def _ts_run(self, result, runcondition=None, options=None):
self._wrapped_run(result,runcondition=runcondition, options=options)
self._tearDownPreviousClass(None, result)
self._handleModuleTearDown(result)
return result
def _ts_wrapped_run(self, result, debug=False, runcondition=None, options=None):
for test in self:
if result.shouldStop:
break
if unittest_suite._isnotsuite(test):
self._tearDownPreviousClass(test, result)
self._handleModuleFixture(test, result)
self._handleClassSetUp(test, result)
result._previousTestClass = test.__class__
if (getattr(test.__class__, '_classSetupFailed', False) or
getattr(result, '_moduleSetUpFailed', False)):
continue
if hasattr(test, '_wrapped_run'):
try:
test._wrapped_run(result, debug, runcondition=runcondition, options=options)
except TypeError:
test._wrapped_run(result, debug)
elif not debug:
try:
test(result, runcondition, options)
except TypeError:
test(result)
else:
test.debug()
def enable_dbc(*args):
"""
Without arguments, return True if contracts can be enabled and should be
enabled (see option -d), return False otherwise.
With arguments, return False if contracts can't or shouldn't be enabled,
otherwise weave ContractAspect with items passed as arguments.
"""
if not ENABLE_DBC:
return False
try:
from logilab.aspects.weaver import weaver
from logilab.aspects.lib.contracts import ContractAspect
except ImportError:
sys.stderr.write(
'Warning: logilab.aspects is not available. Contracts disabled.')
return False
for arg in args:
weaver.weave_module(arg, ContractAspect)
return True
# monkeypatch unittest and doctest (ouch !)
unittest._TextTestResult = testlib.SkipAwareTestResult
unittest.TextTestRunner = SkipAwareTextTestRunner
unittest.TestLoader = NonStrictTestLoader
unittest.TestProgram = SkipAwareTestProgram
if sys.version_info >= (2, 4):
doctest.DocTestCase.__bases__ = (testlib.TestCase,)
# XXX check python2.6 compatibility
#doctest.DocTestCase._cleanups = []
#doctest.DocTestCase._out = []
else:
unittest.FunctionTestCase.__bases__ = (testlib.TestCase,)
unittest.TestSuite.run = _ts_run
unittest.TestSuite._wrapped_run = _ts_wrapped_run
logilab-common-0.61.0/table.py 0000644 0000151 0000155 00000075151 12276435602 015476 0 ustar narval narval # copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Table management module."""
__docformat__ = "restructuredtext en"
class Table(object):
"""Table defines a data table with column and row names.
inv:
len(self.data) <= len(self.row_names)
forall(self.data, lambda x: len(x) <= len(self.col_names))
"""
def __init__(self, default_value=0, col_names=None, row_names=None):
self.col_names = []
self.row_names = []
self.data = []
self.default_value = default_value
if col_names:
self.create_columns(col_names)
if row_names:
self.create_rows(row_names)
def _next_row_name(self):
return 'row%s' % (len(self.row_names)+1)
def __iter__(self):
return iter(self.data)
def __eq__(self, other):
if other is None:
return False
else:
return list(self) == list(other)
__hash__ = object.__hash__
def __ne__(self, other):
return not self == other
def __len__(self):
return len(self.row_names)
## Rows / Columns creation #################################################
def create_rows(self, row_names):
"""Appends row_names to the list of existing rows
"""
self.row_names.extend(row_names)
for row_name in row_names:
self.data.append([self.default_value]*len(self.col_names))
def create_columns(self, col_names):
"""Appends col_names to the list of existing columns
"""
for col_name in col_names:
self.create_column(col_name)
def create_row(self, row_name=None):
"""Creates a rowname to the row_names list
"""
row_name = row_name or self._next_row_name()
self.row_names.append(row_name)
self.data.append([self.default_value]*len(self.col_names))
def create_column(self, col_name):
"""Creates a colname to the col_names list
"""
self.col_names.append(col_name)
for row in self.data:
row.append(self.default_value)
## Sort by column ##########################################################
def sort_by_column_id(self, col_id, method = 'asc'):
"""Sorts the table (in-place) according to data stored in col_id
"""
try:
col_index = self.col_names.index(col_id)
self.sort_by_column_index(col_index, method)
except ValueError:
raise KeyError("Col (%s) not found in table" % (col_id))
def sort_by_column_index(self, col_index, method = 'asc'):
"""Sorts the table 'in-place' according to data stored in col_index
method should be in ('asc', 'desc')
"""
sort_list = sorted([(row[col_index], row, row_name)
for row, row_name in zip(self.data, self.row_names)])
# Sorting sort_list will sort according to col_index
# If we want reverse sort, then reverse list
if method.lower() == 'desc':
sort_list.reverse()
# Rebuild data / row names
self.data = []
self.row_names = []
for val, row, row_name in sort_list:
self.data.append(row)
self.row_names.append(row_name)
def groupby(self, colname, *others):
"""builds indexes of data
:returns: nested dictionaries pointing to actual rows
"""
groups = {}
colnames = (colname,) + others
col_indexes = [self.col_names.index(col_id) for col_id in colnames]
for row in self.data:
ptr = groups
for col_index in col_indexes[:-1]:
ptr = ptr.setdefault(row[col_index], {})
ptr = ptr.setdefault(row[col_indexes[-1]],
Table(default_value=self.default_value,
col_names=self.col_names))
ptr.append_row(tuple(row))
return groups
def select(self, colname, value):
grouped = self.groupby(colname)
try:
return grouped[value]
except KeyError:
return []
def remove(self, colname, value):
col_index = self.col_names.index(colname)
for row in self.data[:]:
if row[col_index] == value:
self.data.remove(row)
## The 'setter' part #######################################################
def set_cell(self, row_index, col_index, data):
"""sets value of cell 'row_indew', 'col_index' to data
"""
self.data[row_index][col_index] = data
def set_cell_by_ids(self, row_id, col_id, data):
"""sets value of cell mapped by row_id and col_id to data
Raises a KeyError if row_id or col_id are not found in the table
"""
try:
row_index = self.row_names.index(row_id)
except ValueError:
raise KeyError("Row (%s) not found in table" % (row_id))
else:
try:
col_index = self.col_names.index(col_id)
self.data[row_index][col_index] = data
except ValueError:
raise KeyError("Column (%s) not found in table" % (col_id))
def set_row(self, row_index, row_data):
"""sets the 'row_index' row
pre:
type(row_data) == types.ListType
len(row_data) == len(self.col_names)
"""
self.data[row_index] = row_data
def set_row_by_id(self, row_id, row_data):
"""sets the 'row_id' column
pre:
type(row_data) == types.ListType
len(row_data) == len(self.row_names)
Raises a KeyError if row_id is not found
"""
try:
row_index = self.row_names.index(row_id)
self.set_row(row_index, row_data)
except ValueError:
raise KeyError('Row (%s) not found in table' % (row_id))
def append_row(self, row_data, row_name=None):
"""Appends a row to the table
pre:
type(row_data) == types.ListType
len(row_data) == len(self.col_names)
"""
row_name = row_name or self._next_row_name()
self.row_names.append(row_name)
self.data.append(row_data)
return len(self.data) - 1
def insert_row(self, index, row_data, row_name=None):
"""Appends row_data before 'index' in the table. To make 'insert'
behave like 'list.insert', inserting in an out of range index will
insert row_data to the end of the list
pre:
type(row_data) == types.ListType
len(row_data) == len(self.col_names)
"""
row_name = row_name or self._next_row_name()
self.row_names.insert(index, row_name)
self.data.insert(index, row_data)
def delete_row(self, index):
"""Deletes the 'index' row in the table, and returns it.
Raises an IndexError if index is out of range
"""
self.row_names.pop(index)
return self.data.pop(index)
def delete_row_by_id(self, row_id):
"""Deletes the 'row_id' row in the table.
Raises a KeyError if row_id was not found.
"""
try:
row_index = self.row_names.index(row_id)
self.delete_row(row_index)
except ValueError:
raise KeyError('Row (%s) not found in table' % (row_id))
def set_column(self, col_index, col_data):
"""sets the 'col_index' column
pre:
type(col_data) == types.ListType
len(col_data) == len(self.row_names)
"""
for row_index, cell_data in enumerate(col_data):
self.data[row_index][col_index] = cell_data
def set_column_by_id(self, col_id, col_data):
"""sets the 'col_id' column
pre:
type(col_data) == types.ListType
len(col_data) == len(self.col_names)
Raises a KeyError if col_id is not found
"""
try:
col_index = self.col_names.index(col_id)
self.set_column(col_index, col_data)
except ValueError:
raise KeyError('Column (%s) not found in table' % (col_id))
def append_column(self, col_data, col_name):
"""Appends the 'col_index' column
pre:
type(col_data) == types.ListType
len(col_data) == len(self.row_names)
"""
self.col_names.append(col_name)
for row_index, cell_data in enumerate(col_data):
self.data[row_index].append(cell_data)
def insert_column(self, index, col_data, col_name):
"""Appends col_data before 'index' in the table. To make 'insert'
behave like 'list.insert', inserting in an out of range index will
insert col_data to the end of the list
pre:
type(col_data) == types.ListType
len(col_data) == len(self.row_names)
"""
self.col_names.insert(index, col_name)
for row_index, cell_data in enumerate(col_data):
self.data[row_index].insert(index, cell_data)
def delete_column(self, index):
"""Deletes the 'index' column in the table, and returns it.
Raises an IndexError if index is out of range
"""
self.col_names.pop(index)
return [row.pop(index) for row in self.data]
def delete_column_by_id(self, col_id):
"""Deletes the 'col_id' col in the table.
Raises a KeyError if col_id was not found.
"""
try:
col_index = self.col_names.index(col_id)
self.delete_column(col_index)
except ValueError:
raise KeyError('Column (%s) not found in table' % (col_id))
## The 'getter' part #######################################################
def get_shape(self):
"""Returns a tuple which represents the table's shape
"""
return len(self.row_names), len(self.col_names)
shape = property(get_shape)
def __getitem__(self, indices):
"""provided for convenience"""
rows, multirows = None, False
cols, multicols = None, False
if isinstance(indices, tuple):
rows = indices[0]
if len(indices) > 1:
cols = indices[1]
else:
rows = indices
# define row slice
if isinstance(rows, str):
try:
rows = self.row_names.index(rows)
except ValueError:
raise KeyError("Row (%s) not found in table" % (rows))
if isinstance(rows, int):
rows = slice(rows, rows+1)
multirows = False
else:
rows = slice(None)
multirows = True
# define col slice
if isinstance(cols, str):
try:
cols = self.col_names.index(cols)
except ValueError:
raise KeyError("Column (%s) not found in table" % (cols))
if isinstance(cols, int):
cols = slice(cols, cols+1)
multicols = False
else:
cols = slice(None)
multicols = True
# get sub-table
tab = Table()
tab.default_value = self.default_value
tab.create_rows(self.row_names[rows])
tab.create_columns(self.col_names[cols])
for idx, row in enumerate(self.data[rows]):
tab.set_row(idx, row[cols])
if multirows :
if multicols:
return tab
else:
return [item[0] for item in tab.data]
else:
if multicols:
return tab.data[0]
else:
return tab.data[0][0]
def get_cell_by_ids(self, row_id, col_id):
"""Returns the element at [row_id][col_id]
"""
try:
row_index = self.row_names.index(row_id)
except ValueError:
raise KeyError("Row (%s) not found in table" % (row_id))
else:
try:
col_index = self.col_names.index(col_id)
except ValueError:
raise KeyError("Column (%s) not found in table" % (col_id))
return self.data[row_index][col_index]
def get_row_by_id(self, row_id):
"""Returns the 'row_id' row
"""
try:
row_index = self.row_names.index(row_id)
except ValueError:
raise KeyError("Row (%s) not found in table" % (row_id))
return self.data[row_index]
def get_column_by_id(self, col_id, distinct=False):
"""Returns the 'col_id' col
"""
try:
col_index = self.col_names.index(col_id)
except ValueError:
raise KeyError("Column (%s) not found in table" % (col_id))
return self.get_column(col_index, distinct)
def get_columns(self):
"""Returns all the columns in the table
"""
return [self[:, index] for index in range(len(self.col_names))]
def get_column(self, col_index, distinct=False):
"""get a column by index"""
col = [row[col_index] for row in self.data]
if distinct:
col = list(set(col))
return col
def apply_stylesheet(self, stylesheet):
"""Applies the stylesheet to this table
"""
for instruction in stylesheet.instructions:
eval(instruction)
def transpose(self):
"""Keeps the self object intact, and returns the transposed (rotated)
table.
"""
transposed = Table()
transposed.create_rows(self.col_names)
transposed.create_columns(self.row_names)
for col_index, column in enumerate(self.get_columns()):
transposed.set_row(col_index, column)
return transposed
def pprint(self):
"""returns a string representing the table in a pretty
printed 'text' format.
"""
# The maximum row name length (to know the start index of the first col)
max_row_name = 0
for row_name in self.row_names:
if len(row_name) > max_row_name:
max_row_name = len(row_name)
col_start = max_row_name + 5
lines = []
# Build the 'first' line <=> the col_names one
# The first cell <=> an empty one
col_names_line = [' '*col_start]
for col_name in self.col_names:
col_names_line.append(col_name + ' '*5)
lines.append('|' + '|'.join(col_names_line) + '|')
max_line_length = len(lines[0])
# Build the table
for row_index, row in enumerate(self.data):
line = []
# First, build the row_name's cell
row_name = self.row_names[row_index]
line.append(row_name + ' '*(col_start-len(row_name)))
# Then, build all the table's cell for this line.
for col_index, cell in enumerate(row):
col_name_length = len(self.col_names[col_index]) + 5
data = str(cell)
line.append(data + ' '*(col_name_length - len(data)))
lines.append('|' + '|'.join(line) + '|')
if len(lines[-1]) > max_line_length:
max_line_length = len(lines[-1])
# Wrap the table with '-' to make a frame
lines.insert(0, '-'*max_line_length)
lines.append('-'*max_line_length)
return '\n'.join(lines)
def __repr__(self):
return repr(self.data)
def as_text(self):
data = []
# We must convert cells into strings before joining them
for row in self.data:
data.append([str(cell) for cell in row])
lines = ['\t'.join(row) for row in data]
return '\n'.join(lines)
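# Usage sketch for Table: build a small table, fill it by row id, then use
# the indexing, grouping and pretty-printing helpers defined above.  Column
# names and values are purely illustrative.
sales = Table(default_value=0,
              col_names=['country', 'year', 'amount'],
              row_names=['r1', 'r2', 'r3'])
sales.set_row_by_id('r1', ['fr', 2013, 10])
sales.set_row_by_id('r2', ['fr', 2014, 12])
sales.set_row_by_id('r3', ['us', 2014, 7])
print sales['r2', 'amount']                 # row id / column id indexing -> 12
print sales.get_column_by_id('year', distinct=True)   # distinct years (unordered)
print sales.groupby('country')['fr'].shape  # sub-table of the 'fr' rows -> (2, 3)
print sales.pprint()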
class TableStyle:
"""Defines a table's style
"""
def __init__(self, table):
self._table = table
self.size = dict([(col_name, '1*') for col_name in table.col_names])
# __row_column__ is a special key to define the first column which
# actually has no name (<=> left most column <=> row names column)
self.size['__row_column__'] = '1*'
self.alignment = dict([(col_name, 'right')
for col_name in table.col_names])
self.alignment['__row_column__'] = 'right'
# We shouldn't have to create an entry for
# the 1st col (the row_column one)
self.units = dict([(col_name, '') for col_name in table.col_names])
self.units['__row_column__'] = ''
# XXX FIXME : params order should be reversed for all set() methods
def set_size(self, value, col_id):
"""sets the size of the specified col_id to value
"""
self.size[col_id] = value
def set_size_by_index(self, value, col_index):
"""Allows to set the size according to the column index rather than
using the column's id.
BE CAREFUL : the '0' column is the '__row_column__' one !
"""
if col_index == 0:
col_id = '__row_column__'
else:
col_id = self._table.col_names[col_index-1]
self.size[col_id] = value
def set_alignment(self, value, col_id):
"""sets the alignment of the specified col_id to value
"""
self.alignment[col_id] = value
def set_alignment_by_index(self, value, col_index):
"""Allows to set the alignment according to the column index rather than
using the column's id.
BE CAREFUL : the '0' column is the '__row_column__' one !
"""
if col_index == 0:
col_id = '__row_column__'
else:
col_id = self._table.col_names[col_index-1]
self.alignment[col_id] = value
def set_unit(self, value, col_id):
"""sets the unit of the specified col_id to value
"""
self.units[col_id] = value
def set_unit_by_index(self, value, col_index):
"""Allows to set the unit according to the column index rather than
using the column's id.
BE CAREFUL : the '0' column is the '__row_column__' one !
(Note that in the 'unit' case, you shouldn't have to set a unit
for the 1st column (the __row__column__ one))
"""
if col_index == 0:
col_id = '__row_column__'
else:
col_id = self._table.col_names[col_index-1]
self.units[col_id] = value
def get_size(self, col_id):
"""Returns the size of the specified col_id
"""
return self.size[col_id]
def get_size_by_index(self, col_index):
"""Allows to get the size according to the column index rather than
using the column's id.
BE CAREFUL : the '0' column is the '__row_column__' one !
"""
if col_index == 0:
col_id = '__row_column__'
else:
col_id = self._table.col_names[col_index-1]
return self.size[col_id]
def get_alignment(self, col_id):
"""Returns the alignment of the specified col_id
"""
return self.alignment[col_id]
def get_alignment_by_index(self, col_index):
"""Allors to get the alignment according to the column index rather than
using the column's id.
BE CAREFUL : the '0' column is the '__row_column__' one !
"""
if col_index == 0:
col_id = '__row_column__'
else:
col_id = self._table.col_names[col_index-1]
return self.alignment[col_id]
def get_unit(self, col_id):
"""Returns the unit of the specified col_id
"""
return self.units[col_id]
def get_unit_by_index(self, col_index):
"""Allors to get the unit according to the column index rather than
using the column's id.
BE CAREFUL : the '0' column is the '__row_column__' one !
"""
if col_index == 0:
col_id = '__row_column__'
else:
col_id = self._table.col_names[col_index-1]
return self.units[col_id]
import re
CELL_PROG = re.compile("([0-9]+)_([0-9]+)")
class TableStyleSheet:
"""A simple Table stylesheet
Rules are expressions where cells are defined by the row_index
and col_index separated by an underscore ('_').
For example, suppose you want to say that the (2,5) cell must be
the sum of its two preceding cells in the row, you would create
the following rule :
2_5 = 2_3 + 2_4
You can also use all the math.* operations you want. For example:
2_5 = sqrt(2_3**2 + 2_4**2)
"""
def __init__(self, rules = None):
rules = rules or []
self.rules = []
self.instructions = []
for rule in rules:
self.add_rule(rule)
def add_rule(self, rule):
"""Adds a rule to the stylesheet rules
"""
try:
source_code = ['from math import *']
source_code.append(CELL_PROG.sub(r'self.data[\1][\2]', rule))
self.instructions.append(compile('\n'.join(source_code),
'table.py', 'exec'))
self.rules.append(rule)
except SyntaxError:
print "Bad Stylesheet Rule : %s [skipped]"%rule
def add_rowsum_rule(self, dest_cell, row_index, start_col, end_col):
"""Creates and adds a rule to sum over the row at row_index from
start_col to end_col.
dest_cell is a tuple of two elements (x,y) of the destination cell
No check is done for indexes ranges.
pre:
start_col >= 0
end_col > start_col
"""
cell_list = ['%d_%d'%(row_index, index) for index in range(start_col,
end_col + 1)]
rule = '%d_%d=' % dest_cell + '+'.join(cell_list)
self.add_rule(rule)
def add_rowavg_rule(self, dest_cell, row_index, start_col, end_col):
"""Creates and adds a rule to make the row average (from start_col
to end_col)
dest_cell is a tuple of two elements (x,y) of the destination cell
No check is done for indexes ranges.
pre:
start_col >= 0
end_col > start_col
"""
cell_list = ['%d_%d'%(row_index, index) for index in range(start_col,
end_col + 1)]
num = (end_col - start_col + 1)
rule = '%d_%d=' % dest_cell + '('+'+'.join(cell_list)+')/%f'%num
self.add_rule(rule)
def add_colsum_rule(self, dest_cell, col_index, start_row, end_row):
"""Creates and adds a rule to sum over the col at col_index from
start_row to end_row.
dest_cell is a tuple of two elements (x,y) of the destination cell
No check is done for indexes ranges.
pre:
start_row >= 0
end_row > start_row
"""
cell_list = ['%d_%d'%(index, col_index) for index in range(start_row,
end_row + 1)]
rule = '%d_%d=' % dest_cell + '+'.join(cell_list)
self.add_rule(rule)
def add_colavg_rule(self, dest_cell, col_index, start_row, end_row):
"""Creates and adds a rule to make the col average (from start_row
to end_row)
dest_cell is a tuple of two elements (x,y) of the destination cell
No check is done for indexes ranges.
pre:
start_row >= 0
end_row > start_row
"""
cell_list = ['%d_%d'%(index, col_index) for index in range(start_row,
end_row + 1)]
num = (end_row - start_row + 1)
rule = '%d_%d=' % dest_cell + '('+'+'.join(cell_list)+')/%f'%num
self.add_rule(rule)
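# Usage sketch for TableStyleSheet: each rule assigns a cell (row_col) from
# other cells, and Table.apply_stylesheet() evaluates the rules in order.
# The figures below are arbitrary.
totals = Table(col_names=['q1', 'q2', 'total'], row_names=['2013', '2014'])
totals.set_row_by_id('2013', [10, 20, 0])
totals.set_row_by_id('2014', [30, 40, 0])
sheet = TableStyleSheet(['0_2 = 0_0 + 0_1'])   # first row: total = q1 + q2
sheet.add_rowsum_rule((1, 2), 1, 0, 1)         # same rule for the second row
totals.apply_stylesheet(sheet)
print totals.get_column_by_id('total')         # -> [30, 70]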
class TableCellRenderer:
"""Defines a simple text renderer
"""
def __init__(self, **properties):
"""keywords should be properties with an associated boolean as value.
For example :
renderer = TableCellRenderer(units = True, alignment = False)
An unspecified property will have a 'False' value by default.
Possible properties are :
alignment, unit
"""
self.properties = properties
def render_cell(self, cell_coord, table, table_style):
"""Renders the cell at 'cell_coord' in the table, using table_style
"""
row_index, col_index = cell_coord
cell_value = table.data[row_index][col_index]
final_content = self._make_cell_content(cell_value,
table_style, col_index +1)
return self._render_cell_content(final_content,
table_style, col_index + 1)
def render_row_cell(self, row_name, table, table_style):
"""Renders the cell for 'row_id' row
"""
cell_value = row_name
return self._render_cell_content(cell_value, table_style, 0)
def render_col_cell(self, col_name, table, table_style):
"""Renders the cell for 'col_id' row
"""
cell_value = col_name
col_index = table.col_names.index(col_name)
return self._render_cell_content(cell_value, table_style, col_index +1)
def _render_cell_content(self, content, table_style, col_index):
"""Makes the appropriate rendering for this cell content.
Rendering properties will be searched using the
*table_style.get_xxx_by_index(col_index)' methods
**This method should be overridden in the derived renderer classes.**
"""
return content
def _make_cell_content(self, cell_content, table_style, col_index):
"""Makes the cell content (adds decoration data, like units for
example)
"""
final_content = cell_content
if 'skip_zero' in self.properties:
replacement_char = self.properties['skip_zero']
else:
replacement_char = 0
if replacement_char and final_content == 0:
return replacement_char
try:
units_on = self.properties['units']
if units_on:
final_content = self._add_unit(
cell_content, table_style, col_index)
except KeyError:
pass
return final_content
def _add_unit(self, cell_content, table_style, col_index):
"""Adds unit to the cell_content if needed
"""
unit = table_style.get_unit_by_index(col_index)
return str(cell_content) + " " + unit
class DocbookRenderer(TableCellRenderer):
"""Defines how to render a cell for a docboook table
"""
def define_col_header(self, col_index, table_style):
"""Computes the colspec element according to the style
"""
size = table_style.get_size_by_index(col_index)
return '<colspec colname="c%d" colwidth="%s"/>\n' % \
(col_index, size)
def _render_cell_content(self, cell_content, table_style, col_index):
"""Makes the appropriate rendering for this cell content.
Rendering properties will be searched using the
table_style.get_xxx_by_index(col_index)' methods.
"""
try:
align_on = self.properties['alignment']
alignment = table_style.get_alignment_by_index(col_index)
if align_on:
return "%s\n" % \
(alignment, cell_content)
except KeyError:
# KeyError <=> Default alignment
return "%s\n" % cell_content
class TableWriter:
"""A class to write tables
"""
def __init__(self, stream, table, style, **properties):
self._stream = stream
self.style = style or TableStyle(table)
self._table = table
self.properties = properties
self.renderer = None
def set_style(self, style):
"""sets the table's associated style
"""
self.style = style
def set_renderer(self, renderer):
"""sets the way to render cell
"""
self.renderer = renderer
def update_properties(self, **properties):
"""Updates writer's properties (for cell rendering)
"""
self.properties.update(properties)
def write_table(self, title = ""):
"""Writes the table
"""
raise NotImplementedError("write_table must be implemented !")
class DocbookTableWriter(TableWriter):
"""Defines an implementation of TableWriter to write a table in Docbook
"""
def _write_headers(self):
"""Writes col headers
"""
# Define col_headers (colspec elements)
for col_index in range(len(self._table.col_names)+1):
self._stream.write(self.renderer.define_col_header(col_index,
self.style))
self._stream.write("\n\n")
# XXX FIXME : write an empty entry <=> the first (__row_column) column
self._stream.write('\n')
for col_name in self._table.col_names:
self._stream.write(self.renderer.render_col_cell(
col_name, self._table,
self.style))
self._stream.write("
\n\n")
def _write_body(self):
"""Writes the table body
"""
self._stream.write('<tbody>\n')
for row_index, row in enumerate(self._table.data):
self._stream.write('<row>\n')
row_name = self._table.row_names[row_index]
# Write the first entry (row_name)
self._stream.write(self.renderer.render_row_cell(row_name,
self._table,
self.style))
for col_index, cell in enumerate(row):
self._stream.write(self.renderer.render_cell(
(row_index, col_index),
self._table, self.style))
self._stream.write('</row>\n')
self._stream.write('</tbody>\n')
def write_table(self, title = ""):
"""Writes the table
"""
self._stream.write('<table>\n<title>%s</title>\n' % (title))
self._stream.write(
'<tgroup cols="%d">\n' %
(len(self._table.col_names)+1))
self._write_headers()
self._write_body()
self._stream.write('</tgroup>\n</table>\n')
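# Usage sketch for DocbookTableWriter: serialize a small table as a docbook
# <table> element on stdout, cells being rendered by DocbookRenderer.
import sys
doc_table = Table(col_names=['q1', 'q2'], row_names=['2013'])
doc_table.set_row_by_id('2013', [10, 20])
writer = DocbookTableWriter(sys.stdout, doc_table, None)
writer.set_renderer(DocbookRenderer(alignment=True))
writer.write_table(title='Sales')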
logilab-common-0.61.0/daemon.py
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""A daemonize function (for Unices)"""
__docformat__ = "restructuredtext en"
import os
import errno
import signal
import sys
import time
import warnings
def setugid(user):
"""Change process user and group ID
Argument is a numeric user id or a user name"""
try:
from pwd import getpwuid
passwd = getpwuid(int(user))
except ValueError:
from pwd import getpwnam
passwd = getpwnam(user)
if hasattr(os, 'initgroups'): # python >= 2.7
os.initgroups(passwd.pw_name, passwd.pw_gid)
else:
import ctypes
if ctypes.CDLL(None).initgroups(passwd.pw_name, passwd.pw_gid) < 0:
err = ctypes.c_int.in_dll(ctypes.pythonapi,"errno").value
raise OSError(err, os.strerror(err), 'initgroups')
os.setgid(passwd.pw_gid)
os.setuid(passwd.pw_uid)
os.environ['HOME'] = passwd.pw_dir
def daemonize(pidfile=None, uid=None, umask=077):
"""daemonize a Unix process. Set paranoid umask by default.
Return 1 in the original process, 2 in the first fork, and None for the
second fork (i.e. the daemon process).
"""
# http://www.faqs.org/faqs/unix-faq/programmer/faq/
#
# fork so the parent can exit
if os.fork(): # launch child and...
return 1
# disconnect from tty and create a new session
os.setsid()
# fork again so the parent, (the session group leader), can exit.
# as a non-session group leader, we can never regain a controlling
# terminal.
if os.fork(): # launch child again.
return 2
# move to the root to avoid mount point problems
os.chdir('/')
# set umask if specified
if umask is not None:
os.umask(umask)
# redirect standard descriptors
null = os.open('/dev/null', os.O_RDWR)
for i in range(3):
try:
os.dup2(null, i)
except OSError, e:
if e.errno != errno.EBADF:
raise
os.close(null)
# filter warnings
warnings.filterwarnings('ignore')
# write pid in a file
if pidfile:
# ensure the directory where the pid-file should be set exists (for
# instance /var/run/cubicweb may be deleted on computer restart)
piddir = os.path.dirname(pidfile)
if not os.path.exists(piddir):
os.makedirs(piddir)
f = file(pidfile, 'w')
f.write(str(os.getpid()))
f.close()
os.chmod(pidfile, 0644)
# change process uid
if uid:
setugid(uid)
return None
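# Usage sketch for daemonize(): the original process and the intermediate
# fork get a non-None return value and should exit, while the daemon itself
# gets None and keeps running.  run_my_daemon and the pid file path are
# illustrative only.
def run_my_daemon():
    if daemonize(pidfile='/tmp/mydaemon.pid', umask=022) is not None:
        sys.exit(0)        # parent and first child: nothing left to do
    while True:            # daemon body
        time.sleep(60)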
logilab-common-0.61.0/optik_ext.py
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Add an abstraction level to transparently import optik classes from optparse
(python >= 2.3) or the optik package.
It also defines several new types for the optik/optparse command line parser:
* regexp
argument of this type will be converted using re.compile
* csv
argument of this type will be converted using split(',')
* yn
argument of this type will be true if 'y' or 'yes', false if 'n' or 'no'
* named
argument of this type is in the form <NAME>=<VALUE> or <NAME>:<VALUE>
* password
argument of this type won't be converted but this is used by other tools
such as interactive prompt for configuration to double check value and
use an invisible field
* multiple_choice
same as default "choice" type but multiple choices allowed
* file
argument of this type won't be converted but it is checked that the given file exists
* color
argument of this type won't be converted but it is checked to be either a
named color or a color specified using hexadecimal notation (preceded by a #)
* time
argument of this type will be converted to a float value in seconds
according to time units (ms, s, min, h, d)
* bytes
argument of this type will be converted to a float value in bytes
according to byte units (b, kb, mb, gb, tb)
"""
__docformat__ = "restructuredtext en"
import re
import sys
import time
from copy import copy
from os.path import exists
# python >= 2.3
from optparse import OptionParser as BaseParser, Option as BaseOption, \
OptionGroup, OptionContainer, OptionValueError, OptionError, \
Values, HelpFormatter, NO_DEFAULT, SUPPRESS_HELP
try:
from mx import DateTime
HAS_MX_DATETIME = True
except ImportError:
HAS_MX_DATETIME = False
from logilab.common.textutils import splitstrip
def check_regexp(option, opt, value):
"""check a regexp value by trying to compile it
return the compiled regexp
"""
if hasattr(value, 'pattern'):
return value
try:
return re.compile(value)
except ValueError:
raise OptionValueError(
"option %s: invalid regexp value: %r" % (opt, value))
def check_csv(option, opt, value):
"""check a csv value by trying to split it
return the list of separated values
"""
if isinstance(value, (list, tuple)):
return value
try:
return splitstrip(value)
except ValueError:
raise OptionValueError(
"option %s: invalid csv value: %r" % (opt, value))
def check_yn(option, opt, value):
"""check a yn value
return true for yes and false for no
"""
if isinstance(value, int):
return bool(value)
if value in ('y', 'yes'):
return True
if value in ('n', 'no'):
return False
msg = "option %s: invalid yn value %r, should be in (y, yes, n, no)"
raise OptionValueError(msg % (opt, value))
def check_named(option, opt, value):
"""check a named value
return a dictionary containing (name, value) associations
"""
if isinstance(value, dict):
return value
values = []
for value in check_csv(option, opt, value):
if value.find('=') != -1:
values.append(value.split('=', 1))
elif value.find(':') != -1:
values.append(value.split(':', 1))
if values:
return dict(values)
msg = "option %s: invalid named value %r, should be = or \
:"
raise OptionValueError(msg % (opt, value))
def check_password(option, opt, value):
"""check a password value (can't be empty)
"""
# no actual checking, monkey patch if you want more
return value
def check_file(option, opt, value):
"""check a file value
return the filepath
"""
if exists(value):
return value
msg = "option %s: file %r does not exist"
raise OptionValueError(msg % (opt, value))
# XXX use python datetime
def check_date(option, opt, value):
"""check a file value
return the filepath
"""
try:
return DateTime.strptime(value, "%Y/%m/%d")
except DateTime.Error :
raise OptionValueError(
"expected format of %s is yyyy/mm/dd" % opt)
def check_color(option, opt, value):
"""check a color value and returns it
/!\ does *not* check color labels (like 'red', 'green'), only
checks hexadecimal forms
"""
# Case (1) : color label, we trust the end-user
if re.match('[a-z0-9 ]+$', value, re.I):
return value
# Case (2) : only accepts hexadecimal forms
if re.match('#[a-f0-9]{6}', value, re.I):
return value
# Else : not a color label neither a valid hexadecimal form => error
msg = "option %s: invalid color : %r, should be either hexadecimal \
value or predefined color"
raise OptionValueError(msg % (opt, value))
def check_time(option, opt, value):
from logilab.common.textutils import TIME_UNITS, apply_units
if isinstance(value, (int, long, float)):
return value
return apply_units(value, TIME_UNITS)
def check_bytes(option, opt, value):
from logilab.common.textutils import BYTE_UNITS, apply_units
if hasattr(value, '__int__'):
return value
return apply_units(value, BYTE_UNITS)
import types
class Option(BaseOption):
"""override optik.Option to add some new option types
"""
TYPES = BaseOption.TYPES + ('regexp', 'csv', 'yn', 'named', 'password',
'multiple_choice', 'file', 'color',
'time', 'bytes')
ATTRS = BaseOption.ATTRS + ['hide', 'level']
TYPE_CHECKER = copy(BaseOption.TYPE_CHECKER)
TYPE_CHECKER['regexp'] = check_regexp
TYPE_CHECKER['csv'] = check_csv
TYPE_CHECKER['yn'] = check_yn
TYPE_CHECKER['named'] = check_named
TYPE_CHECKER['multiple_choice'] = check_csv
TYPE_CHECKER['file'] = check_file
TYPE_CHECKER['color'] = check_color
TYPE_CHECKER['password'] = check_password
TYPE_CHECKER['time'] = check_time
TYPE_CHECKER['bytes'] = check_bytes
if HAS_MX_DATETIME:
TYPES += ('date',)
TYPE_CHECKER['date'] = check_date
def __init__(self, *opts, **attrs):
BaseOption.__init__(self, *opts, **attrs)
if hasattr(self, "hide") and self.hide:
self.help = SUPPRESS_HELP
def _check_choice(self):
"""FIXME: need to override this due to optik misdesign"""
if self.type in ("choice", "multiple_choice"):
if self.choices is None:
raise OptionError(
"must supply a list of choices for type 'choice'", self)
elif type(self.choices) not in (types.TupleType, types.ListType):
raise OptionError(
"choices must be a list of strings ('%s' supplied)"
% str(type(self.choices)).split("'")[1], self)
elif self.choices is not None:
raise OptionError(
"must not supply choices for type %r" % self.type, self)
BaseOption.CHECK_METHODS[2] = _check_choice
def process(self, opt, value, values, parser):
# First, convert the value(s) to the right type. Howl if any
# value(s) are bogus.
value = self.convert_value(opt, value)
if self.type == 'named':
existant = getattr(values, self.dest)
if existant:
existant.update(value)
value = existant
# And then take whatever action is expected of us.
# This is a separate method to make life easier for
# subclasses to add new actions.
return self.take_action(
self.action, self.dest, opt, value, values, parser)
class OptionParser(BaseParser):
"""override optik.OptionParser to use our Option class
"""
def __init__(self, option_class=Option, *args, **kwargs):
BaseParser.__init__(self, option_class=option_class, *args, **kwargs)
def format_option_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
outputlevel = getattr(formatter, 'output_level', 0)
formatter.store_option_strings(self)
result = []
result.append(formatter.format_heading("Options"))
formatter.indent()
if self.option_list:
result.append(OptionContainer.format_option_help(self, formatter))
result.append("\n")
for group in self.option_groups:
if group.level <= outputlevel and (
group.description or level_options(group, outputlevel)):
result.append(group.format_help(formatter))
result.append("\n")
formatter.dedent()
# Drop the last "\n", or the header if no options or option groups:
return "".join(result[:-1])
OptionGroup.level = 0
def level_options(group, outputlevel):
return [option for option in group.option_list
if (getattr(option, 'level', 0) or 0) <= outputlevel
and not option.help is SUPPRESS_HELP]
def format_option_help(self, formatter):
result = []
outputlevel = getattr(formatter, 'output_level', 0) or 0
for option in level_options(self, outputlevel):
result.append(formatter.format_option(option))
return "".join(result)
OptionContainer.format_option_help = format_option_help
class ManHelpFormatter(HelpFormatter):
"""Format help using man pages ROFF format"""
def __init__ (self,
indent_increment=0,
max_help_position=24,
width=79,
short_first=0):
HelpFormatter.__init__ (
self, indent_increment, max_help_position, width, short_first)
def format_heading(self, heading):
return '.SH %s\n' % heading.upper()
def format_description(self, description):
return description
def format_option(self, option):
try:
optstring = option.option_strings
except AttributeError:
optstring = self.format_option_strings(option)
if option.help:
help_text = self.expand_default(option)
help = ' '.join([l.strip() for l in help_text.splitlines()])
else:
help = ''
return '''.IP "%s"
%s
''' % (optstring, help)
def format_head(self, optparser, pkginfo, section=1):
long_desc = ""
try:
pgm = optparser._get_prog_name()
except AttributeError:
# py >= 2.4.X (dunno which X exactly, at least 2)
pgm = optparser.get_prog_name()
short_desc = self.format_short_description(pgm, pkginfo.description)
if hasattr(pkginfo, "long_desc"):
long_desc = self.format_long_description(pgm, pkginfo.long_desc)
return '%s\n%s\n%s\n%s' % (self.format_title(pgm, section),
short_desc, self.format_synopsis(pgm),
long_desc)
def format_title(self, pgm, section):
date = '-'.join([str(num) for num in time.localtime()[:3]])
return '.TH %s %s "%s" %s' % (pgm, section, date, pgm)
def format_short_description(self, pgm, short_desc):
return '''.SH NAME
.B %s
\- %s
''' % (pgm, short_desc.strip())
def format_synopsis(self, pgm):
return '''.SH SYNOPSIS
.B %s
[
.I OPTIONS
] [
.I <arguments>
]
''' % pgm
def format_long_description(self, pgm, long_desc):
long_desc = '\n'.join([line.lstrip()
for line in long_desc.splitlines()])
long_desc = long_desc.replace('\n.\n', '\n\n')
if long_desc.lower().startswith(pgm):
long_desc = long_desc[len(pgm):]
return '''.SH DESCRIPTION
.B %s
%s
''' % (pgm, long_desc.strip())
def format_tail(self, pkginfo):
tail = '''.SH SEE ALSO
/usr/share/doc/pythonX.Y-%s/
.SH BUGS
Please report bugs on the project\'s mailing list:
%s
.SH AUTHOR
%s <%s>
''' % (getattr(pkginfo, 'debian_name', pkginfo.modname),
pkginfo.mailinglist, pkginfo.author, pkginfo.author_email)
if hasattr(pkginfo, "copyright"):
tail += '''
.SH COPYRIGHT
%s
''' % pkginfo.copyright
return tail
def generate_manpage(optparser, pkginfo, section=1, stream=sys.stdout, level=0):
"""generate a man page from an optik parser"""
formatter = ManHelpFormatter()
formatter.output_level = level
formatter.parser = optparser
print >> stream, formatter.format_head(optparser, pkginfo, section)
print >> stream, optparser.format_option_help(formatter)
print >> stream, formatter.format_tail(pkginfo)
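# Usage sketch for generate_manpage(): pkginfo can be any object exposing the
# attributes read by ManHelpFormatter (description, mailinglist, author,
# author_email, modname, and optionally long_desc / debian_name / copyright).
# The metadata values below are placeholders.
def _manpage_example():
    class PkgInfo(object):
        modname = 'mytool'
        description = 'do something useful'
        mailinglist = 'mytool-users@example.org'
        author = 'Jane Doe'
        author_email = 'jane@example.org'
    parser = OptionParser()
    parser.add_option('--verbose', action='store_true', help='verbose output')
    generate_manpage(parser, PkgInfo(), section=1, stream=sys.stdout)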
__all__ = ('OptionParser', 'Option', 'OptionGroup', 'OptionValueError',
'Values')
logilab-common-0.61.0/COPYING
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year>  <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.
logilab-common-0.61.0/doc/pytest.1
.TH pytest "1" "January 2008" pytest
.SH NAME
.B pytest
\- run python unit tests
.SH SYNOPSIS
usage: pytest [OPTIONS] [testfile [testpattern]]
.PP
examples:
.PP
pytest path/to/mytests.py
pytest path/to/mytests.py TheseTests
pytest path/to/mytests.py TheseTests.test_thisone
.PP
pytest one (will run both test_thisone and test_thatone)
pytest path/to/mytests.py \fB\-s\fR not (will skip test_notthisone)
.PP
pytest \fB\-\-coverage\fR test_foo.py
.IP
(only if logilab.devtools is available)
.SS "options:"
.TP
\fB\-h\fR, \fB\-\-help\fR
show this help message and exit
.TP
\fB\-t\fR TESTDIR
directory where the tests will be found
.TP
\fB\-d\fR
enable design\-by\-contract
.TP
\fB\-v\fR, \fB\-\-verbose\fR
Verbose output
.TP
\fB\-i\fR, \fB\-\-pdb\fR
Enable test failure inspection (conflicts with
\fB\-\-coverage\fR)
.TP
\fB\-x\fR, \fB\-\-exitfirst\fR
Exit on first failure (only make sense when pytest run
one test file)
.TP
\fB\-s\fR SKIPPED, \fB\-\-skip\fR=\fISKIPPED\fR
test names matching this name will be skipped. To skip
several patterns, use commas
.TP
\fB\-q\fR, \fB\-\-quiet\fR
Minimal output
.TP
\fB\-P\fR PROFILE, \fB\-\-profile\fR=\fIPROFILE\fR
Profile execution and store data in the given file
.TP
\fB\-\-coverage\fR
run tests with pycoverage (conflicts with \fB\-\-pdb\fR)
logilab-common-0.61.0/doc/makefile 0000644 0000151 0000155 00000000414 12276435602 016270 0 ustar narval narval all: epydoc
epydoc:
mkdir -p apidoc
-epydoc --parse-only -o apidoc --html -v --no-private --exclude='test' --exclude="__pkginfo__" --exclude="setup" -n "Logilab's common library" $(shell dirname $(CURDIR))/build/lib/logilab/common >/dev/null
clean:
rm -rf apidoc
logilab-common-0.61.0/cli.py 0000644 0000151 0000155 00000015536 12276435602 015157 0 ustar narval narval # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Command line interface helper classes.
It provides some default commands, a help system, a default readline
configuration with completion and persistent history.
Example::
class BookShell(CLIHelper):
def __init__(self):
# quit and help are builtins
# CMD_MAP keys are commands, values are topics
self.CMD_MAP['pionce'] = _("Sleep")
self.CMD_MAP['ronfle'] = _("Sleep")
CLIHelper.__init__(self)
help_do_pionce = ("pionce", "pionce duration", _("puts your body on standby"))
def do_pionce(self):
print 'nap is good'
help_do_ronfle = ("ronfle", "ronfle volume", _("puts the others on standby"))
def do_ronfle(self):
print 'fuuuuuuuuuuuu rhhhhhrhrhrrh'
cl = BookShell()
"""
__docformat__ = "restructuredtext en"
from logilab.common.compat import raw_input, builtins
if not hasattr(builtins, '_'):
builtins._ = str
def init_readline(complete_method, histfile=None):
"""Init the readline library if available."""
try:
import readline
readline.parse_and_bind("tab: complete")
readline.set_completer(complete_method)
string = readline.get_completer_delims().replace(':', '')
readline.set_completer_delims(string)
if histfile is not None:
try:
readline.read_history_file(histfile)
except IOError:
pass
import atexit
atexit.register(readline.write_history_file, histfile)
except:
print 'readline is not available :-('
class Completer :
"""Readline completer."""
def __init__(self, commands):
self.list = commands
def complete(self, text, state):
"""Hook called by readline when is pressed."""
n = len(text)
matches = []
for cmd in self.list :
if cmd[:n] == text :
matches.append(cmd)
try:
return matches[state]
except IndexError:
return None
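# Sketch of the readline completion protocol that Completer implements
# (illustrative comments only, not part of the original module): readline
# calls the completer with the same prefix and an increasing state index
# until it gets None back, e.g.
#
#     c = Completer(['help', 'hello', 'quit'])
#     c.complete('he', 0)    # -> 'help'
#     c.complete('he', 1)    # -> 'hello'
#     c.complete('he', 2)    # -> None (no more matches)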
class CLIHelper:
"""An abstract command line interface client which recognize commands
and provide an help system.
"""
CMD_MAP = {'help': _("Others"),
'quit': _("Others"),
}
CMD_PREFIX = ''
def __init__(self, histfile=None) :
self._topics = {}
self.commands = None
self._completer = Completer(self._register_commands())
init_readline(self._completer.complete, histfile)
def run(self):
"""loop on user input, exit on EOF"""
while True:
try:
line = raw_input('>>> ')
except EOFError:
print
break
s_line = line.strip()
if not s_line:
continue
args = s_line.split()
if args[0] in self.commands:
try:
cmd = 'do_%s' % self.commands[args[0]]
getattr(self, cmd)(*args[1:])
except EOFError:
break
except:
import traceback
traceback.print_exc()
else:
try:
self.handle_line(s_line)
except:
import traceback
traceback.print_exc()
def handle_line(self, stripped_line):
"""Method to overload in the concrete class (should handle
lines which are not commands).
"""
raise NotImplementedError()
# private methods #########################################################
def _register_commands(self):
""" register available commands method and return the list of
commands name
"""
self.commands = {}
self._command_help = {}
commands = [attr[3:] for attr in dir(self) if attr[:3] == 'do_']
for command in commands:
topic = self.CMD_MAP[command]
help_method = getattr(self, 'help_do_%s' % command)
self._topics.setdefault(topic, []).append(help_method)
self.commands[self.CMD_PREFIX + command] = command
self._command_help[command] = help_method
return self.commands.keys()
def _print_help(self, cmd, syntax, explanation):
print _('Command %s') % cmd
print _('Syntax: %s') % syntax
print '\t', explanation
print
# predefined commands #####################################################
def do_help(self, command=None) :
"""base input of the help system"""
if command in self._command_help:
self._print_help(*self._command_help[command])
elif command is None or command not in self._topics:
print _("Use help or help .")
print _("Available topics are:")
topics = sorted(self._topics.keys())
for topic in topics:
print '\t', topic
print
print _("Available commands are:")
commands = self.commands.keys()
commands.sort()
for command in commands:
print '\t', command[len(self.CMD_PREFIX):]
else:
print _('Available commands about %s:') % command
print
for command_help_method in self._topics[command]:
try:
if callable(command_help_method):
self._print_help(*command_help_method())
else:
self._print_help(*command_help_method)
except:
import traceback
traceback.print_exc()
print 'ERROR in help method %s'% (
command_help_method.func_name)
help_do_help = ("help", "help [topic|command]",
_("print help message for the given topic/command or \
available topics when no argument"))
def do_quit(self):
"""quit the CLI"""
raise EOFError()
def help_do_quit(self):
return ("quit", "quit", _("quit the application"))
logilab-common-0.61.0/setup.py 0000644 0000151 0000155 00000015345 12276435602 015546 0 ustar narval narval #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=W0404,W0622,W0704,W0613
# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Generic Setup script, takes package info from __pkginfo__.py file.
"""
__docformat__ = "restructuredtext en"
import os
import sys
import shutil
from os.path import isdir, exists, join
try:
if os.environ.get('NO_SETUPTOOLS'):
raise ImportError()
from setuptools import setup
from setuptools.command import install_lib
USE_SETUPTOOLS = 1
except ImportError:
from distutils.core import setup
from distutils.command import install_lib
USE_SETUPTOOLS = 0
try:
# python3
from distutils.command.build_py import build_py_2to3 as build_py
except ImportError:
# python2.x
from distutils.command.build_py import build_py
sys.modules.pop('__pkginfo__', None)
# import optional features
__pkginfo__ = __import__("__pkginfo__")
# import required features
from __pkginfo__ import modname, version, license, description, \
web, author, author_email
distname = getattr(__pkginfo__, 'distname', modname)
scripts = getattr(__pkginfo__, 'scripts', [])
data_files = getattr(__pkginfo__, 'data_files', None)
subpackage_of = getattr(__pkginfo__, 'subpackage_of', None)
include_dirs = getattr(__pkginfo__, 'include_dirs', [])
ext_modules = getattr(__pkginfo__, 'ext_modules', None)
install_requires = getattr(__pkginfo__, 'install_requires', None)
dependency_links = getattr(__pkginfo__, 'dependency_links', [])
classifiers = getattr(__pkginfo__, 'classifiers', [])
STD_BLACKLIST = ('CVS', '.svn', '.hg', 'debian', 'dist', 'build')
IGNORED_EXTENSIONS = ('.pyc', '.pyo', '.elc', '~')
if exists('README'):
long_description = open('README').read()
else:
long_description = ''
def ensure_scripts(linux_scripts):
"""Creates the proper script names required for each platform
(taken from 4Suite)
"""
from distutils import util
if util.get_platform()[:3] == 'win':
scripts_ = [script + '.bat' for script in linux_scripts]
else:
scripts_ = linux_scripts
return scripts_
def get_packages(directory, prefix):
"""return a list of subpackages for the given directory"""
result = []
for package in os.listdir(directory):
absfile = join(directory, package)
if isdir(absfile):
if exists(join(absfile, '__init__.py')) or \
package in ('test', 'tests'):
if prefix:
result.append('%s.%s' % (prefix, package))
else:
result.append(package)
result += get_packages(absfile, result[-1])
return result
EMPTY_FILE = '''"""generated file, don't modify or your data will be lost"""
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
pass
'''
class MyInstallLib(install_lib.install_lib):
"""extend install_lib command to handle package __init__.py if necessary
"""
def run(self):
"""overridden from install_lib class"""
install_lib.install_lib.run(self)
# create Products.__init__.py if needed
if subpackage_of:
product_init = join(self.install_dir, subpackage_of, '__init__.py')
if not exists(product_init):
self.announce('creating %s' % product_init)
stream = open(product_init, 'w')
stream.write(EMPTY_FILE)
stream.close()
class MyBuildPy(build_py):
"""extend build_by command to handle include_dirs variable if necessary
"""
def run(self):
"""overridden from install_lib class"""
build_py.run(self)
# manually install included directories if any
if include_dirs:
if subpackage_of:
base = join(subpackage_of, modname)
else:
base = modname
basedir = os.path.join(self.build_lib, base)
for directory in include_dirs:
dest = join(basedir, directory)
shutil.rmtree(dest, ignore_errors=True)
shutil.copytree(directory, dest)
if sys.version_info >= (3, 0):
# process manually python file in include_dirs (test data)
from distutils.util import run_2to3
# brackets are NOT optional here for py3k compat
print('running 2to3 on', dest)
run_2to3([dest])
def install(**kwargs):
"""setup entry point"""
if USE_SETUPTOOLS:
if '--force-manifest' in sys.argv:
sys.argv.remove('--force-manifest')
# install-layout option was introduced in 2.5.3-1~exp1
elif sys.version_info < (2, 5, 4) and '--install-layout=deb' in sys.argv:
sys.argv.remove('--install-layout=deb')
if subpackage_of:
package = subpackage_of + '.' + modname
kwargs['package_dir'] = {package : '.'}
packages = [package] + get_packages(os.getcwd(), package)
if USE_SETUPTOOLS:
kwargs['namespace_packages'] = [subpackage_of]
else:
kwargs['package_dir'] = {modname : '.'}
packages = [modname] + get_packages(os.getcwd(), modname)
if USE_SETUPTOOLS and install_requires:
kwargs['install_requires'] = install_requires
kwargs['dependency_links'] = dependency_links
kwargs['packages'] = packages
return setup(name = distname,
version = version,
license = license,
description = description,
long_description = long_description,
classifiers = classifiers,
author = author,
author_email = author_email,
url = web,
scripts = ensure_scripts(scripts),
data_files = data_files,
ext_modules = ext_modules,
cmdclass = {'install_lib': MyInstallLib,
'build_py': MyBuildPy},
**kwargs
)
if __name__ == '__main__' :
install()
logilab-common-0.61.0/vcgutils.py 0000644 0000151 0000155 00000016771 12276435602 016252 0 ustar narval narval # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Functions to generate files readable with Georg Sander's vcg
(Visualization of Compiler Graphs).
You can download vcg at http://rw4.cs.uni-sb.de/~sander/html/gshome.html
Note that vcg exists as a debian package.
See vcg's documentation for an explanation of the different values that
may be used for the function parameters.
"""
__docformat__ = "restructuredtext en"
import string
ATTRS_VAL = {
'algos': ('dfs', 'tree', 'minbackward',
'left_to_right', 'right_to_left',
'top_to_bottom', 'bottom_to_top',
'maxdepth', 'maxdepthslow', 'mindepth', 'mindepthslow',
'mindegree', 'minindegree', 'minoutdegree',
'maxdegree', 'maxindegree', 'maxoutdegree'),
'booleans': ('yes', 'no'),
'colors': ('black', 'white', 'blue', 'red', 'green', 'yellow',
'magenta', 'lightgrey',
'cyan', 'darkgrey', 'darkblue', 'darkred', 'darkgreen',
'darkyellow', 'darkmagenta', 'darkcyan', 'gold',
'lightblue', 'lightred', 'lightgreen', 'lightyellow',
'lightmagenta', 'lightcyan', 'lilac', 'turquoise',
'aquamarine', 'khaki', 'purple', 'yellowgreen', 'pink',
'orange', 'orchid'),
'shapes': ('box', 'ellipse', 'rhomb', 'triangle'),
'textmodes': ('center', 'left_justify', 'right_justify'),
'arrowstyles': ('solid', 'line', 'none'),
'linestyles': ('continuous', 'dashed', 'dotted', 'invisible'),
}
# meaning of possible values:
# 0 -> string
# 1 -> int
# list -> value in list
GRAPH_ATTRS = {
'title': 0,
'label': 0,
'color': ATTRS_VAL['colors'],
'textcolor': ATTRS_VAL['colors'],
'bordercolor': ATTRS_VAL['colors'],
'width': 1,
'height': 1,
'borderwidth': 1,
'textmode': ATTRS_VAL['textmodes'],
'shape': ATTRS_VAL['shapes'],
'shrink': 1,
'stretch': 1,
'orientation': ATTRS_VAL['algos'],
'vertical_order': 1,
'horizontal_order': 1,
'xspace': 1,
'yspace': 1,
'layoutalgorithm': ATTRS_VAL['algos'],
'late_edge_labels': ATTRS_VAL['booleans'],
'display_edge_labels': ATTRS_VAL['booleans'],
'dirty_edge_labels': ATTRS_VAL['booleans'],
'finetuning': ATTRS_VAL['booleans'],
'manhattan_edges': ATTRS_VAL['booleans'],
'smanhattan_edges': ATTRS_VAL['booleans'],
'port_sharing': ATTRS_VAL['booleans'],
'edges': ATTRS_VAL['booleans'],
'nodes': ATTRS_VAL['booleans'],
'splines': ATTRS_VAL['booleans'],
}
NODE_ATTRS = {
'title': 0,
'label': 0,
'color': ATTRS_VAL['colors'],
'textcolor': ATTRS_VAL['colors'],
'bordercolor': ATTRS_VAL['colors'],
'width': 1,
'height': 1,
'borderwidth': 1,
'textmode': ATTRS_VAL['textmodes'],
'shape': ATTRS_VAL['shapes'],
'shrink': 1,
'stretch': 1,
'vertical_order': 1,
'horizontal_order': 1,
}
EDGE_ATTRS = {
'sourcename': 0,
'targetname': 0,
'label': 0,
'linestyle': ATTRS_VAL['linestyles'],
'class': 1,
'thickness': 0,
'color': ATTRS_VAL['colors'],
'textcolor': ATTRS_VAL['colors'],
'arrowcolor': ATTRS_VAL['colors'],
'backarrowcolor': ATTRS_VAL['colors'],
'arrowsize': 1,
'backarrowsize': 1,
'arrowstyle': ATTRS_VAL['arrowstyles'],
'backarrowstyle': ATTRS_VAL['arrowstyles'],
'textmode': ATTRS_VAL['textmodes'],
'priority': 1,
'anchor': 1,
'horizontal_order': 1,
}
# Misc utilities ###############################################################
def latin_to_vcg(st):
"""Convert latin characters using vcg escape sequence.
"""
for char in st:
if char not in string.ascii_letters:
try:
num = ord(char)
if num >= 192:
st = st.replace(char, r'\fi%d'%ord(char))
except:
pass
return st
class VCGPrinter:
"""A vcg graph writer.
"""
def __init__(self, output_stream):
self._stream = output_stream
self._indent = ''
def open_graph(self, **args):
"""open a vcg graph
"""
self._stream.write('%sgraph:{\n'%self._indent)
self._inc_indent()
self._write_attributes(GRAPH_ATTRS, **args)
def close_graph(self):
"""close a vcg graph
"""
self._dec_indent()
self._stream.write('%s}\n'%self._indent)
def node(self, title, **args):
"""draw a node
"""
self._stream.write('%snode: {title:"%s"' % (self._indent, title))
self._write_attributes(NODE_ATTRS, **args)
self._stream.write('}\n')
def edge(self, from_node, to_node, edge_type='', **args):
"""draw an edge from a node to another.
"""
self._stream.write(
'%s%sedge: {sourcename:"%s" targetname:"%s"' % (
self._indent, edge_type, from_node, to_node))
self._write_attributes(EDGE_ATTRS, **args)
self._stream.write('}\n')
# private ##################################################################
def _write_attributes(self, attributes_dict, **args):
"""write graph, node or edge attributes
"""
for key, value in args.items():
try:
_type = attributes_dict[key]
except KeyError:
raise Exception('''no such attribute %s
possible attributes are %s''' % (key, attributes_dict.keys()))
if not _type:
self._stream.write('%s%s:"%s"\n' % (self._indent, key, value))
elif _type == 1:
self._stream.write('%s%s:%s\n' % (self._indent, key,
int(value)))
elif value in _type:
self._stream.write('%s%s:%s\n' % (self._indent, key, value))
else:
raise Exception('''value %s isn\'t correct for attribute %s
correct values are %s''' % (value, key, _type))
def _inc_indent(self):
"""increment indentation
"""
self._indent = ' %s' % self._indent
def _dec_indent(self):
"""decrement indentation
"""
self._indent = self._indent[:-2]
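# Illustrative usage sketch, not part of the original module (Python 2, like
# the rest of the file): drive VCGPrinter to emit a tiny two-node graph into
# an in-memory buffer. The node titles and attribute values are arbitrary but
# chosen from the ATTRS_VAL tables above.
if __name__ == '__main__':
    from cStringIO import StringIO
    _out = StringIO()
    _printer = VCGPrinter(_out)
    _printer.open_graph(title='example graph', layoutalgorithm='dfs')
    _printer.node('a', label='first node', shape='box')
    _printer.node('b', label='second node', shape='ellipse')
    _printer.edge('a', 'b', linestyle='dashed', arrowstyle='solid')
    _printer.close_graph()
    print _out.getvalue()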