fs-0.3.0/0000755000175000017500000000000011407431454010625 5ustar willwillfs-0.3.0/fs/0000755000175000017500000000000011407431454011235 5ustar willwillfs-0.3.0/fs/path.py0000644000175000017500000003063311406251000012532 0ustar willwill""" fs.path ======= Useful functions for FS path manipulation. This is broadly similar to the standard ``os.path`` module but works with paths in the canonical format expected by all FS objects (forwardslash-separated, optional leading slash). """ def normpath(path): """Normalizes a path to be in the format expected by FS objects. This function remove any leading or trailing slashes, collapses duplicate slashes, replaces forward with backward slashes, and generally tries very hard to return a new path string the canonical FS format. If the path is invalid, ValueError will be raised. :param path: path to normalize :returns: a valid FS path >>> normpath(r"foo\\bar\\baz") 'foo/bar/baz' >>> normpath("/foo//bar/frob/../baz") '/foo/bar/baz' >>> normpath("foo/../../bar") Traceback (most recent call last) ... ValueError: too many backrefs in path 'foo/../../bar' """ if not path: return path components = [] for comp in path.replace('\\','/').split("/"): if not comp or comp == ".": pass elif comp == "..": try: components.pop() except IndexError: err = "too many backrefs in path '%s'" % (path,) raise ValueError(err) else: components.append(comp) if path[0] in "\\/": if not components: components = [""] components.insert(0, "") if isinstance(path, unicode): return u"/".join(components) else: return '/'.join(components) def iteratepath(path, numsplits=None): """Iterate over the individual components of a path. :param path: Path to iterate over :numsplits: Maximum number of splits """ path = relpath(normpath(path)) if not path: return [] if numsplits == None: return map(None, path.split('/')) else: return map(None, path.split('/', numsplits)) def recursepath(path, reverse=False): """Returns intermediate paths from the root to the given path :param reverse: reverses the order of the paths >>> recursepath('a/b/c') ['/', u'/a', u'/a/b', u'/a/b/c'] """ if reverse: paths = [] path = abspath(path).rstrip("/") while path: paths.append(path) path = dirname(path).rstrip("/") return paths + ["/"] else: paths = [""] + list(iteratepath(path)) return ["/"] + [u'/'.join(paths[:i+1]) for i in xrange(1,len(paths))] def abspath(path): """Convert the given path to an absolute path. Since FS objects have no concept of a 'current directory' this simply adds a leading '/' character if the path doesn't already have one. """ if not path: return u'/' if not path.startswith('/'): return u'/' + path return path def relpath(path): """Convert the given path to a relative path. This is the inverse of abspath(), stripping a leading '/' from the path if it is present. :param path: Path to adjust >>> relpath('/a/b') 'a/b' """ while path and path[0] == "/": path = path[1:] return path def pathjoin(*paths): """Joins any number of paths together, returning a new path string. :param paths: Paths to join are given in positional arguments >>> pathjoin('foo', 'bar', 'baz') 'foo/bar/baz' >>> pathjoin('foo/bar', '../baz') 'foo/baz' >>> pathjoin('foo/bar', '/baz') '/baz' """ absolute = False relpaths = [] for p in paths: if p: if p[0] in '\\/': del relpaths[:] absolute = True relpaths.append(p) path = normpath("/".join(relpaths)) if absolute and not path.startswith("/"): path = u"/" + path return path # Allow pathjoin() to be used as fs.path.join() join = pathjoin def pathsplit(path): """Splits a path into (head, tail) pair. This function splits a path into a pair (head, tail) where 'tail' is the last pathname component and 'head' is all preceeding components. :param path: Path to split >>> pathsplit("foo/bar") ('foo', 'bar') >>> pathsplit("foo/bar/baz") ('foo/bar', 'baz') """ split = normpath(path).rsplit('/', 1) if len(split) == 1: return (u'', split[0]) return tuple(split) # Allow pathsplit() to be used as fs.path.split() split = pathsplit def dirname(path): """Returns the parent directory of a path. This is always equivalent to the 'head' component of the value returned by pathsplit(path). :param path: A FS path >>> dirname('foo/bar/baz') 'foo/bar' """ return pathsplit(path)[0] def basename(path): """Returns the basename of the resource referenced by a path. This is always equivalent to the 'head' component of the value returned by pathsplit(path). :param path: A FS path >>> basename('foo/bar/baz') 'baz' """ return pathsplit(path)[1] def issamedir(path1, path2): """Return true if two paths reference a resource in the same directory. :param path1: An FS path :param path2: An FS path >>> issamedir("foo/bar/baz.txt", "foo/bar/spam.txt") True >>> issamedir("foo/bar/baz/txt", "spam/eggs/spam.txt") False """ return pathsplit(normpath(path1))[0] == pathsplit(normpath(path2))[0] def isprefix(path1, path2): """Return true is path1 is a prefix of path2. :param path1: An FS path :param path2: An FS path >>> isprefix("foo/bar", "foo/bar/spam.txt") True >>> isprefix("foo/bar/", "foo/bar") True >>> isprefix("foo/barry", "foo/baz/bar") False >>> isprefix("foo/bar/baz/", "foo/baz/bar") False """ bits1 = path1.split("/") bits2 = path2.split("/") while bits1 and bits1[-1] == "": bits1.pop() if len(bits1) > len(bits2): return False for (bit1,bit2) in zip(bits1,bits2): if bit1 != bit2: return False return True def forcedir(path): """Ensure the path ends with a trailing / :param path: An FS path >>> forcedir("foo/bar") 'foo/bar/' >>> forcedir("foo/bar/") 'foo/bar/' """ if not path.endswith('/'): return path + '/' return path def frombase(path1, path2): if not isprefix(path1, path2): raise ValueError("path1 must be a prefix of path2") return path2[len(path1):] class PathMap(object): """Dict-like object with paths for keys. A PathMap is like a dictionary where the keys are all FS paths. It allows various dictionary operations (e.g. listing values, clearing values) to be performed on a subset of the keys sharing some common prefix, e.g.:: # list all values in the map pm.values() # list all values for paths starting with "/foo/bar" pm.values("/foo/bar") Under the hood, a PathMap is a trie-like structure where each level is indexed by path name component. This allows lookups to be performed in O(number of path components) while permitting efficient prefix-based operations. """ def __init__(self): self._map = {} def __getitem__(self,path): """Get the value stored under the given path.""" m = self._map for name in iteratepath(path): try: m = m[name] except KeyError: raise KeyError(path) try: return m[""] except KeyError: raise KeyError(path) def __contains__(self,path): """Check whether the given path has a value stored in the map.""" try: self[path] except KeyError: return False else: return True def __setitem__(self,path,value): """Set the value stored under the given path.""" m = self._map for name in iteratepath(path): try: m = m[name] except KeyError: m = m.setdefault(name,{}) m[""] = value def __delitem__(self,path): """Delete the value stored under the given path.""" ms = [[self._map,None]] for name in iteratepath(path): try: ms.append([ms[-1][0][name],None]) except KeyError: raise KeyError(path) else: ms[-2][1] = name try: del ms[-1][0][""] except KeyError: raise KeyError(path) else: while len(ms) > 1 and not ms[-1][0]: del ms[-1] del ms[-1][0][ms[-1][1]] def get(self,path,default=None): """Get the value stored under the given path, or the given default.""" try: return self[path] except KeyError: return default def pop(self,path,default=None): """Pop the value stored under the given path, or the given default.""" ms = [[self._map,None]] for name in iteratepath(path): try: ms.append([ms[-1][0][name],None]) except KeyError: return default else: ms[-2][1] = name try: val = ms[-1][0].pop("") except KeyError: val = default else: while len(ms) > 1 and not ms[-1][0]: del ms[-1] del ms[-1][0][ms[-1][1]] return val def setdefault(self,path,value): m = self._map for name in iteratepath(path): try: m = m[name] except KeyError: m = m.setdefault(name,{}) return m.setdefault("",value) def clear(self,root="/"): """Clear all entries beginning with the given root path.""" m = self._map for name in iteratepath(root): try: m = m[name] except KeyError: return m.clear() def iterkeys(self,root="/",m=None): """Iterate over all keys beginning with the given root path.""" if m is None: m = self._map for name in iteratepath(root): try: m = m[name] except KeyError: return for (nm,subm) in m.iteritems(): if not nm: yield abspath(normpath(root)) else: k = pathjoin(root,nm) for subk in self.iterkeys(k,subm): yield subk def keys(self,root="/"): return list(self.iterkeys(root)) def itervalues(self,root="/",m=None): """Iterate over all values whose keys begin with the given root path.""" if m is None: m = self._map for name in iteratepath(root): try: m = m[name] except KeyError: return for (nm,subm) in m.iteritems(): if not nm: yield subm else: k = pathjoin(root,nm) for subv in self.itervalues(k,subm): yield subv def values(self,root="/"): return list(self.itervalues(root)) def iteritems(self,root="/",m=None): """Iterate over all (key,value) pairs beginning with the given root.""" if m is None: m = self._map for name in iteratepath(root): try: m = m[name] except KeyError: return for (nm,subm) in m.iteritems(): if not nm: yield (abspath(normpath(root)),subm) else: k = pathjoin(root,nm) for (subk,subv) in self.iteritems(k,subm): yield (subk,subv) def items(self,root="/"): return list(self.iteritems(root)) def iternames(self,root="/"): """Iterate over all names beneath the given root path. This is basically the equivalent of listdir() for a PathMap - it yields the next level of name components beneath the given path. """ m = self._map for name in iteratepath(root): try: m = m[name] except KeyError: return for (nm,subm) in m.iteritems(): if nm and subm: yield nm def names(self,root="/"): return list(self.iternames(root)) fs-0.3.0/fs/ftpfs.py0000644000175000017500000010665111407422447012744 0ustar willwill""" fs.ftpfs ======== FTPFS is a filesystem for accessing an FTP server (uses ftplib in standard library) """ __all__ = ['FTPFS'] import fs from fs.base import * from fs.path import pathsplit from ftplib import FTP, error_perm, error_temp, error_proto, error_reply try: from ftplib import _GLOBAL_DEFAULT_TIMEOUT _FTPLIB_TIMEOUT = True except ImportError: _GLOBAL_DEFAULT_TIMEOUT = None _FTPLIB_TIMEOUT = False import threading from time import sleep import datetime import re from socket import error as socket_error try: from functools import wraps except ImportError: wraps = lambda f: lambda f: f try: from cStringIO import StringIO except ImportError: from StringIO import StringIO import time import sys # ----------------------------------------------- # Taken from http://www.clapper.org/software/python/grizzled/ # ----------------------------------------------- class Enum(object): def __init__(self, *names): self._names_map = dict((name, i) for i, name in enumerate(names)) def __getattr__(self, name): return self._names_map[name] MONTHS = ('jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec') MTIME_TYPE = Enum('UNKNOWN', 'LOCAL', 'REMOTE_MINUTE', 'REMOTE_DAY') """ ``MTIME_TYPE`` identifies how a modification time ought to be interpreted (assuming the caller cares). - ``LOCAL``: Time is local to the client, granular to (at least) the minute - ``REMOTE_MINUTE``: Time is local to the server and granular to the minute - ``REMOTE_DAY``: Time is local to the server and granular to the day. - ``UNKNOWN``: Time's locale is unknown. """ ID_TYPE = Enum('UNKNOWN', 'FULL') """ ``ID_TYPE`` identifies how a file's identifier should be interpreted. - ``FULL``: The ID is known to be complete. - ``UNKNOWN``: The ID is not set or its type is unknown. """ # --------------------------------------------------------------------------- # Globals # --------------------------------------------------------------------------- now = time.time() current_year = time.localtime().tm_year # --------------------------------------------------------------------------- # Classes # --------------------------------------------------------------------------- class FTPListData(object): """ The `FTPListDataParser` class's ``parse_line()`` method returns an instance of this class, capturing the parsed data. :IVariables: name : str The name of the file, if parsable try_cwd : bool ``True`` if the entry might be a directory (i.e., the caller might want to try an FTP ``CWD`` command), ``False`` if it cannot possibly be a directory. try_retr : bool ``True`` if the entry might be a retrievable file (i.e., the caller might want to try an FTP ``RETR`` command), ``False`` if it cannot possibly be a file. size : long The file's size, in bytes mtime : long The file's modification time, as a value that can be passed to ``time.localtime()``. mtime_type : `MTIME_TYPE` How to interpret the modification time. See `MTIME_TYPE`. id : str A unique identifier for the file. The unique identifier is unique on the *server*. On a Unix system, this identifier might be the device number and the file's inode; on other system's, it might be something else. It's also possible for this field to be ``None``. id_type : `ID_TYPE` How to interpret the identifier. See `ID_TYPE`. """ def __init__(self, raw_line): self.raw_line = raw_line self.name = None self.try_cwd = False self.try_retr = False self.size = 0 self.mtime_type = MTIME_TYPE.UNKNOWN self.mtime = 0 self.id_type = ID_TYPE.UNKNOWN self.id = None class FTPListDataParser(object): """ An ``FTPListDataParser`` object can be used to parse one or more lines that were retrieved by an FTP ``LIST`` command that was sent to a remote server. """ def __init__(self): pass def parse_line(self, ftp_list_line): """ Parse a line from an FTP ``LIST`` command. :Parameters: ftp_list_line : str The line of output :rtype: `FTPListData` :return: An `FTPListData` object describing the parsed line, or ``None`` if the line could not be parsed. Note that it's possible for this method to return a partially-filled `FTPListData` object (e.g., one without a name). """ buf = ftp_list_line if len(buf) < 2: # an empty name in EPLF, with no info, could be 2 chars return None c = buf[0] if c == '+': return self._parse_EPLF(buf) elif c in 'bcdlps-': return self._parse_unix_style(buf) i = buf.find(';') if i > 0: return self._parse_multinet(buf, i) if c in '0123456789': return self._parse_msdos(buf) return None # UNIX ls does not show the year for dates in the last six months. # So we have to guess the year. # # Apparently NetWare uses ``twelve months'' instead of ``six months''; ugh. # Some versions of ls also fail to show the year for future dates. def _guess_time(self, month, mday, hour=0, minute=0): year = None t = None for year in range(current_year - 1, current_year + 100): t = self._get_mtime(year, month, mday, hour, minute) if (now - t) < (350 * 86400): return t return 0 def _get_mtime(self, year, month, mday, hour=0, minute=0, second=0): return time.mktime((year, month, mday, hour, minute, second, 0, 0, -1)) def _get_month(self, buf): if len(buf) == 3: for i in range(0, 12): if buf.lower().startswith(MONTHS[i]): return i+1 return -1 def _parse_EPLF(self, buf): result = FTPListData(buf) # see http://cr.yp.to/ftp/list/eplf.html #"+i8388621.29609,m824255902,/,\tdev" #"+i8388621.44468,m839956783,r,s10376,\tRFCEPLF" i = 1 for j in range(1, len(buf)): if buf[j] == '\t': result.name = buf[j+1:] break if buf[j] == ',': c = buf[i] if c == '/': result.try_cwd = True elif c == 'r': result.try_retr = True elif c == 's': result.size = long(buf[i+1:j]) elif c == 'm': result.mtime_type = MTIME_TYPE.LOCAL result.mtime = long(buf[i+1:j]) elif c == 'i': result.id_type = ID_TYPE.FULL result.id = buf[i+1:j-i-1] i = j + 1 return result def _parse_unix_style(self, buf): # UNIX-style listing, without inum and without blocks: # "-rw-r--r-- 1 root other 531 Jan 29 03:26 README" # "dr-xr-xr-x 2 root other 512 Apr 8 1994 etc" # "dr-xr-xr-x 2 root 512 Apr 8 1994 etc" # "lrwxrwxrwx 1 root other 7 Jan 25 00:17 bin -> usr/bin" # # Also produced by Microsoft's FTP servers for Windows: # "---------- 1 owner group 1803128 Jul 10 10:18 ls-lR.Z" # "d--------- 1 owner group 0 May 9 19:45 Softlib" # # Also WFTPD for MSDOS: # "-rwxrwxrwx 1 noone nogroup 322 Aug 19 1996 message.ftp" # # Also NetWare: # "d [R----F--] supervisor 512 Jan 16 18:53 login" # "- [R----F--] rhesus 214059 Oct 20 15:27 cx.exe" # # Also NetPresenz for the Mac: # "-------r-- 326 1391972 1392298 Nov 22 1995 MegaPhone.sit" # "drwxrwxr-x folder 2 May 10 1996 network" result = FTPListData(buf) buflen = len(buf) c = buf[0] if c == 'd': result.try_cwd = True if c == '-': result.try_retr = True if c == 'l': result.try_retr = True result.try_cwd = True state = 1 i = 0 tokens = buf.split() for j in range(1, buflen): if (buf[j] == ' ') and (buf[j - 1] != ' '): if state == 1: # skipping perm state = 2 elif state == 2: # skipping nlink state = 3 if ((j - i) == 6) and (buf[i] == 'f'): # NetPresenz state = 4 elif state == 3: # skipping UID/GID state = 4 elif state == 4: # getting tentative size try: size = long(buf[i:j]) except ValueError: pass state = 5 elif state == 5: # searching for month, else getting tentative size month = self._get_month(buf[i:j]) if month >= 0: state = 6 else: size = long(buf[i:j]) elif state == 6: # have size and month mday = long(buf[i:j]) state = 7 elif state == 7: # have size, month, mday if (j - i == 4) and (buf[i+1] == ':'): hour = long(buf[i]) minute = long(buf[i+2:i+4]) result.mtime_type = MTIME_TYPE.REMOTE_MINUTE result.mtime = self._guess_time(month, mday, hour, minute) elif (j - i == 5) and (buf[i+2] == ':'): hour = long(buf[i:i+2]) minute = long(buf[i+3:i+5]) result.mtime_type = MTIME_TYPE.REMOTE_MINUTE result.mtime = self._guess_time(month, mday, hour, minute) elif j - i >= 4: year = long(buf[i:j]) result.mtimetype = MTIME_TYPE.REMOTE_DAY result.mtime = self._get_mtime(year, month, mday) else: break result.name = buf[j+1:] state = 8 elif state == 8: # twiddling thumbs pass i = j + 1 while (i < buflen) and (buf[i] == ' '): i += 1 #if state != 8: #return None result.size = size if c == 'l': i = 0 while (i + 3) < len(result.name): if result.name[i:i+4] == ' -> ': result.name = result.name[:i] break i += 1 # eliminate extra NetWare spaces if (buf[1] == ' ') or (buf[1] == '['): namelen = len(result.name) if namelen > 3: result.name = result.name.strip() return result def _parse_multinet(self, buf, i): # MultiNet (some spaces removed from examples) # "00README.TXT;1 2 30-DEC-1996 17:44 [SYSTEM] (RWED,RWED,RE,RE)" # "CORE.DIR;1 1 8-SEP-1996 16:09 [SYSTEM] (RWE,RWE,RE,RE)" # and non-MultiNet VMS: #"CII-MANUAL.TEX;1 213/216 29-JAN-1996 03:33:12 [ANONYMOU,ANONYMOUS] (RWED,RWED,,)" result = FTPListData(buf) result.name = buf[:i] buflen = len(buf) if i > 4: if buf[i-4:i] == '.DIR': result.name = result.name[0:-4] result.try_cwd = True if not result.try_cwd: result.try_retr = True try: i = buf.index(' ', i) i = _skip(buf, i, ' ') i = buf.index(' ', i) i = _skip(buf, i, ' ') j = i j = buf.index('-', j) mday = long(buf[i:j]) j = _skip(buf, j, '-') i = j j = buf.index('-', j) month = self._get_month(buf[i:j]) if month < 0: raise IndexError j = _skip(buf, j, '-') i = j j = buf.index(' ', j) year = long(buf[i:j]) j = _skip(buf, j, ' ') i = j j = buf.index(':', j) hour = long(buf[i:j]) j = _skip(buf, j, ':') i = j while (buf[j] != ':') and (buf[j] != ' '): j += 1 if j == buflen: raise IndexError # abort, abort! minute = long(buf[i:j]) result.mtimetype = MTIME_TYPE.REMOTE_MINUTE result.mtime = self._get_mtime(year, month, mday, hour, minute) except IndexError: pass return result def _parse_msdos(self, buf): # MSDOS format # 04-27-00 09:09PM licensed # 07-18-00 10:16AM pub # 04-14-00 03:47PM 589 readme.htm buflen = len(buf) i = 0 j = 0 try: result = FTPListData(buf) j = buf.index('-', j) month = long(buf[i:j]) j = _skip(buf, j, '-') i = j j = buf.index('-', j) mday = long(buf[i:j]) j = _skip(buf, j, '-') i = j j = buf.index(' ', j) year = long(buf[i:j]) if year < 50: year += 2000 if year < 1000: year += 1900 j = _skip(buf, j, ' ') i = j j = buf.index(':', j) hour = long(buf[i:j]) j = _skip(buf, j, ':') i = j while not (buf[j] in 'AP'): j += 1 if j == buflen: raise IndexError minute = long(buf[i:j]) if buf[j] == 'A': j += 1 if j == buflen: raise IndexError if buf[j] == 'P': hour = (hour + 12) % 24 j += 1 if j == buflen: raise IndexError if buf[j] == 'M': j += 1 if j == buflen: raise IndexError j = _skip(buf, j, ' ') if buf[j] == '<': result.try_cwd = True j = buf.index(' ', j) else: i = j j = buf.index(' ', j) result.size = long(buf[i:j]) result.try_retr = True j = _skip(buf, j, ' ') result.name = buf[j:] result.mtimetype = MTIME_TYPE.REMOTE_MINUTE result.mtime = self._get_mtime(year, month, mday, hour, minute) except IndexError: pass return result # --------------------------------------------------------------------------- # Public Functions # --------------------------------------------------------------------------- def parse_ftp_list_line(ftp_list_line): """ Convenience function that instantiates an `FTPListDataParser` object and passes ``ftp_list_line`` to the object's ``parse_line()`` method, returning the result. :Parameters: ftp_list_line : str The line of output :rtype: `FTPListData` :return: An `FTPListData` object describing the parsed line, or ``None`` if the line could not be parsed. Note that it's possible for this method to return a partially-filled `FTPListData` object (e.g., one without a name). """ return FTPListDataParser().parse_line(ftp_list_line) # --------------------------------------------------------------------------- # Private Functions # --------------------------------------------------------------------------- def _skip(s, i, c): while s[i] == c: i += 1 if i == len(s): raise IndexError return i class _FTPFile(object): """ A file-like that provides access to a file being streamed over ftp.""" def __init__(self, ftpfs, ftp, path, mode): if not hasattr(self, '_lock'): self._lock = threading.RLock() self.ftpfs = ftpfs self.ftp = ftp self.path = path self.mode = mode self.read_pos = 0 self.write_pos = 0 self.closed = False if 'r' in mode or 'a' in mode: self.file_size = ftpfs.getsize(path) self.conn = None path = _encode(path) #self._lock = ftpfs._lock if 'r' in mode: self.ftp.voidcmd('TYPE I') self.conn = ftp.transfercmd('RETR '+path, None) elif 'w' in mode or 'a' in mode: self.ftp.voidcmd('TYPE I') if 'a' in mode: self.write_pos = self.file_size self.conn = self.ftp.transfercmd('APPE '+path) else: self.conn = self.ftp.transfercmd('STOR '+path) @synchronize def read(self, size=None): if self.conn is None: return '' chunks = [] if size is None: while 1: data = self.conn.recv(4096) if not data: self.conn.close() self.conn = None self.ftp.voidresp() break chunks.append(data) self.read_pos += len(data) return ''.join(chunks) remaining_bytes = size while remaining_bytes: read_size = min(remaining_bytes, 4096) data = self.conn.recv(read_size) if not data: self.conn.close() self.conn = None self.ftp.voidresp() break chunks.append(data) self.read_pos += len(data) remaining_bytes -= len(data) return ''.join(chunks) @synchronize def write(self, data): data_pos = 0 remaining_data = len(data) while remaining_data: chunk_size = min(remaining_data, 4096) self.conn.sendall(data[data_pos:data_pos+chunk_size]) data_pos += chunk_size remaining_data -= chunk_size self.write_pos += chunk_size def __enter__(self): return self def __exit__(self,exc_type,exc_value,traceback): self.close() @synchronize def flush(self): return def seek(self, pos, where=fs.SEEK_SET): # Ftp doesn't support a real seek, so we close the transfer and resume # it at the new position with the REST command # I'm not sure how reliable this method is! if not self.file_size: raise ValueError("Seek only works with files open for read") self._lock.acquire() try: current = self.tell() new_pos = None if where == fs.SEEK_SET: new_pos = pos elif where == fs.SEEK_CUR: new_pos = current + pos elif where == fs.SEEK_END: new_pos = self.file_size + pos if new_pos < 0: raise ValueError("Can't seek before start of file") if self.conn is not None: self.conn.close() finally: self._lock.release() self.close() self._lock.acquire() try: self.ftp = self.ftpfs._open_ftp() self.ftp.sendcmd('TYPE I') self.ftp.sendcmd('REST %i' % (new_pos)) self.__init__(self.ftpfs, self.ftp, _encode(self.path), self.mode) self.read_pos = new_pos finally: self._lock.release() #raise UnsupportedError('ftp seek') @synchronize def tell(self): if 'r' in self.mode: return self.read_pos else: return self.write_pos @synchronize def close(self): if self.conn is not None: self.conn.close() self.conn = None self.ftp.voidresp() if self.ftp is not None: self.ftp.close() self.closed = True if 'w' in self.mode or 'a' in self.mode: self.ftpfs._on_file_written(self.path) def __iter__(self): return self.next() def next(self): """ Line iterator This isn't terribly efficient. It would probably be better to do a read followed by splitlines. """ endings = '\r\n' chars = [] append = chars.append read = self.read join = ''.join while True: char = read(1) if not char: if chars: yield join(chars) break append(char) if char in endings: line = join(chars) del chars[:] c = read(1) if not char: yield line break if c in endings and c != char: yield line + c else: yield line append(c) def ftperrors(f): @wraps(f) def deco(self, *args, **kwargs): self._lock.acquire() try: self._enter_dircache() try: try: ret = f(self, *args, **kwargs) except Exception, e: self._translate_exception(args[0] if args else '', e) finally: self._leave_dircache() finally: self._lock.release() if not self.use_dircache: self.clear_dircache() return ret return deco def _encode(s): if isinstance(s, unicode): return s.encode('utf-8') return s class FTPFS(FS): _locals = threading.local() def __init__(self, host='', user='', passwd='', acct='', timeout=_GLOBAL_DEFAULT_TIMEOUT, port=21, dircache=True, max_buffer_size=128*1024*1024): """ Connect to a FTP server. :param host: Host to connect to :param user: Username, or a blank string for anonymous :param passwd: Password, if required :param acct: Accounting information (few servers require this) :param timeout: Timeout in seconds :param port: Port to connection (default is 21) :param dircache: If True then directory information will be cached, which will speed up operations such as getinfo, isdi, isfile, but changes to the ftp file structure will not be visible until `~fs.ftpfs.FTPFS.clear_dircache` is called :param dircache: If True directory information will be cached for fast access :param max_buffer_size: Number of bytes to hold before blocking write operations """ super(FTPFS, self).__init__() self.host = host self.port = port self.user = user self.passwd = passwd self.acct = acct self.timeout = timeout self.use_dircache = dircache self.get_dircache() self.max_buffer_size = max_buffer_size self._cache_hint = False self._locals._ftp = None self._thread_ftps = set() self.ftp @synchronize def cache_hint(self, enabled): self._cache_hint = enabled @synchronize def _enter_dircache(self): self.get_dircache() count = getattr(self._locals, '_dircache_count', 0) count += 1 self._locals._dircache_count = count @synchronize def _leave_dircache(self): self._locals._dircache_count -= 1 if not self._locals._dircache_count and not self._cache_hint: self.clear_dircache() assert self._locals._dircache_count >= 0, "dircache count should never be negative" @synchronize def get_dircache(self): dircache = getattr(self._locals, '_dircache', None) if dircache is None: dircache = {} self._locals._dircache = dircache self._locals._dircache_count = 0 return dircache @synchronize def _on_file_written(self, path): self.clear_dircache(dirname(path)) @synchronize def _readdir(self, path): dircache = self.get_dircache() dircache_count = self._locals._dircache_count if dircache_count: cached_dirlist = dircache.get(path) if cached_dirlist is not None: return cached_dirlist dirlist = {} parser = FTPListDataParser() def on_line(line): #print repr(line) if not isinstance(line, unicode): line = line.decode('utf-8') info = parser.parse_line(line) if info: info = info.__dict__ dirlist[info['name']] = info try: self.ftp.dir(_encode(path), on_line) except error_reply: pass dircache[path] = dirlist return dirlist @synchronize def clear_dircache(self, *paths): """ Clear cached directory information. :param path: Path of directory to clear cache for, or all directories if None (the default) """ dircache = self.get_dircache() if not paths: dircache.clear() else: for path in paths: dircache.pop(path, None) @synchronize def _check_path(self, path): base, fname = pathsplit(abspath(path)) dirlist = self._readdir(base) if fname and fname not in dirlist: raise ResourceNotFoundError(path) return dirlist, fname def _get_dirlist(self, path): base, fname = pathsplit(abspath(path)) dirlist = self._readdir(base) return dirlist, fname @synchronize def get_ftp(self): if getattr(self._locals, '_ftp', None) is None: self._locals._ftp = self._open_ftp() ftp = self._locals._ftp self._thread_ftps.add(ftp) return self._locals._ftp @synchronize def set_ftp(self, ftp): self._locals._ftp = ftp ftp = property(get_ftp, set_ftp) @synchronize def _open_ftp(self): try: ftp = FTP() if _FTPLIB_TIMEOUT: ftp.connect(self.host, self.port, self.timeout) else: ftp.connect(self.host, self.port) ftp.login(self.user, self.passwd, self.acct) except socket_error, e: raise RemoteConnectionError(str(e), details=e) return ftp def __getstate__(self): state = super(FTPFS, self).__getstate__() del state["_thread_ftps"] return state def __setstate__(self,state): super(FTPFS, self).__setstate__(state) self._thread_ftps = set() self.ftp def __str__(self): return '' % self.host def __unicode__(self): return u'' % self.host @convert_os_errors def _translate_exception(self, path, exception): """ Translates exceptions that my be thrown by the ftp code in to FS exceptions TODO: Flesh this out with more specific exceptions """ if isinstance(exception, socket_error): raise RemoteConnectionError(str(exception), details=exception) elif isinstance(exception, error_temp): code, message = str(exception).split(' ', 1) raise RemoteConnectionError(str(exception), path=path, msg="FTP error: %s (see details)" % str(exception), details=exception) elif isinstance(exception, error_perm): code, message = str(exception).split(' ', 1) code = int(code) if code == 550: raise ResourceNotFoundError(path) if code == 552: raise StorageSpaceError raise PermissionDeniedError(str(exception), path=path, msg="FTP error: %s (see details)" % str(exception), details=exception) raise exception @ftperrors def close(self): for ftp in self._thread_ftps: ftp.close() self._thread_ftps.clear() self.closed = True @ftperrors def open(self, path, mode='r'): mode = mode.lower() if 'r' in mode: if not self.isfile(path): raise ResourceNotFoundError(path) if 'w' in mode or 'a' in mode: self.clear_dircache(dirname(path)) ftp = self._open_ftp() f = _FTPFile(self, ftp, path, mode) return f @ftperrors def exists(self, path): if path in ('', '/'): return True dirlist, fname = self._get_dirlist(path) return fname in dirlist @ftperrors def isdir(self, path): if path in ('', '/'): return True dirlist, fname = self._get_dirlist(path) info = dirlist.get(fname) if info is None: return False return info['try_cwd'] @ftperrors def isfile(self, path): if path in ('', '/'): return False dirlist, fname = self._get_dirlist(path) info = dirlist.get(fname) if info is None: return False return not info['try_cwd'] @ftperrors def listdir(self, path="./", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False): path = normpath(path) if not self.exists(path): raise ResourceNotFoundError(path) if not self.isdir(path): raise ResourceInvalidError(path) paths = self._readdir(path).keys() return self._listdir_helper(path, paths, wildcard, full, absolute, dirs_only, files_only) @ftperrors def makedir(self, path, recursive=False, allow_recreate=False): if path in ('', '/'): return def checkdir(path): self.clear_dircache(dirname(path), path) try: self.ftp.mkd(_encode(path)) except error_reply: return except error_perm, e: if recursive or allow_recreate: return if str(e).split(' ', 1)[0]=='550': raise DestinationExistsError(path) else: raise if recursive: for p in recursepath(path): checkdir(p) else: base = dirname(path) if not self.exists(base): raise ParentDirectoryMissingError(path) if not allow_recreate: if self.exists(path): if self.isfile(path): raise ResourceInvalidError(path) raise DestinationExistsError(path) checkdir(path) @ftperrors def remove(self, path): if not self.exists(path): raise ResourceNotFoundError(path) if not self.isfile(path): raise ResourceInvalidError(path) self.clear_dircache(dirname(path)) self.ftp.delete(_encode(path)) @ftperrors def removedir(self, path, recursive=False, force=False): if not self.exists(path): raise ResourceNotFoundError(path) if self.isfile(path): raise ResourceInvalidError(path) if not force: for checkpath in self.listdir(path): raise DirectoryNotEmptyError(path) try: if force: for rpath in self.listdir(path, full=True): try: if self.isfile(rpath): self.remove(rpath) elif self.isdir(rpath): self.removedir(rpath, force=force) except FSError: pass self.clear_dircache(dirname(path), path) self.ftp.rmd(_encode(path)) except error_reply: pass if recursive: try: self.removedir(dirname(path), recursive=True) except DirectoryNotEmptyError: pass @ftperrors def rename(self, src, dst): self.clear_dircache(dirname(src), dirname(dst), src, dst) try: self.ftp.rename(_encode(src), _encode(dst)) except error_perm, exception: code, message = str(exception).split(' ', 1) if code == "550": if not self.exists(dirname(dst)): raise ParentDirectoryMissingError(dst) except error_reply: pass @ftperrors def getinfo(self, path): dirlist, fname = self._check_path(path) if not fname: return {} info = dirlist[fname].copy() info['modified_time'] = datetime.datetime.fromtimestamp(info['mtime']) info['created_time'] = info['modified_time'] return info @ftperrors def getsize(self, path): size = None if self._locals._dircache_count: dirlist, fname = self._check_path(path) size = dirlist[fname].get('size') if size is not None: return size self.ftp.sendcmd('TYPE I') size = self.ftp.size(_encode(path)) if size is None: dirlist, fname = self._check_path(path) size = dirlist[fname].get('size') if size is None: raise OperationFailedError('getsize', path) return size @ftperrors def desc(self, path): dirlist, fname = self._check_path(path) if fname not in dirlist: raise ResourceNotFoundError(path) return dirlist[fname].get('raw_line', 'No description available') @ftperrors def move(self, src, dst, overwrite=False, chunk_size=16384): if not overwrite and self.exists(dst): raise DestinationExistsError(dst) self.clear_dircache(dirname(src), dirname(dst)) try: self.rename(src, dst) except error_reply: pass except: self.copy(src, dst) self.remove(src) @ftperrors def movedir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=16384): self.clear_dircache(src, dst, dirname(src), dirname(dst)) super(FTPFS, self).movedir(src, dst, overwrite, ignore_errors, chunk_size) @ftperrors def copydir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=16384): self.clear_dircache(src, dst, dirname(src), dirname(dst)) super(FTPFS, self).copydir(src, dst, overwrite, ignore_errors, chunk_size) if __name__ == "__main__": ftp_fs = FTPFS('ftp.ncsa.uiuc.edu') ftp_fs.cache_hint(True) from fs.browsewin import browse browse(ftp_fs) #ftp_fs = FTPFS('127.0.0.1', 'user', '12345', dircache=True) #f = ftp_fs.open('testout.txt', 'w') #f.write("Testing writing to an ftp file!") #f.write("\nHai!") #f.close() #ftp_fs.createfile(u"\N{GREEK CAPITAL LETTER KAPPA}", 'unicode!') #kappa = u"\N{GREEK CAPITAL LETTER KAPPA}" #ftp_fs.makedir(kappa) #print repr(ftp_fs.listdir()) #print repr(ftp_fs.listdir()) #ftp_fs.makedir('a/b/c/d', recursive=True) #print ftp_fs.getsize('/testout.txt') #print f.read() #for p in ftp_fs: # print p #from fs.utils import print_fs #print_fs(ftp_fs) #print ftp_fs.getsize('test.txt') #from fs.browsewin import browse #browse(ftp_fs) fs-0.3.0/fs/watch.py0000644000175000017500000004424211375762640012733 0ustar willwill""" fs.watch ======== Change notification support for FS. This module defines a standard interface for FS subclasses that support change notification callbacks. It also offers some WrapFS subclasses that can simulate such an ability on top of an ordinary FS object. An FS object that wants to be "watchable" must provide the following methods: * add_watcher(path,callback,events=None,recursive=True) Request that the given callback be executed in response to changes to the given path. A specific set of change events can be specified. This method returns a Watcher object. * del_watcher(watcher_or_callback) Remove the given watcher object, or any watchers associated with the given callback. """ import weakref import threading import Queue from fs.path import * from fs.errors import * from fs.wrapfs import WrapFS from fs.base import FS class EVENT(object): """Base class for change notification events.""" def __init__(self,fs,path): self.fs = fs if path is not None: path = abspath(normpath(path)) self.path = path def __str__(self): return unicode(self).encode("utf8") def __unicode__(self): return u"" % (self.__class__.__name__,self.path,hex(id(self))) class ACCESSED(EVENT): """Event fired when a file's contents are accessed.""" pass class CREATED(EVENT): """Event fired when a new file or directory is created.""" pass class REMOVED(EVENT): """Event fired when a file or directory is removed.""" pass class MODIFIED(EVENT): """Event fired when a file or directory is modified.""" def __init__(self,fs,path,data_changed=False): super(MODIFIED,self).__init__(fs,path) self.data_changed = data_changed class MOVED_DST(EVENT): """Event fired when a file or directory is the target of a move.""" def __init__(self,fs,path,source): super(MOVED_DST,self).__init__(fs,path) if source is not None: source = abspath(normpath(source)) self.source = source class MOVED_SRC(EVENT): """Event fired when a file or directory is the source of a move.""" def __init__(self,fs,path,destination): super(MOVED_SRC,self).__init__(fs,path) if destination is not None: destination = abspath(normpath(destination)) self.destination = destination class CLOSED(EVENT): """Event fired when the filesystem is closed.""" pass class ERROR(EVENT): """Event fired when some miscellaneous error occurs.""" pass class OVERFLOW(ERROR): """Event fired when some events could not be processed.""" pass class Watcher(object): """Object encapsulating filesystem watch info.""" def __init__(self,fs,callback,path="/",events=None,recursive=True): if events is None: events = (EVENT,) else: events = tuple(events) # Since the FS probably holds a reference to the Watcher, keeping # a reference back to the FS would create a cycle containing a # __del__ method. Use a weakref to avoid this. self._w_fs = weakref.ref(fs) self.callback = callback self.path = abspath(normpath(path)) self.events = events self.recursive = recursive @property def fs(self): return self._w_fs() def delete(self): fs = self.fs if fs is not None: fs.del_watcher(self) def handle_event(self,event): if not isinstance(event,self.events): return if event.path is not None: if not isprefix(self.path,event.path): return if not self.recursive: if event.path != self.path: if dirname(event.path) != self.path: return self.callback(event) class WatchableFSMixin(FS): """Mixin class providing watcher management functions.""" def __init__(self,*args,**kwds): self._watchers = PathMap() super(WatchableFSMixin,self).__init__(*args,**kwds) def add_watcher(self,callback,path="/",events=None,recursive=True): """Add a watcher callback to the FS.""" w = Watcher(self,callback,path,events,recursive=recursive) self._watchers.setdefault(path,[]).append(w) return w def del_watcher(self,watcher_or_callback): """Delete a watcher callback from the FS.""" if isinstance(watcher_or_callback,Watcher): self._watchers[watcher_or_callback.path].remove(watcher_or_callback) else: for watchers in self._watchers.itervalues(): for i,watcher in enumerate(watchers): if watcher.callback is watcher_or_callback: del watchers[i] break def _find_watchers(self,callback): """Find watchers registered with the given callback.""" for watchers in self._watchers.itervalues(): for watcher in watchers: if watcher.callback is callback: yield watcher def notify_watchers(self,event_class,path=None,*args,**kwds): """Notify watchers of the given event data.""" event = event_class(self,path,*args,**kwds) if path is None: for watchers in self._watchers.itervalues(): for watcher in watchers: watcher.handle_event(event) else: for prefix in recursepath(path): if prefix in self._watchers: for watcher in self._watchers[prefix]: watcher.handle_event(event) class WatchedFile(object): """File wrapper for use with WatchableFS. This file wrapper provides access to a file opened from a WatchableFS instance, and fires MODIFIED events when the file is modified. """ def __init__(self,file,fs,path,mode): self.file = file self.fs = fs self.path = path self.mode = mode def __del__(self): self.close() def __getattr__(self,name): file = self.__dict__['file'] a = getattr(file, name) if callable(a): setattr(self,name,a) return a def __enter__(self): self.file.__enter__() return self def __exit__(self,exc_type,exc_value,traceback): self.close() return False def __iter__(self): return iter(self.file) def flush(self): self.file.flush() self.fs.notify_watchers(MODIFIED,self.path,True) def close(self): self.file.close() self.fs.notify_watchers(MODIFIED,self.path,True) class WatchableFS(WrapFS,WatchableFSMixin): """FS wrapper simulating watcher callbacks. This FS wrapper intercepts method calls that modify the underlying FS and generates appropriate notification events. It thus allows watchers to monitor changes made through the underlying FS object, but not changes that might be made through other interfaces to the same filesystem. """ def __init__(self,*args,**kwds): super(WatchableFS,self).__init__(*args,**kwds) def close(self): super(WatchableFS,self).close() self.notify_watchers(CLOSED) def open(self,path,mode="r",**kwargs): existed = self.wrapped_fs.isfile(path) f = super(WatchableFS,self).open(path,mode,**kwargs) if not existed: self.notify_watchers(CREATED,path) self.notify_watchers(ACCESSED,path) return WatchedFile(f,self,path,mode) def makedir(self,path,*args,**kwds): existed = self.wrapped_fs.isdir(path) super(WatchableFS,self).makedir(path,*args,**kwds) if not existed: self.notify_watchers(CREATED,path) def remove(self,path): super(WatchableFS,self).remove(path) self.notify_watchers(REMOVED,path) def removedir(self,path,*args,**kwds): super(WatchableFS,self).removedir(path,*args,**kwds) self.notify_watchers(REMOVED,path) def rename(self,src,dst): d_existed = self.wrapped_fs.exists(dst) super(WatchableFS,self).rename(src,dst) if d_existed: self.notify_watchers(REMOVED,dst) self.notify_watchers(MOVED_SRC,src,dst) self.notify_watchers(MOVED_DST,dst,src) def copy(self,src,dst,**kwds): d = self._pre_copy(src,dst) super(WatchableFS,self).copy(src,dst,**kwds) self._post_copy(src,dst,d) def copydir(self,src,dst,**kwds): d = self._pre_copy(src,dst) super(WatchableFS,self).copydir(src,dst,**kwds) self._post_copy(src,dst,d) def move(self,src,dst,**kwds): d = self._pre_copy(src,dst) super(WatchableFS,self).move(src,dst,**kwds) self._post_copy(src,dst,d) self.notify_watchers(REMOVED,src) def movedir(self,src,dst,**kwds): d = self._pre_copy(src,dst) super(WatchableFS,self).movedir(src,dst,**kwds) self._post_copy(src,dst,d) self.notify_watchers(REMOVED,src) def _pre_copy(self,src,dst): dst_paths = {} try: for (dirnm,filenms) in self.wrapped_fs.walk(dst): dirnm = dirnm[len(dst):] dst_paths[dirnm] = True for filenm in filenms: dst_paths[filenm] = False except ResourceNotFoundError: pass except ResourceInvalidError: dst_paths[dst] = False src_paths = {} try: for (dirnm,filenms) in self.wrapped_fs.walk(src): dirnm = dirnm[len(src):] src_paths[dirnm] = True for filenm in filenms: src_paths[pathjoin(dirnm,filenm)] = False except ResourceNotFoundError: pass except ResourceInvalidError: src_paths[src] = False return (src_paths,dst_paths) def _post_copy(self,src,dst,data): (src_paths,dst_paths) = data for src_path,isdir in src_paths.iteritems(): path = pathjoin(dst,src_path) if src_path in dst_paths: self.notify_watchers(MODIFIED,path,not isdir) else: self.notify_watchers(CREATED,path) for dst_path,isdir in dst_paths.iteritems(): path = pathjoin(dst,dst_path) if not self.wrapped_fs.exists(path): self.notify_watchers(REMOVED,path) def setxattr(self,path,name,value): super(WatchableFS,self).setxattr(path,name,value) self.notify_watchers(MODIFIED,path,False) def delxattr(self,path,name): super(WatchableFS,self).delxattr(path,name,value) self.notify_watchers(MODIFIED,path,False) class PollingWatchableFS(WatchableFS): """FS wrapper simulating watcher callbacks by periodic polling. This FS wrapper augments the funcionality of WatchableFS by periodically polling the underlying FS for changes. It is thus capable of detecting changes made to the underlying FS via other interfaces, albeit with a (configurable) delay to account for the polling interval. """ def __init__(self,wrapped_fs,poll_interval=60*5): super(PollingWatchableFS,self).__init__(wrapped_fs) self.poll_interval = poll_interval self.add_watcher(self._on_path_modify,"/",(CREATED,MOVED_DST,)) self.add_watcher(self._on_path_modify,"/",(MODIFIED,ACCESSED,)) self.add_watcher(self._on_path_delete,"/",(REMOVED,MOVED_SRC,)) self._path_info = PathMap() self._poll_thread = threading.Thread(target=self._poll_for_changes) self._poll_cond = threading.Condition() self._poll_close_event = threading.Event() self._poll_thread.start() def close(self): self._poll_close_event.set() self._poll_thread.join() super(PollingWatchableFS,self).close() def _on_path_modify(self,event): path = event.path try: try: self._path_info[path] = self.wrapped_fs.getinfo(path) except ResourceNotFoundError: self._path_info.clear(path) except FSError: pass def _on_path_delete(self,event): self._path_info.clear(event.path) def _poll_for_changes(self): try: while not self._poll_close_event.isSet(): # Walk all directories looking for changes. # Come back to any that give us an error. error_paths = set() for dirnm in self.wrapped_fs.walkdirs(): if self._poll_close_event.isSet(): break try: self._check_for_changes(dirnm) except FSError: error_paths.add(dirnm) # Retry the directories that gave us an error, until # we have successfully updated them all while error_paths and not self._poll_close_event.isSet(): dirnm = error_paths.pop() if self.wrapped_fs.isdir(dirnm): try: self._check_for_changes(dirnm) except FSError: error_paths.add(dirnm) # Notify that we have completed a polling run self._poll_cond.acquire() self._poll_cond.notifyAll() self._poll_cond.release() # Sleep for the specified interval, or until closed. self._poll_close_event.wait(timeout=self.poll_interval) except FSError: if not self.closed: raise def _check_for_changes(self,dirnm): # Check the metadata for the directory itself. new_info = self.wrapped_fs.getinfo(dirnm) try: old_info = self._path_info[dirnm] except KeyError: self.notify_watchers(CREATED,dirnm) else: if new_info != old_info: self.notify_watchers(MODIFIED,dirnm,False) # Check the metadata for each file in the directory. # We assume that if the file's data changes, something in its # metadata will also change; don't want to read through each file! # Subdirectories will be handled by the outer polling loop. for filenm in self.wrapped_fs.listdir(dirnm,files_only=True): if self._poll_close_event.isSet(): return fpath = pathjoin(dirnm,filenm) new_info = self.wrapped_fs.getinfo(fpath) try: old_info = self._path_info[fpath] except KeyError: self.notify_watchers(CREATED,fpath) else: was_accessed = False was_modified = False for (k,v) in new_info.iteritems(): if k not in old_info: was_modified = True break elif old_info[k] != v: if k in ("accessed_time","st_atime",): was_accessed = True elif k: was_modified = True break else: for k in old_info: if k not in new_info: was_modified = True break if was_modified: self.notify_watchers(MODIFIED,fpath,True) elif was_accessed: self.notify_watchers(ACCESSED,fpath) # Check for deletion of cached child entries. for childnm in self._path_info.iternames(dirnm): if self._poll_close_event.isSet(): return cpath = pathjoin(dirnm,childnm) if not self.wrapped_fs.exists(cpath): self.notify_watchers(REMOVED,cpath) def ensure_watchable(fs,wrapper_class=PollingWatchableFS,*args,**kwds): """Ensure that the given fs supports watching, simulating it if necessary. Given an FS object, this function returns an equivalent FS that has support for watcher callbacks. This may be the original object if it supports them natively, or a wrapper class if they must be simulated. """ try: w = fs.add_watcher(lambda e: None,"/somepaththatsnotlikelytoexist") except (AttributeError,UnsupportedError): return wrapper_class(fs,*args,**kwds) except FSError: pass else: fs.del_watcher(w) return fs class iter_changes(object): """Blocking iterator over the change events produced by an FS. This class can be used to transform the callback-based watcher mechanism into a blocking stream of events. It operates by having the callbacks push events onto a queue as they come in, then reading them off one at a time. """ def __init__(self,fs=None,path="/",events=None,**kwds): self.closed = False self._queue = Queue.Queue() self._watching = set() if fs is not None: self.add_watcher(fs,path,events,**kwds) def __iter__(self): return self def __del__(self): self.close() def next(self,timeout=None): if not self._watching: raise StopIteration try: event = self._queue.get(timeout=timeout) except Queue.Empty: raise StopIteration if event is None: raise StopIteration if isinstance(event,CLOSED): event.fs.del_watcher(self._enqueue) self._watching.remove(event.fs) return event def close(self): if not self.closed: self.closed = True for fs in self._watching: fs.del_watcher(self._enqueue) self._queue.put(None) def add_watcher(self,fs,path="/",events=None,**kwds): w = fs.add_watcher(self._enqueue,path,events,**kwds) self._watching.add(fs) return w def _enqueue(self,event): self._queue.put(event) def del_watcher(self,watcher): for fs in self._watching: try: fs.del_watcher(watcher) break except ValueError: pass else: raise ValueError("watcher not found: %s" % (watcher,)) fs-0.3.0/fs/browsewin.py0000644000175000017500000001213311317702526013627 0ustar willwill#!/usr/bin/env python """ fs.browsewin ============ Creates a window which can be used to browse the contents of a filesystem. To use, call the 'browse' method with a filesystem object. Double click a file or directory to display its properties. Requires wxPython. """ import wx import wx.gizmos import base as fs class InfoFrame(wx.Frame): def __init__(self, path, desc, info): wx.Frame.__init__(self, None, -1, style=wx.DEFAULT_FRAME_STYLE, size=(500, 500)) self.SetTitle("FS Object info - %s (%s)" % (path, desc)) keys = info.keys() keys.sort() self.list_ctrl = wx.ListCtrl(self, -1, style=wx.LC_REPORT|wx.SUNKEN_BORDER) self.list_ctrl.InsertColumn(0, "Key") self.list_ctrl.InsertColumn(1, "Value") self.list_ctrl.SetColumnWidth(0, 190) self.list_ctrl.SetColumnWidth(1, 300) for key in keys: self.list_ctrl.Append((key, str(info.get(key)))) class BrowseFrame(wx.Frame): def __init__(self, fs): wx.Frame.__init__(self, None, size=(1000, 600)) self.fs = fs self.SetTitle("FS Browser - "+str(fs)) self.tree = wx.gizmos.TreeListCtrl(self, -1, style=wx.TR_DEFAULT_STYLE | wx.TR_HIDE_ROOT) self.tree.AddColumn("File System") self.tree.AddColumn("Description") self.tree.AddColumn("Size") self.tree.AddColumn("Created") self.tree.SetColumnWidth(0, 300) self.tree.SetColumnWidth(1, 250) self.tree.SetColumnWidth(2, 150) self.tree.SetColumnWidth(3, 250) self.root_id = self.tree.AddRoot('root', data=wx.TreeItemData( {'path':"/", 'expanded':False} )) rid = self.tree.GetItemData(self.root_id) isz = (16, 16) il = wx.ImageList(isz[0], isz[1]) self.fldridx = il.Add(wx.ArtProvider_GetBitmap(wx.ART_FOLDER, wx.ART_OTHER, isz)) self.fldropenidx = il.Add(wx.ArtProvider_GetBitmap(wx.ART_FILE_OPEN, wx.ART_OTHER, isz)) self.fileidx = il.Add(wx.ArtProvider_GetBitmap(wx.ART_NORMAL_FILE, wx.ART_OTHER, isz)) self.tree.SetImageList(il) self.il = il self.tree.SetItemImage(self.root_id, self.fldridx, wx.TreeItemIcon_Normal) self.tree.SetItemImage(self.root_id, self.fldropenidx, wx.TreeItemIcon_Expanded) self.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.OnItemExpanding) self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.OnItemActivated) wx.CallAfter(self.OnInit) def OnInit(self): self.expand(self.root_id) def expand(self, item_id): item_data = self.tree.GetItemData(item_id).GetData() path = item_data["path"] if not self.fs.isdir(path): return if item_data['expanded']: return paths = [(self.fs.isdir(p), p) for p in self.fs.listdir(path, absolute=True)] if not paths: #self.tree.SetItemHasChildren(item_id, False) #self.tree.Collapse(item_id) return paths.sort(key=lambda p:(not p[0], p[1].lower())) for is_dir, new_path in paths: name = fs.pathsplit(new_path)[-1] new_item = self.tree.AppendItem(item_id, name, data=wx.TreeItemData({'path':new_path, 'expanded':False})) info = self.fs.getinfo(new_path) if is_dir: self.tree.SetItemHasChildren(new_item) self.tree.SetItemImage(new_item, self.fldridx, 0, wx.TreeItemIcon_Normal) self.tree.SetItemImage(new_item, self.fldropenidx, 0, wx.TreeItemIcon_Expanded) self.tree.SetItemText(new_item, "", 2) ct = info.get('created_time', None) if ct is not None: self.tree.SetItemText(new_item, ct.ctime(), 3) else: self.tree.SetItemText(new_item, 'unknown', 3) else: self.tree.SetItemImage(new_item, self.fileidx, 0, wx.TreeItemIcon_Normal) self.tree.SetItemText(new_item, str(info.get('size', '?'))+ " bytes", 2) ct = info.get('created_time', None) if ct is not None: self.tree.SetItemText(new_item, ct.ctime(), 3) else: self.tree.SetItemText(new_item, 'unknown', 3) self.tree.SetItemText(new_item, self.fs.desc(new_path), 1) item_data['expanded'] = True self.tree.Expand(item_id) def OnItemExpanding(self, e): self.expand(e.GetItem()) e.Skip() def OnItemActivated(self, e): item_data = self.tree.GetItemData(e.GetItem()).GetData() path = item_data["path"] info = self.fs.getinfo(path) info_frame = InfoFrame(path, self.fs.desc(path), info) info_frame.Show() def browse(fs): """Displays a window containing a tree control that displays an FS object. Double-click a file/folder to display extra info. :param fs: A filesystem object """ app = wx.PySimpleApp() frame = BrowseFrame(fs) frame.Show() app.MainLoop() if __name__ == "__main__": from osfs import OSFS home_fs = OSFS("~/") browse(home_fs) fs-0.3.0/fs/xattrs.py0000644000175000017500000001613011406424034013130 0ustar willwill""" fs.xattrs ========= Extended attribute support for FS This module defines a standard interface for FS subclasses that want to support extended file attributes, and a WrapFS subclass that can simulate extended attributes on top of an ordinary FS. FS instances offering extended attribute support must provide the following methods: * getxattr(path,name) Get the named attribute for the given path, or None if it does not exist * setxattr(path,name,value) Set the named attribute for the given path to the given value * delxattr(path,name) Delete the named attribute for the given path, raising KeyError if it does not exist * listxattrs(path) Iterate over all stored attribute names for the given path If extended attributes are required by FS-consuming code, it should use the function 'ensure_xattrs'. This will interrogate an FS object to determine if it has native xattr support, and return a wrapped version if it does not. """ import sys try: import cPickle as pickle except ImportError: import pickle from fs.path import * from fs.errors import * from fs.wrapfs import WrapFS from fs.base import synchronize def ensure_xattrs(fs): """Ensure that the given FS supports xattrs, simulating them if required. Given an FS object, this function returns an equivalent FS that has support for extended attributes. This may be the original object if they are supported natively, or a wrapper class is they must be simulated. :param fs: An FS object that must have xattrs """ try: # This attr doesn't have to exist, None should be returned by default fs.getxattr("/","testing-xattr") return fs except (AttributeError,UnsupportedError): return SimulateXAttr(fs) class SimulateXAttr(WrapFS): """FS wrapper class that simulates xattr support. The following methods are supplied for manipulating extended attributes: * listxattrs: list all extended attribute names for a path * getxattr: get an xattr of a path by name * setxattr: set an xattr of a path by name * delxattr: delete an xattr of a path by name For each file in the underlying FS, this class maintains a corresponding '.xattrs.FILENAME' file containing its extended attributes. Extended attributes of a directory are stored in the file '.xattrs' within the directory itself. """ def _get_attr_path(self, path, isdir=None): """Get the path of the file containing xattrs for the given path.""" if isdir is None: isdir = self.wrapped_fs.isdir(path) if isdir: attr_path = pathjoin(path, '.xattrs') else: dir_path, file_name = pathsplit(path) attr_path = pathjoin(dir_path, '.xattrs.'+file_name) return attr_path def _is_attr_path(self, path): """Check whether the given path references an xattrs file.""" _,name = pathsplit(path) if name.startswith(".xattrs"): return True return False def _get_attr_dict(self, path): """Retrieve the xattr dictionary for the given path.""" attr_path = self._get_attr_path(path) if self.wrapped_fs.exists(attr_path): try: return pickle.loads(self.wrapped_fs.getcontents(attr_path)) except EOFError: return {} else: return {} def _set_attr_dict(self, path, attrs): """Store the xattr dictionary for the given path.""" attr_path = self._get_attr_path(path) self.wrapped_fs.setcontents(attr_path, pickle.dumps(attrs)) @synchronize def setxattr(self, path, key, value): """Set an extended attribute on the given path.""" if not self.exists(path): raise ResourceNotFoundError(path) key = unicode(key) attrs = self._get_attr_dict(path) attrs[key] = str(value) self._set_attr_dict(path, attrs) @synchronize def getxattr(self, path, key, default=None): """Retrieve an extended attribute for the given path.""" if not self.exists(path): raise ResourceNotFoundError(path) attrs = self._get_attr_dict(path) return attrs.get(key, default) @synchronize def delxattr(self, path, key): if not self.exists(path): raise ResourceNotFoundError(path) attrs = self._get_attr_dict(path) try: del attrs[key] except KeyError: pass self._set_attr_dict(path, attrs) @synchronize def listxattrs(self,path): """List all the extended attribute keys set on the given path.""" if not self.exists(path): raise ResourceNotFoundError(path) return self._get_attr_dict(path).keys() def _encode(self,path): """Prevent requests for operations on .xattr files.""" if self._is_attr_path(path): raise PathError(path,msg="Paths cannot contain '.xattrs': %(path)s") return path def _decode(self,path): return path def listdir(self,path="",**kwds): """Prevent .xattr from appearing in listings.""" entries = self.wrapped_fs.listdir(path,**kwds) return [e for e in entries if not self._is_attr_path(e)] def remove(self,path): """Remove .xattr when removing a file.""" attr_file = self._get_attr_path(path,isdir=False) self.wrapped_fs.remove(path) try: self.wrapped_fs.remove(attr_file) except ResourceNotFoundError: pass def removedir(self,path,recursive=False,force=False): """Remove .xattr when removing a directory.""" try: self.wrapped_fs.removedir(path,recursive=recursive,force=force) except DirectoryNotEmptyError: # The xattr file could block the underlying removedir(). # Remove it, but be prepared to restore it on error. if self.listdir(path) != []: raise attr_file = self._get_attr_path(path,isdir=True) attr_file_contents = self.wrapped_fs.getcontents(attr_file) self.wrapped_fs.remove(attr_file) try: self.wrapped_fs.removedir(path,recursive=recursive) except FSError: self.wrapped_fs.createfile(attr_file,attr_file_contents) raise def copy(self,src,dst,**kwds): """Ensure xattrs are copied when copying a file.""" self.wrapped_fs.copy(self._encode(src),self._encode(dst),**kwds) s_attr_file = self._get_attr_path(src) d_attr_file = self._get_attr_path(dst) try: self.wrapped_fs.copy(s_attr_file,d_attr_file,overwrite=True) except ResourceNotFoundError,e: pass def move(self,src,dst,**kwds): """Ensure xattrs are preserved when moving a file.""" self.wrapped_fs.move(self._encode(src),self._encode(dst),**kwds) s_attr_file = self._get_attr_path(src) d_attr_file = self._get_attr_path(dst) try: self.wrapped_fs.move(s_attr_file,d_attr_file,overwrite=True) except ResourceNotFoundError: pass fs-0.3.0/fs/zipfs.py0000644000175000017500000001575111407420302012741 0ustar willwill""" fs.zipfs ======== A FS object that represents the contents of a Zip file """ from fs.base import * from zipfile import ZipFile, ZIP_DEFLATED, ZIP_STORED from memoryfs import MemoryFS try: from cStringIO import StringIO except ImportError: from StringIO import StringIO import tempfs class _TempWriteFile(object): """Proxies a file object and calls a callback when the file is closed.""" def __init__(self, fs, filename, close_callback): self.fs = fs self.filename = filename self._file = self.fs.open(filename, 'w+') self.close_callback = close_callback def write(self, data): return self._file.write(data) def tell(self): return self._file.tell() def close(self): self._file.close() self.close_callback(self.filename) class _ExceptionProxy(object): """A placeholder for an object that may no longer be used.""" def __getattr__(self, name): raise ValueError("Zip file has been closed") def __setattr__(self, name, value): raise ValueError("Zip file has been closed") def __nonzero__(self): return False class ZipFS(FS): """A FileSystem that represents a zip file.""" def __init__(self, zip_file, mode="r", compression="deflated", allow_zip_64=False, encoding="CP437", thread_synchronize=True): """Create a FS that maps on to a zip file. :param zip_file: A (system) path, or a file-like object :param mode: Mode to open zip file: 'r' for reading, 'w' for writing or 'a' for appending :param compression: Can be 'deflated' (default) to compress data or 'stored' to just store date :param allow_zip_64: -- Set to True to use zip files greater than 2 GB, default is False :param encoding: -- The encoding to use for unicode filenames :param thread_synchronize: -- Set to True (default) to enable thread-safety """ super(ZipFS, self).__init__(thread_synchronize=thread_synchronize) if compression == "deflated": compression_type = ZIP_DEFLATED elif compression == "stored": compression_type = ZIP_STORED else: raise ValueError("Compression should be 'deflated' (default) or 'stored'") if len(mode) > 1 or mode not in "rwa": raise ValueError("mode must be 'r', 'w' or 'a'") self.zip_mode = mode self.encoding = encoding try: self.zf = ZipFile(zip_file, mode, compression_type, allow_zip_64) except IOError: raise ResourceNotFoundError(str(zip_file), msg="Zip file does not exist: %(path)s") self.zip_path = str(zip_file) self.temp_fs = None if mode in 'wa': self.temp_fs = tempfs.TempFS() self._path_fs = MemoryFS() if mode in 'ra': self._parse_resource_list() def __str__(self): return "" % self.zip_path def __unicode__(self): return unicode(self.__str__()) def _parse_resource_list(self): for path in self.zf.namelist(): self._add_resource(path.decode(self.encoding)) def _add_resource(self, path): if path.endswith('/'): path = path[:-1] if path: self._path_fs.makedir(path, recursive=True, allow_recreate=True) else: dirpath, filename = pathsplit(path) if dirpath: self._path_fs.makedir(dirpath, recursive=True, allow_recreate=True) f = self._path_fs.open(path, 'w') f.close() def close(self): """Finalizes the zip file so that it can be read. No further operations will work after this method is called.""" if hasattr(self, 'zf') and self.zf: self.zf.close() self.zf = _ExceptionProxy() @synchronize def open(self, path, mode="r", **kwargs): path = normpath(relpath(path)) if 'r' in mode: if self.zip_mode not in 'ra': raise OperationFailedError("open file", path=path, msg="Zip file must be opened for reading ('r') or appending ('a')") try: contents = self.zf.read(path.encode(self.encoding)) except KeyError: raise ResourceNotFoundError(path) return StringIO(contents) if 'w' in mode: dirname, filename = pathsplit(path) if dirname: self.temp_fs.makedir(dirname, recursive=True, allow_recreate=True) self._add_resource(path) f = _TempWriteFile(self.temp_fs, path, self._on_write_close) return f raise ValueError("Mode must contain be 'r' or 'w'") @synchronize def getcontents(self, path): if not self.exists(path): raise ResourceNotFoundError(path) path = normpath(path) try: contents = self.zf.read(path.encode(self.encoding)) except KeyError: raise ResourceNotFoundError(path) except RuntimeError: raise OperationFailedError("read file", path=path, msg="Zip file must be oppened with 'r' or 'a' to read") return contents @synchronize def _on_write_close(self, filename): sys_path = self.temp_fs.getsyspath(filename) self.zf.write(sys_path, filename.encode(self.encoding)) def desc(self, path): if self.isdir(path): return "Dir in zip file: %s" % self.zip_path else: return "File in zip file: %s" % self.zip_path def isdir(self, path): return self._path_fs.isdir(path) def isfile(self, path): return self._path_fs.isfile(path) def exists(self, path): return self._path_fs.exists(path) @synchronize def makedir(self, dirname, recursive=False, allow_recreate=False): dirname = normpath(dirname) if self.zip_mode not in "wa": raise OperationFailedError("create directory", path=dirname, msg="Zip file must be opened for writing ('w') or appending ('a')") if not dirname.endswith('/'): dirname += '/' self._add_resource(dirname) def listdir(self, path="/", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False): return self._path_fs.listdir(path, wildcard, full, absolute, dirs_only, files_only) @synchronize def getinfo(self, path): if not self.exists(path): raise ResourceNotFoundError(path) path = normpath(path).lstrip('/') try: zi = self.zf.getinfo(path.encode(self.encoding)) zinfo = dict((attrib, getattr(zi, attrib)) for attrib in dir(zi) if not attrib.startswith('_')) for k, v in zinfo.iteritems(): if callable(v): zinfo[k] = v() except KeyError: zinfo = {'file_size':0} info = {'size' : zinfo['file_size'] } if 'date_time' in zinfo: info['created_time'] = datetime.datetime(*zinfo['date_time']) info.update(zinfo) return info fs-0.3.0/fs/errors.py0000644000175000017500000001773011407422442013130 0ustar willwill""" Defines the Exception classes thrown by PyFilesystem objects. Exceptions relating to the underling filesystem are translated in to one of the following Exceptions. Exceptions that relate to a path store that path in `self.path`. All Exception classes are derived from `FSError` which can be used as a catch-all exception. """ import sys import errno from fs.path import * try: from functools import wraps except ImportError: wraps = lambda f: lambda f: f class FSError(Exception): """Base exception class for the FS module.""" default_message = "Unspecified error" def __init__(self,msg=None,details=None): if msg is None: msg = self.default_message self.msg = msg self.details = details def __str__(self): keys = {} for k,v in self.__dict__.iteritems(): if isinstance(v,unicode): v = v.encode(sys.getfilesystemencoding()) keys[k] = v return str(self.msg % keys) def __unicode__(self): return unicode(self.msg) % self.__dict__ def __getstate__(self): return self.__dict__.copy() class PathError(FSError): """Exception for errors to do with a path string. """ default_message = "Path is invalid: %(path)s" def __init__(self,path="",**kwds): self.path = path super(PathError,self).__init__(**kwds) class OperationFailedError(FSError): """Base exception class for errors associated with a specific operation.""" default_message = "Unable to %(opname)s: unspecified error [%(errno)s - %(details)s]" def __init__(self,opname="",path=None,**kwds): self.opname = opname self.path = path self.errno = getattr(kwds.get("details",None),"errno",None) super(OperationFailedError,self).__init__(**kwds) class UnsupportedError(OperationFailedError): """Exception raised for operations that are not supported by the FS.""" default_message = "Unable to %(opname)s: not supported by this filesystem" class RemoteConnectionError(OperationFailedError): """Exception raised when operations encounter remote connection trouble.""" default_message = "%(opname)s: remote connection errror" class StorageSpaceError(OperationFailedError): """Exception raised when operations encounter storage space trouble.""" default_message = "Unable to %(opname)s: insufficient storage space" class PermissionDeniedError(OperationFailedError): default_message = "Unable to %(opname)s: permission denied" class FSClosedError(OperationFailedError): default_message = "Unable to %(opname)s: the FS has been closed" class OperationTimeoutError(OperationFailedError): default_message = "Unable to %(opname)s: operation timed out" class ResourceError(FSError): """Base exception class for error associated with a specific resource.""" default_message = "Unspecified resource error: %(path)s" def __init__(self,path="",**kwds): self.path = path self.opname = kwds.pop("opname",None) super(ResourceError,self).__init__(**kwds) class NoSysPathError(ResourceError): """Exception raised when there is no syspath for a given path.""" default_message = "No mapping to OS filesystem: %(path)s" class ResourceNotFoundError(ResourceError): """Exception raised when a required resource is not found.""" default_message = "Resource not found: %(path)s" class ResourceInvalidError(ResourceError): """Exception raised when a resource is the wrong type.""" default_message = "Resource is invalid: %(path)s" class DestinationExistsError(ResourceError): """Exception raised when a target destination already exists.""" default_message = "Destination exists: %(path)s" class DirectoryNotEmptyError(ResourceError): """Exception raised when a directory to be removed is not empty.""" default_message = "Directory is not empty: %(path)s" class ParentDirectoryMissingError(ResourceError): """Exception raised when a parent directory is missing.""" default_message = "Parent directory is missing: %(path)s" class ResourceLockedError(ResourceError): """Exception raised when a resource can't be used because it is locked.""" default_message = "Resource is locked: %(path)s" def convert_fs_errors(func): """Function wrapper to convert FSError instances into OSErrors.""" @wraps(func) def wrapper(*args,**kwds): try: return func(*args,**kwds) except ResourceNotFoundError, e: raise OSError(errno.ENOENT,str(e)) except ResourceInvalidError, e: raise OSError(errno.EINVAL,str(e)) except PermissionDeniedError, e: raise OSError(errno.EACCES,str(e)) except ResourceLockedError, e: if sys.platform == "win32": raise WindowsError(32,str(e)) else: raise OSError(errno.EACCES,str(e)) except DirectoryNotEmptyError, e: raise OSError(errno.ENOTEMPTY,str(e)) except DestinationExistsError, e: raise OSError(errno.EEXIST,str(e)) except StorageSpaceError, e: raise OSError(errno.ENOSPC,str(e)) except RemoteConnectionError, e: raise OSError(errno.ENONET,str(e)) except UnsupportedError, e: raise OSError(errno.ENOSYS,str(e)) except FSError, e: raise OSError(errno.EFAULT,str(e)) return wrapper def convert_os_errors(func): """Function wrapper to convert OSError/IOError instances into FSErrors.""" opname = func.__name__ @wraps(func) def wrapper(self,*args,**kwds): try: return func(self,*args,**kwds) except (OSError,IOError), e: (exc_type,exc_inst,tb) = sys.exc_info() path = getattr(e,"filename",None) if path and path[0] == "/" and hasattr(self,"root_path"): path = normpath(path) if isprefix(self.root_path,path): path = path[len(self.root_path):] if not hasattr(e,"errno") or not e.errno: raise OperationFailedError(opname,details=e),None,tb if e.errno == errno.ENOENT: raise ResourceNotFoundError(path,opname=opname,details=e),None,tb if e.errno == errno.ESRCH: raise ResourceNotFoundError(path,opname=opname,details=e),None,tb if e.errno == errno.ENOTEMPTY: raise DirectoryNotEmptyError(path,opname=opname,details=e),None,tb if e.errno == errno.EEXIST: raise DestinationExistsError(path,opname=opname,details=e),None,tb if e.errno == 183: # some sort of win32 equivalent to EEXIST raise DestinationExistsError(path,opname=opname,details=e),None,tb if e.errno == errno.ENOTDIR: raise ResourceInvalidError(path,opname=opname,details=e),None,tb if e.errno == errno.EISDIR: raise ResourceInvalidError(path,opname=opname,details=e),None,tb if e.errno == errno.EINVAL: raise ResourceInvalidError(path,opname=opname,details=e),None,tb if e.errno == errno.EOPNOTSUPP: raise UnsupportedError(opname,details=e),None,tb if e.errno == errno.ENOSPC: raise StorageSpaceError(opname,details=e),None,tb if e.errno == errno.EPERM: raise PermissionDeniedError(opname,details=e),None,tb if e.errno == errno.EACCES: if sys.platform == "win32": if e.args[0] and e.args[0] == 32: raise ResourceLockedError(path,opname=opname,details=e),None,tb raise PermissionDeniedError(opname,details=e),None,tb # Sometimes windows gives some random errors... if sys.platform == "win32": if e.errno in (13,): raise ResourceInvalidError(path,opname=opname,details=e),None,tb raise OperationFailedError(opname,details=e),None,tb return wrapper fs-0.3.0/fs/__init__.py0000644000175000017500000000276611407425114013355 0ustar willwill""" fs: a filesystem abstraction. This module provides an abstract base class 'FS' that defines a consistent interface to different kinds of filesystem, along with a range of concrete implementations of this interface such as: OSFS: access the local filesystem, through the 'os' module TempFS: a temporary filesystem that's automatically cleared on exit MemoryFS: a filesystem that exists only in memory ZipFS: access a zipfile like a filesystem SFTPFS: access files on a SFTP server S3FS: access files stored in Amazon S3 """ __version__ = "0.3.0" __author__ = "Will McGugan (will@willmcgugan.com)" # 'base' imports * from 'path' and 'errors', so their # contents will be available here as well. from base import * # provide these by default so people can use 'fs.path.basename' etc. import errors import path _thread_synchronize_default = True def set_thread_synchronize_default(sync): """Sets the default thread synchronisation flag. FS objects are made thread-safe through the use of a per-FS threading Lock object. Since this can introduce an small overhead it can be disabled with this function if the code is single-threaded. :param sync: Set whether to use thread synchronisation for new FS objects """ global _thread_synchronization_default _thread_synchronization_default = sync # Store some identifiers in the fs namespace import os SEEK_CUR = os.SEEK_CUR SEEK_END = os.SEEK_END SEEK_SET = os.SEEK_SET fs-0.3.0/fs/batch.py0000644000175000017500000001066011405671055012674 0ustar willwillimport fnmatch from itertools import chain import re class BatchError(Exception): pass def _params(*args, **kwargs): return (args, kwargs) class BatchBase(object): def __init__(self): self._stack = [] self._eval_cache = None self._eval_level = 0 def _eval(self, paths): operations = [] for cmd in self._stack[::-1]: cmd_name, (args, kwargs) = cmd cmd_func = getattr(self, '_cmd_' + cmd_name, None) assert cmd_func is not None, "Unknown batch command" operations.append(lambda paths:cmd_func(paths, *args, **kwargs)) def recurse_operations(op_index=0): if op_index >= len(operations): for fs, path in paths: yield fs, path else: for fs, path in operations[op_index](recurse_operations(op_index+1), ): yield fs, path for fs, path in recurse_operations(): yield fs, path def filter(self, *wildcards): cmd = ('filter', _params(wildcards)) self._stack.append(cmd) return self def exclude(self, *wildcards): cmd = ('exclude', _params(wildcards)) self._stack.append(cmd) return self def _cmd_filter(self, fs_paths, wildcards): wildcard_res = [re.compile(fnmatch.translate(w)) for w in wildcards] for fs, path in fs_paths: for wildcard_re in wildcard_res: if wildcard_re.match(path): yield fs, path def _cmd_exclude(self, fs_paths, wildcards): wildcard_res = [re.compile(fnmatch.translate(w)) for w in wildcards] for fs, path in fs_paths: for wildcard_re in wildcard_res: if wildcard_re.match(path): break else: yield fs, path class Batch(BatchBase): def __init__(self, *fs, **kwargs): super(Batch, self).__init__() self.fs_list = fs self.recursive = kwargs.get('recursive', False) def path_iter(self, fs_list): if self.recursive: for fs in fs_list: for path in fs.walkfiles(): yield fs, path else: for fs in fs_list: for path in fs.listdir(full=True, absolute=True): yield fs, path def __iter__(self): return self._eval(self.path_iter(self.fs_list)) def paths(self): for fs, path in self: yield path class BatchList(BatchBase): def __init__(self, fs, paths): self.fs_list = [(fs, path) for path in paths] def __iter__(self): return self.fs_list class BatchOp(Batch): def __init__(self): super(BatchBase, self).__init__(None) self._op_stack = [] def remove(self): cmd = ('remove', _params()) self._op_stack.append(cmd) return self def _op_remove(self, fs, path): fs.remove(path) def apply(self, fs=None, ignore_errors=False): def do_call(func, *args, **kwargs): return func(*args, **kwargs) def ignore_exceptions(func, *arg, **kwargs): try: return func(*args, **kwargs) except: return None if ignore_errors: call_cmd = ignore_exceptions else: call_cmd = do_call for fs, path in self.path_iter(): for cmd in self._op_stack: cmd_name, (args, kwargs) = cmd cmd_func = getattr(self, '_op_' + cmd_name) call_cmd(cmd_func, fs, path, *args, **kwargs) if __name__ == "__main__": from fs.osfs import OSFS test_fs = OSFS("/home/will/projects/meshminds/meshminds") b = Batch(test_fs, recursive=True).exclude("*.py", "*.html") print list(b.paths()) #b=BatchBase() #b.filter('*.py') #print b._eval([[None, 'a/b/c.py'], # [None, 'a/b/c.pyw']]) fs-0.3.0/fs/osfs/0000755000175000017500000000000011407431454012207 5ustar willwillfs-0.3.0/fs/osfs/watch.py0000644000175000017500000000147211375762640013703 0ustar willwill""" fs.osfs.watch ============= Change watcher support for OSFS """ import os import sys import errno import threading from fs.errors import * from fs.path import * from fs.watch import * OSFSWatchMixin = None # Try using native implementation on win32 if sys.platform == "win32": try: from fs.osfs.watch_win32 import OSFSWatchMixin except ImportError: pass # Try using pyinotify if available if OSFSWatchMixin is None: try: from fs.osfs.watch_inotify import OSFSWatchMixin except ImportError: pass # Fall back to raising UnsupportedError if OSFSWatchMixin is None: class OSFSWatchMixin(object): def add_watcher(self,*args,**kwds): raise UnsupportedError def del_watcher(self,watcher_or_callback): raise UnsupportedError fs-0.3.0/fs/osfs/xattrs.py0000644000175000017500000000266511375762640014127 0ustar willwill""" fs.osfs.xattrs ============== Extended-attribute support for OSFS """ import os import sys import errno from fs.errors import * from fs.path import * from fs.base import FS try: import xattr except ImportError: xattr = None if xattr is not None: class OSFSXAttrMixin(FS): """Mixin providing extended-attribute support via the 'xattr' module""" @convert_os_errors def setxattr(self, path, key, value): xattr.xattr(self.getsyspath(path))[key]=value @convert_os_errors def getxattr(self, path, key, default=None): try: return xattr.xattr(self.getsyspath(path)).get(key) except KeyError: return default @convert_os_errors def delxattr(self, path, key): try: del xattr.xattr(self.getsyspath(path))[key] except KeyError: pass @convert_os_errors def listxattrs(self, path): return xattr.xattr(self.getsyspath(path)).keys() else: class OSFSXAttrMixin(object): """Mixin disable extended-attribute support.""" def getxattr(self,path,key,default=None): raise UnsupportedError def setxattr(self,path,key,value): raise UnsupportedError def delxattr(self,path,key): raise UnsupportedError def listxattrs(self,path): raise UnsupportedError fs-0.3.0/fs/osfs/__init__.py0000644000175000017500000002155711406635052014331 0ustar willwill""" fs.osfs ======= Exposes the OS Filesystem as an FS object. For example, to print all the files and directories in the OS root:: >>> from fs.osfs import OSFS >>> home_fs = OSFS('/') >>> print home_fs.listdir() """ import os import sys import errno from fs.base import * from fs.path import * from fs import _thread_synchronize_default from fs.osfs.xattrs import OSFSXAttrMixin from fs.osfs.watch import OSFSWatchMixin @convert_os_errors def _os_stat(path): """Replacement for os.stat that raises FSError subclasses.""" return os.stat(path) class OSFS(OSFSXAttrMixin, OSFSWatchMixin, FS): """Expose the underlying operating-system filesystem as an FS object. This is the most basic of filesystems, which simply shadows the underlaying filesytem of the OS. Most of its methods simply defer to the corresponding methods in the os and os.path modules. """ def __init__(self, root_path, thread_synchronize=_thread_synchronize_default, encoding=None, create=False, dir_mode=0700): """ Creates an FS object that represents the OS Filesystem under a given root path :param root_path: The root OS path :param thread_synchronize: If True, this object will be thread-safe by use of a threading.Lock object :param encoding: The encoding method for path strings :param create: If True, then root_path will be created if it doesn't already exist :param dir_mode: The mode to use when creating the directory """ super(OSFS, self).__init__(thread_synchronize=thread_synchronize) self.encoding = encoding or sys.getfilesystemencoding() root_path = os.path.expanduser(os.path.expandvars(root_path)) root_path = os.path.normpath(os.path.abspath(root_path)) # Enable long pathnames on win32 if sys.platform == "win32": if not root_path.startswith("\\\\?\\"): root_path = u"\\\\?\\" + root_path if create: try: os.makedirs(root_path, mode=dir_mode) except OSError: pass if not os.path.exists(root_path): raise ResourceNotFoundError(root_path,msg="Root directory does not exist: %(path)s") if not os.path.isdir(root_path): raise ResourceInvalidError(root_path,msg="Root path is not a directory: %(path)s") self.root_path = root_path self.dir_mode = dir_mode def __str__(self): return "" % self.root_path def __unicode__(self): return u"" % self.root_path def _decode_path(self, p): if isinstance(p, unicode): return p return p.decode(self.encoding, 'replace') def getsyspath(self, path, allow_none=False): path = relpath(normpath(path)).replace("/",os.sep) path = os.path.join(self.root_path, path) path = self._decode_path(path) return path def unsyspath(self, path): """Convert a system-level path into an FS-level path. This basically the reverse of getsyspath(). If the path does not refer to a location within this filesystem, ValueError is raised. :param path: a system path :returns: a path within this FS object :rtype: string """ path = os.path.normpath(os.path.abspath(path)) if not path.startswith(self.root_path + os.path.sep): raise ValueError("path not within this FS: %s" % (path,)) return path[len(self.root_path):] @convert_os_errors def open(self, path, mode="r", **kwargs): mode = filter(lambda c: c in "rwabt+",mode) return open(self.getsyspath(path), mode, kwargs.get("buffering", -1)) @convert_os_errors def exists(self, path): path = self.getsyspath(path) return os.path.exists(path) @convert_os_errors def isdir(self, path): path = self.getsyspath(path) return os.path.isdir(path) @convert_os_errors def isfile(self, path): path = self.getsyspath(path) return os.path.isfile(path) @convert_os_errors def listdir(self, path="./", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False): paths = [self._decode_path(p) for p in os.listdir(self.getsyspath(path))] return self._listdir_helper(path, paths, wildcard, full, absolute, dirs_only, files_only) @convert_os_errors def makedir(self, path, recursive=False, allow_recreate=False): sys_path = self.getsyspath(path) try: if recursive: os.makedirs(sys_path, self.dir_mode) else: os.mkdir(sys_path, self.dir_mode) except OSError, e: if e.errno == errno.EEXIST or e.errno == 183: if self.isfile(path): raise ResourceInvalidError(path,msg="Cannot create directory, there's already a file of that name: %(path)s") if not allow_recreate: raise DestinationExistsError(path,msg="Can not create a directory that already exists (try allow_recreate=True): %(path)s") elif e.errno == errno.ENOENT: raise ParentDirectoryMissingError(path) else: raise @convert_os_errors def remove(self, path): sys_path = self.getsyspath(path) try: os.remove(sys_path) except OSError, e: if e.errno == errno.EACCES and sys.platform == "win32": # sometimes windows says this for attempts to remove a dir if os.path.isdir(sys_path): raise ResourceInvalidError(path) if e.errno == errno.EPERM and sys.platform == "darwin": # sometimes OSX says this for attempts to remove a dir if os.path.isdir(sys_path): raise ResourceInvalidError(path) raise @convert_os_errors def removedir(self, path, recursive=False, force=False): sys_path = self.getsyspath(path) if force: for path2 in self.listdir(path, absolute=True, files_only=True): try: self.remove(path2) except ResourceNotFoundError: pass for path2 in self.listdir(path, absolute=True, dirs_only=True): try: self.removedir(path2, force=True) except ResourceNotFoundError: pass # Don't remove the root directory of this FS if path in ("","/"): return os.rmdir(sys_path) # Using os.removedirs() for this can result in dirs being # removed outside the root of this FS, so we recurse manually. if recursive: try: self.removedir(dirname(path),recursive=True) except DirectoryNotEmptyError: pass @convert_os_errors def rename(self, src, dst): path_src = self.getsyspath(src) path_dst = self.getsyspath(dst) try: os.rename(path_src, path_dst) except OSError, e: if e.errno: # POSIX rename() can rename over an empty directory but gives # ENOTEMPTY if the dir has contents. Raise UnsupportedError # instead of DirectoryEmptyError in this case. if e.errno == errno.ENOTEMPTY: raise UnsupportedError("rename") # Linux (at least) gives ENOENT when trying to rename into # a directory that doesn't exist. We want ParentMissingError # in this case. if e.errno == errno.ENOENT: if not os.path.exists(dirname(path_dst)): raise ParentDirectoryMissingError(dst) raise def _stat(self,path): """Stat the given path, normalising error codes.""" sys_path = self.getsyspath(path) try: return _os_stat(sys_path) except ResourceInvalidError: raise ResourceNotFoundError(path) @convert_os_errors def getinfo(self, path): stats = self._stat(path) info = dict((k, getattr(stats, k)) for k in dir(stats) if not k.startswith('__') ) info['size'] = info['st_size'] # TODO: this doesn't actually mean 'creation time' on unix ct = info.get('st_ctime', None) if ct is not None: info['created_time'] = datetime.datetime.fromtimestamp(ct) at = info.get('st_atime', None) if at is not None: info['accessed_time'] = datetime.datetime.fromtimestamp(at) mt = info.get('st_mtime', None) if mt is not None: info['modified_time'] = datetime.datetime.fromtimestamp(mt) return info @convert_os_errors def getsize(self, path): return self._stat(path).st_size fs-0.3.0/fs/osfs/watch_win32.py0000644000175000017500000003661411407352565014730 0ustar willwill""" fs.osfs.watch_win32 ============= Change watcher support for OSFS, using ReadDirectoryChangesW on win32. """ import os import sys import errno import threading import Queue import stat import struct import ctypes import ctypes.wintypes import traceback try: LPVOID = ctypes.wintypes.LPVOID except AttributeError: # LPVOID wasn't defined in Py2.5, guess it was introduced in Py2.6 LPVOID = ctypes.c_void_p from fs.errors import * from fs.path import * from fs.watch import * INVALID_HANDLE_VALUE = 0xFFFFFFFF FILE_NOTIFY_CHANGE_FILE_NAME = 0x01 FILE_NOTIFY_CHANGE_DIR_NAME = 0x02 FILE_NOTIFY_CHANGE_ATTRIBUTES = 0x04 FILE_NOTIFY_CHANGE_SIZE = 0x08 FILE_NOTIFY_CHANGE_LAST_WRITE = 0x010 FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x020 FILE_NOTIFY_CHANGE_CREATION = 0x040 FILE_NOTIFY_CHANGE_SECURITY = 0x0100 FILE_LIST_DIRECTORY = 0x01 FILE_SHARE_READ = 0x01 FILE_SHARE_WRITE = 0x02 OPEN_EXISTING = 3 FILE_FLAG_BACKUP_SEMANTICS = 0x02000000 FILE_FLAG_OVERLAPPED = 0x40000000 THREAD_TERMINATE = 0x0001 FILE_ACTION_ADDED = 1 FILE_ACTION_REMOVED = 2 FILE_ACTION_MODIFIED = 3 FILE_ACTION_RENAMED_OLD_NAME = 4 FILE_ACTION_RENAMED_NEW_NAME = 5 FILE_ACTION_OVERFLOW = 0xFFFF WAIT_ABANDONED = 0x00000080 WAIT_IO_COMPLETION = 0x000000C0 WAIT_OBJECT_0 = 0x00000000 WAIT_TIMEOUT = 0x00000102 def _errcheck_bool(value,func,args): if not value: raise ctypes.WinError() return args def _errcheck_handle(value,func,args): if not value: raise ctypes.WinError() if value == INVALID_HANDLE_VALUE: raise ctypes.WinError() return args def _errcheck_dword(value,func,args): if value == 0xFFFFFFFF: raise ctypes.WinError() return args class OVERLAPPED(ctypes.Structure): _fields_ = [('Internal', LPVOID), ('InternalHigh', LPVOID), ('Offset', ctypes.wintypes.DWORD), ('OffsetHigh', ctypes.wintypes.DWORD), ('Pointer', LPVOID), ('hEvent', ctypes.wintypes.HANDLE), ] try: ReadDirectoryChangesW = ctypes.windll.kernel32.ReadDirectoryChangesW except AttributeError: raise ImportError("ReadDirectoryChangesW is not available") ReadDirectoryChangesW.restype = ctypes.wintypes.BOOL ReadDirectoryChangesW.errcheck = _errcheck_bool ReadDirectoryChangesW.argtypes = ( ctypes.wintypes.HANDLE, # hDirectory LPVOID, # lpBuffer ctypes.wintypes.DWORD, # nBufferLength ctypes.wintypes.BOOL, # bWatchSubtree ctypes.wintypes.DWORD, # dwNotifyFilter ctypes.POINTER(ctypes.wintypes.DWORD), # lpBytesReturned ctypes.POINTER(OVERLAPPED), # lpOverlapped LPVOID #FileIOCompletionRoutine # lpCompletionRoutine ) CreateFileW = ctypes.windll.kernel32.CreateFileW CreateFileW.restype = ctypes.wintypes.HANDLE CreateFileW.errcheck = _errcheck_handle CreateFileW.argtypes = ( ctypes.wintypes.LPCWSTR, # lpFileName ctypes.wintypes.DWORD, # dwDesiredAccess ctypes.wintypes.DWORD, # dwShareMode LPVOID, # lpSecurityAttributes ctypes.wintypes.DWORD, # dwCreationDisposition ctypes.wintypes.DWORD, # dwFlagsAndAttributes ctypes.wintypes.HANDLE # hTemplateFile ) CloseHandle = ctypes.windll.kernel32.CloseHandle CloseHandle.restype = ctypes.wintypes.BOOL CloseHandle.argtypes = ( ctypes.wintypes.HANDLE, # hObject ) CreateEvent = ctypes.windll.kernel32.CreateEventW CreateEvent.restype = ctypes.wintypes.HANDLE CreateEvent.errcheck = _errcheck_handle CreateEvent.argtypes = ( LPVOID, # lpEventAttributes ctypes.wintypes.BOOL, # bManualReset ctypes.wintypes.BOOL, # bInitialState ctypes.wintypes.LPCWSTR, #lpName ) SetEvent = ctypes.windll.kernel32.SetEvent SetEvent.restype = ctypes.wintypes.BOOL SetEvent.errcheck = _errcheck_bool SetEvent.argtypes = ( ctypes.wintypes.HANDLE, # hEvent ) WaitForSingleObjectEx = ctypes.windll.kernel32.WaitForSingleObjectEx WaitForSingleObjectEx.restype = ctypes.wintypes.DWORD WaitForSingleObjectEx.errcheck = _errcheck_dword WaitForSingleObjectEx.argtypes = ( ctypes.wintypes.HANDLE, # hObject ctypes.wintypes.DWORD, # dwMilliseconds ctypes.wintypes.BOOL, # bAlertable ) CreateIoCompletionPort = ctypes.windll.kernel32.CreateIoCompletionPort CreateIoCompletionPort.restype = ctypes.wintypes.HANDLE CreateIoCompletionPort.errcheck = _errcheck_handle CreateIoCompletionPort.argtypes = ( ctypes.wintypes.HANDLE, # FileHandle ctypes.wintypes.HANDLE, # ExistingCompletionPort LPVOID, # CompletionKey ctypes.wintypes.DWORD, # NumberOfConcurrentThreads ) GetQueuedCompletionStatus = ctypes.windll.kernel32.GetQueuedCompletionStatus GetQueuedCompletionStatus.restype = ctypes.wintypes.BOOL GetQueuedCompletionStatus.errcheck = _errcheck_bool GetQueuedCompletionStatus.argtypes = ( ctypes.wintypes.HANDLE, # CompletionPort LPVOID, # lpNumberOfBytesTransferred LPVOID, # lpCompletionKey ctypes.POINTER(OVERLAPPED), # lpOverlapped ctypes.wintypes.DWORD, # dwMilliseconds ) PostQueuedCompletionStatus = ctypes.windll.kernel32.PostQueuedCompletionStatus PostQueuedCompletionStatus.restype = ctypes.wintypes.BOOL PostQueuedCompletionStatus.errcheck = _errcheck_bool PostQueuedCompletionStatus.argtypes = ( ctypes.wintypes.HANDLE, # CompletionPort ctypes.wintypes.DWORD, # lpNumberOfBytesTransferred ctypes.wintypes.DWORD, # lpCompletionKey ctypes.POINTER(OVERLAPPED), # lpOverlapped ) class WatchedDirectory(object): def __init__(self,callback,path,flags,recursive=True): self.path = path self.flags = flags self.callback = callback self.recursive = recursive self.handle = None self.handle = CreateFileW(path, FILE_LIST_DIRECTORY, FILE_SHARE_READ | FILE_SHARE_WRITE, None, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS|FILE_FLAG_OVERLAPPED, None) self.result = ctypes.create_string_buffer(1024) self.overlapped = overlapped = OVERLAPPED() self.ready = threading.Event() def __del__(self): self.close() def close(self): if self.handle is not None: CloseHandle(self.handle) self.handle = None def post(self): overlapped = self.overlapped overlapped.Internal = 0 overlapped.InternalHigh = 0 overlapped.Offset = 0 overlapped.OffsetHigh = 0 overlapped.Pointer = 0 overlapped.hEvent = 0 ReadDirectoryChangesW(self.handle, ctypes.byref(self.result),len(self.result), self.recursive,self.flags,None,overlapped,None) def complete(self,nbytes): if nbytes == 0: self.callback(None,0) else: res = self.result.raw[:nbytes] for (name,action) in self._extract_change_info(res): if self.callback: self.callback(os.path.join(self.path,name),action) def _extract_change_info(self,buffer): """Extract the information out of a FILE_NOTIFY_INFORMATION structure.""" pos = 0 while pos < len(buffer): jump, action, namelen = struct.unpack("iii",buffer[pos:pos+12]) name = buffer[pos+12:pos+12+namelen].decode("utf16") yield (name,action) if not jump: break pos += jump class WatchThread(threading.Thread): """Thread for watching filesystem changes.""" def __init__(self): super(WatchThread,self).__init__() self.closed = False self.watched_directories = {} self.ready = threading.Event() self._iocp = None self._new_watches = Queue.Queue() def close(self): if not self.closed: self.closed = True if self._iocp: PostQueuedCompletionStatus(self._iocp,0,1,None) def add_watcher(self,callback,path,events,recursive): if os.path.isfile(path): path = os.path.dirname(path) watched_dirs = [] for w in self._get_watched_dirs(callback,path,events,recursive): self.attach_watched_directory(w) watched_dirs.append(w) return watched_dirs def del_watcher(self,w): w = self.watched_directories.pop(hash(w)) w.callback = None w.close() def _get_watched_dirs(self,callback,path,events,recursive): do_access = False do_change = False flags = 0 for evt in events: if issubclass(ACCESSED,evt): do_access = True if issubclass(MODIFIED,evt): do_change = True flags |= FILE_NOTIFY_CHANGE_ATTRIBUTES flags |= FILE_NOTIFY_CHANGE_CREATION flags |= FILE_NOTIFY_CHANGE_SECURITY if issubclass(CREATED,evt): flags |= FILE_NOTIFY_CHANGE_FILE_NAME flags |= FILE_NOTIFY_CHANGE_DIR_NAME if issubclass(REMOVED,evt): flags |= FILE_NOTIFY_CHANGE_FILE_NAME flags |= FILE_NOTIFY_CHANGE_DIR_NAME if issubclass(MOVED_SRC,evt): flags |= FILE_NOTIFY_CHANGE_FILE_NAME flags |= FILE_NOTIFY_CHANGE_DIR_NAME if issubclass(MOVED_DST,evt): flags |= FILE_NOTIFY_CHANGE_FILE_NAME flags |= FILE_NOTIFY_CHANGE_DIR_NAME if do_access: # Separately capture FILE_NOTIFY_CHANGE_LAST_ACCESS events # so we can reliably generate ACCESSED events. def on_access_event(path,action): if action == FILE_ACTION_OVERFLOW: callback(OVERFLOW,path) else: callback(ACCESSED,path) yield WatchedDirectory(on_access_event,path, FILE_NOTIFY_CHANGE_LAST_ACCESS,recursive) if do_change: # Separately capture FILE_NOTIFY_CHANGE_LAST_WRITE events # so we can generate MODIFIED(data_changed=True) events. cflags = FILE_NOTIFY_CHANGE_LAST_WRITE | FILE_NOTIFY_CHANGE_SIZE def on_change_event(path,action): if action == FILE_ACTION_OVERFLOW: callback(OVERFLOW,path) else: callback(MODIFIED,path,True) yield WatchedDirectory(on_change_event,path,cflags,recursive) if flags: # All other events we can route through a common handler. old_name = [None] def on_misc_event(path,action): if action == FILE_ACTION_OVERFLOW: callback(OVERFLOW,path) elif action == FILE_ACTION_ADDED: callback(CREATED,path) elif action == FILE_ACTION_REMOVED: callback(REMOVED,path) elif action == FILE_ACTION_MODIFIED: callback(MODIFIED,path) elif action == FILE_ACTION_RENAMED_OLD_NAME: old_name[0] = path elif action == FILE_ACTION_RENAMED_NEW_NAME: callback(MOVED_DST,path,old_name[0]) callback(MOVED_SRC,old_name[0],path) old_name[0] = None yield WatchedDirectory(on_misc_event,path,flags,recursive) def run(self): try: self._iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE,None,0,1) self.ready.set() nbytes = ctypes.wintypes.DWORD() iocpkey = ctypes.wintypes.DWORD() overlapped = OVERLAPPED() while not self.closed: try: GetQueuedCompletionStatus(self._iocp, ctypes.byref(nbytes), ctypes.byref(iocpkey), ctypes.byref(overlapped), -1) except WindowsError: traceback.print_exc() else: if iocpkey.value > 1: w = self.watched_directories[iocpkey.value] w.complete(nbytes.value) w.post() elif not self.closed: try: while True: w = self._new_watches.get_nowait() CreateIoCompletionPort(w.handle,self._iocp,hash(w),0) w.post() w.ready.set() except Queue.Empty: pass finally: self.ready.set() for w in self.watched_directories.itervalues(): w.close() if self._iocp: CloseHandle(self._iocp) def attach_watched_directory(self,w): self.watched_directories[hash(w)] = w self._new_watches.put(w) PostQueuedCompletionStatus(self._iocp,0,1,None) w.ready.wait() class OSFSWatchMixin(WatchableFSMixin): """Mixin providing change-watcher support via pyinotify.""" __watch_lock = threading.Lock() __watch_thread = None def close(self): super(OSFSWatchMixin,self).close() self.__shutdown_watch_thread(force=True) self.notify_watchers(CLOSED) @convert_os_errors def add_watcher(self,callback,path="/",events=None,recursive=True): w = super(OSFSWatchMixin,self).add_watcher(callback,path,events,recursive) syspath = self.getsyspath(path) wt = self.__get_watch_thread() def handle_event(event_class,path,*args,**kwds): try: path = self.unsyspath(path) except ValueError: raise else: event = event_class(self,path,*args,**kwds) w.handle_event(event) w._watch_obj = wt.add_watcher(handle_event,syspath,w.events,w.recursive) return w @convert_os_errors def del_watcher(self,watcher_or_callback): wt = self.__get_watch_thread() if isinstance(watcher_or_callback,Watcher): watchers = [watcher_or_callback] else: watchers = self._find_watchers(watcher_or_callback) for watcher in watchers: wt.del_watcher(watcher._watch_obj) super(OSFSWatchMixin,self).del_watcher(watcher) if not wt.watched_directories: self.__shutdown_watch_thread() def __get_watch_thread(self): """Get the shared watch thread, initializing if necessary.""" if self.__watch_thread is None: self.__watch_lock.acquire() try: if self.__watch_thread is None: wt = WatchThread() wt.start() wt.ready.wait() OSFSWatchMixin.__watch_thread = wt finally: self.__watch_lock.release() return self.__watch_thread def __shutdown_watch_thread(self,force=False): """Stop the shared watch manager, if there are no watches left.""" self.__watch_lock.acquire() try: if OSFSWatchMixin.__watch_thread is None: return if not force and OSFSWatchMixin.__watch_thread.watched_directories: return try: OSFSWatchMixin.__watch_thread.close() except EnvironmentError: pass OSFSWatchMixin.__watch_thread = None finally: self.__watch_lock.release() fs-0.3.0/fs/osfs/watch_inotify.py0000644000175000017500000001572611406635052015442 0ustar willwill""" fs.osfs.watch_inotify ============= Change watcher support for OSFS, backed by pyinotify. """ import os import sys import errno import threading from fs.errors import * from fs.path import * from fs.watch import * try: import pyinotify except Exception, e: # pyinotify sometimes raises its own custom errors on import. # How on earth are we supposed to catch them when we can't import them? if isinstance(e,ImportError): raise raise ImportError("could not import pyinotify") class OSFSWatchMixin(WatchableFSMixin): """Mixin providing change-watcher support via pyinotify.""" __watch_lock = threading.Lock() __watch_manager = None __watch_notifier = None def close(self): super(OSFSWatchMixin,self).close() self.__shutdown_watch_manager(force=True) self.notify_watchers(CLOSED) def add_watcher(self,callback,path="/",events=None,recursive=True): w = super(OSFSWatchMixin,self).add_watcher(callback,path,events,recursive) syspath = self.getsyspath(path) if isinstance(syspath,unicode): syspath = syspath.encode(sys.getfilesystemencoding()) wm = self.__get_watch_manager() evtmask = self.__get_event_mask(events) def process_events(event): self.__route_event(w,event) kwds = dict(rec=recursive,auto_add=recursive,quiet=False) try: wids = wm.add_watch(syspath,evtmask,process_events,**kwds) except pyinotify.WatchManagerError, e: raise OperationFailedError("add_watcher",details=e) w._pyinotify_id = wids[syspath] return w def del_watcher(self,watcher_or_callback): wm = self.__get_watch_manager() if isinstance(watcher_or_callback,Watcher): watchers = [watcher_or_callback] else: watchers = self._find_watchers(watcher_or_callback) for watcher in watchers: wm.rm_watch(watcher._pyinotify_id,rec=watcher.recursive) super(OSFSWatchMixin,self).del_watcher(watcher) if not wm._wmd: self.__shutdown_watch_manager() def __get_event_mask(self,events): """Convert the given set of events into a pyinotify event mask.""" if events is None: events = (EVENT,) mask = 0 for evt in events: if issubclass(ACCESSED,evt): mask |= pyinotify.IN_ACCESS if issubclass(CREATED,evt): mask |= pyinotify.IN_CREATE if issubclass(REMOVED,evt): mask |= pyinotify.IN_DELETE mask |= pyinotify.IN_DELETE_SELF if issubclass(MODIFIED,evt): mask |= pyinotify.IN_ATTRIB mask |= pyinotify.IN_MODIFY mask |= pyinotify.IN_CLOSE_WRITE if issubclass(MOVED_SRC,evt): mask |= pyinotify.IN_MOVED_FROM mask |= pyinotify.IN_MOVED_TO if issubclass(MOVED_DST,evt): mask |= pyinotify.IN_MOVED_FROM mask |= pyinotify.IN_MOVED_TO if issubclass(OVERFLOW,evt): mask |= pyinotify.IN_Q_OVERFLOW if issubclass(CLOSED,evt): mask |= pyinotify.IN_UNMOUNT return mask def __route_event(self,watcher,inevt): """Convert pyinotify event into fs.watch event, then handle it.""" try: path = self.unsyspath(inevt.pathname) except ValueError: return try: src_path = inevt.src_pathname if src_path is not None: src_path = self.unsyspath(src_path) except (AttributeError,ValueError): src_path = None if inevt.mask & pyinotify.IN_ACCESS: watcher.handle_event(ACCESSED(self,path)) if inevt.mask & pyinotify.IN_CREATE: watcher.handle_event(CREATED(self,path)) # Recursive watching of directories in pyinotify requires # the creation of a new watch for each subdir, resulting in # a race condition whereby events in the subdir are missed. # We'd prefer to duplicate events than to miss them. if inevt.mask & pyinotify.IN_ISDIR: try: # pyinotify does this for dirs itself, we only. # need to worry about newly-created files. for child in self.listdir(path,files_only=True): cpath = pathjoin(path,child) self.notify_watchers(CREATED,cpath) self.notify_watchers(MODIFIED,cpath,True) except FSError: pass if inevt.mask & pyinotify.IN_DELETE: watcher.handle_event(REMOVED(self,path)) if inevt.mask & pyinotify.IN_DELETE_SELF: watcher.handle_event(REMOVED(self,path)) if inevt.mask & pyinotify.IN_ATTRIB: watcher.handle_event(MODIFIED(self,path,False)) if inevt.mask & pyinotify.IN_MODIFY: watcher.handle_event(MODIFIED(self,path,True)) if inevt.mask & pyinotify.IN_CLOSE_WRITE: watcher.handle_event(MODIFIED(self,path,True)) if inevt.mask & pyinotify.IN_MOVED_FROM: # Sorry folks, I'm not up for decoding the destination path. watcher.handle_event(MOVED_SRC(self,path,None)) if inevt.mask & pyinotify.IN_MOVED_TO: if getattr(inevt,"src_pathname",None): watcher.handle_event(MOVED_SRC(self,src_path,path)) watcher.handle_event(MOVED_DST(self,path,src_path)) else: watcher.handle_event(MOVED_DST(self,path,None)) if inevt.mask & pyinotify.IN_Q_OVERFLOW: watcher.handle_event(OVERFLOW(self)) if inevt.mask & pyinotify.IN_UNMOUNT: watcher.handle_event(CLOSE(self)) def __get_watch_manager(self): """Get the shared watch manager, initializing if necessary.""" if OSFSWatchMixin.__watch_notifier is None: self.__watch_lock.acquire() try: if self.__watch_notifier is None: wm = pyinotify.WatchManager() n = pyinotify.ThreadedNotifier(wm) n.start() OSFSWatchMixin.__watch_manager = wm OSFSWatchMixin.__watch_notifier = n finally: self.__watch_lock.release() return OSFSWatchMixin.__watch_manager def __shutdown_watch_manager(self,force=False): """Stop the shared watch manager, if there are no watches left.""" self.__watch_lock.acquire() try: if OSFSWatchMixin.__watch_manager is None: return if not force and OSFSWatchMixin.__watch_manager._wmd: return OSFSWatchMixin.__watch_notifier.stop() OSFSWatchMixin.__watch_notifier = None OSFSWatchMixin.__watch_manager = None finally: self.__watch_lock.release() fs-0.3.0/fs/memoryfs.py0000644000175000017500000004504411407423140013450 0ustar willwill#!/usr/bin/env python """ fs.memoryfs =========== A Filesystem that exists in memory only. Which makes them extremely fast, but non-permanent. If you open a file from a `memoryfs` you will get back a StringIO object from the standard library. """ import datetime from fs.path import iteratepath from fs.base import * from fs import _thread_synchronize_default try: from cStringIO import StringIO except ImportError: from StringIO import StringIO def _check_mode(mode, mode_chars): for c in mode_chars: if c not in mode: return False return True class MemoryFile(object): def __init__(self, path, memory_fs, value, mode): self.closed = False self.path = path self.memory_fs = memory_fs self.mode = mode self.mem_file = None if '+' in mode: self.mem_file = StringIO() self.mem_file.write(value) self.mem_file.seek(0) elif _check_mode(mode, 'wa'): self.mem_file = StringIO() self.mem_file.write(value) elif _check_mode(mode, 'w'): self.mem_file = StringIO() elif _check_mode(mode, 'ra'): self.mem_file = StringIO() self.mem_file.write(value) elif _check_mode(mode, 'r'): self.mem_file = StringIO(value) self.mem_file.seek(0) elif _check_mode(mode, "a"): self.mem_file = StringIO() self.mem_file.write(value) else: if value is not None: self.mem_file = StringIO(value) else: self.mem_file = StringIO() assert self.mem_file is not None, "self.mem_file should have a value" def __str__(self): return "" % (self.memory_fs, self.path) __repr__ = __str__ def __unicode__(self): return u"" % (self.memory_fs, self.path) def __del__(self): if not self.closed: self.close() def flush(self): value = self.mem_file.getvalue() self.memory_fs._on_flush_memory_file(self.path, value) def __iter__(self): return iter(self.mem_file) def next(self): return self.mem_file.next() def readline(self, *args, **kwargs): return self.mem_file.readline(*args, **kwargs) def close(self): if not self.closed and self.mem_file is not None: value = self.mem_file.getvalue() self.memory_fs._on_close_memory_file(self, self.path, value) self.mem_file.close() self.closed = True def read(self, size=None): if size is None: size = -1 return self.mem_file.read(size) def seek(self, *args, **kwargs): return self.mem_file.seek(*args, **kwargs) def tell(self): return self.mem_file.tell() def truncate(self, *args, **kwargs): return self.mem_file.truncate(*args, **kwargs) def write(self, data): self.memory_fs._on_modify_memory_file(self.path) return self.mem_file.write(data) def writelines(self, *args, **kwargs): return self.mem_file.writelines(*args, **kwargs) def __enter__(self): return self def __exit__(self,exc_type,exc_value,traceback): self.close() return False class DirEntry(object): def __init__(self, type, name, contents=None): assert type in ("dir", "file"), "Type must be dir or file!" self.type = type self.name = name if contents is None and type == "dir": contents = {} self.open_files = [] self.contents = contents self.data = None self.locks = 0 self.created_time = datetime.datetime.now() self.modified_time = self.created_time self.accessed_time = self.created_time self.xattrs = {} def lock(self): self.locks += 1 def unlock(self): self.locks -=1 assert self.locks >=0, "Lock / Unlock mismatch!" def desc_contents(self): if self.isfile(): return "" % self.name elif self.isdir(): return "" % "".join( "%s: %s"% (k, v.desc_contents()) for k, v in self.contents.iteritems()) def isdir(self): return self.type == "dir" def isfile(self): return self.type == "file" def islocked(self): return self.locks > 0 def __str__(self): return "%s: %s" % (self.name, self.desc_contents()) class MemoryFS(FS): """An in-memory filesystem. """ def _make_dir_entry(self, *args, **kwargs): return self.dir_entry_factory(*args, **kwargs) def __init__(self, file_factory=None): super(MemoryFS, self).__init__(thread_synchronize=_thread_synchronize_default) self.dir_entry_factory = DirEntry self.file_factory = file_factory or MemoryFile self.root = self._make_dir_entry('dir', 'root') def __str__(self): return "" __repr__ = __str__ def __unicode__(self): return unicode(self.__str__()) @synchronize def _get_dir_entry(self, dirpath): current_dir = self.root for path_component in iteratepath(dirpath): if current_dir.contents is None: return None dir_entry = current_dir.contents.get(path_component, None) if dir_entry is None: return None current_dir = dir_entry return current_dir @synchronize def _dir_entry(self, path): dir_entry = self._get_dir_entry(path) if dir_entry is None: raise ResourceNotFoundError(path) return dir_entry @synchronize def desc(self, path): if self.isdir(path): return "Memory dir" elif self.isfile(path): return "Memory file object" else: return "No description available" @synchronize def isdir(self, path): dir_item = self._get_dir_entry(normpath(path)) if dir_item is None: return False return dir_item.isdir() @synchronize def isfile(self, path): dir_item = self._get_dir_entry(normpath(path)) if dir_item is None: return False return dir_item.isfile() @synchronize def exists(self, path): return self._get_dir_entry(path) is not None @synchronize def makedir(self, dirname, recursive=False, allow_recreate=False): if not dirname: raise PathError(dirname) fullpath = dirname dirpath, dirname = pathsplit(dirname) if recursive: parent_dir = self._get_dir_entry(dirpath) if parent_dir is not None: if parent_dir.isfile(): raise ResourceInvalidError(dirname, msg="Can not create a directory, because path references a file: %(path)s") else: if not allow_recreate: if dirname in parent_dir.contents: raise DestinationExistsError(dirname, msg="Can not create a directory that already exists (try allow_recreate=True): %(path)s") current_dir = self.root for path_component in iteratepath(dirpath)[:-1]: dir_item = current_dir.contents.get(path_component, None) if dir_item is None: break if not dir_item.isdir(): raise ResourceInvalidError(dirname, msg="Can not create a directory, because path references a file: %(path)s") current_dir = dir_item current_dir = self.root for path_component in iteratepath(dirpath): dir_item = current_dir.contents.get(path_component, None) if dir_item is None: new_dir = self._make_dir_entry("dir", path_component) current_dir.contents[path_component] = new_dir current_dir = new_dir else: current_dir = dir_item parent_dir = current_dir else: parent_dir = self._get_dir_entry(dirpath) if parent_dir is None: raise ParentDirectoryMissingError(dirname, msg="Could not make dir, as parent dir does not exist: %(path)s") dir_item = parent_dir.contents.get(dirname, None) if dir_item is not None: if dir_item.isdir(): if not allow_recreate: raise DestinationExistsError(dirname) else: raise ResourceInvalidError(dirname, msg="Can not create a directory, because path references a file: %(path)s") if dir_item is None: parent_dir.contents[dirname] = self._make_dir_entry("dir", dirname) def _orphan_files(self, file_dir_entry): for f in file_dir_entry.open_files: f.close() @synchronize def _lock_dir_entry(self, path): dir_entry = self._get_dir_entry(path) dir_entry.lock() @synchronize def _unlock_dir_entry(self, path): dir_entry = self._get_dir_entry(path) dir_entry.unlock() @synchronize def _is_dir_locked(self, path): dir_entry = self._get_dir_entry(path) return dir_entry.islocked() @synchronize def open(self, path, mode="r", **kwargs): filepath, filename = pathsplit(path) parent_dir_entry = self._get_dir_entry(filepath) if parent_dir_entry is None or not parent_dir_entry.isdir(): raise ResourceNotFoundError(path) if 'r' in mode or 'a' in mode: if filename not in parent_dir_entry.contents: raise ResourceNotFoundError(path) file_dir_entry = parent_dir_entry.contents[filename] if 'a' in mode: if file_dir_entry.islocked(): raise ResourceLockedError(path) file_dir_entry.accessed_time = datetime.datetime.now() self._lock_dir_entry(path) mem_file = self.file_factory(path, self, file_dir_entry.data, mode) file_dir_entry.open_files.append(mem_file) return mem_file elif 'w' in mode: if filename not in parent_dir_entry.contents: file_dir_entry = self._make_dir_entry("file", filename) parent_dir_entry.contents[filename] = file_dir_entry else: file_dir_entry = parent_dir_entry.contents[filename] if file_dir_entry.islocked(): raise ResourceLockedError(path) file_dir_entry.accessed_time = datetime.datetime.now() self._lock_dir_entry(path) mem_file = self.file_factory(path, self, None, mode) file_dir_entry.open_files.append(mem_file) return mem_file if parent_dir_entry is None: raise ResourceNotFoundError(path) @synchronize def remove(self, path): dir_entry = self._get_dir_entry(path) if dir_entry is None: raise ResourceNotFoundError(path) if dir_entry.islocked(): self._orphan_files(dir_entry) #raise ResourceLockedError(path) if dir_entry.isdir(): raise ResourceInvalidError(path,msg="That's a directory, not a file: %(path)s") pathname, dirname = pathsplit(path) parent_dir = self._get_dir_entry(pathname) del parent_dir.contents[dirname] @synchronize def removedir(self, path, recursive=False, force=False): dir_entry = self._get_dir_entry(path) if dir_entry is None: raise ResourceNotFoundError(path) if dir_entry.islocked(): raise ResourceLockedError(path) if not dir_entry.isdir(): raise ResourceInvalidError(path, msg="Can't remove resource, its not a directory: %(path)s" ) if dir_entry.contents and not force: raise DirectoryNotEmptyError(path) if recursive: rpathname = path while rpathname: rpathname, dirname = pathsplit(rpathname) parent_dir = self._get_dir_entry(rpathname) del parent_dir.contents[dirname] else: pathname, dirname = pathsplit(path) parent_dir = self._get_dir_entry(pathname) del parent_dir.contents[dirname] @synchronize def rename(self, src, dst): src_dir,src_name = pathsplit(src) src_entry = self._get_dir_entry(src) if src_entry is None: raise ResourceNotFoundError(src) open_files = src_entry.open_files[:] for f in open_files: f.flush() f.path = dst dst_dir,dst_name = pathsplit(dst) dst_entry = self._get_dir_entry(dst) if dst_entry is not None: raise DestinationExistsError(path) src_dir_entry = self._get_dir_entry(src_dir) src_xattrs = src_dir_entry.xattrs.copy() dst_dir_entry = self._get_dir_entry(dst_dir) if dst_dir_entry is None: raise ParentDirectoryMissingError(dst) dst_dir_entry.contents[dst_name] = src_dir_entry.contents[src_name] dst_dir_entry.contents[dst_name].name = dst_name dst_dir_entry.xattrs.update(src_xattrs) del src_dir_entry.contents[src_name] def settimes(self, path, accessed_time=None, modified_time=None): now = datetime.datetime.now() if accessed_time is None: accessed_time = now if modified_time is None: modified_time = now dir_entry = self._get_dir_entry(path) if dir_entry is not None: dir_entry.accessed_time = accessed_time dir_entry.modified_time = modified_time return True return False @synchronize def _on_close_memory_file(self, open_file, path, value): dir_entry = self._get_dir_entry(path) if dir_entry is not None and value is not None: dir_entry.data = value dir_entry.open_files.remove(open_file) self._unlock_dir_entry(path) @synchronize def _on_flush_memory_file(self, path, value): dir_entry = self._get_dir_entry(path) dir_entry.data = value @synchronize def _on_modify_memory_file(self, path): dir_entry = self._get_dir_entry(path) dir_entry.modified_time = datetime.datetime.now() @synchronize def listdir(self, path="/", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False): dir_entry = self._get_dir_entry(path) if dir_entry is None: raise ResourceNotFoundError(path) if dir_entry.isfile(): raise ResourceInvalidError(path,msg="that's a file, not a directory: %(path)s") paths = dir_entry.contents.keys() for (i,p) in enumerate(paths): if not isinstance(p,unicode): paths[i] = unicode(p) return self._listdir_helper(path, paths, wildcard, full, absolute, dirs_only, files_only) @synchronize def getinfo(self, path): dir_entry = self._get_dir_entry(path) if dir_entry is None: raise ResourceNotFoundError(path) info = {} info['created_time'] = dir_entry.created_time info['modified_time'] = dir_entry.modified_time info['accessed_time'] = dir_entry.accessed_time if dir_entry.isdir(): info['st_mode'] = 0755 else: info['size'] = len(dir_entry.data or '') info['st_mode'] = 0666 return info @synchronize def copydir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=16384): src_dir_entry = self._get_dir_entry(src) src_xattrs = src_dir_entry.xattrs.copy() super(MemoryFS, self).copydir(src, dst, overwrite, ignore_errors=ignore_errors, chunk_size=chunk_size) dst_dir_entry = self._get_dir_entry(dst) if dst_dir_entry is not None: dst_dir_entry.xattrs.update(src_xattrs) @synchronize def movedir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=16384): src_dir_entry = self._get_dir_entry(src) src_xattrs = src_dir_entry.xattrs.copy() super(MemoryFS, self).movedir(src, dst, overwrite, ignore_errors=ignore_errors, chunk_size=chunk_size) dst_dir_entry = self._get_dir_entry(dst) if dst_dir_entry is not None: dst_dir_entry.xattrs.update(src_xattrs) @synchronize def copy(self, src, dst, overwrite=False, chunk_size=16384): src_dir_entry = self._get_dir_entry(src) src_xattrs = src_dir_entry.xattrs.copy() super(MemoryFS, self).copy(src, dst, overwrite, chunk_size) dst_dir_entry = self._get_dir_entry(dst) if dst_dir_entry is not None: dst_dir_entry.xattrs.update(src_xattrs) @synchronize def move(self, src, dst, overwrite=False, chunk_size=16384): src_dir_entry = self._get_dir_entry(src) src_xattrs = src_dir_entry.xattrs.copy() super(MemoryFS, self).move(src, dst, overwrite, chunk_size) dst_dir_entry = self._get_dir_entry(dst) if dst_dir_entry is not None: dst_dir_entry.xattrs.update(src_xattrs) @synchronize def setxattr(self, path, key, value): dir_entry = self._dir_entry(path) key = unicode(key) dir_entry.xattrs[key] = value @synchronize def getxattr(self, path, key, default=None): key = unicode(key) dir_entry = self._dir_entry(path) return dir_entry.xattrs.get(key, default) @synchronize def delxattr(self, path, key): dir_entry = self._dir_entry(path) try: del dir_entry.xattrs[key] except KeyError: pass @synchronize def listxattrs(self, path): dir_entry = self._dir_entry(path) return dir_entry.xattrs.keys()fs-0.3.0/fs/rpcfs.py0000644000175000017500000002055011407373043012725 0ustar willwill""" fs.rpcfs ======== This module provides the class 'RPCFS' to access a remote FS object over XML-RPC. You probably want to use this in conjunction with the 'RPCFSServer' class from the :mod:`fs.expose.xmlrpc` module. """ import xmlrpclib from fs.base import * from StringIO import StringIO if hasattr(StringIO,"__exit__"): class StringIO(StringIO): pass else: class StringIO(StringIO): def __enter__(self): return self def __exit__(self,exc_type,exc_value,traceback): self.close() return False def re_raise_faults(func): """Decorator to re-raise XML-RPC faults as proper exceptions.""" def wrapper(*args,**kwds): try: return func(*args,**kwds) except xmlrpclib.Fault, f: # Make sure it's in a form we can handle bits = f.faultString.split(" ") if bits[0] not in [":") cls = bits[0] msg = ">:".join(bits[1:]) while cls[0] in ["'",'"']: cls = cls[1:] while cls[-1] in ["'",'"']: cls = cls[:-1] cls = _object_by_name(cls) # Re-raise using the remainder of the fault code as message if cls: raise cls(msg) raise f return wrapper def _object_by_name(name,root=None): """Look up an object by dotted-name notation.""" bits = name.split(".") if root is None: try: obj = globals()[bits[0]] except KeyError: try: obj = __builtins__[bits[0]] except KeyError: obj = __import__(bits[0],globals()) else: obj = getattr(root,bits[0]) if len(bits) > 1: return _object_by_name(".".join(bits[1:]),obj) else: return obj class ReRaiseFaults: """XML-RPC proxy wrapper that re-raises Faults as proper Exceptions.""" def __init__(self,obj): self._obj = obj def __getattr__(self,attr): val = getattr(self._obj,attr) if callable(val): val = re_raise_faults(val) self.__dict__[attr] = val return val class RPCFS(FS): """Access a filesystem exposed via XML-RPC. This class provides the client-side logic for accessing a remote FS object, and is dual to the RPCFSServer class defined in fs.expose.xmlrpc. Example:: fs = RPCFS("http://my.server.com/filesystem/location/") """ def __init__(self, uri, transport=None): """Constructor for RPCFS objects. The only required argument is the uri of the server to connect to. This will be passed to the underlying XML-RPC server proxy object, along with the 'transport' argument if it is provided. :param uri: address of the server """ self.uri = uri self._transport = transport self.proxy = self._make_proxy() FS.__init__(self,thread_synchronize=False) def _make_proxy(self): kwds = dict(allow_none=True) if self._transport is not None: proxy = xmlrpclib.ServerProxy(self.uri,self._transport,**kwds) else: proxy = xmlrpclib.ServerProxy(self.uri,**kwds) return ReRaiseFaults(proxy) def __str__(self): return '' % (self.uri,) def __getstate__(self): state = super(RPCFS,self).__getstate__() try: del state['proxy'] except KeyError: pass return state def __setstate__(self, state): for (k,v) in state.iteritems(): self.__dict__[k] = v self.proxy = self._make_proxy() def encode_path(self, path): """Encode a filesystem path for sending over the wire. Unfortunately XMLRPC only supports ASCII strings, so this method must return something that can be represented in ASCII. The default is base64-encoded UTF8. """ return path.encode("utf8").encode("base64") def decode_path(self, path): """Decode paths arriving over the wire.""" return path.decode("base64").decode("utf8") def open(self, path, mode="r"): # TODO: chunked transport of large files path = self.encode_path(path) if "w" in mode: self.proxy.set_contents(path,xmlrpclib.Binary("")) if "r" in mode or "a" in mode or "+" in mode: try: data = self.proxy.get_contents(path).data except IOError: if "w" not in mode and "a" not in mode: raise ResourceNotFoundError(path) if not self.isdir(dirname(path)): raise ParentDirectoryMissingError(path) self.proxy.set_contents(path,xmlrpclib.Binary("")) else: data = "" f = StringIO(data) if "a" not in mode: f.seek(0,0) else: f.seek(0,2) oldflush = f.flush oldclose = f.close def newflush(): oldflush() self.proxy.set_contents(path,xmlrpclib.Binary(f.getvalue())) def newclose(): f.flush() oldclose() f.flush = newflush f.close = newclose return f def exists(self, path): path = self.encode_path(path) return self.proxy.exists(path) def isdir(self, path): path = self.encode_path(path) return self.proxy.isdir(path) def isfile(self, path): path = self.encode_path(path) return self.proxy.isfile(path) def listdir(self, path="./", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False): path = self.encode_path(path) entries = self.proxy.listdir(path,wildcard,full,absolute,dirs_only,files_only) return [self.decode_path(e) for e in entries] def makedir(self, path, recursive=False, allow_recreate=False): path = self.encode_path(path) return self.proxy.makedir(path,recursive,allow_recreate) def remove(self, path): path = self.encode_path(path) return self.proxy.remove(path) def removedir(self, path, recursive=False, force=False): path = self.encode_path(path) return self.proxy.removedir(path,recursive,force) def rename(self, src, dst): src = self.encode_path(src) dst = self.encode_path(dst) return self.proxy.rename(src,dst) def settimes(self, path, accessed_time, modified_time): path = self.encode_path(path) return self.proxy.settimes(path, accessed_time, modified_time) def getinfo(self, path): path = self.encode_path(path) return self.proxy.getinfo(path) def desc(self, path): path = self.encode_path(path) return self.proxy.desc(path) def getxattr(self, path, attr, default=None): path = self.encode_path(path) attr = self.encode_path(attr) return self.fs.getxattr(path,attr,default) def setxattr(self, path, attr, value): path = self.encode_path(path) attr = self.encode_path(attr) return self.fs.setxattr(path,attr,value) def delxattr(self, path, attr): path = self.encode_path(path) attr = self.encode_path(attr) return self.fs.delxattr(path,attr) def listxattrs(self, path): path = self.encode_path(path) return [self.decode_path(a) for a in self.fs.listxattrs(path)] def copy(self, src, dst, overwrite=False, chunk_size=16384): src = self.encode_path(src) dst = self.encode_path(dst) return self.proxy.copy(src,dst,overwrite,chunk_size) def move(self, src, dst, overwrite=False, chunk_size=16384): src = self.encode_path(src) dst = self.encode_path(dst) return self.proxy.move(src,dst,overwrite,chunk_size) def movedir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=16384): src = self.encode_path(src) dst = self.encode_path(dst) return self.proxy.movedir(src, dst, overwrite, ignore_errors, chunk_size) def copydir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=16384): src = self.encode_path(src) dst = self.encode_path(dst) return self.proxy.copydir(src,dst,overwrite,ignore_errors,chunk_size) fs-0.3.0/fs/wrapfs/0000755000175000017500000000000011407431454012537 5ustar willwillfs-0.3.0/fs/wrapfs/hidedotfilesfs.py0000644000175000017500000000514311406473006016106 0ustar willwill""" fs.wrapfs.hidedotfilesfs ======================== An FS wrapper class for hiding dot-files in directory listings. """ from fs.wrapfs import WrapFS class HideDotFilesFS(WrapFS): """FS wrapper class that hides dot-files in directory listings. The listdir() function takes an extra keyword argument 'hidden' indicating whether hidden dotfiles shoud be included in the output. It is False by default. """ def is_hidden(self, path): """Check whether the given path should be hidden.""" return path and basename(path)[0] == "." def _encode(self, path): return path def _decode(self, path): return path def listdir(self, path="", **kwds): hidden = kwds.pop("hidden",True) entries = self.wrapped_fs.listdir(path,**kwds) if not hidden: entries = [e for e in entries if not self.is_hidden(e)] return entries def walk(self, path="/", wildcard=None, dir_wildcard=None, search="breadth",hidden=False): if search == "breadth": dirs = [path] while dirs: current_path = dirs.pop() paths = [] for filename in self.listdir(current_path,hidden=hidden): path = pathjoin(current_path, filename) if self.isdir(path): if dir_wildcard is not None: if fnmatch(path, dir_wilcard): dirs.append(path) else: dirs.append(path) else: if wildcard is not None: if fnmatch(path, wildcard): paths.append(filename) else: paths.append(filename) yield (current_path, paths) elif search == "depth": def recurse(recurse_path): for path in self.listdir(recurse_path, wildcard=dir_wildcard, full=True, dirs_only=True,hidden=hidden): for p in recurse(path): yield p yield (recurse_path, self.listdir(recurse_path, wildcard=wildcard, files_only=True,hidden=hidden)) for p in recurse(path): yield p else: raise ValueError("Search should be 'breadth' or 'depth'") def isdirempty(self, path): path = normpath(path) iter_dir = iter(self.listdir(path,hidden=True)) try: iter_dir.next() except StopIteration: return True return False fs-0.3.0/fs/wrapfs/__init__.py0000644000175000017500000002230411407374233014652 0ustar willwill""" fs.wrapfs ========= A class for wrapping an existing FS object with additional functionality. This module provides the class WrapFS, a base class for objects that wrap another FS object and provide some transformation of its contents. It could be very useful for implementing e.g. transparent encryption or compression services. For a simple example of how this class could be used, see the 'HideDotFilesFS' class in the module fs.wrapfs.hidedotfilesfs. This wrapper implements the standard unix shell functionality of hiding dot-files in directory listings. """ import sys from fnmatch import fnmatch from fs.base import FS, threading, synchronize from fs.errors import * def rewrite_errors(func): """Re-write paths in errors raised by wrapped FS objects.""" @wraps(func) def wrapper(self,*args,**kwds): try: return func(self,*args,**kwds) except ResourceError, e: (exc_type,exc_inst,tb) = sys.exc_info() try: e.path = self._decode(e.path) except (AttributeError, ValueError, TypeError): raise e, None, tb raise return wrapper class WrapFS(FS): """FS that wraps another FS, providing translation etc. This class allows simple transforms to be applied to the names and/or contents of files in an FS. It could be used to implement e.g. compression or encryption in a relatively painless manner. The following methods can be overridden to control how files are accessed in the underlying FS object: * _file_wrap(file, mode): called for each file that is opened from the underlying FS; may return a modified file-like object. * _encode(path): encode a path for access in the underlying FS * _decode(path): decode a path from the underlying FS If the required path translation proceeds one component at a time, it may be simpler to override the _encode_name() and _decode_name() methods. """ def __init__(self, fs): super(WrapFS,self).__init__() try: self._lock = fs._lock except (AttributeError,FSError): self._lock = None self.wrapped_fs = fs def _file_wrap(self, f, mode): """Apply wrapping to an opened file.""" return f def _encode_name(self, name): """Encode path component for the underlying FS.""" return name def _decode_name(self, name): """Decode path component from the underlying FS.""" return name def _encode(self, path): """Encode path for the underlying FS.""" names = path.split("/") e_names = [] for name in names: if name == "": e_names.append("") else: e_names.append(self._encode_name(name)) return "/".join(e_names) def _decode(self, path): """Decode path from the underlying FS.""" names = path.split("/") d_names = [] for name in names: if name == "": d_names.append("") else: d_names.append(self._decode_name(name)) return "/".join(d_names) def _adjust_mode(self, mode): """Adjust the mode used to open a file in the underlying FS. This method takes the mode given when opening a file, and should return a two-tuple giving the mode to be used in this FS as first item, and the mode to be used in the underlying FS as the second. An example of why this is needed is a WrapFS subclass that does transparent file compression - in this case files from the wrapped FS cannot be opened in append mode. """ return (mode,mode) @rewrite_errors def getsyspath(self, path, allow_none=False): return self.wrapped_fs.getsyspath(self._encode(path),allow_none) @rewrite_errors def hassyspath(self, path): return self.wrapped_fs.hassyspath(self._encode(path)) @rewrite_errors def open(self, path, mode="r", **kwargs): (mode, wmode) = self._adjust_mode(mode) f = self.wrapped_fs.open(self._encode(path), wmode, **kwargs) return self._file_wrap(f, mode) @rewrite_errors def exists(self, path): return self.wrapped_fs.exists(self._encode(path)) @rewrite_errors def isdir(self, path): return self.wrapped_fs.isdir(self._encode(path)) @rewrite_errors def isfile(self, path): return self.wrapped_fs.isfile(self._encode(path)) @rewrite_errors def listdir(self, path="", **kwds): wildcard = kwds.pop("wildcard","*") info = kwds.get("info",False) entries = [] for e in self.wrapped_fs.listdir(self._encode(path),**kwds): if info: e = e.copy() e["name"] = self._decode(e["name"]) if wildcard is not None and not fnmatch(e["name"],wildcard): continue else: e = self._decode(e) if wildcard is not None and not fnmatch(e,wildcard): continue entries.append(e) return entries @rewrite_errors def makedir(self, path, *args, **kwds): return self.wrapped_fs.makedir(self._encode(path),*args,**kwds) @rewrite_errors def remove(self, path): return self.wrapped_fs.remove(self._encode(path)) @rewrite_errors def removedir(self, path, *args, **kwds): return self.wrapped_fs.removedir(self._encode(path),*args,**kwds) @rewrite_errors def rename(self, src, dst): return self.wrapped_fs.rename(self._encode(src),self._encode(dst)) @rewrite_errors def getinfo(self, path): return self.wrapped_fs.getinfo(self._encode(path)) @rewrite_errors def settimes(self, path, *args, **kwds): return self.wrapped_fs.settimes(self._encode(path), *args,**kwds) @rewrite_errors def desc(self, path): return self.wrapped_fs.desc(self._encode(path)) @rewrite_errors def copy(self, src, dst, **kwds): return self.wrapped_fs.copy(self._encode(src),self._encode(dst),**kwds) @rewrite_errors def move(self, src, dst, **kwds): return self.wrapped_fs.move(self._encode(src),self._encode(dst),**kwds) @rewrite_errors def movedir(self, src, dst, **kwds): return self.wrapped_fs.movedir(self._encode(src),self._encode(dst),**kwds) @rewrite_errors def copydir(self, src, dst, **kwds): return self.wrapped_fs.copydir(self._encode(src),self._encode(dst),**kwds) @rewrite_errors def getxattr(self, path, name, default=None): try: return self.wrapped_fs.getxattr(self._encode(path),name,default) except AttributeError: raise UnsupportedError("getxattr") @rewrite_errors def setxattr(self, path, name, value): try: return self.wrapped_fs.setxattr(self._encode(path),name,value) except AttributeError: raise UnsupportedError("setxattr") @rewrite_errors def delxattr(self, path, name): try: return self.wrapped_fs.delxattr(self._encode(path),name) except AttributeError: raise UnsupportedError("delxattr") @rewrite_errors def listxattrs(self, path): try: return self.wrapped_fs.listxattrs(self._encode(path)) except AttributeError: raise UnsupportedError("listxattrs") def __getattr__(self, attr): # These attributes can be used by the destructor, but may not be # defined if there are errors in the constructor. if attr == "closed": return False if attr == "wrapped_fs": return None return getattr(self.wrapped_fs,attr) @rewrite_errors def close(self): if not self.closed: self.wrapped_fs.close() super(WrapFS,self).close() def wrap_fs_methods(decorator, cls=None, exclude=[]): """Apply the given decorator to all FS methods on the given class. This function can be used in two ways. When called with two arguments it applies the given function 'decorator' to each FS method of the given class. When called with just a single argument, it creates and returns a class decorator which will do the same thing when applied. So you can use it like this:: wrap_fs_methods(mydecorator,MyFSClass) Or on more recent Python versions, like this:: @wrap_fs_methods(mydecorator) class MyFSClass(FS): ... """ methods = ("open","exists","isdir","isfile","listdir","makedir","remove", "setcontents","removedir","rename","getinfo","copy","move", "copydir","movedir","close","getxattr","setxattr","delxattr", "listxattrs","getsyspath","createfile") def apply_decorator(cls): for method_name in methods: if method_name in exclude: continue method = getattr(cls,method_name,None) if method is not None: setattr(cls,method_name,decorator(method)) return cls if cls is not None: return apply_decorator(cls) else: return apply_decorator fs-0.3.0/fs/wrapfs/lazyfs.py0000644000175000017500000000473311406167217014432 0ustar willwill""" fs.wrapfs.lazyfs ================ A class for lazy initialisation of an FS object. This module provides the class LazyFS, an FS wrapper class that can lazily initialise its underlying FS object. """ try: from threading import Lock except ImportError: from fs.base import DummyLock as Lock from fs.base import FS from fs.wrapfs import WrapFS class LazyFS(WrapFS): """Simple 'lazy initialization' for FS objects. This FS wrapper can be created with an FS instance, an FS class, or a (class,args,kwds) tuple. The actual FS instance will be created on demand the first time it is accessed. """ def __init__(self, fs): super(LazyFS,self).__init__(fs) self._lazy_creation_lock = Lock() def __getstate__(self): state = super(LazyFS,self).__getstate__() del state["_lazy_creation_lock"] return state def __setstate__(self, state): self.__dict__.update(state) self._lazy_creation_lock = Lock() def _get_wrapped_fs(self): """Obtain the wrapped FS instance, creating it if necessary.""" try: return self.__dict__["wrapped_fs"] except KeyError: self._lazy_creation_lock.acquire() try: try: return self.__dict__["wrapped_fs"] except KeyError: fs = self._fsclass(*self._fsargs,**self._fskwds) self.__dict__["wrapped_fs"] = fs return fs finally: self._lazy_creation_lock.release() def _set_wrapped_fs(self, fs): if isinstance(fs,FS): self.__dict__["wrapped_fs"] = fs elif isinstance(fs,type): self._fsclass = fs self._fsargs = [] self._fskwds = {} else: self._fsclass = fs[0] try: self._fsargs = fs[1] except IndexError: self._fsargs = [] try: self._fskwds = fs[2] except IndexError: self._fskwds = {} wrapped_fs = property(_get_wrapped_fs,_set_wrapped_fs) def setcontents(self, path, data): return self.wrapped_fs.setcontents(path,data) def close(self): if not self.closed: # If it was never initialized, create a fake one to close. if "wrapped_fs" not in self.__dict__: self.__dict__["wrapped_fs"] = FS() super(LazyFS,self).close() fs-0.3.0/fs/wrapfs/readonlyfs.py0000644000175000017500000000270711406167406015267 0ustar willwill""" fs.wrapfs.readonlyfs ==================== An FS wrapper class for blocking operations that would modify the FS. """ from fs.wrapfs import WrapFS class ReadOnlyFS(WrapFS): """ Makes a FS object read only. Any operation that could potentially modify the underlying file system will throw an UnsupportedError Note that this isn't a secure sandbox, untrusted code could work around the read-only restrictions by getting the base class. Its main purpose is to provide a degree of safety if you want to protect an FS object from accidental modification. """ def getsyspath(self, path, allow_none=False): """ Doesn't technically modify the filesystem but could be used to work around read-only restrictions. """ if allow_none: return None raise NoSysPathError(path) def open(self, path, mode='r', **kwargs): """ Only permit read access """ if 'w' in mode or 'a' in mode: raise UnsupportedError('write') return super(ReadOnlyFS, self).open(path, mode, **kwargs) def _no_can_do(self, *args, **kwargs): """ Replacement method for methods that can modify the file system """ raise UnsupportedError('write') move = _no_can_do movedir = _no_can_do copy = _no_can_do copydir = _no_can_do makedir = _no_can_do rename = _no_can_do setxattr = _no_can_do delattr = _no_can_do fs-0.3.0/fs/wrapfs/limitsizefs.py0000644000175000017500000001151511406167342015457 0ustar willwill""" fs.wrapfs.limitsizefs ===================== An FS wrapper class for limiting the size of the underlying FS. This module provides the class LimitSizeFS, an FS wrapper that can limit the total size of files stored in the wrapped FS. """ # for Python2.5 compatibility from __future__ import with_statement from fs.errors import * from fs.base import FS, threading, synchronize from fs.wrapfs import WrapFS class LimitSizeFS(WrapFS): """FS wrapper class to limit total size of files stored.""" def __init__(self, fs, max_size): super(LimitSizeFS,self).__init__(fs) self.max_size = max_size self.cur_size = sum(self.getsize(f) for f in self.walkfiles()) self._size_lock = threading.Lock() self._file_sizes = {} def _decr_size(self, decr): with self._size_lock: self.cur_size -= decr def __getstate__(self): state = super(LimitSizeFS,self).__getstate__() del state["_size_lock"] del state["_file_sizes"] return state def __setstate__(self, state): super(LimitSizeFS,self).__setstate__(state) self._size_lock = threading.Lock() def getsyspath(self, path, allow_none=False): if not allow_none: raise NoSysPathError(path) return None def open(self, path, mode="r"): path = relpath(normpath(path)) with self._size_lock: try: size = self.getsize(path) except ResourceNotFoundError: size = 0 f = super(LimitSizeFS,self).open(path,mode) if path not in self._file_sizes: self._file_sizes[path] = size if "w" in mode: self.cur_size -= size size = 0 self._file_sizes[path] = 0 return LimitSizeFile(self,path,f,mode,size) def _ensure_file_size(self, path, size): path = relpath(normpath(path)) with self._size_lock: if path not in self._file_sizes: self._file_sizes[path] = self.getsize(path) cur_size = self._file_sizes[path] diff = size - cur_size if diff > 0: if self.cur_size + diff > self.max_size: raise StorageSpaceError("write") self.cur_size += diff self._file_sizes[path] = size def copy(self, src, dst, **kwds): FS.copy(self,src,dst,**kwds) def copydir(self, src, dst, **kwds): FS.copydir(self,src,dst,**kwds) def move(self, src, dst, **kwds): FS.move(self,src,dst,**kwds) path = relpath(normpath(src)) with self._size_lock: self._file_sizes.pop(path,None) def movedir(self, src, dst, **kwds): FS.movedir(self,src,dst,**kwds) def remove(self, path): size = self.getsize(path) super(LimitSizeFS,self).remove(path) self._decr_size(size) path = relpath(normpath(path)) with self._size_lock: self._file_sizes.pop(path,None) def removedir(self, path, recursive=False, force=False): size = sum(self.getsize(f) for f in self.walkfiles(path)) super(LimitSizeFS,self).removedir(path,recursive=recursive,force=force) self._decr_size(size) def rename(self, src, dst): try: size = self.getsize(dst) except ResourceNotFoundError: size = 0 super(LimitSizeFS,self).rename(src,dst) self._decr_size(size) path = relpath(normpath(src)) with self._size_lock: self._file_sizes.pop(path,None) class LimitSizeFile(object): """Filelike wrapper class for use by LimitSizeFS.""" def __init__(self, fs, path, file, mode, size): self._lock = fs._lock self.fs = fs self.path = path self.file = file self.mode = mode self.size = size self.closed = False @synchronize def write(self, data): pos = self.file.tell() self.size = self.fs._ensure_file_size(self.path,pos+len(data)) self.file.write(data) def writelines(self, lines): for line in lines: self.write(line) @synchronize def truncate(self, size=None): pos = self.file.tell() if size is None: size = pos self.fs._ensure_file_size(self.path,size) self.file.truncate(size) self.size = size # This is lifted straight from the stdlib's tempfile.py def __getattr__(self, name): file = self.__dict__['file'] a = getattr(file, name) if not issubclass(type(a), type(0)): setattr(self, name, a) return a def __enter__(self): self.file.__enter__() return self def __exit__(self, exc, value, tb): self.close() return False def __iter__(self): return iter(self.file) fs-0.3.0/fs/tests/0000755000175000017500000000000011407431454012377 5ustar willwillfs-0.3.0/fs/tests/test_s3fs.py0000644000175000017500000000231311301265156014662 0ustar willwill""" fs.tests.test_s3fs: testcases for the S3FS module These tests are set up to be skipped by default, since they're very slow, require a valid AWS account, and cost money. You'll have to set the '__test__' attribute the True on te TestS3FS class to get them running. """ import unittest from fs.tests import FSTestCases, ThreadingTestCases from fs.path import * from fs import s3fs class TestS3FS(unittest.TestCase,FSTestCases,ThreadingTestCases): # Disable the tests by default __test__ = False bucket = "test-s3fs.rfk.id.au" def setUp(self): self.fs = s3fs.S3FS(self.bucket) for k in self.fs._s3bukt.list(): self.fs._s3bukt.delete_key(k) def test_concurrent_copydir(self): # makedir() on S3FS is currently not atomic pass def test_makedir_winner(self): # makedir() on S3FS is currently not atomic pass def test_multiple_overwrite(self): # S3's eventual-consistency seems to be breaking this test pass class TestS3FS_prefix(TestS3FS): def setUp(self): self.fs = s3fs.S3FS(self.bucket,"/unittest/files") for k in self.fs._s3bukt.list(): self.fs._s3bukt.delete_key(k) fs-0.3.0/fs/tests/test_path.py0000644000175000017500000001246711375762633014770 0ustar willwill""" fs.tests.test_path: testcases for the fs path functions """ import unittest import fs.tests from fs.path import * class TestPathFunctions(unittest.TestCase): """Testcases for FS path functions.""" def test_normpath(self): tests = [ ("\\a\\b\\c", "/a/b/c"), ("", ""), ("/a/b/c", "/a/b/c"), ("a/b/c", "a/b/c"), ("a/b/../c/", "a/c"), ("/","/"), (u"a/\N{GREEK SMALL LETTER BETA}\\c",u"a/\N{GREEK SMALL LETTER BETA}/c"), ] for path, result in tests: self.assertEqual(normpath(path), result) def test_pathjoin(self): tests = [ ("", "a", "a"), ("a", "a", "a/a"), ("a/b", "../c", "a/c"), ("a/b/../c", "d", "a/c/d"), ("/a/b/c", "d", "/a/b/c/d"), ("/a/b/c", "../../../d", "/d"), ("a", "b", "c", "a/b/c"), ("a/b/c", "../d", "c", "a/b/d/c"), ("a/b/c", "../d", "/a", "/a"), ("aaa", "bbb/ccc", "aaa/bbb/ccc"), ("aaa", "bbb\ccc", "aaa/bbb/ccc"), ("aaa", "bbb", "ccc", "/aaa", "eee", "/aaa/eee"), ("a/b", "./d", "e", "a/b/d/e"), ("/", "/", "/"), ("/", "", "/"), (u"a/\N{GREEK SMALL LETTER BETA}","c",u"a/\N{GREEK SMALL LETTER BETA}/c"), ] for testpaths in tests: paths = testpaths[:-1] result = testpaths[-1] self.assertEqual(fs.pathjoin(*paths), result) self.assertRaises(ValueError, fs.pathjoin, "../") self.assertRaises(ValueError, fs.pathjoin, "./../") self.assertRaises(ValueError, fs.pathjoin, "a/b", "../../..") self.assertRaises(ValueError, fs.pathjoin, "a/b/../../../d") def test_relpath(self): tests = [ ("/a/b", "a/b"), ("a/b", "a/b"), ("/", "") ] for path, result in tests: self.assertEqual(fs.relpath(path), result) def test_abspath(self): tests = [ ("/a/b", "/a/b"), ("a/b", "/a/b"), ("/", "/") ] for path, result in tests: self.assertEqual(fs.abspath(path), result) def test_iteratepath(self): tests = [ ("a/b", ["a", "b"]), ("", [] ), ("aaa/bbb/ccc", ["aaa", "bbb", "ccc"]), ("a/b/c/../d", ["a", "b", "d"]) ] for path, results in tests: for path_component, expected in zip(iteratepath(path), results): self.assertEqual(path_component, expected) self.assertEqual(list(iteratepath("a/b/c/d", 1)), ["a", "b/c/d"]) self.assertEqual(list(iteratepath("a/b/c/d", 2)), ["a", "b", "c/d"]) def test_pathsplit(self): tests = [ ("a/b", ("a", "b")), ("a/b/c", ("a/b", "c")), ("a", ("", "a")), ("", ("", "")), ("/", ("", "")), ("foo/bar", ("foo", "bar")), ("foo/bar/baz", ("foo/bar", "baz")), ] for path, result in tests: self.assertEqual(fs.pathsplit(path), result) def test_recursepath(self): self.assertEquals(recursepath("/"),["/"]) self.assertEquals(recursepath("hello"),["/","/hello"]) self.assertEquals(recursepath("/hello/world/"),["/","/hello","/hello/world"]) self.assertEquals(recursepath("/hello/world/",reverse=True),["/hello/world","/hello","/"]) self.assertEquals(recursepath("hello",reverse=True),["/hello","/"]) self.assertEquals(recursepath("",reverse=True),["/"]) class Test_PathMap(unittest.TestCase): def test_basics(self): map = PathMap() map["hello"] = "world" self.assertEquals(map["/hello"],"world") self.assertEquals(map["/hello/"],"world") self.assertEquals(map.get("hello"),"world") def test_iteration(self): map = PathMap() map["hello/world"] = 1 map["hello/world/howareya"] = 2 map["hello/world/iamfine"] = 3 map["hello/kitty"] = 4 map["hello/kitty/islame"] = 5 map["batman/isawesome"] = 6 self.assertEquals(set(map.iterkeys()),set(("/hello/world","/hello/world/howareya","/hello/world/iamfine","/hello/kitty","/hello/kitty/islame","/batman/isawesome"))) self.assertEquals(sorted(map.values()),range(1,7)) self.assertEquals(sorted(map.items("/hello/world/")),[("/hello/world",1),("/hello/world/howareya",2),("/hello/world/iamfine",3)]) self.assertEquals(zip(map.keys(),map.values()),map.items()) self.assertEquals(zip(map.keys("batman"),map.values("batman")),map.items("batman")) self.assertEquals(set(map.iternames("hello")),set(("world","kitty"))) self.assertEquals(set(map.iternames("/hello/kitty")),set(("islame",))) del map["hello/kitty/islame"] self.assertEquals(set(map.iternames("/hello/kitty")),set()) self.assertEquals(set(map.iterkeys()),set(("/hello/world","/hello/world/howareya","/hello/world/iamfine","/hello/kitty","/batman/isawesome"))) self.assertEquals(set(map.values()),set(range(1,7)) - set((5,))) fs-0.3.0/fs/tests/test_objecttree.py0000644000175000017500000000233211223657261016140 0ustar willwill""" fs.tests.test_objectree: testcases for the fs objecttree module """ import unittest import fs.tests from fs import objecttree class TestObjectTree(unittest.TestCase): """Testcases for the ObjectTree class.""" def test_getset(self): ot = objecttree.ObjectTree() ot['foo'] = "bar" self.assertEqual(ot['foo'], 'bar') ot = objecttree.ObjectTree() ot['foo/bar'] = "baz" self.assertEqual(ot['foo'], {'bar':'baz'}) self.assertEqual(ot['foo/bar'], 'baz') del ot['foo/bar'] self.assertEqual(ot['foo'], {}) ot = objecttree.ObjectTree() ot['a/b/c'] = "A" ot['a/b/d'] = "B" ot['a/b/e'] = "C" ot['a/b/f'] = "D" self.assertEqual(sorted(ot['a/b'].values()), ['A', 'B', 'C', 'D']) self.assert_(ot.get('a/b/x', -1) == -1) self.assert_('a/b/c' in ot) self.assert_('a/b/x' not in ot) self.assert_(ot.isobject('a/b/c')) self.assert_(ot.isobject('a/b/d')) self.assert_(not ot.isobject('a/b')) left, object, right = ot.partialget('a/b/e/f/g') self.assertEqual(left, "a/b/e") self.assertEqual(object, "C") self.assertEqual(right, "f/g") fs-0.3.0/fs/tests/test_fs.py0000644000175000017500000000404311375762633014433 0ustar willwill""" fs.tests.test_fs: testcases for basic FS implementations """ from fs.tests import FSTestCases, ThreadingTestCases import unittest import os import sys import shutil import tempfile from fs.path import * from fs import osfs class TestOSFS(unittest.TestCase,FSTestCases,ThreadingTestCases): def setUp(self): self.temp_dir = tempfile.mkdtemp(u"fstest") self.fs = osfs.OSFS(self.temp_dir) def tearDown(self): shutil.rmtree(self.temp_dir) def check(self, p): return os.path.exists(os.path.join(self.temp_dir, relpath(p))) class TestSubFS(unittest.TestCase,FSTestCases,ThreadingTestCases): def setUp(self): self.temp_dir = tempfile.mkdtemp(u"fstest") self.parent_fs = osfs.OSFS(self.temp_dir) self.parent_fs.makedir("foo/bar", recursive=True) self.fs = self.parent_fs.opendir("foo/bar") def tearDown(self): shutil.rmtree(self.temp_dir) def check(self, p): p = os.path.join("foo/bar", relpath(p)) full_p = os.path.join(self.temp_dir, p) return os.path.exists(full_p) from fs import memoryfs class TestMemoryFS(unittest.TestCase,FSTestCases,ThreadingTestCases): def setUp(self): self.fs = memoryfs.MemoryFS() from fs import mountfs class TestMountFS(unittest.TestCase,FSTestCases,ThreadingTestCases): def setUp(self): self.mount_fs = mountfs.MountFS() self.mem_fs = memoryfs.MemoryFS() self.mount_fs.mountdir("mounted/memfs", self.mem_fs) self.fs = self.mount_fs.opendir("mounted/memfs") def check(self, p): return self.mount_fs.exists(os.path.join("mounted/memfs", relpath(p))) from fs import tempfs class TestTempFS(unittest.TestCase,FSTestCases,ThreadingTestCases): def setUp(self): self.fs = tempfs.TempFS() def tearDown(self): td = self.fs._temp_dir self.fs.close() self.assert_(not os.path.exists(td)) def check(self, p): td = self.fs._temp_dir return os.path.exists(os.path.join(td, relpath(p))) fs-0.3.0/fs/tests/__init__.py0000644000175000017500000007423011407377505014524 0ustar willwill#!/usr/bin/env python """ fs.tests: testcases for the fs module """ # Send any output from the logging module to stdout, so it will # be captured by nose and reported appropriately import sys import logging logging.basicConfig(level=logging.ERROR, stream=sys.stdout) from fs.base import * import datetime import unittest import os, os.path import pickle import random import copy import time try: import threading except ImportError: import dummy_threading as threading class FSTestCases(object): """Base suite of testcases for filesystem implementations. Any FS subclass should be capable of passing all of these tests. To apply the tests to your own FS implementation, simply use FSTestCase as a mixin for your own unittest.TestCase subclass and have the setUp method set self.fs to an instance of your FS implementation. This class is designed as a mixin so that it's not detected by test loading tools such as nose. """ def check(self, p): """Check that a file exists within self.fs""" return self.fs.exists(p) def test_root_dir(self): self.assertTrue(self.fs.isdir("")) self.assertTrue(self.fs.isdir("/")) # These may be false (e.g. empty dict) but mustn't raise errors self.fs.getinfo("") self.assertTrue(self.fs.getinfo("/") is not None) def test_getsyspath(self): try: syspath = self.fs.getsyspath("/") except NoSysPathError: pass else: self.assertTrue(isinstance(syspath,unicode)) syspath = self.fs.getsyspath("/",allow_none=True) if syspath is not None: self.assertTrue(isinstance(syspath,unicode)) def test_debug(self): str(self.fs) repr(self.fs) self.assert_(hasattr(self.fs, 'desc')) def test_writefile(self): self.assertRaises(ResourceNotFoundError,self.fs.open,"test1.txt") f = self.fs.open("test1.txt","w") f.write("testing") f.close() self.assertTrue(self.check("test1.txt")) f = self.fs.open("test1.txt","r") self.assertEquals(f.read(),"testing") f.close() f = self.fs.open("test1.txt","w") f.write("test file overwrite") f.close() self.assertTrue(self.check("test1.txt")) f = self.fs.open("test1.txt","r") self.assertEquals(f.read(),"test file overwrite") f.close() def test_isdir_isfile(self): self.assertFalse(self.fs.exists("dir1")) self.assertFalse(self.fs.isdir("dir1")) self.assertFalse(self.fs.isfile("a.txt")) self.fs.createfile("a.txt") self.assertFalse(self.fs.isdir("dir1")) self.assertTrue(self.fs.exists("a.txt")) self.assertTrue(self.fs.isfile("a.txt")) self.assertFalse(self.fs.exists("a.txt/thatsnotadir")) self.fs.makedir("dir1") self.assertTrue(self.fs.isdir("dir1")) self.assertTrue(self.fs.exists("dir1")) self.assertTrue(self.fs.exists("a.txt")) self.fs.remove("a.txt") self.assertFalse(self.fs.exists("a.txt")) def test_listdir(self): def check_unicode(items): for item in items: self.assertTrue(isinstance(item,unicode)) self.fs.createfile(u"a") self.fs.createfile("b") self.fs.createfile("foo") self.fs.createfile("bar") # Test listing of the root directory d1 = self.fs.listdir() self.assertEqual(len(d1), 4) self.assertEqual(sorted(d1), [u"a", u"b", u"bar", u"foo"]) check_unicode(d1) d1 = self.fs.listdir("") self.assertEqual(len(d1), 4) self.assertEqual(sorted(d1), [u"a", u"b", u"bar", u"foo"]) check_unicode(d1) d1 = self.fs.listdir("/") self.assertEqual(len(d1), 4) check_unicode(d1) # Test listing absolute paths d2 = self.fs.listdir(absolute=True) self.assertEqual(len(d2), 4) self.assertEqual(sorted(d2), [u"/a", u"/b", u"/bar", u"/foo"]) check_unicode(d2) # Create some deeper subdirectories, to make sure their # contents are not inadvertantly included self.fs.makedir("p/1/2/3",recursive=True) self.fs.createfile("p/1/2/3/a") self.fs.createfile("p/1/2/3/b") self.fs.createfile("p/1/2/3/foo") self.fs.createfile("p/1/2/3/bar") self.fs.makedir("q") # Test listing just files, just dirs, and wildcards dirs_only = self.fs.listdir(dirs_only=True) files_only = self.fs.listdir(files_only=True) contains_a = self.fs.listdir(wildcard="*a*") self.assertEqual(sorted(dirs_only), [u"p", u"q"]) self.assertEqual(sorted(files_only), [u"a", u"b", u"bar", u"foo"]) self.assertEqual(sorted(contains_a), [u"a",u"bar"]) check_unicode(dirs_only) check_unicode(files_only) check_unicode(contains_a) # Test listing a subdirectory d3 = self.fs.listdir("p/1/2/3") self.assertEqual(len(d3), 4) self.assertEqual(sorted(d3), [u"a", u"b", u"bar", u"foo"]) check_unicode(d3) # Test listing a subdirectory with absoliute and full paths d4 = self.fs.listdir("p/1/2/3", absolute=True) self.assertEqual(len(d4), 4) self.assertEqual(sorted(d4), [u"/p/1/2/3/a", u"/p/1/2/3/b", u"/p/1/2/3/bar", u"/p/1/2/3/foo"]) check_unicode(d4) d4 = self.fs.listdir("p/1/2/3", full=True) self.assertEqual(len(d4), 4) self.assertEqual(sorted(d4), [u"p/1/2/3/a", u"p/1/2/3/b", u"p/1/2/3/bar", u"p/1/2/3/foo"]) check_unicode(d4) # Test that appropriate errors are raised self.assertRaises(ResourceNotFoundError,self.fs.listdir,"zebra") self.assertRaises(ResourceInvalidError,self.fs.listdir,"foo") def test_unicode(self): alpha = u"\N{GREEK SMALL LETTER ALPHA}" beta = u"\N{GREEK SMALL LETTER BETA}" self.fs.makedir(alpha) self.fs.createfile(alpha+"/a") self.fs.createfile(alpha+"/"+beta) self.assertTrue(self.check(alpha)) self.assertEquals(sorted(self.fs.listdir(alpha)),["a",beta]) def test_makedir(self): check = self.check self.fs.makedir("a") self.assertTrue(check("a")) self.assertRaises(ParentDirectoryMissingError,self.fs.makedir,"a/b/c") self.fs.makedir("a/b/c", recursive=True) self.assert_(check("a/b/c")) self.fs.makedir("foo/bar/baz", recursive=True) self.assert_(check("foo/bar/baz")) self.fs.makedir("a/b/child") self.assert_(check("a/b/child")) self.assertRaises(DestinationExistsError,self.fs.makedir,"/a/b") self.fs.makedir("/a/b",allow_recreate=True) self.fs.createfile("/a/file") self.assertRaises(ResourceInvalidError,self.fs.makedir,"a/file") def test_remove(self): self.fs.createfile("a.txt") self.assertTrue(self.check("a.txt")) self.fs.remove("a.txt") self.assertFalse(self.check("a.txt")) self.assertRaises(ResourceNotFoundError,self.fs.remove,"a.txt") self.fs.makedir("dir1") self.assertRaises(ResourceInvalidError,self.fs.remove,"dir1") self.fs.createfile("/dir1/a.txt") self.assertTrue(self.check("dir1/a.txt")) self.fs.remove("dir1/a.txt") self.assertFalse(self.check("/dir1/a.txt")) def test_removedir(self): check = self.check self.fs.makedir("a") self.assert_(check("a")) self.fs.removedir("a") self.assertRaises(ResourceNotFoundError, self.fs.removedir, "a") self.assert_(not check("a")) self.fs.makedir("a/b/c/d", recursive=True) self.assertRaises(DirectoryNotEmptyError, self.fs.removedir, "a/b") self.fs.removedir("a/b/c/d") self.assert_(not check("a/b/c/d")) self.fs.removedir("a/b/c") self.assert_(not check("a/b/c")) self.fs.removedir("a/b") self.assert_(not check("a/b")) # Test recursive removal of empty parent dirs self.fs.makedir("foo/bar/baz", recursive=True) self.fs.removedir("foo/bar/baz", recursive=True) self.assert_(not check("foo/bar/baz")) self.assert_(not check("foo/bar")) self.assert_(not check("foo")) # Ensure that force=True works as expected self.fs.makedir("frollic/waggle", recursive=True) self.fs.createfile("frollic/waddle.txt","waddlewaddlewaddle") self.assertRaises(DirectoryNotEmptyError,self.fs.removedir,"frollic") self.assertRaises(ResourceInvalidError,self.fs.removedir,"frollic/waddle.txt") self.fs.removedir("frollic",force=True) self.assert_(not check("frollic")) # Test removing unicode dirs kappa = u"\N{GREEK CAPITAL LETTER KAPPA}" self.fs.makedir(kappa) self.assert_(self.fs.isdir(kappa)) self.fs.removedir(kappa) self.assertRaises(ResourceNotFoundError, self.fs.removedir, kappa) self.assert_(not self.fs.isdir(kappa)) self.fs.makedir(pathjoin("test",kappa),recursive=True) self.assert_(check(pathjoin("test",kappa))) self.fs.removedir("test",force=True) self.assert_(not check("test")) def test_rename(self): check = self.check # test renaming a file in the same directory self.fs.createfile("foo.txt","Hello, World!") self.assert_(check("foo.txt")) self.fs.rename("foo.txt", "bar.txt") self.assert_(check("bar.txt")) self.assert_(not check("foo.txt")) # test renaming a directory in the same directory self.fs.makedir("dir_a") self.fs.createfile("dir_a/test.txt","testerific") self.assert_(check("dir_a")) self.fs.rename("dir_a","dir_b") self.assert_(check("dir_b")) self.assert_(check("dir_b/test.txt")) self.assert_(not check("dir_a/test.txt")) self.assert_(not check("dir_a")) # test renaming a file into a different directory self.fs.makedir("dir_a") self.fs.rename("dir_b/test.txt","dir_a/test.txt") self.assert_(not check("dir_b/test.txt")) self.assert_(check("dir_a/test.txt")) # test renaming a file into a non-existent directory self.assertRaises(ParentDirectoryMissingError,self.fs.rename,"dir_a/test.txt","nonexistent/test.txt") def test_info(self): test_str = "Hello, World!" self.fs.createfile("info.txt",test_str) info = self.fs.getinfo("info.txt") self.assertEqual(info['size'], len(test_str)) self.fs.desc("info.txt") self.assertRaises(ResourceNotFoundError,self.fs.getinfo,"notafile") self.assertRaises(ResourceNotFoundError,self.fs.getinfo,"info.txt/inval") def test_getsize(self): test_str = "*"*23 self.fs.createfile("info.txt",test_str) size = self.fs.getsize("info.txt") self.assertEqual(size, len(test_str)) def test_movefile(self): check = self.check contents = "If the implementation is hard to explain, it's a bad idea." def makefile(path): self.fs.createfile(path,contents) def checkcontents(path): check_contents = self.fs.getcontents(path) self.assertEqual(check_contents,contents) return contents == check_contents self.fs.makedir("foo/bar", recursive=True) makefile("foo/bar/a.txt") self.assert_(check("foo/bar/a.txt")) self.assert_(checkcontents("foo/bar/a.txt")) self.fs.move("foo/bar/a.txt", "foo/b.txt") self.assert_(not check("foo/bar/a.txt")) self.assert_(check("foo/b.txt")) self.assert_(checkcontents("foo/b.txt")) self.fs.move("foo/b.txt", "c.txt") self.assert_(not check("foo/b.txt")) self.assert_(check("/c.txt")) self.assert_(checkcontents("/c.txt")) makefile("foo/bar/a.txt") self.assertRaises(DestinationExistsError,self.fs.move,"foo/bar/a.txt","/c.txt") self.assert_(check("foo/bar/a.txt")) self.assert_(check("/c.txt")) self.fs.move("foo/bar/a.txt","/c.txt",overwrite=True) self.assert_(not check("foo/bar/a.txt")) self.assert_(check("/c.txt")) def test_movedir(self): check = self.check contents = "If the implementation is hard to explain, it's a bad idea." def makefile(path): self.fs.createfile(path, contents) self.fs.makedir("a") self.fs.makedir("b") makefile("a/1.txt") makefile("a/2.txt") makefile("a/3.txt") self.fs.makedir("a/foo/bar", recursive=True) makefile("a/foo/bar/baz.txt") self.fs.movedir("a", "copy of a") self.assert_(self.fs.isdir("copy of a")) self.assert_(check("copy of a/1.txt")) self.assert_(check("copy of a/2.txt")) self.assert_(check("copy of a/3.txt")) self.assert_(check("copy of a/foo/bar/baz.txt")) self.assert_(not check("a/1.txt")) self.assert_(not check("a/2.txt")) self.assert_(not check("a/3.txt")) self.assert_(not check("a/foo/bar/baz.txt")) self.assert_(not check("a/foo/bar")) self.assert_(not check("a/foo")) self.assert_(not check("a")) self.fs.makedir("a") self.assertRaises(DestinationExistsError,self.fs.movedir,"copy of a","a") self.fs.movedir("copy of a","a",overwrite=True) self.assert_(not check("copy of a")) self.assert_(check("a/1.txt")) self.assert_(check("a/2.txt")) self.assert_(check("a/3.txt")) self.assert_(check("a/foo/bar/baz.txt")) def test_copyfile(self): check = self.check contents = "If the implementation is hard to explain, it's a bad idea." def makefile(path,contents=contents): self.fs.createfile(path,contents) def checkcontents(path,contents=contents): check_contents = self.fs.getcontents(path) self.assertEqual(check_contents,contents) return contents == check_contents self.fs.makedir("foo/bar", recursive=True) makefile("foo/bar/a.txt") self.assert_(check("foo/bar/a.txt")) self.assert_(checkcontents("foo/bar/a.txt")) self.fs.copy("foo/bar/a.txt", "foo/b.txt") self.assert_(check("foo/bar/a.txt")) self.assert_(check("foo/b.txt")) self.assert_(checkcontents("foo/b.txt")) self.fs.copy("foo/b.txt", "c.txt") self.assert_(check("foo/b.txt")) self.assert_(check("/c.txt")) self.assert_(checkcontents("/c.txt")) makefile("foo/bar/a.txt","different contents") self.assert_(checkcontents("foo/bar/a.txt","different contents")) self.assertRaises(DestinationExistsError,self.fs.copy,"foo/bar/a.txt","/c.txt") self.assert_(checkcontents("/c.txt")) self.fs.copy("foo/bar/a.txt","/c.txt",overwrite=True) self.assert_(checkcontents("foo/bar/a.txt","different contents")) self.assert_(checkcontents("/c.txt","different contents")) def test_copydir(self): check = self.check contents = "If the implementation is hard to explain, it's a bad idea." def makefile(path): self.fs.createfile(path,contents) def checkcontents(path): check_contents = self.fs.getcontents(path) self.assertEqual(check_contents,contents) return contents == check_contents self.fs.makedir("a") self.fs.makedir("b") makefile("a/1.txt") makefile("a/2.txt") makefile("a/3.txt") self.fs.makedir("a/foo/bar", recursive=True) makefile("a/foo/bar/baz.txt") self.fs.copydir("a", "copy of a") self.assert_(check("copy of a/1.txt")) self.assert_(check("copy of a/2.txt")) self.assert_(check("copy of a/3.txt")) self.assert_(check("copy of a/foo/bar/baz.txt")) checkcontents("copy of a/1.txt") self.assert_(check("a/1.txt")) self.assert_(check("a/2.txt")) self.assert_(check("a/3.txt")) self.assert_(check("a/foo/bar/baz.txt")) checkcontents("a/1.txt") self.assertRaises(DestinationExistsError,self.fs.copydir,"a","b") self.fs.copydir("a","b",overwrite=True) self.assert_(check("b/1.txt")) self.assert_(check("b/2.txt")) self.assert_(check("b/3.txt")) self.assert_(check("b/foo/bar/baz.txt")) checkcontents("b/1.txt") def test_copydir_with_dotfile(self): check = self.check contents = "If the implementation is hard to explain, it's a bad idea." def makefile(path): self.fs.createfile(path,contents) self.fs.makedir("a") makefile("a/1.txt") makefile("a/2.txt") makefile("a/.hidden.txt") self.fs.copydir("a", "copy of a") self.assert_(check("copy of a/1.txt")) self.assert_(check("copy of a/2.txt")) self.assert_(check("copy of a/.hidden.txt")) self.assert_(check("a/1.txt")) self.assert_(check("a/2.txt")) self.assert_(check("a/.hidden.txt")) def test_readwriteappendseek(self): def checkcontents(path, check_contents): read_contents = self.fs.getcontents(path) self.assertEqual(read_contents,check_contents) return read_contents == check_contents test_strings = ["Beautiful is better than ugly.", "Explicit is better than implicit.", "Simple is better than complex."] all_strings = "".join(test_strings) self.assertRaises(ResourceNotFoundError, self.fs.open, "a.txt", "r") self.assert_(not self.fs.exists("a.txt")) f1 = self.fs.open("a.txt", "wb") pos = 0 for s in test_strings: f1.write(s) pos += len(s) self.assertEqual(pos, f1.tell()) f1.close() self.assert_(self.fs.exists("a.txt")) self.assert_(checkcontents("a.txt", all_strings)) f2 = self.fs.open("b.txt", "wb") f2.write(test_strings[0]) f2.close() self.assert_(checkcontents("b.txt", test_strings[0])) f3 = self.fs.open("b.txt", "ab") # On win32, tell() gives zero until you actually write to the file #self.assertEquals(f3.tell(),len(test_strings[0])) f3.write(test_strings[1]) self.assertEquals(f3.tell(),len(test_strings[0])+len(test_strings[1])) f3.write(test_strings[2]) self.assertEquals(f3.tell(),len(all_strings)) f3.close() self.assert_(checkcontents("b.txt", all_strings)) f4 = self.fs.open("b.txt", "wb") f4.write(test_strings[2]) f4.close() self.assert_(checkcontents("b.txt", test_strings[2])) f5 = self.fs.open("c.txt", "wb") for s in test_strings: f5.write(s+"\n") f5.close() f6 = self.fs.open("c.txt", "rb") for s, t in zip(f6, test_strings): self.assertEqual(s, t+"\n") f6.close() f7 = self.fs.open("c.txt", "rb") f7.seek(13) word = f7.read(6) self.assertEqual(word, "better") f7.seek(1, os.SEEK_CUR) word = f7.read(4) self.assertEqual(word, "than") f7.seek(-9, os.SEEK_END) word = f7.read(7) self.assertEqual(word, "complex") f7.close() self.assertEqual(self.fs.getcontents("a.txt"), all_strings) def test_with_statement(self): # This is a little tricky since 'with' is actually new syntax. # We use eval() to make this method safe for old python versions. import sys if sys.version_info[0] >= 2 and sys.version_info[1] >= 5: # A successful 'with' statement contents = "testing the with statement" code = "from __future__ import with_statement\n" code += "with self.fs.open('f.txt','w-') as testfile:\n" code += " testfile.write(contents)\n" code += "self.assertEquals(self.fs.getcontents('f.txt'),contents)" code = compile(code,"",'exec') eval(code) # A 'with' statement raising an error contents = "testing the with statement" code = "from __future__ import with_statement\n" code += "with self.fs.open('f.txt','w-') as testfile:\n" code += " testfile.write(contents)\n" code += " raise ValueError\n" code = compile(code,"",'exec') self.assertRaises(ValueError,eval,code,globals(),locals()) self.assertEquals(self.fs.getcontents('f.txt'),contents) def test_pickling(self): self.fs.createfile("test1","hello world") fs2 = pickle.loads(pickle.dumps(self.fs)) self.assert_(fs2.isfile("test1")) fs3 = pickle.loads(pickle.dumps(self.fs,-1)) self.assert_(fs3.isfile("test1")) def test_big_file(self): chunk_size = 1024 * 256 num_chunks = 4 def chunk_stream(): """Generate predictable-but-randomy binary content.""" r = random.Random(0) for i in xrange(num_chunks): c = "".join(chr(r.randint(0,255)) for j in xrange(chunk_size/8)) yield c * 8 f = self.fs.open("bigfile","wb") try: for chunk in chunk_stream(): f.write(chunk) finally: f.close() chunks = chunk_stream() f = self.fs.open("bigfile","rb") try: try: while True: if chunks.next() != f.read(chunk_size): assert False, "bigfile was corrupted" except StopIteration: if f.read() != "": assert False, "bigfile was corrupted" finally: f.close() def test_settimes(self): def cmp_datetimes(d1, d2): """Test datetime objects are the same to within the timestamp accuracy""" dts1 = time.mktime(d1.timetuple()) dts2 = time.mktime(d2.timetuple()) return int(dts1) == int(dts2) d1 = datetime.datetime(2010, 6, 20, 11, 0, 9, 987699) d2 = datetime.datetime(2010, 7, 5, 11, 0, 9, 500000) self.fs.createfile('/dates.txt', 'check dates') # If the implementation supports settimes, check that the times # can be set and then retrieved try: self.fs.settimes('/dates.txt', d1, d2) except UnsupportedError: pass else: info = self.fs.getinfo('/dates.txt') self.assertTrue(cmp_datetimes(d1, info['accessed_time'])) self.assertTrue(cmp_datetimes(d2, info['modified_time'])) class ThreadingTestCases: """Testcases for thread-safety of FS implementations.""" # These are either too slow to be worth repeating, # or cannot possibly break cross-thread. _dont_retest = ("test_pickling","test_multiple_overwrite",) __lock = threading.RLock() def _yield(self): time.sleep(0.001) def _lock(self): self.__lock.acquire() def _unlock(self): self.__lock.release() def _makeThread(self,func,errors): def runThread(): try: func() except Exception: errors.append(sys.exc_info()) return threading.Thread(target=runThread) def _runThreads(self,*funcs): check_interval = sys.getcheckinterval() sys.setcheckinterval(1) try: errors = [] threads = [self._makeThread(f,errors) for f in funcs] for t in threads: t.start() for t in threads: t.join() for (c,e,t) in errors: raise c,e,t finally: sys.setcheckinterval(check_interval) def test_setcontents(self): def setcontents(name,contents): f = self.fs.open(name,"w") self._yield() try: f.write(contents) self._yield() finally: f.close() def thread1(): c = "thread1 was 'ere" setcontents("thread1.txt",c) self.assertEquals(self.fs.getcontents("thread1.txt"),c) def thread2(): c = "thread2 was 'ere" setcontents("thread2.txt",c) self.assertEquals(self.fs.getcontents("thread2.txt"),c) self._runThreads(thread1,thread2) def test_setcontents_samefile(self): def setcontents(name,contents): f = self.fs.open(name,"w") self._yield() try: f.write(contents) self._yield() finally: f.close() def thread1(): c = "thread1 was 'ere" setcontents("threads.txt",c) self._yield() self.assertEquals(self.fs.listdir("/"),["threads.txt"]) def thread2(): c = "thread2 was 'ere" setcontents("threads.txt",c) self._yield() self.assertEquals(self.fs.listdir("/"),["threads.txt"]) def thread3(): c = "thread3 was 'ere" setcontents("threads.txt",c) self._yield() self.assertEquals(self.fs.listdir("/"),["threads.txt"]) try: self._runThreads(thread1,thread2,thread3) except ResourceLockedError: # that's ok, some implementations don't support concurrent writes pass def test_cases_in_separate_dirs(self): class TestCases_in_subdir(self.__class__): """Run all testcases against a subdir of self.fs""" def __init__(this,subdir): this.subdir = subdir for meth in dir(this): if not meth.startswith("test_"): continue if meth in self._dont_retest: continue if not hasattr(FSTestCases,meth): continue if self.fs.exists(subdir): self.fs.removedir(subdir,force=True) self.assertFalse(self.fs.isdir(subdir)) self.assertTrue(self.fs.isdir("/")) self.fs.makedir(subdir) self._yield() getattr(this,meth)() @property def fs(this): return self.fs.opendir(this.subdir) def check(this,p): return self.check(pathjoin(this.subdir,relpath(p))) def thread1(): TestCases_in_subdir("thread1") def thread2(): TestCases_in_subdir("thread2") def thread3(): TestCases_in_subdir("thread3") self._runThreads(thread1,thread2,thread3) def test_makedir_winner(self): errors = [] def makedir(): try: self.fs.makedir("testdir") except DestinationExistsError, e: errors.append(e) def makedir_noerror(): try: self.fs.makedir("testdir",allow_recreate=True) except DestinationExistsError, e: errors.append(e) def removedir(): try: self.fs.removedir("testdir") except (ResourceNotFoundError,ResourceLockedError), e: errors.append(e) # One thread should succeed, one should error self._runThreads(makedir,makedir) self.assertEquals(len(errors),1) self.fs.removedir("testdir") # One thread should succeed, two should error errors = [] self._runThreads(makedir,makedir,makedir) if len(errors) != 2: raise AssertionError(errors) self.fs.removedir("testdir") # All threads should succeed errors = [] self._runThreads(makedir_noerror,makedir_noerror,makedir_noerror) self.assertEquals(len(errors),0) self.assertTrue(self.fs.isdir("testdir")) self.fs.removedir("testdir") # makedir() can beat removedir() and vice-versa errors = [] self._runThreads(makedir,removedir) if self.fs.isdir("testdir"): self.assertEquals(len(errors),1) self.assertFalse(isinstance(errors[0],DestinationExistsError)) self.fs.removedir("testdir") else: self.assertEquals(len(errors),0) def test_concurrent_copydir(self): self.fs.makedir("a") self.fs.makedir("a/b") self.fs.setcontents("a/hello.txt","hello world") self.fs.setcontents("a/guido.txt","is a space alien") self.fs.setcontents("a/b/parrot.txt","pining for the fiords") def copydir(): self._yield() self.fs.copydir("a","copy of a") def copydir_overwrite(): self._yield() self.fs.copydir("a","copy of a",overwrite=True) # This should error out since we're not overwriting self.assertRaises(DestinationExistsError,self._runThreads,copydir,copydir) # This should run to completion and give a valid state, unless # files get locked when written to. try: self._runThreads(copydir_overwrite,copydir_overwrite) except ResourceLockedError: pass self.assertTrue(self.fs.isdir("copy of a")) self.assertTrue(self.fs.isdir("copy of a/b")) self.assertEqual(self.fs.getcontents("copy of a/b/parrot.txt"),"pining for the fiords") self.assertEqual(self.fs.getcontents("copy of a/hello.txt"),"hello world") self.assertEqual(self.fs.getcontents("copy of a/guido.txt"),"is a space alien") def test_multiple_overwrite(self): contents = ["contents one","contents the second","number three"] def thread1(): for i in xrange(30): for c in contents: self.fs.setcontents("thread1.txt",c) self.assertEquals(self.fs.getsize("thread1.txt"),len(c)) self.assertEquals(self.fs.getcontents("thread1.txt"),c) def thread2(): for i in xrange(30): for c in contents: self.fs.setcontents("thread2.txt",c) self.assertEquals(self.fs.getsize("thread2.txt"),len(c)) self.assertEquals(self.fs.getcontents("thread2.txt"),c) self._runThreads(thread1,thread2) fs-0.3.0/fs/tests/test_expose.py0000644000175000017500000000745411301265156015322 0ustar willwill""" fs.tests.test_expose: testcases for fs.expose and associated FS classes """ import unittest import sys import os, os.path import socket import threading import time from fs.tests import FSTestCases, ThreadingTestCases from fs.tempfs import TempFS from fs.osfs import OSFS from fs.path import * from fs import rpcfs from fs.expose.xmlrpc import RPCFSServer class TestRPCFS(unittest.TestCase,FSTestCases,ThreadingTestCases): def makeServer(self,fs,addr): return RPCFSServer(fs,addr,logRequests=False) def startServer(self): port = 8000 self.temp_fs = TempFS() self.server = None while not self.server: try: self.server = self.makeServer(self.temp_fs,("localhost",port)) except socket.error, e: if e.args[1] == "Address already in use": port += 1 else: raise self.server_addr = ("localhost",port) self.serve_more_requests = True self.server_thread = threading.Thread(target=self.runServer) self.server_thread.start() def runServer(self): """Run the server, swallowing shutdown-related execptions.""" if sys.platform != "win32": try: self.server.socket.settimeout(0.1) except socket.error: pass try: while self.serve_more_requests: self.server.handle_request() except Exception, e: pass def setUp(self): self.startServer() self.fs = rpcfs.RPCFS("http://%s:%d" % self.server_addr) def tearDown(self): self.serve_more_requests = False try: self.bump() self.server.server_close() except Exception: pass self.server_thread.join() self.temp_fs.close() def bump(self): host, port = self.server_addr for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM): af, socktype, proto, cn, sa = res sock = None try: sock = socket.socket(af, socktype, proto) sock.settimeout(1) sock.connect(sa) sock.send("\n") except socket.error, e: pass finally: if sock is not None: sock.close() from fs import sftpfs from fs.expose.sftp import BaseSFTPServer class TestSFTPFS(TestRPCFS): def makeServer(self,fs,addr): return BaseSFTPServer(addr,fs) def setUp(self): self.startServer() self.fs = sftpfs.SFTPFS(self.server_addr) def bump(self): # paramiko doesn't like being bumped, just wait for it to timeout. # TODO: do this using a paramiko.Transport() connection pass try: from fs.expose import fuse except ImportError: pass else: from fs.osfs import OSFS class TestFUSE(unittest.TestCase,FSTestCases,ThreadingTestCases): def setUp(self): self.temp_fs = TempFS() self.temp_fs.makedir("root") self.temp_fs.makedir("mount") self.mounted_fs = self.temp_fs.opendir("root") self.mount_point = self.temp_fs.getsyspath("mount") self.fs = OSFS(self.temp_fs.getsyspath("mount")) self.mount_proc = fuse.mount(self.mounted_fs,self.mount_point) def tearDown(self): self.mount_proc.unmount() try: self.temp_fs.close() except OSError: # Sometimes FUSE hangs onto the mountpoint if mount_proc is # forcibly killed. Shell out to fusermount to make sure. fuse.unmount(self.mount_point) self.temp_fs.close() def check(self,p): return self.mounted_fs.exists(p) fs-0.3.0/fs/tests/test_remote.py0000644000175000017500000000720711375762632015322 0ustar willwill""" fs.tests.test_remote: testcases for FS remote support utilities """ from fs.tests import FSTestCases, ThreadingTestCases import unittest import threading import random import time from fs.remote import * from fs.wrapfs import WrapFS, wrap_fs_methods from fs.tempfs import TempFS from fs.path import * class TestCacheFS(unittest.TestCase,FSTestCases,ThreadingTestCases): """Test simple operation of CacheFS""" def setUp(self): self._check_interval = sys.getcheckinterval() sys.setcheckinterval(10) self.fs = CacheFS(TempFS()) def tearDown(self): self.fs.close() sys.setcheckinterval(self._check_interval) class TestConnectionManagerFS(unittest.TestCase,FSTestCases,ThreadingTestCases): """Test simple operation of ConnectionManagerFS""" def setUp(self): self._check_interval = sys.getcheckinterval() sys.setcheckinterval(10) self.fs = ConnectionManagerFS(TempFS()) def tearDown(self): self.fs.close() sys.setcheckinterval(self._check_interval) class DisconnectingFS(WrapFS): """FS subclass that raises lots of RemoteConnectionErrors.""" def __init__(self,fs=None): if fs is None: fs = TempFS() self._connected = True self._continue = True self._bounce_thread = threading.Thread(target=self._bounce) self._bounce_thread.start() super(DisconnectingFS,self).__init__(fs) if random.choice([True,False]): raise RemoteConnectionError("") def __getstate__(self): state = super(DisconnectingFS,self).__getstate__() del state["_bounce_thread"] return state def __setstate__(self,state): super(DisconnectingFS,self).__setstate__(state) self._bounce_thread = threading.Thread(target=self._bounce) self._bounce_thread.start() def _bounce(self): while self._continue: time.sleep(random.random()*0.1) self._connected = not self._connected def close(self): if not self.closed: self._continue = False self._bounce_thread.join() self._connected = True super(DisconnectingFS,self).close() def disconnecting_wrapper(func): """Method wrapper to raise RemoteConnectionError if not connected.""" @wraps(func) def wrapper(self,*args,**kwds): if not self._connected: raise RemoteConnectionError("") return func(self,*args,**kwds) return wrapper DisconnectingFS = wrap_fs_methods(disconnecting_wrapper,DisconnectingFS,exclude=["close"]) class DisconnectRecoveryFS(WrapFS): """FS subclass that recovers from RemoteConnectionErrors by waiting.""" pass def recovery_wrapper(func): """Method wrapper to recover from RemoteConnectionErrors by waiting.""" @wraps(func) def wrapper(self,*args,**kwds): while True: try: return func(self,*args,**kwds) except RemoteConnectionError: self.wrapped_fs.wait_for_connection() return wrapper # this also checks that wrap_fs_methods works as a class decorator DisconnectRecoveryFS = wrap_fs_methods(recovery_wrapper)(DisconnectRecoveryFS) class TestConnectionManagerFS_disconnect(TestConnectionManagerFS): """Test ConnectionManagerFS's ability to wait for reconnection.""" def setUp(self): self._check_interval = sys.getcheckinterval() sys.setcheckinterval(10) c_fs = ConnectionManagerFS(DisconnectingFS,poll_interval=0.1) self.fs = DisconnectRecoveryFS(c_fs) def tearDown(self): self.fs.close() sys.setcheckinterval(self._check_interval) fs-0.3.0/fs/tests/test_ftpfs.py0000644000175000017500000000345411375762633015152 0ustar willwill from fs.tests import FSTestCases, ThreadingTestCases import unittest import os import sys import shutil import tempfile import subprocess import time from os.path import abspath try: from pyftpdlib import ftpserver except ImportError: raise ImportError("Requires pyftpdlib ") from fs.path import * from fs import ftpfs ftp_port = 30000 class TestFTPFS(unittest.TestCase, FSTestCases, ThreadingTestCases): def setUp(self): global ftp_port #ftp_port += 1 use_port = str(ftp_port) #ftp_port = 10000 self.temp_dir = tempfile.mkdtemp(u"ftpfstests") self.ftp_server = subprocess.Popen([sys.executable, abspath(__file__), self.temp_dir, str(use_port)]) # Need to sleep to allow ftp server to start time.sleep(.2) self.fs = ftpfs.FTPFS('127.0.0.1', 'user', '12345', port=use_port, timeout=5.0) def tearDown(self): if sys.platform == 'win32': import win32api win32api.TerminateProcess(int(process._handle), -1) else: os.system('kill '+str(self.ftp_server.pid)) shutil.rmtree(self.temp_dir) def check(self, p): return os.path.exists(os.path.join(self.temp_dir, relpath(p))) if __name__ == "__main__": # Run an ftp server that exposes a given directory import sys authorizer = ftpserver.DummyAuthorizer() authorizer.add_user("user", "12345", sys.argv[1], perm="elradfmw") authorizer.add_anonymous(sys.argv[1]) def nolog(*args): pass ftpserver.log = nolog ftpserver.logline = nolog handler = ftpserver.FTPHandler handler.authorizer = authorizer address = ("127.0.0.1", int(sys.argv[2])) #print address ftpd = ftpserver.FTPServer(address, handler) ftpd.serve_forever() fs-0.3.0/fs/tests/test_xattr.py0000644000175000017500000001524411375762633015172 0ustar willwill""" fs.tests.test_xattr: testcases for extended attribute support """ import unittest import os from fs.path import * from fs.errors import * from fs.tests import FSTestCases class XAttrTestCases: """Testcases for filesystems providing extended attribute support. This class should be used as a mixin to the unittest.TestCase class for filesystems that provide extended attribute support. """ def test_getsetdel(self): def do_getsetdel(p): self.assertEqual(self.fs.getxattr(p,"xattr1"),None) self.fs.setxattr(p,"xattr1","value1") self.assertEqual(self.fs.getxattr(p,"xattr1"),"value1") self.fs.delxattr(p,"xattr1") self.assertEqual(self.fs.getxattr(p,"xattr1"),None) self.fs.createfile("test.txt","hello") do_getsetdel("test.txt") self.assertRaises(ResourceNotFoundError,self.fs.getxattr,"test2.txt","xattr1") self.fs.makedir("mystuff") self.fs.createfile("/mystuff/test.txt","") do_getsetdel("mystuff") do_getsetdel("mystuff/test.txt") def test_list_xattrs(self): def do_list(p): self.assertEquals(sorted(self.fs.listxattrs(p)),[]) self.fs.setxattr(p,"xattr1","value1") self.assertEquals(self.fs.getxattr(p,"xattr1"),"value1") self.assertEquals(sorted(self.fs.listxattrs(p)),["xattr1"]) self.assertTrue(isinstance(self.fs.listxattrs(p)[0],unicode)) self.fs.setxattr(p,"attr2","value2") self.assertEquals(sorted(self.fs.listxattrs(p)),["attr2","xattr1"]) self.assertTrue(isinstance(self.fs.listxattrs(p)[0],unicode)) self.assertTrue(isinstance(self.fs.listxattrs(p)[1],unicode)) self.fs.delxattr(p,"xattr1") self.assertEquals(sorted(self.fs.listxattrs(p)),["attr2"]) self.fs.delxattr(p,"attr2") self.assertEquals(sorted(self.fs.listxattrs(p)),[]) self.fs.createfile("test.txt","hello") do_list("test.txt") self.fs.makedir("mystuff") self.fs.createfile("/mystuff/test.txt","") do_list("mystuff") do_list("mystuff/test.txt") def test_copy_xattrs(self): self.fs.createfile("a.txt","content") self.fs.setxattr("a.txt","myattr","myvalue") self.fs.setxattr("a.txt","testattr","testvalue") self.fs.makedir("stuff") self.fs.copy("a.txt","stuff/a.txt") self.assertTrue(self.fs.exists("stuff/a.txt")) self.assertEquals(self.fs.getxattr("stuff/a.txt","myattr"),"myvalue") self.assertEquals(self.fs.getxattr("stuff/a.txt","testattr"),"testvalue") self.assertEquals(self.fs.getxattr("a.txt","myattr"),"myvalue") self.assertEquals(self.fs.getxattr("a.txt","testattr"),"testvalue") self.fs.setxattr("stuff","dirattr","a directory") self.fs.copydir("stuff","stuff2") self.assertEquals(self.fs.getxattr("stuff2/a.txt","myattr"),"myvalue") self.assertEquals(self.fs.getxattr("stuff2/a.txt","testattr"),"testvalue") self.assertEquals(self.fs.getxattr("stuff2","dirattr"),"a directory") self.assertEquals(self.fs.getxattr("stuff","dirattr"),"a directory") def test_move_xattrs(self): self.fs.createfile("a.txt","content") self.fs.setxattr("a.txt","myattr","myvalue") self.fs.setxattr("a.txt","testattr","testvalue") self.fs.makedir("stuff") self.fs.move("a.txt","stuff/a.txt") self.assertTrue(self.fs.exists("stuff/a.txt")) self.assertEquals(self.fs.getxattr("stuff/a.txt","myattr"),"myvalue") self.assertEquals(self.fs.getxattr("stuff/a.txt","testattr"),"testvalue") self.fs.setxattr("stuff","dirattr","a directory") self.fs.movedir("stuff","stuff2") self.assertEquals(self.fs.getxattr("stuff2/a.txt","myattr"),"myvalue") self.assertEquals(self.fs.getxattr("stuff2/a.txt","testattr"),"testvalue") self.assertEquals(self.fs.getxattr("stuff2","dirattr"),"a directory") def test_remove_file(self): def listxattrs(path): return list(self.fs.listxattrs(path)) # Check that xattrs aren't preserved after a file is removed self.fs.createfile("myfile") self.assertEquals(listxattrs("myfile"),[]) self.fs.setxattr("myfile","testattr","testvalue") self.assertEquals(listxattrs("myfile"),["testattr"]) self.fs.remove("myfile") self.assertRaises(ResourceNotFoundError,listxattrs,"myfile") self.fs.createfile("myfile") self.assertEquals(listxattrs("myfile"),[]) self.fs.setxattr("myfile","testattr2","testvalue2") self.assertEquals(listxattrs("myfile"),["testattr2"]) self.assertEquals(self.fs.getxattr("myfile","testattr2"),"testvalue2") # Check that removing a file without xattrs still works self.fs.createfile("myfile2") self.fs.remove("myfile2") def test_remove_dir(self): def listxattrs(path): return list(self.fs.listxattrs(path)) # Check that xattrs aren't preserved after a dir is removed self.fs.makedir("mydir") self.assertEquals(listxattrs("mydir"),[]) self.fs.setxattr("mydir","testattr","testvalue") self.assertEquals(listxattrs("mydir"),["testattr"]) self.fs.removedir("mydir") self.assertRaises(ResourceNotFoundError,listxattrs,"mydir") self.fs.makedir("mydir") self.assertEquals(listxattrs("mydir"),[]) self.fs.setxattr("mydir","testattr2","testvalue2") self.assertEquals(listxattrs("mydir"),["testattr2"]) self.assertEquals(self.fs.getxattr("mydir","testattr2"),"testvalue2") # Check that removing a dir without xattrs still works self.fs.makedir("mydir2") self.fs.removedir("mydir2") # Check that forcibly removing a dir with xattrs still works self.fs.makedir("mydir3") self.fs.createfile("mydir3/testfile") self.fs.removedir("mydir3",force=True) self.assertFalse(self.fs.exists("mydir3")) from fs.xattrs import ensure_xattrs from fs import tempfs class TestXAttr_TempFS(unittest.TestCase,FSTestCases,XAttrTestCases): def setUp(self): fs = tempfs.TempFS() self.fs = ensure_xattrs(fs) def tearDown(self): td = self.fs._temp_dir self.fs.close() self.assert_(not os.path.exists(td)) def check(self, p): td = self.fs._temp_dir return os.path.exists(os.path.join(td, relpath(p))) from fs import memoryfs class TestXAttr_MemoryFS(unittest.TestCase,FSTestCases,XAttrTestCases): def setUp(self): self.fs = ensure_xattrs(memoryfs.MemoryFS()) def check(self, p): return self.fs.exists(p) fs-0.3.0/fs/tests/test_errors.py0000644000175000017500000000112211301265156015315 0ustar willwill""" fs.tests.test_errors: testcases for the fs error classes functions """ import unittest import fs.tests from fs.errors import * import pickle from fs.path import * class TestErrorPickling(unittest.TestCase): def test_pickling(self): def assert_dump_load(e): e2 = pickle.loads(pickle.dumps(e)) self.assertEqual(e.__dict__,e2.__dict__) assert_dump_load(FSError()) assert_dump_load(PathError("/some/path")) assert_dump_load(ResourceNotFoundError("/some/other/path")) assert_dump_load(UnsupportedError("makepony")) fs-0.3.0/fs/tests/test_watch.py0000644000175000017500000001627011375762633015136 0ustar willwill""" fs.tests.test_watch: testcases for change watcher support """ import os import sys import time import unittest from fs.path import * from fs.errors import * from fs.watch import * from fs.tests import FSTestCases try: from fs.osfs import watch_inotify except ImportError: watch_inotify = None if sys.platform == "win32": try: from fs.osfs import watch_win32 except ImportError: watch_win32 = None else: watch_win32 = None class WatcherTestCases: """Testcases for filesystems providing change watcher support. This class should be used as a mixin to the unittest.TestCase class for filesystems that provide change watcher support. """ def setupWatchers(self): self._captured_events = [] self.watchfs.add_watcher(self._captured_events.append) def clearCapturedEvents(self): del self._captured_events[:] def waitForEvents(self): if isinstance(self.watchfs,PollingWatchableFS): self.watchfs._poll_cond.acquire() self.watchfs._poll_cond.wait() self.watchfs._poll_cond.wait() self.watchfs._poll_cond.release() else: time.sleep(2)#0.5) def assertEventOccurred(self,cls,path=None,**attrs): if not self.checkEventOccurred(cls,path,**attrs): args = (cls.__name__,path,attrs) assert False, "Event did not occur: %s(%s,%s)" % args def checkEventOccurred(self,cls,path=None,**attrs): self.waitForEvents() for event in self._captured_events: if isinstance(event,cls): if path is None or event.path == path: for (k,v) in attrs.iteritems(): if getattr(event,k) != v: break else: # all attrs match - found it! return True return False def test_watch_makedir(self): self.setupWatchers() self.fs.makedir("test1") self.assertEventOccurred(CREATED,"/test1") def test_watch_readfile(self): self.setupWatchers() self.fs.setcontents("hello","hello world") self.assertEventOccurred(CREATED,"/hello") self.clearCapturedEvents() old_atime = self.fs.getinfo("hello").get("accessed_time") self.assertEquals(self.fs.getcontents("hello"),"hello world") if not isinstance(self.watchfs,PollingWatchableFS): # Help it along by updting the atime. # TODO: why is this necessary? if self.fs.hassyspath("hello"): syspath = self.fs.getsyspath("hello") mtime = os.stat(syspath).st_mtime atime = int(time.time()) os.utime(self.fs.getsyspath("hello"),(atime,mtime)) self.assertEventOccurred(ACCESSED,"/hello") elif old_atime is not None: # Some filesystems don't update atime synchronously, or only # update it if it's too old, or don't update it at all! # Try to force the issue, wait for it to change, but eventually # give up and bail out. for i in xrange(10): if self.fs.getinfo("hello").get("accessed_time") != old_atime: if not self.checkEventOccurred(MODIFIED,"/hello"): self.assertEventOccurred(ACCESSED,"/hello") break time.sleep(0.2) if self.fs.hassyspath("hello"): syspath = self.fs.getsyspath("hello") mtime = os.stat(syspath).st_mtime atime = int(time.time()) os.utime(self.fs.getsyspath("hello"),(atime,mtime)) def test_watch_writefile(self): self.setupWatchers() self.fs.setcontents("hello","hello world") self.assertEventOccurred(CREATED,"/hello") self.clearCapturedEvents() self.fs.setcontents("hello","hello again world") self.assertEventOccurred(MODIFIED,"/hello") def test_watch_single_file(self): self.fs.setcontents("hello","hello world") events = [] self.watchfs.add_watcher(events.append,"/hello",(MODIFIED,)) self.fs.setcontents("hello","hello again world") self.fs.remove("hello") self.waitForEvents() for evt in events: assert isinstance(evt,MODIFIED) self.assertEquals(evt.path,"/hello") def test_watch_single_file_remove(self): self.fs.makedir("testing") self.fs.setcontents("testing/hello","hello world") events = [] self.watchfs.add_watcher(events.append,"/testing/hello",(REMOVED,)) self.fs.setcontents("testing/hello","hello again world") self.waitForEvents() self.fs.remove("testing/hello") self.waitForEvents() self.assertEquals(len(events),1) assert isinstance(events[0],REMOVED) self.assertEquals(events[0].path,"/testing/hello") def test_watch_iter_changes(self): changes = iter_changes(self.watchfs) self.fs.makedir("test1") self.fs.setcontents("test1/hello","hello world") self.waitForEvents() self.fs.removedir("test1",force=True) self.waitForEvents() self.watchfs.close() # Locate the CREATED(test1) event event = changes.next(timeout=1) while not isinstance(event,CREATED) or event.path != "/test1": event = changes.next(timeout=1) # Locate the CREATED(test1/hello) event event = changes.next(timeout=1) while not isinstance(event,CREATED) or event.path != "/test1/hello": event = changes.next(timeout=1) # Locate the REMOVED(test1) event event = changes.next(timeout=1) while not isinstance(event,REMOVED) or event.path != "/test1": event = changes.next(timeout=1) # Locate the CLOSED event event = changes.next(timeout=1) while not isinstance(event,CLOSED): event = changes.next(timeout=1) # That should be the last event in the list self.assertRaises(StopIteration,changes.next,timeout=1) changes.close() from fs import tempfs, osfs class TestWatchers_TempFS(unittest.TestCase,FSTestCases,WatcherTestCases): def setUp(self): self.fs = tempfs.TempFS() watchfs = osfs.OSFS(self.fs.root_path) self.watchfs = ensure_watchable(watchfs,poll_interval=0.1) if watch_inotify is not None: self.assertEquals(watchfs,self.watchfs) if watch_win32 is not None: self.assertEquals(watchfs,self.watchfs) def tearDown(self): self.watchfs.close() self.fs.close() def check(self, p): return self.fs.exists(p) from fs import memoryfs class TestWatchers_MemoryFS(unittest.TestCase,FSTestCases,WatcherTestCases): def setUp(self): self.fs = self.watchfs = WatchableFS(memoryfs.MemoryFS()) def tearDown(self): self.watchfs.close() self.fs.close() def check(self, p): return self.fs.exists(p) class TestWatchers_MemoryFS_polling(TestWatchers_MemoryFS): def setUp(self): self.fs = memoryfs.MemoryFS() self.watchfs = ensure_watchable(self.fs,poll_interval=0.1) fs-0.3.0/fs/tests/test_zipfs.py0000644000175000017500000001225211301467747015154 0ustar willwill""" fs.tests.test_zipfs: testcases for the ZipFS class """ import unittest import os import random import zipfile import tempfile import fs.tests from fs.path import * from fs import zipfs class TestReadZipFS(unittest.TestCase): def setUp(self): self.temp_filename = "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(6))+".zip" self.temp_filename = os.path.join(tempfile.gettempdir(), self.temp_filename) self.zf = zipfile.ZipFile(self.temp_filename, "w") zf = self.zf zf.writestr("a.txt", "Hello, World!") zf.writestr("b.txt", "b") zf.writestr("1.txt", "1") zf.writestr("foo/bar/baz.txt", "baz") zf.writestr("foo/second.txt", "hai") zf.close() self.fs = zipfs.ZipFS(self.temp_filename, "r") def tearDown(self): self.fs.close() os.remove(self.temp_filename) def check(self, p): try: self.zipfile.getinfo(p) return True except: return False def test_reads(self): def read_contents(path): f = self.fs.open(path) contents = f.read() return contents def check_contents(path, expected): self.assert_(read_contents(path)==expected) check_contents("a.txt", "Hello, World!") check_contents("1.txt", "1") check_contents("foo/bar/baz.txt", "baz") def test_getcontents(self): def read_contents(path): return self.fs.getcontents(path) def check_contents(path, expected): self.assert_(read_contents(path)==expected) check_contents("a.txt", "Hello, World!") check_contents("1.txt", "1") check_contents("foo/bar/baz.txt", "baz") def test_is(self): self.assert_(self.fs.isfile('a.txt')) self.assert_(self.fs.isfile('1.txt')) self.assert_(self.fs.isfile('foo/bar/baz.txt')) self.assert_(self.fs.isdir('foo')) self.assert_(self.fs.isdir('foo/bar')) self.assert_(self.fs.exists('a.txt')) self.assert_(self.fs.exists('1.txt')) self.assert_(self.fs.exists('foo/bar/baz.txt')) self.assert_(self.fs.exists('foo')) self.assert_(self.fs.exists('foo/bar')) def test_listdir(self): def check_listing(path, expected): dir_list = self.fs.listdir(path) self.assert_(sorted(dir_list) == sorted(expected)) for item in dir_list: self.assert_(isinstance(item,unicode)) check_listing('/', ['a.txt', '1.txt', 'foo', 'b.txt']) check_listing('foo', ['second.txt', 'bar']) check_listing('foo/bar', ['baz.txt']) class TestWriteZipFS(unittest.TestCase): def setUp(self): self.temp_filename = "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(6))+".zip" self.temp_filename = os.path.join(tempfile.gettempdir(), self.temp_filename) zip_fs = zipfs.ZipFS(self.temp_filename, 'w') def makefile(filename, contents): if dirname(filename): zip_fs.makedir(dirname(filename), recursive=True, allow_recreate=True) f = zip_fs.open(filename, 'w') f.write(contents) f.close() makefile("a.txt", "Hello, World!") makefile("b.txt", "b") makefile(u"\N{GREEK SMALL LETTER ALPHA}/\N{GREEK CAPITAL LETTER OMEGA}.txt", "this is the alpha and the omega") makefile("foo/bar/baz.txt", "baz") makefile("foo/second.txt", "hai") zip_fs.close() def tearDown(self): os.remove(self.temp_filename) def test_valid(self): zf = zipfile.ZipFile(self.temp_filename, "r") self.assert_(zf.testzip() is None) zf.close() def test_creation(self): zf = zipfile.ZipFile(self.temp_filename, "r") def check_contents(filename, contents): zcontents = zf.read(filename.encode("CP437")) self.assertEqual(contents, zcontents) check_contents("a.txt", "Hello, World!") check_contents("b.txt", "b") check_contents("foo/bar/baz.txt", "baz") check_contents("foo/second.txt", "hai") check_contents(u"\N{GREEK SMALL LETTER ALPHA}/\N{GREEK CAPITAL LETTER OMEGA}.txt", "this is the alpha and the omega") class TestAppendZipFS(TestWriteZipFS): def setUp(self): self.temp_filename = "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(6))+".zip" self.temp_filename = os.path.join(tempfile.gettempdir(), self.temp_filename) zip_fs = zipfs.ZipFS(self.temp_filename, 'w') def makefile(filename, contents): if dirname(filename): zip_fs.makedir(dirname(filename), recursive=True, allow_recreate=True) f = zip_fs.open(filename, 'w') f.write(contents) f.close() makefile("a.txt", "Hello, World!") makefile("b.txt", "b") zip_fs.close() zip_fs = zipfs.ZipFS(self.temp_filename, 'a') makefile("foo/bar/baz.txt", "baz") makefile(u"\N{GREEK SMALL LETTER ALPHA}/\N{GREEK CAPITAL LETTER OMEGA}.txt", "this is the alpha and the omega") makefile("foo/second.txt", "hai") zip_fs.close() fs-0.3.0/fs/tests/test_wrapfs.py0000644000175000017500000000370111375762633015325 0ustar willwill""" fs.tests.test_wrapfs: testcases for FS wrapper implementations """ import unittest from fs.tests import FSTestCases, ThreadingTestCases import os import sys import shutil import tempfile from fs import osfs from fs.errors import * from fs.path import * from fs import wrapfs class TestWrapFS(unittest.TestCase, FSTestCases, ThreadingTestCases): def setUp(self): self.temp_dir = tempfile.mkdtemp(u"fstest") self.fs = wrapfs.WrapFS(osfs.OSFS(self.temp_dir)) def tearDown(self): shutil.rmtree(self.temp_dir) def check(self, p): return os.path.exists(os.path.join(self.temp_dir, relpath(p))) from fs.wrapfs.lazyfs import LazyFS class TestLazyFS(unittest.TestCase, FSTestCases, ThreadingTestCases): def setUp(self): self.temp_dir = tempfile.mkdtemp(u"fstest") self.fs = LazyFS((osfs.OSFS,(self.temp_dir,))) def tearDown(self): shutil.rmtree(self.temp_dir) def check(self, p): return os.path.exists(os.path.join(self.temp_dir, relpath(p))) from fs.wrapfs.limitsizefs import LimitSizeFS class TestLimitSizeFS(TestWrapFS): _dont_retest = TestWrapFS._dont_retest + ("test_big_file",) def setUp(self): super(TestLimitSizeFS,self).setUp() self.fs = LimitSizeFS(self.fs,1024*1024*2) # 2MB limit def tearDown(self): self.fs.removedir("/",force=True) self.assertEquals(self.fs.cur_size,0) super(TestLimitSizeFS,self).tearDown() def test_storage_error(self): total_written = 0 for i in xrange(1024*2): try: total_written += 1030 self.fs.setcontents("file"+str(i),"C"*1030) except StorageSpaceError: self.assertTrue(total_written > 1024*1024*2) self.assertTrue(total_written < 1024*1024*2 + 1030) break else: self.assertTrue(False,"StorageSpaceError not raised") fs-0.3.0/fs/tests/ftpserver.py0000644000175000017500000000071711317506511014773 0ustar willwilltry: from pyftpdlib import ftpserver except ImportError: print "Requires pyftpdlib " raise import sys authorizer = ftpserver.DummyAuthorizer() authorizer.add_user("user", "12345", sys.argv[1], perm="elradfmw") authorizer.add_anonymous(sys.argv[1]) handler = ftpserver.FTPHandler handler.authorizer = authorizer address = ("127.0.0.1", 21) ftpd = ftpserver.FTPServer(address, handler) ftpd.serve_forever() fs-0.3.0/fs/expose/0000755000175000017500000000000011407431454012540 5ustar willwillfs-0.3.0/fs/expose/fuse/0000755000175000017500000000000011407431454013502 5ustar willwillfs-0.3.0/fs/expose/fuse/fuse_ctypes.py0000644000175000017500000005361011302546744016415 0ustar willwill# Copyright (c) 2008 Giorgos Verigakis # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. from __future__ import division from ctypes import * from ctypes.util import find_library from errno import * from functools import partial from platform import machine, system from stat import S_IFDIR from traceback import print_exc class c_timespec(Structure): _fields_ = [('tv_sec', c_long), ('tv_nsec', c_long)] class c_utimbuf(Structure): _fields_ = [('actime', c_timespec), ('modtime', c_timespec)] class c_stat(Structure): pass # Platform dependent _system = system() if _system in ('Darwin', 'FreeBSD'): _libiconv = CDLL(find_library("iconv"), RTLD_GLOBAL) # libfuse dependency ENOTSUP = 45 c_dev_t = c_int32 c_fsblkcnt_t = c_ulong c_fsfilcnt_t = c_ulong c_gid_t = c_uint32 c_mode_t = c_uint16 c_off_t = c_int64 c_pid_t = c_int32 c_uid_t = c_uint32 setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int, c_uint32) getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_uint32) c_stat._fields_ = [ ('st_dev', c_dev_t), ('st_ino', c_uint32), ('st_mode', c_mode_t), ('st_nlink', c_uint16), ('st_uid', c_uid_t), ('st_gid', c_gid_t), ('st_rdev', c_dev_t), ('st_atimespec', c_timespec), ('st_mtimespec', c_timespec), ('st_ctimespec', c_timespec), ('st_size', c_off_t), ('st_blocks', c_int64), ('st_blksize', c_int32)] elif _system == 'Linux': ENOTSUP = 95 c_dev_t = c_ulonglong c_fsblkcnt_t = c_ulonglong c_fsfilcnt_t = c_ulonglong c_gid_t = c_uint c_mode_t = c_uint c_off_t = c_longlong c_pid_t = c_int c_uid_t = c_uint setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int) getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t) _machine = machine() if _machine == 'x86_64': c_stat._fields_ = [ ('st_dev', c_dev_t), ('st_ino', c_ulong), ('st_nlink', c_ulong), ('st_mode', c_mode_t), ('st_uid', c_uid_t), ('st_gid', c_gid_t), ('__pad0', c_int), ('st_rdev', c_dev_t), ('st_size', c_off_t), ('st_blksize', c_long), ('st_blocks', c_long), ('st_atimespec', c_timespec), ('st_mtimespec', c_timespec), ('st_ctimespec', c_timespec)] elif _machine == 'ppc': c_stat._fields_ = [ ('st_dev', c_dev_t), ('st_ino', c_ulonglong), ('st_mode', c_mode_t), ('st_nlink', c_uint), ('st_uid', c_uid_t), ('st_gid', c_gid_t), ('st_rdev', c_dev_t), ('__pad2', c_ushort), ('st_size', c_off_t), ('st_blksize', c_long), ('st_blocks', c_longlong), ('st_atimespec', c_timespec), ('st_mtimespec', c_timespec), ('st_ctimespec', c_timespec)] else: # i686, use as fallback for everything else c_stat._fields_ = [ ('st_dev', c_dev_t), ('__pad1', c_ushort), ('__st_ino', c_ulong), ('st_mode', c_mode_t), ('st_nlink', c_uint), ('st_uid', c_uid_t), ('st_gid', c_gid_t), ('st_rdev', c_dev_t), ('__pad2', c_ushort), ('st_size', c_off_t), ('st_blksize', c_long), ('st_blocks', c_longlong), ('st_atimespec', c_timespec), ('st_mtimespec', c_timespec), ('st_ctimespec', c_timespec), ('st_ino', c_ulonglong)] else: raise NotImplementedError('%s is not supported.' % _system) class c_statvfs(Structure): _fields_ = [ ('f_bsize', c_ulong), ('f_frsize', c_ulong), ('f_blocks', c_fsblkcnt_t), ('f_bfree', c_fsblkcnt_t), ('f_bavail', c_fsblkcnt_t), ('f_files', c_fsfilcnt_t), ('f_ffree', c_fsfilcnt_t), ('f_favail', c_fsfilcnt_t)] if _system == 'FreeBSD': c_fsblkcnt_t = c_uint64 c_fsfilcnt_t = c_uint64 setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int) getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t) class c_statvfs(Structure): _fields_ = [ ('f_bavail', c_fsblkcnt_t), ('f_bfree', c_fsblkcnt_t), ('f_blocks', c_fsblkcnt_t), ('f_favail', c_fsfilcnt_t), ('f_ffree', c_fsfilcnt_t), ('f_files', c_fsfilcnt_t), ('f_bsize', c_ulong), ('f_flag', c_ulong), ('f_frsize', c_ulong)] class fuse_file_info(Structure): _fields_ = [ ('flags', c_int), ('fh_old', c_ulong), ('writepage', c_int), ('direct_io', c_uint, 1), ('keep_cache', c_uint, 1), ('flush', c_uint, 1), ('padding', c_uint, 29), ('fh', c_uint64), ('lock_owner', c_uint64)] class fuse_context(Structure): _fields_ = [ ('fuse', c_voidp), ('uid', c_uid_t), ('gid', c_gid_t), ('pid', c_pid_t), ('private_data', c_voidp)] class fuse_operations(Structure): _fields_ = [ ('getattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat))), ('readlink', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)), ('getdir', c_voidp), # Deprecated, use readdir ('mknod', CFUNCTYPE(c_int, c_char_p, c_mode_t, c_dev_t)), ('mkdir', CFUNCTYPE(c_int, c_char_p, c_mode_t)), ('unlink', CFUNCTYPE(c_int, c_char_p)), ('rmdir', CFUNCTYPE(c_int, c_char_p)), ('symlink', CFUNCTYPE(c_int, c_char_p, c_char_p)), ('rename', CFUNCTYPE(c_int, c_char_p, c_char_p)), ('link', CFUNCTYPE(c_int, c_char_p, c_char_p)), ('chmod', CFUNCTYPE(c_int, c_char_p, c_mode_t)), ('chown', CFUNCTYPE(c_int, c_char_p, c_uid_t, c_gid_t)), ('truncate', CFUNCTYPE(c_int, c_char_p, c_off_t)), ('utime', c_voidp), # Deprecated, use utimens ('open', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), ('read', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t, c_off_t, POINTER(fuse_file_info))), ('write', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t, c_off_t, POINTER(fuse_file_info))), ('statfs', CFUNCTYPE(c_int, c_char_p, POINTER(c_statvfs))), ('flush', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), ('release', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), ('fsync', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))), ('setxattr', setxattr_t), ('getxattr', getxattr_t), ('listxattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)), ('removexattr', CFUNCTYPE(c_int, c_char_p, c_char_p)), ('opendir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), ('readdir', CFUNCTYPE(c_int, c_char_p, c_voidp, CFUNCTYPE(c_int, c_voidp, c_char_p, POINTER(c_stat), c_off_t), c_off_t, POINTER(fuse_file_info))), ('releasedir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), ('fsyncdir', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))), ('init', CFUNCTYPE(c_voidp, c_voidp)), ('destroy', CFUNCTYPE(c_voidp, c_voidp)), ('access', CFUNCTYPE(c_int, c_char_p, c_int)), ('create', CFUNCTYPE(c_int, c_char_p, c_mode_t, POINTER(fuse_file_info))), ('ftruncate', CFUNCTYPE(c_int, c_char_p, c_off_t, POINTER(fuse_file_info))), ('fgetattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat), POINTER(fuse_file_info))), ('lock', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info), c_int, c_voidp)), ('utimens', CFUNCTYPE(c_int, c_char_p, POINTER(c_utimbuf))), ('bmap', CFUNCTYPE(c_int, c_char_p, c_size_t, POINTER(c_ulonglong)))] def time_of_timespec(ts): return ts.tv_sec + ts.tv_nsec / 10 ** 9 def set_st_attrs(st, attrs): for key, val in attrs.items(): if key in ('st_atime', 'st_mtime', 'st_ctime'): timespec = getattr(st, key + 'spec') timespec.tv_sec = int(val) timespec.tv_nsec = int((val - timespec.tv_sec) * 10 ** 9) elif hasattr(st, key): setattr(st, key, val) _libfuse_path = find_library('fuse') if not _libfuse_path: raise EnvironmentError('Unable to find libfuse') _libfuse = CDLL(_libfuse_path) _libfuse.fuse_get_context.restype = POINTER(fuse_context) def fuse_get_context(): """Returns a (uid, gid, pid) tuple""" ctxp = _libfuse.fuse_get_context() ctx = ctxp.contents return ctx.uid, ctx.gid, ctx.pid class FUSE(object): """This class is the lower level interface and should not be subclassed under normal use. Its methods are called by fuse. Assumes API version 2.6 or later.""" def __init__(self, operations, mountpoint, raw_fi=False, **kwargs): """Setting raw_fi to True will cause FUSE to pass the fuse_file_info class as is to Operations, instead of just the fh field. This gives you access to direct_io, keep_cache, etc.""" self.operations = operations self.raw_fi = raw_fi args = ['fuse'] if kwargs.pop('foreground', False): args.append('-f') if kwargs.pop('debug', False): args.append('-d') if kwargs.pop('nothreads', False): args.append('-s') kwargs.setdefault('fsname', operations.__class__.__name__) args.append('-o') args.append(','.join(key if val == True else '%s=%s' % (key, val) for key, val in kwargs.items())) args.append(mountpoint) argv = (c_char_p * len(args))(*args) fuse_ops = fuse_operations() for name, prototype in fuse_operations._fields_: if prototype != c_voidp and getattr(operations, name, None): op = partial(self._wrapper_, getattr(self, name)) setattr(fuse_ops, name, prototype(op)) _libfuse.fuse_main_real(len(args), argv, pointer(fuse_ops), sizeof(fuse_ops), None) del self.operations # Invoke the destructor def _wrapper_(self, func, *args, **kwargs): """Decorator for the methods that follow""" try: return func(*args, **kwargs) or 0 except OSError, e: return -(e.errno or EFAULT) except: print_exc() return -EFAULT def getattr(self, path, buf): return self.fgetattr(path, buf, None) def readlink(self, path, buf, bufsize): ret = self.operations('readlink', path) data = create_string_buffer(ret[:bufsize - 1]) memmove(buf, data, len(data)) return 0 def mknod(self, path, mode, dev): return self.operations('mknod', path, mode, dev) def mkdir(self, path, mode): return self.operations('mkdir', path, mode) def unlink(self, path): return self.operations('unlink', path) def rmdir(self, path): return self.operations('rmdir', path) def symlink(self, source, target): return self.operations('symlink', target, source) def rename(self, old, new): return self.operations('rename', old, new) def link(self, source, target): return self.operations('link', target, source) def chmod(self, path, mode): return self.operations('chmod', path, mode) def chown(self, path, uid, gid): return self.operations('chown', path, uid, gid) def truncate(self, path, length): return self.operations('truncate', path, length) def open(self, path, fip): fi = fip.contents if self.raw_fi: return self.operations('open', path, fi) else: fi.fh = self.operations('open', path, fi.flags) return 0 def read(self, path, buf, size, offset, fip): fh = fip.contents if self.raw_fi else fip.contents.fh ret = self.operations('read', path, size, offset, fh) if ret: strbuf = create_string_buffer(ret) memmove(buf, strbuf, len(strbuf)) return len(ret) def write(self, path, buf, size, offset, fip): data = string_at(buf, size) fh = fip.contents if self.raw_fi else fip.contents.fh return self.operations('write', path, data, offset, fh) def statfs(self, path, buf): stv = buf.contents attrs = self.operations('statfs', path) for key, val in attrs.items(): if hasattr(stv, key): setattr(stv, key, val) return 0 def flush(self, path, fip): fh = fip.contents if self.raw_fi else fip.contents.fh return self.operations('flush', path, fh) def release(self, path, fip): fh = fip.contents if self.raw_fi else fip.contents.fh return self.operations('release', path, fh) def fsync(self, path, datasync, fip): fh = fip.contents if self.raw_fi else fip.contents.fh return self.operations('fsync', path, datasync, fh) def setxattr(self, path, name, value, size, options, *args): data = string_at(value, size) return self.operations('setxattr', path, name, data, options, *args) def getxattr(self, path, name, value, size, *args): ret = self.operations('getxattr', path, name, *args) retsize = len(ret) buf = create_string_buffer(ret, retsize) # Does not add trailing 0 if bool(value): if retsize > size: return -ERANGE memmove(value, buf, retsize) return retsize def listxattr(self, path, namebuf, size): ret = self.operations('listxattr', path) if ret: buf = create_string_buffer('\x00'.join(ret)) else: buf = '' bufsize = len(buf) if bool(namebuf): if bufsize > size: return -ERANGE memmove(namebuf, buf, bufsize) return bufsize def removexattr(self, path, name): return self.operations('removexattr', path, name) def opendir(self, path, fip): # Ignore raw_fi fip.contents.fh = self.operations('opendir', path) return 0 def readdir(self, path, buf, filler, offset, fip): # Ignore raw_fi for item in self.operations('readdir', path, fip.contents.fh): if isinstance(item, str): name, st, offset = item, None, 0 else: name, attrs, offset = item if attrs: st = c_stat() set_st_attrs(st, attrs) else: st = None if filler(buf, name, st, offset) != 0: break return 0 def releasedir(self, path, fip): # Ignore raw_fi return self.operations('releasedir', path, fip.contents.fh) def fsyncdir(self, path, datasync, fip): # Ignore raw_fi return self.operations('fsyncdir', path, datasync, fip.contents.fh) def init(self, conn): return self.operations('init', '/') def destroy(self, private_data): return self.operations('destroy', '/') def access(self, path, amode): return self.operations('access', path, amode) def create(self, path, mode, fip): fi = fip.contents if self.raw_fi: return self.operations('create', path, mode, fi) else: fi.fh = self.operations('create', path, mode) return 0 def ftruncate(self, path, length, fip): fh = fip.contents if self.raw_fi else fip.contents.fh return self.operations('truncate', path, length, fh) def fgetattr(self, path, buf, fip): memset(buf, 0, sizeof(c_stat)) st = buf.contents fh = fip and (fip.contents if self.raw_fi else fip.contents.fh) attrs = self.operations('getattr', path, fh) set_st_attrs(st, attrs) return 0 def lock(self, path, fip, cmd, lock): fh = fip.contents if self.raw_fi else fip.contents.fh return self.operations('lock', path, fh, cmd, lock) def utimens(self, path, buf): if buf: atime = time_of_timespec(buf.contents.actime) mtime = time_of_timespec(buf.contents.modtime) times = (atime, mtime) else: times = None return self.operations('utimens', path, times) def bmap(self, path, blocksize, idx): return self.operations('bmap', path, blocksize, idx) class Operations(object): """This class should be subclassed and passed as an argument to FUSE on initialization. All operations should raise an OSError exception on error. When in doubt of what an operation should do, check the FUSE header file or the corresponding system call man page.""" def __call__(self, op, *args): if not hasattr(self, op): raise OSError(EFAULT, '') return getattr(self, op)(*args) def access(self, path, amode): return 0 bmap = None def chmod(self, path, mode): raise OSError(EROFS, '') def chown(self, path, uid, gid): raise OSError(EROFS, '') def create(self, path, mode, fi=None): """When raw_fi is False (default case), fi is None and create should return a numerical file handle. When raw_fi is True the file handle should be set directly by create and return 0.""" raise OSError(EROFS, '') def destroy(self, path): """Called on filesystem destruction. Path is always /""" pass def flush(self, path, fh): return 0 def fsync(self, path, datasync, fh): return 0 def fsyncdir(self, path, datasync, fh): return 0 def getattr(self, path, fh=None): """Returns a dictionary with keys identical to the stat C structure of stat(2). st_atime, st_mtime and st_ctime should be floats. NOTE: There is an incombatibility between Linux and Mac OS X concerning st_nlink of directories. Mac OS X counts all files inside the directory, while Linux counts only the subdirectories.""" if path != '/': raise OSError(ENOENT, '') return dict(st_mode=(S_IFDIR | 0755), st_nlink=2) def getxattr(self, path, name, position=0): raise OSError(ENOTSUP, '') def init(self, path): """Called on filesystem initialization. Path is always / Use it instead of __init__ if you start threads on initialization.""" pass def link(self, target, source): raise OSError(EROFS, '') def listxattr(self, path): return [] lock = None def mkdir(self, path, mode): raise OSError(EROFS, '') def mknod(self, path, mode, dev): raise OSError(EROFS, '') def open(self, path, flags): """When raw_fi is False (default case), open should return a numerical file handle. When raw_fi is True the signature of open becomes: open(self, path, fi) and the file handle should be set directly.""" return 0 def opendir(self, path): """Returns a numerical file handle.""" return 0 def read(self, path, size, offset, fh): """Returns a string containing the data requested.""" raise OSError(ENOENT, '') def readdir(self, path, fh): """Can return either a list of names, or a list of (name, attrs, offset) tuples. attrs is a dict as in getattr.""" return ['.', '..'] def readlink(self, path): raise OSError(ENOENT, '') def release(self, path, fh): return 0 def releasedir(self, path, fh): return 0 def removexattr(self, path, name): raise OSError(ENOTSUP, '') def rename(self, old, new): raise OSError(EROFS, '') def rmdir(self, path): raise OSError(EROFS, '') def setxattr(self, path, name, value, options, position=0): raise OSError(ENOTSUP, '') def statfs(self, path): """Returns a dictionary with keys identical to the statvfs C structure of statvfs(3). On Mac OS X f_bsize and f_frsize must be a power of 2 (minimum 512).""" return {} def symlink(self, target, source): raise OSError(EROFS, '') def truncate(self, path, length, fh=None): raise OSError(EROFS, '') def unlink(self, path): raise OSError(EROFS, '') def utimens(self, path, times=None): """Times is a (atime, mtime) tuple. If None use current time.""" return 0 def write(self, path, data, offset, fh): raise OSError(EROFS, '') class LoggingMixIn: def __call__(self, op, path, *args): print '->', op, path, repr(args) ret = '[Unknown Error]' try: ret = getattr(self, op)(path, *args) return ret except OSError, e: ret = str(e) raise finally: print '<-', op, repr(ret) fs-0.3.0/fs/expose/fuse/__init__.py0000644000175000017500000004774211406465611015632 0ustar willwill""" fs.expose.fuse ============== Expose an FS object to the native filesystem via FUSE This module provides the necessary interfaces to mount an FS object into the local filesystem via FUSE:: http://fuse.sourceforge.net/ For simple usage, the function 'mount' takes an FS object and a local path, and exposes the given FS at that path:: >>> from fs.memoryfs import MemoryFS >>> from fs.expose import fuse >>> fs = MemoryFS() >>> mp = fuse.mount(fs,"/mnt/my-memory-fs") >>> mp.path '/mnt/my-memory-fs' >>> mp.unmount() The above spawns a new background process to manage the FUSE event loop, which can be controlled through the returned subprocess.Popen object. To avoid spawning a new process, set the 'foreground' option:: >>> # This will block until the filesystem is unmounted >>> fuse.mount(fs,"/mnt/my-memory-fs",foreground=True) Any additional options for the FUSE process can be passed as keyword arguments to the 'mount' function. If you require finer control over the creation of the FUSE process, you can instantiate the MountProcess class directly. It accepts all options available to subprocess.Popen:: >>> from subprocess import PIPE >>> mp = fuse.MountProcess(fs,"/mnt/my-memory-fs",stderr=PIPE) >>> fuse_errors = mp.communicate()[1] The binding to FUSE is created via ctypes, using a custom version of the fuse.py code from Giorgos Verigakis: http://code.google.com/p/fusepy/ """ import datetime import os import sys import signal import errno import time import stat as statinfo import subprocess import pickle from fs.base import flags_to_mode, threading from fs.errors import * from fs.path import * try: import fuse_ctypes as fuse except NotImplementedError: raise ImportError("FUSE found but not usable") try: fuse._libfuse.fuse_get_context except AttributeError: raise ImportError("could not locate FUSE library") FUSE = fuse.FUSE Operations = fuse.Operations fuse_get_context = fuse.fuse_get_context STARTUP_TIME = time.time() NATIVE_ENCODING = sys.getfilesystemencoding() def handle_fs_errors(func): """Method decorator to report FS errors in the appropriate way. This decorator catches all FS errors and translates them into an equivalent OSError. It also makes the function return zero instead of None as an indication of successful execution. """ name = func.__name__ func = convert_fs_errors(func) @wraps(func) def wrapper(*args,**kwds): res = func(*args,**kwds) if res is None: return 0 return res return wrapper class FSOperations(Operations): """FUSE Operations interface delegating all activities to an FS object.""" def __init__(self, fs, on_init=None, on_destroy=None): self.fs = fs self._on_init = on_init self._on_destroy = on_destroy self._files_by_handle = {} self._files_lock = threading.Lock() self._next_handle = 1 # FUSE expects a succesful write() to be reflected in the file's # reported size, but the FS might buffer writes and prevent this. # We explicitly keep track of the size FUSE expects a file to be. # This dict is indexed by path, then file handle. self._files_size_written = {} def _get_file(self, fh): try: return self._files_by_handle[fh.fh] except KeyError: raise FSError("invalid file handle") def _reg_file(self, f, path): self._files_lock.acquire() try: fh = self._next_handle self._next_handle += 1 lock = threading.Lock() self._files_by_handle[fh] = (f,path,lock) if path not in self._files_size_written: self._files_size_written[path] = {} self._files_size_written[path][fh] = 0 return fh finally: self._files_lock.release() def _del_file(self, fh): self._files_lock.acquire() try: (f,path,lock) = self._files_by_handle.pop(fh.fh) del self._files_size_written[path][fh.fh] if not self._files_size_written[path]: del self._files_size_written[path] finally: self._files_lock.release() def init(self, conn): if self._on_init: self._on_init() def destroy(self, data): if self._on_destroy: self._on_destroy() @handle_fs_errors def chmod(self, path, mode): raise UnsupportedError("chmod") @handle_fs_errors def chown(self, path, uid, gid): raise UnsupportedError("chown") @handle_fs_errors def create(self, path, mode, fi): path = path.decode(NATIVE_ENCODING) fh = self._reg_file(self.fs.open(path,"w"),path) fi.fh = fh fi.keep_cache = 0 @handle_fs_errors def flush(self, path, fh): (file,_,lock) = self._get_file(fh) lock.acquire() try: file.flush() finally: lock.release() @handle_fs_errors def getattr(self, path, fh=None): attrs = self._get_stat_dict(path.decode(NATIVE_ENCODING)) return attrs @handle_fs_errors def getxattr(self, path, name, position=0): path = path.decode(NATIVE_ENCODING) name = name.decode(NATIVE_ENCODING) try: value = self.fs.getxattr(path,name) except AttributeError: raise OSError(errno.ENODATA,"no attribute '%s'" % (name,)) else: if value is None: raise OSError(errno.ENODATA,"no attribute '%s'" % (name,)) return value @handle_fs_errors def link(self, target, souce): raise UnsupportedError("link") @handle_fs_errors def listxattr(self, path): path = path.decode(NATIVE_ENCODING) try: return self.fs.listxattrs(path) except AttributeError: return [] @handle_fs_errors def mkdir(self, path, mode): path = path.decode(NATIVE_ENCODING) try: self.fs.makedir(path,mode) except TypeError: self.fs.makedir(path) @handle_fs_errors def mknod(self, path, mode, dev): raise UnsupportedError("mknod") @handle_fs_errors def open(self, path, fi): path = path.decode(NATIVE_ENCODING) mode = flags_to_mode(fi.flags) fi.fh = self._reg_file(self.fs.open(path,mode),path) fi.keep_cache = 0 return 0 @handle_fs_errors def read(self, path, size, offset, fh): (file,_,lock) = self._get_file(fh) lock.acquire() try: file.seek(offset) data = file.read(size) return data finally: lock.release() @handle_fs_errors def readdir(self, path, fh=None): path = path.decode(NATIVE_ENCODING) # If listdir() can return info dicts directly, it will save FUSE # having to call getinfo() on each entry individually. try: entries = self.fs.listdir(path,info=True) except TypeError: entries = [] for name in self.fs.listdir(path): name = name.encode(NATIVE_ENCODING) entries.append(name) else: entries = [(e["name"].encode(NATIVE_ENCODING),e,0) for e in entries] for (name,attrs,offset) in entries: self._fill_stat_dict(pathjoin(path,name.decode(NATIVE_ENCODING)),attrs) entries = [".",".."] + entries return entries @handle_fs_errors def readlink(self, path): raise UnsupportedError("readlink") @handle_fs_errors def release(self, path, fh): (file,_,lock) = self._get_file(fh) lock.acquire() try: file.close() self._del_file(fh) finally: lock.release() @handle_fs_errors def removexattr(self, path, name): path = path.decode(NATIVE_ENCODING) name = name.decode(NATIVE_ENCODING) try: return self.fs.delxattr(path,name) except AttributeError: raise UnsupportedError("removexattr") @handle_fs_errors def rename(self, old, new): old = old.decode(NATIVE_ENCODING) new = new.decode(NATIVE_ENCODING) try: self.fs.rename(old,new) except FSError: if self.fs.isdir(old): self.fs.movedir(old,new) else: self.fs.move(old,new) @handle_fs_errors def rmdir(self, path): path = path.decode(NATIVE_ENCODING) self.fs.removedir(path) @handle_fs_errors def setxattr(self, path, name, value, options, position=0): path = path.decode(NATIVE_ENCODING) name = name.decode(NATIVE_ENCODING) try: return self.fs.setxattr(path,name,value) except AttributeError: raise UnsupportedError("setxattr") @handle_fs_errors def symlink(self, target, source): raise UnsupportedError("symlink") @handle_fs_errors def truncate(self, path, length, fh=None): path = path.decode(NATIVE_ENCODING) if fh is None and length == 0: self.fs.open(path,"w").close() else: if fh is None: f = self.fs.open(path,"w+") if not hasattr(f,"truncate"): raise UnsupportedError("truncate") f.truncate(length) else: (file,_,lock) = self._get_file(fh) lock.acquire() try: if not hasattr(file,"truncate"): raise UnsupportedError("truncate") file.truncate(length) finally: lock.release() self._files_lock.acquire() try: try: size_written = self._files_size_written[path] except KeyError: pass else: for k in size_written: size_written[k] = length finally: self._files_lock.release() @handle_fs_errors def unlink(self, path): path = path.decode(NATIVE_ENCODING) self.fs.remove(path) @handle_fs_errors def utimens(self, path, times=None): accessed_time, modified_time = times if accessed_time is not None: accessed_time = datetime.datetime.fromtimestamp(accessed_time) if modified_time is not None: modified_time = datetime.datetime.fromtimestamp(modified_time) self.fs.settimes(path, accessed_time, modified_time) @handle_fs_errors def write(self, path, data, offset, fh): (file,path,lock) = self._get_file(fh) lock.acquire() try: file.seek(offset) file.write(data) if self._files_size_written[path][fh.fh] < offset + len(data): self._files_size_written[path][fh.fh] = offset + len(data) return len(data) finally: lock.release() def _get_stat_dict(self, path): """Build a 'stat' dictionary for the given file.""" info = self.fs.getinfo(path) self._fill_stat_dict(path,info) return info def _fill_stat_dict(self, path, info): """Fill default values in the stat dict.""" uid, gid, pid = fuse_get_context() private_keys = [k for k in info if k.startswith("_")] for k in private_keys: del info[k] # Basic stuff that is constant for all paths info.setdefault("st_ino",0) info.setdefault("st_dev",0) info.setdefault("st_uid",uid) info.setdefault("st_gid",gid) info.setdefault("st_rdev",0) info.setdefault("st_blksize",1024) info.setdefault("st_blocks",1) # The interesting stuff if 'st_mode' not in info: if self.fs.isdir(path): info['st_mode'] = 0755 else: info['st_mode'] = 0666 mode = info['st_mode'] if not statinfo.S_ISDIR(mode) and not statinfo.S_ISREG(mode): if self.fs.isdir(path): info["st_mode"] = mode | statinfo.S_IFDIR info.setdefault("st_nlink",2) else: info["st_mode"] = mode | statinfo.S_IFREG info.setdefault("st_nlink",1) for (key1,key2) in [("st_atime","accessed_time"),("st_mtime","modified_time"),("st_ctime","created_time")]: if key1 not in info: if key2 in info: info[key1] = time.mktime(info[key2].timetuple()) else: info[key1] = STARTUP_TIME # Ensure the reported size reflects any writes performed, even if # they haven't been flushed to the filesystem yet. info.setdefault("st_size",info.get("size",1024)) try: written_sizes = self._files_size_written[path] except KeyError: pass else: info["st_size"] = max(written_sizes.values() + [info["st_size"]]) return info def mount(fs, path, foreground=False, ready_callback=None, unmount_callback=None, **kwds): """Mount the given FS at the given path, using FUSE. By default, this function spawns a new background process to manage the FUSE event loop. The return value in this case is an instance of the 'MountProcess' class, a subprocess.Popen subclass. If the keyword argument 'foreground' is given, we instead run the FUSE main loop in the current process. In this case the function will block until the filesystem is unmounted, then return None. If the keyword argument 'ready_callback' is provided, it will be called when the filesystem has been mounted and is ready for use. Any additional keyword arguments will be passed through as options to the underlying FUSE class. Some interesting options include: * nothreads Switch off threading in the FUSE event loop * fsname Name to display in the mount info table """ path = os.path.expanduser(path) if foreground: op = FSOperations(fs, on_init=ready_callback, on_destroy=unmount_callback) return FUSE(op, path, raw_fi=True, foreground=foreground, **kwds) else: mp = MountProcess(fs, path, kwds) if ready_callback: ready_callback() if unmount_callback: orig_unmount = mp.unmount def new_unmount(): orig_unmount() unmount_callback() mp.unmount = new_unmount return mp def unmount(path): """Unmount the given mount point. This function shells out to the 'fusermount' program to unmount a FUSE filesystem. It works, but it would probably be better to use the 'unmount' method on the MountProcess class if you have it. """ for num_tries in xrange(3): p = subprocess.Popen(["fusermount","-u",path],stderr=subprocess.PIPE) (stdout,stderr) = p.communicate() if p.returncode == 0: return if "not mounted" in stderr: return if "not found" in stderr: return raise OSError("filesystem could not be unmounted: %s (%s) " % (path,stderr,)) class MountProcess(subprocess.Popen): """subprocess.Popen subclass managing a FUSE mount. This is a subclass of subprocess.Popen, designed for easy management of a FUSE mount in a background process. Rather than specifying the command to execute, pass in the FS object to be mounted, the target mount point and a dictionary of options for the underlying FUSE class. In order to be passed successfully to the new process, the FS object must be pickleable. This restriction may be lifted in the future. This class has an extra attribute 'path' giving the path to the mounted filesystem, and an extra method 'unmount' that will cleanly unmount it and terminate the process. By default, the spawning process will block until it receives notification that the filesystem has been mounted. Since this notification is sent by writing to a pipe, using the 'close_fds' option on this class will prevent it from being sent. You can also pass in the keyword argument 'nowait' to continue without waiting for notification. """ # This works by spawning a new python interpreter and passing it the # pickled (fs,path,opts) tuple on the command-line. Something like this: # # python -c "import MountProcess; MountProcess._do_mount('..data..') # # It would be more efficient to do a straight os.fork() here, and would # remove the need to pickle the FS. But API wise, I think it's much # better for mount() to return a Popen instance than just a pid. # # In the future this class could implement its own forking logic and # just copy the relevant bits of the Popen interface. For now, this # spawn-a-new-interpreter solution is the easiest to get up and running. unmount_timeout = 5 def __init__(self, fs, path, fuse_opts={}, nowait=False, **kwds): self.path = path if nowait or kwds.get("close_fds",False): cmd = 'from fs.expose.fuse import MountProcess; ' cmd = cmd + 'MountProcess._do_mount_nowait(%s)' cmd = cmd % (repr(pickle.dumps((fs,path,fuse_opts),-1)),) cmd = [sys.executable,"-c",cmd] super(MountProcess,self).__init__(cmd,**kwds) else: (r,w) = os.pipe() cmd = 'from fs.expose.fuse import MountProcess; ' cmd = cmd + 'MountProcess._do_mount_wait(%s)' cmd = cmd % (repr(pickle.dumps((fs,path,fuse_opts,r,w),-1)),) cmd = [sys.executable,"-c",cmd] super(MountProcess,self).__init__(cmd,**kwds) os.close(w) if os.read(r,1) != "S": self.terminate() raise RuntimeError("FUSE error: " + os.read(r,20)) def unmount(self): """Cleanly unmount the FUSE filesystem, terminating this subprocess.""" self.terminate() def killme(): self.kill() time.sleep(0.1) try: unmount(self.path) except OSError: pass tmr = threading.Timer(self.unmount_timeout,killme) tmr.start() self.wait() tmr.cancel() if not hasattr(subprocess.Popen, "terminate"): def terminate(self): """Gracefully terminate the subprocess.""" os.kill(self.pid,signal.SIGTERM) if not hasattr(subprocess.Popen, "kill"): def kill(self): """Forcibly terminate the subprocess.""" os.kill(self.pid,signal.SIGKILL) @staticmethod def _do_mount_nowait(data): """Perform the specified mount, return without waiting.""" (fs,path,opts) = pickle.loads(data) opts["foreground"] = True def unmount_callback(): fs.close() opts["unmount_callback"] = unmount_callback mount(fs,path,*opts) @staticmethod def _do_mount_wait(data): """Perform the specified mount, signalling when ready.""" (fs,path,opts,r,w) = pickle.loads(data) os.close(r) opts["foreground"] = True successful = [] def ready_callback(): successful.append(True) os.write(w,"S") os.close(w) opts["ready_callback"] = ready_callback def unmount_callback(): fs.close() opts["unmount_callback"] = unmount_callback try: mount(fs,path,**opts) except Exception, e: os.write(w,"E"+str(e)) os.close(w) else: if not successful: os.write(w,"E") os.close(w) if __name__ == "__main__": import os, os.path from fs.tempfs import TempFS mount_point = os.path.join(os.environ["HOME"], "fs.expose.fuse") if not os.path.exists(mount_point): os.makedirs(mount_point) def ready_callback(): print "READY" mount(TempFS(), mount_point, foreground=True, ready_callback=ready_callback) fs-0.3.0/fs/expose/__init__.py0000644000175000017500000000000011223657267014647 0ustar willwillfs-0.3.0/fs/expose/sftp.py0000644000175000017500000002544011406246062014071 0ustar willwill""" fs.expose.sftp ============== Expose an FS object over SFTP (via paramiko). This module provides the necessary interfaces to expose an FS object over SFTP, plugging into the infratructure provided by the 'paramiko' module. For simple usage, the class 'BaseSFTPServer' provides an all-in-one server class based on the standard SocketServer module. Use it like so:: server = BaseSFTPServer((hostname,port),fs) server.serve_forever() Note that the base class allows UNAUTHENTICATED ACCESS by default. For more serious work you will probably want to subclass it and override methods such as check_auth_password() and get_allowed_auths(). To integrate this module into an existing server framework based on paramiko, the 'SFTPServerInterface' class provides a concrete implementation of the paramiko.SFTPServerInterface protocol. If you don't understand what this is, you probably don't want to use it. """ import os import stat as statinfo import time import SocketServer as sockserv import threading from StringIO import StringIO import paramiko from fs.base import flags_to_mode from fs.path import * from fs.errors import * from fs.errors import wraps # Default host key used by BaseSFTPServer # DEFAULT_HOST_KEY = paramiko.RSAKey.from_private_key(StringIO("-----BEGIN RSA PRIVATE KEY-----\nMIICXgIBAAKCAIEAl7sAF0x2O/HwLhG68b1uG8KHSOTqe3Cdlj5i/1RhO7E2BJ4B\n3jhKYDYtupRnMFbpu7fb21A24w3Y3W5gXzywBxR6dP2HgiSDVecoDg2uSYPjnlDk\nHrRuviSBG3XpJ/awn1DObxRIvJP4/sCqcMY8Ro/3qfmid5WmMpdCZ3EBeC0CAwEA\nAQKCAIBSGefUs5UOnr190C49/GiGMN6PPP78SFWdJKjgzEHI0P0PxofwPLlSEj7w\nRLkJWR4kazpWE7N/bNC6EK2pGueMN9Ag2GxdIRC5r1y8pdYbAkuFFwq9Tqa6j5B0\nGkkwEhrcFNBGx8UfzHESXe/uE16F+e8l6xBMcXLMJVo9Xjui6QJBAL9MsJEx93iO\nzwjoRpSNzWyZFhiHbcGJ0NahWzc3wASRU6L9M3JZ1VkabRuWwKNuEzEHNK8cLbRl\nTyH0mceWXcsCQQDLDEuWcOeoDteEpNhVJFkXJJfwZ4Rlxu42MDsQQ/paJCjt2ONU\nWBn/P6iYDTvxrt/8+CtLfYc+QQkrTnKn3cLnAkEAk3ixXR0h46Rj4j/9uSOfyyow\nqHQunlZ50hvNz8GAm4TU7v82m96449nFZtFObC69SLx/VsboTPsUh96idgRrBQJA\nQBfGeFt1VGAy+YTLYLzTfnGnoFQcv7+2i9ZXnn/Gs9N8M+/lekdBFYgzoKN0y4pG\n2+Q+Tlr2aNlAmrHtkT13+wJAJVgZATPI5X3UO0Wdf24f/w9+OY+QxKGl86tTQXzE\n4bwvYtUGufMIHiNeWP66i6fYCucXCMYtx6Xgu2hpdZZpFw==\n-----END RSA PRIVATE KEY-----\n")) def report_sftp_errors(func): """Decorator to catch and report FS errors as SFTP error codes. Any FSError exceptions are caught and translated into an appropriate return code, while other exceptions are passed through untouched. """ @wraps(func) def wrapper(*args,**kwds): try: return func(*args,**kwds) except ResourceNotFoundError, e: return paramiko.SFTP_NO_SUCH_FILE except UnsupportedError, e: return paramiko.SFTP_OP_UNSUPPORTED except FSError, e: return paramiko.SFTP_FAILURE return wrapper class SFTPServerInterface(paramiko.SFTPServerInterface): """SFTPServerInferface implementation that exposes an FS object. This SFTPServerInterface subclass expects a single additional argument, the fs object to be exposed. Use it to set up a transport subsystem handler like so:: t.set_subsystem_handler("sftp",SFTPServer,SFTPServerInterface,fs) If this all looks too complicated, you might consider the BaseSFTPServer class also provided by this module - it automatically creates the enclosing paramiko server infrastructure. """ def __init__(self, server, fs, encoding=None, *args, **kwds): self.fs = fs if encoding is None: encoding = "utf8" self.encoding = encoding super(SFTPServerInterface,self).__init__(server,*args,**kwds) @report_sftp_errors def open(self, path, flags, attr): return SFTPHandle(self, path, flags) @report_sftp_errors def list_folder(self, path): if not isinstance(path, unicode): path = path.decode(self.encoding) stats = [] for entry in self.fs.listdir(path,absolute=True): stats.append(self.stat(entry)) return stats @report_sftp_errors def stat(self, path): if not isinstance(path, unicode): path = path.decode(self.encoding) info = self.fs.getinfo(path) stat = paramiko.SFTPAttributes() stat.filename = basename(path).encode(self.encoding) stat.st_size = info.get("size") stat.st_atime = time.mktime(info.get("accessed_time").timetuple()) stat.st_mtime = time.mktime(info.get("modified_time").timetuple()) if self.fs.isdir(path): stat.st_mode = 0777 | statinfo.S_IFDIR else: stat.st_mode = 0777 | statinfo.S_IFREG return stat def lstat(self, path): return self.stat(path) @report_sftp_errors def remove(self, path): if not isinstance(path,unicode): path = path.decode(self.encoding) self.fs.remove(path) return paramiko.SFTP_OK @report_sftp_errors def rename(self, oldpath, newpath): if not isinstance(oldpath, unicode): oldpath = oldpath.decode(self.encoding) if not isinstance(newpath, unicode): newpath = newpath.decode(self.encoding) if self.fs.isfile(oldpath): self.fs.move(oldpath, newpath) else: self.fs.movedir(oldpath, newpath) return paramiko.SFTP_OK @report_sftp_errors def mkdir(self, path, attr): if not isinstance(path,unicode): path = path.decode(self.encoding) self.fs.makedir(path) return paramiko.SFTP_OK @report_sftp_errors def rmdir(self, path): if not isinstance(path,unicode): path = path.decode(self.encoding) self.fs.removedir(path) return paramiko.SFTP_OK def canonicalize(self, path): return abspath(normpath(path)) def chattr(self, path, attr): return paramiko.SFTP_OP_UNSUPPORTED def readlink(self, path): return paramiko.SFTP_OP_UNSUPPORTED def symlink(self, path): return paramiko.SFTP_OP_UNSUPPORTED class SFTPHandle(paramiko.SFTPHandle): """SFTP file handler pointing to a file in an FS object. This is a simple file wrapper for SFTPServerInterface, passing read and write requests directly through the to underlying file from the FS. """ def __init__(self, owner, path, flags): super(SFTPHandle,self).__init__(flags) mode = flags_to_mode(flags) + "b" self.owner = owner if not isinstance(path,unicode): path = path.decode(self.owner.encoding) self.path = path self._file = owner.fs.open(path,mode) @report_sftp_errors def close(self): self._file.close() return paramiko.SFTP_OK @report_sftp_errors def read(self, offset, length): self._file.seek(offset) return self._file.read(length) @report_sftp_errors def write(self, offset, data): self._file.seek(offset) self._file.write(data) return paramiko.SFTP_OK def stat(self): return self.owner.stat(self.path) def chattr(self,attr): return self.owner.chattr(self.path,attr) class SFTPRequestHandler(sockserv.StreamRequestHandler): """SockerServer RequestHandler subclass for BaseSFTPServer. This RequestHandler subclass creates a paramiko Transport, sets up the sftp subsystem, and hands off the the transport's own request handling thread. Note that paramiko.Transport uses a separate thread by default, so there is no need to use TreadingMixIn. """ def handle(self): t = paramiko.Transport(self.request) t.add_server_key(self.server.host_key) t.set_subsystem_handler("sftp", paramiko.SFTPServer, SFTPServerInterface, self.server.fs, getattr(self.server,"encoding",None)) # Note that this actually spawns a new thread to handle the requests. # (Actually, paramiko.Transport is a subclass of Thread) t.start_server(server=self.server) class BaseSFTPServer(sockserv.TCPServer,paramiko.ServerInterface): """SocketServer.TCPServer subclass exposing an FS via SFTP. BaseSFTPServer combines a simple SocketServer.TCPServer subclass with an implementation of paramiko.ServerInterface, providing everything that's needed to expose an FS via SFTP. Operation is in the standard SocketServer style. The target FS object can be passed into the constructor, or set as an attribute on the server:: server = BaseSFTPServer((hostname,port),fs) server.serve_forever() It is also possible to specify the host key used by the sever by setting the 'host_key' attribute. If this is not specified, it will default to the key found in the DEFAULT_HOST_KEY variable. Note that this base class allows UNAUTHENTICATED ACCESS to the exposed FS. This is intentional, since we can't guess what your authentication needs are. To protect the exposed FS, override the following methods: * get_allowed_auths Determine the allowed auth modes * check_auth_none Check auth with no credentials * check_auth_password Check auth with a password * check_auth_publickey Check auth with a public key """ def __init__(self, address, fs=None, encoding=None, host_key=None, RequestHandlerClass=None): self.fs = fs self.encoding = encoding if host_key is None: host_key = DEFAULT_HOST_KEY self.host_key = host_key if RequestHandlerClass is None: RequestHandlerClass = SFTPRequestHandler sockserv.TCPServer.__init__(self,address,RequestHandlerClass) def close_request(self, request): # paramiko.Transport closes itself when finished. # If we close it here, we'll break the Transport thread. pass def check_channel_request(self, kind, chanid): if kind == 'session': return paramiko.OPEN_SUCCEEDED return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED def check_auth_none(self, username): """Check whether the user can proceed without authentication.""" return paramiko.AUTH_SUCCESSFUL def check_auth_publickey(self, username,key): """Check whether the given public key is valid for authentication.""" return paramiko.AUTH_FAILED def check_auth_password(self, username, password): """Check whether the given password is valid for authentication.""" return paramiko.AUTH_FAILED def get_allowed_auths(self,username): """Return list of allowed auth modes. The available modes are "node", "password" and "publickey". """ return ("none",) # When called from the command-line, expose a TempFS for testing purposes if __name__ == "__main__": from fs.tempfs import TempFS server = BaseSFTPServer(("localhost",8022),TempFS()) try: server.serve_forever() except (SystemExit,KeyboardInterrupt): server.server_close() fs-0.3.0/fs/expose/django_storage.py0000644000175000017500000000313611406462302016076 0ustar willwill""" fs.expose.django ================ Use an FS object for Django File Storage """ from django.conf import settings from django.core.files.storage import Storage from django.core.files import File from fs.path import abspath, dirname from fs.errors import convert_fs_errors class FSStorage(Storage): """Expose an FS object as a Django File Storage object.""" def __init__(self, fs=None, base_url=None): """ :param fs: an FS object :param base_url: The url to prepend to the path """ if fs is None: fs = settings.DEFAULT_FILE_STORAGE_FS if base_url is None: base_url = settings.MEDIA_URL base_url = base_url.rstrip('/') self.fs = fs self.base_url = base_url def exists(self, name): return self.fs.isfile(name) def path(self, name): path = self.fs.getsyspath(name) if path is None: raise NotImplementedError return path @convert_fs_errors def size(self, name): return self.fs.getsize(name) @convert_fs_errors def url(self, name): return self.base_url + abspath(name) @convert_fs_errors def _open(self, name, mode): return File(self.fs.open(name, mode)) @convert_fs_errors def _save(self, name, content): self.fs.makedir(dirname(name), allow_recreate=True, recursive=True) self.fs.setcontents(name, content) return name @convert_fs_errors def delete(self, name): try: self.fs.remove(name) except ResourceNotFoundError: pass fs-0.3.0/fs/expose/xmlrpc.py0000644000175000017500000001321711407373567014435 0ustar willwill""" fs.expose.xmlrpc ================ Server to expose an FS via XML-RPC This module provides the necessary infrastructure to expose an FS object over XML-RPC. The main class is 'RPCFSServer', a SimpleXMLRPCServer subclass designed to expose an underlying FS. If you need to use a more powerful server than SimpleXMLRPCServer, you can use the RPCFSInterface class to provide an XML-RPC-compatible wrapper around an FS object, which can then be exposed using whatever server you choose (e.g. Twisted's XML-RPC server). """ import xmlrpclib from SimpleXMLRPCServer import SimpleXMLRPCServer class RPCFSInterface(object): """Wrapper to expose an FS via a XML-RPC compatible interface. The only real trick is using xmlrpclib.Binary objects to transport the contents of files. """ def __init__(self, fs): self.fs = fs def encode_path(self, path): """Encode a filesystem path for sending over the wire. Unfortunately XMLRPC only supports ASCII strings, so this method must return something that can be represented in ASCII. The default is base64-encoded UTF-8. """ return path.encode("utf8").encode("base64") def decode_path(self, path): """Decode paths arriving over the wire.""" return path.decode("base64").decode("utf8") def get_contents(self, path): path = self.decode_path(path) data = self.fs.getcontents(path) return xmlrpclib.Binary(data) def set_contents(self, path, data): path = self.decode_path(path) self.fs.createfile(path,data.data) def exists(self, path): path = self.decode_path(path) return self.fs.exists(path) def isdir(self, path): path = self.decode_path(path) return self.fs.isdir(path) def isfile(self, path): path = self.decode_path(path) return self.fs.isfile(path) def listdir(self, path="./", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False): path = self.decode_path(path) entries = self.fs.listdir(path,wildcard,full,absolute,dirs_only,files_only) return [self.encode_path(e) for e in entries] def makedir(self, path, recursive=False, allow_recreate=False): path = self.decode_path(path) return self.fs.makedir(path, recursive, allow_recreate) def remove(self, path): path = self.decode_path(path) return self.fs.remove(path) def removedir(self, path, recursive=False, force=False): path = self.decode_path(path) return self.fs.removedir(path, recursive, force) def rename(self, src, dst): src = self.decode_path(src) dst = self.decode_path(dst) return self.fs.rename(src, dst) def settimes(self, path, accessed_time, modified_time): path = self.decode_path(path) return self.fs.settimes(path, accessed_time, modified_time) def getinfo(self, path): path = self.decode_path(path) return self.fs.getinfo(path) def desc(self, path): path = self.decode_path(path) return self.fs.desc(path) def getxattr(self, path, attr, default=None): path = self.decode_path(path) attr = self.decode_path(attr) return self.fs.getxattr(path, attr, default) def setxattr(self, path, attr, value): path = self.decode_path(path) attr = self.decode_path(attr) return self.fs.setxattr(path, attr, value) def delxattr(self, path, attr): path = self.decode_path(path) attr = self.decode_path(attr) return self.fs.delxattr(path, attr) def listxattrs(self, path): path = self.decode_path(path) return [self.encode_path(a) for a in self.fs.listxattrs(path)] def copy(self, src, dst, overwrite=False, chunk_size=16384): src = self.decode_path(src) dst = self.decode_path(dst) return self.fs.copy(src, dst, overwrite, chunk_size) def move(self,src,dst,overwrite=False,chunk_size=16384): src = self.decode_path(src) dst = self.decode_path(dst) return self.fs.move(src, dst, overwrite, chunk_size) def movedir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=16384): src = self.decode_path(src) dst = self.decode_path(dst) return self.fs.movedir(src, dst, overwrite, ignore_errors, chunk_size) def copydir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=16384): src = self.decode_path(src) dst = self.decode_path(dst) return self.fs.copydir(src, dst, overwrite, ignore_errors, chunk_size) class RPCFSServer(SimpleXMLRPCServer): """Server to expose an FS object via XML-RPC. This class takes as its first argument an FS instance, and as its second argument a (hostname,port) tuple on which to listen for XML-RPC requests. Example:: fs = OSFS('/var/srv/myfiles') s = RPCFSServer(fs,("",8080)) s.serve_forever() To cleanly shut down the server after calling serve_forever, set the attribute "serve_more_requests" to False. """ def __init__(self, fs, addr, requestHandler=None, logRequests=None): kwds = dict(allow_none=True) if requestHandler is not None: kwds['requestHandler'] = requestHandler if logRequests is not None: kwds['logRequests'] = logRequests self.serve_more_requests = True SimpleXMLRPCServer.__init__(self,addr,**kwds) self.register_instance(RPCFSInterface(fs)) def serve_forever(self): """Override serve_forever to allow graceful shutdown.""" while self.serve_more_requests: self.handle_request() fs-0.3.0/fs/sftpfs.py0000644000175000017500000002700411406167564013126 0ustar willwill""" fs.sftpfs ========= Filesystem accessing an SFTP server (via paramiko) """ import datetime import stat as statinfo import paramiko from fs.base import * # SFTPClient appears to not be thread-safe, so we use an instance per thread if hasattr(threading,"local"): thread_local = threading.local else: class thread_local(object): def __init__(self): self._map = {} def __getattr__(self,attr): try: return self._map[(threading.currentThread().ident,attr)] except KeyError: raise AttributeError, attr def __setattr__(self,attr,value): self._map[(threading.currentThread().ident,attr)] = value if not hasattr(paramiko.SFTPFile,"__enter__"): paramiko.SFTPFile.__enter__ = lambda self: self paramiko.SFTPFile.__exit__ = lambda self,et,ev,tb: self.close() and False class SFTPFS(FS): """A filesystem stored on a remote SFTP server. This is basically a compatability wrapper for the excellent SFTPClient class in the paramiko module. """ def __init__(self, connection, root_path="/", encoding=None, **credentials): """SFTPFS constructor. The only required argument is 'connection', which must be something from which we can construct a paramiko.SFTPClient object. Possibile values include: * a hostname string * a (hostname,port) tuple * a paramiko.Transport instance * a paramiko.Channel instance in "sftp" mode The kwd argument 'root_path' specifies the root directory on the remote machine - access to files outsite this root wil be prevented. Any other keyword arguments are assumed to be credentials to be used when connecting the transport. :param connection: a connection string :param root_path: The root path to open """ if encoding is None: encoding = "utf8" self.encoding = encoding self.closed = False self._owns_transport = False self._credentials = credentials self._tlocal = thread_local() self._transport = None self._client = None if isinstance(connection,paramiko.Channel): self._transport = None self._client = paramiko.SFTPClient(connection) else: if not isinstance(connection,paramiko.Transport): connection = paramiko.Transport(connection) self._owns_transport = True if not connection.is_authenticated(): connection.connect(**credentials) self._transport = connection self.root_path = abspath(normpath(root_path)) super(SFTPFS, self).__init__() def __del__(self): self.close() def __getstate__(self): state = super(SFTPFS,self).__getstate__() del state["_tlocal"] if self._owns_transport: state['_transport'] = self._transport.getpeername() return state def __setstate__(self,state): for (k,v) in state.iteritems(): self.__dict__[k] = v self._tlocal = thread_local() if self._owns_transport: self._transport = paramiko.Transport(self._transport) self._transport.connect(**self._credentials) @property def client(self): try: return self._tlocal.client except AttributeError: if self._transport is None: return self._client client = paramiko.SFTPClient.from_transport(self._transport) self._tlocal.client = client return client def close(self): """Close the connection to the remote server.""" if not self.closed: if self.client: self.client.close() if self._owns_transport and self._transport: self._transport.close() def _normpath(self,path): if not isinstance(path,unicode): path = path.decode(self.encoding) npath = pathjoin(self.root_path,relpath(normpath(path))) if not isprefix(self.root_path,npath): raise PathError(path,msg="Path is outside root: %(path)s") return npath @convert_os_errors def open(self,path,mode="r",bufsize=-1): npath = self._normpath(path) # paramiko implements its own buffering and write-back logic, # so we don't need to use a RemoteFileBuffer here. f = self.client.open(npath,mode,bufsize) if self.isdir(path): msg = "that's a directory: %(path)s" raise ResourceInvalidError(path,msg=msg) return f @convert_os_errors def exists(self,path): npath = self._normpath(path) try: self.client.stat(npath) except IOError, e: if getattr(e,"errno",None) == 2: return False raise return True @convert_os_errors def isdir(self,path): npath = self._normpath(path) try: stat = self.client.stat(npath) except IOError, e: if getattr(e,"errno",None) == 2: return False raise return statinfo.S_ISDIR(stat.st_mode) @convert_os_errors def isfile(self,path): npath = self._normpath(path) try: stat = self.client.stat(npath) except IOError, e: if getattr(e,"errno",None) == 2: return False raise return statinfo.S_ISREG(stat.st_mode) @convert_os_errors def listdir(self,path="./",wildcard=None,full=False,absolute=False,dirs_only=False,files_only=False): npath = self._normpath(path) try: paths = self.client.listdir(npath) except IOError, e: if getattr(e,"errno",None) == 2: if self.isfile(path): raise ResourceInvalidError(path,msg="Can't list directory contents of a file: %(path)s") raise ResourceNotFoundError(path) elif self.isfile(path): raise ResourceInvalidError(path,msg="Can't list directory contents of a file: %(path)s") raise for (i,p) in enumerate(paths): if not isinstance(p,unicode): paths[i] = p.decode(self.encoding) return self._listdir_helper(path, paths, wildcard, full, absolute, dirs_only, files_only) @convert_os_errors def makedir(self,path,recursive=False,allow_recreate=False): npath = self._normpath(path) try: self.client.mkdir(npath) except IOError, e: # Error code is unreliable, try to figure out what went wrong try: stat = self.client.stat(npath) except IOError: if not self.isdir(dirname(path)): # Parent dir is missing if not recursive: raise ParentDirectoryMissingError(path) self.makedir(dirname(path),recursive=True) self.makedir(path,allow_recreate=allow_recreate) else: # Undetermined error, let the decorator handle it raise else: # Destination exists if statinfo.S_ISDIR(stat.st_mode): if not allow_recreate: raise DestinationExistsError(path,msg="Can't create a directory that already exists (try allow_recreate=True): %(path)s") else: raise ResourceInvalidError(path,msg="Can't create directory, there's already a file of that name: %(path)s") @convert_os_errors def remove(self,path): npath = self._normpath(path) try: self.client.remove(npath) except IOError, e: if getattr(e,"errno",None) == 2: raise ResourceNotFoundError(path) elif self.isdir(path): raise ResourceInvalidError(path,msg="Cannot use remove() on a directory: %(path)s") raise @convert_os_errors def removedir(self,path,recursive=False,force=False): npath = self._normpath(path) if path in ("","/"): return if force: for path2 in self.listdir(path,absolute=True): try: self.remove(path2) except ResourceInvalidError: self.removedir(path2,force=True) try: self.client.rmdir(npath) except IOError, e: if getattr(e,"errno",None) == 2: if self.isfile(path): raise ResourceInvalidError(path,msg="Can't use removedir() on a file: %(path)s") raise ResourceNotFoundError(path) elif self.listdir(path): raise DirectoryNotEmptyError(path) raise if recursive: try: self.removedir(dirname(path),recursive=True) except DirectoryNotEmptyError: pass @convert_os_errors def rename(self,src,dst): nsrc = self._normpath(src) ndst = self._normpath(dst) try: self.client.rename(nsrc,ndst) except IOError, e: if getattr(e,"errno",None) == 2: raise ResourceNotFoundError(path) if not self.isdir(dirname(dst)): raise ParentDirectoryMissingError(dst) raise @convert_os_errors def move(self,src,dst,overwrite=False,chunk_size=16384): nsrc = self._normpath(src) ndst = self._normpath(dst) if overwrite and self.isfile(dst): self.remove(dst) try: self.client.rename(nsrc,ndst) except IOError, e: if getattr(e,"errno",None) == 2: raise ResourceNotFoundError(path) if self.exists(dst): raise DestinationExistsError(dst) if not self.isdir(dirname(dst)): raise ParentDirectoryMissingError(dst,msg="Destination directory does not exist: %(path)s") raise @convert_os_errors def movedir(self,src,dst,overwrite=False,ignore_errors=False,chunk_size=16384): nsrc = self._normpath(src) ndst = self._normpath(dst) if overwrite and self.isdir(dst): self.removedir(dst) try: self.client.rename(nsrc,ndst) except IOError, e: if getattr(e,"errno",None) == 2: raise ResourceNotFoundError(path) if self.exists(dst): raise DestinationExistsError(dst) if not self.isdir(dirname(dst)): raise ParentDirectoryMissingError(dst,msg="Destination directory does not exist: %(path)s") raise @convert_os_errors def getinfo(self, path): npath = self._normpath(path) stats = self.client.stat(npath) info = dict((k, getattr(stats, k)) for k in dir(stats) if not k.startswith('__') ) info['size'] = info['st_size'] ct = info.get('st_ctime', None) if ct is not None: info['created_time'] = datetime.datetime.fromtimestamp(ct) at = info.get('st_atime', None) if at is not None: info['accessed_time'] = datetime.datetime.fromtimestamp(at) mt = info.get('st_mtime', None) if mt is not None: info['modified_time'] = datetime.datetime.fromtimestamp(at) return info @convert_os_errors def getsize(self, path): npath = self._normpath(path) stats = self.client.stat(npath) return stats.st_size fs-0.3.0/fs/base.py0000644000175000017500000010413711407422475012532 0ustar willwill#!/usr/bin/env python """ fs.base: base class defining the FS abstraction. This module defines the most basic filesystem abstraction, the FS class. Instances of FS represent a filesystem containing files and directories that can be queried and manipulated. To implement a new kind of filesystem, start by sublcassing the base FS class. """ import os, os.path import sys import shutil import fnmatch import datetime import time try: import threading except ImportError: import dummy_threading as threading from fs.path import * from fs.errors import * class DummyLock: """A dummy lock object that doesn't do anything. This is used as a placeholder when locking is disabled. We can't directly use the Lock class from the dummy_threading module, since it attempts to sanity-check the sequence of acquire/release calls in a way that breaks when real threading is available. """ def acquire(self,blocking=1): """Acquiring a DummyLock always succeeds.""" return 1 def release(self): """Releasing a DummyLock always succeeds.""" pass def silence_fserrors(f, *args, **kwargs): """Perform a function call and return None if FSError is thrown :param f: Function to call :param args: Parameters to f :param kwargs: Keyword parameters to f """ try: return f(*args, **kwargs) except FSError: return None class NullFile(object): """A NullFile is a file object that has no functionality. Null files are returned by the 'safeopen' method in FS objects when the file doesn't exist. This can simplify code by negating the need to check if a file exists, or handling exceptions. """ def __init__(self): self.closed = False def close(self): self.closed = True def flush(self): pass def __iter__(self): return self def next(self): raise StopIteration def readline(self, *args, **kwargs): return "" def close(self): self.closed = True def read(self, size=None): return "" def seek(self, *args, **kwargs): pass def tell(self): return 0 def truncate(self, *args, **kwargs): return 0 def write(self, data): pass def writelines(self, *args, **kwargs): pass try: from functools import wraps except ImportError: wraps = lambda f: lambda f: f def synchronize(func): """Decorator to synchronize a method on self._lock.""" @wraps(func) def acquire_lock(self, *args, **kwargs): self._lock.acquire() try: return func(self, *args, **kwargs) finally: self._lock.release() return acquire_lock class FS(object): """The base class for Filesystem abstraction objects. An instance of a class derived from FS is an abstraction on some kind of filesytem, such as the OS filesystem or a zip file. """ def __init__(self, thread_synchronize=False): """The base class for Filesystem objects. :param thread_synconize: If True, a lock object will be created for the object, otherwise a dummy lock will be used. :type thread_synchronize: bool """ super(FS,self).__init__() self.closed = False if thread_synchronize: self._lock = threading.RLock() else: self._lock = DummyLock() def __del__(self): if not getattr(self, 'closed', True): self.close() def cache_hint(self, enabled): """Recommends the use of caching. Implementations are free to use or ignore this value. :param enabled: If True the implementation is permitted to cache directory structure / file info. """ pass def close(self): self.closed = True def __getstate__(self): # Locks can't be pickled, so instead we just indicate the # type of lock that should be there. None == no lock, # True == a proper lock, False == a dummy lock. state = self.__dict__.copy() lock = state.get("_lock",None) if lock is not None: if isinstance(lock,threading._RLock): state["_lock"] = True else: state["_lock"] = False return state def __setstate__(self,state): for (k,v) in state.iteritems(): self.__dict__[k] = v lock = state.get("_lock",None) if lock is not None: if lock: self._lock = threading.RLock() else: self._lock = DummyLock() def getsyspath(self, path, allow_none=False): """Returns the system path (a path recognised by the OS) if present. If the path does not map to a system path (and allow_none is False) then a NoSysPathError exception is thrown. Otherwise, the system path will be returned as a unicode string. :param path: a path within the filesystem :param allow_none: if True, this method will return None when there is no system path, rather than raising NoSysPathError :type allow_none: bool :raises NoSysPathError: If the path does not map on to a system path, and allow_none is set to False (default) :rtype: unicode """ if not allow_none: raise NoSysPathError(path=path) return None def hassyspath(self, path): """Check if the path maps to a system path (a path recognised by the OS). :param path: -- path to check :returns: True if `path` maps to a system path :rtype: bool """ return self.getsyspath(path, allow_none=True) is not None def open(self, path, mode="r", **kwargs): """Open a the given path as a file-like object. :param path: a path to file that should be opened :param mode: ,ode of file to open, identical to the mode string used in 'file' and 'open' builtins :param kwargs: additional (optional) keyword parameters that may be required to open the file :rtype: a file-like object """ raise UnsupportedError("open file") def safeopen(self, *args, **kwargs): """Like 'open', but returns a NullFile if the file could not be opened. A NullFile is a dummy file which has all the methods of a file-like object, but contains no data. :rtype: file-like object """ try: f = self.open(*args, **kwargs) except ResourceNotFoundError: return NullFile() return f def exists(self, path): """Check if a path references a valid resource. :param path: A path in the filessystem :rtype: bool """ return self.isfile(path) or self.isdir(path) def isdir(self, path): """Check if a path references a directory. :param path: a path in the filessystem :rtype: bool """ raise UnsupportedError("check for directory") def isfile(self, path): """Check if a path references a file. :param path: a path in the filessystem :rtype: bool """ raise UnsupportedError("check for file") def __iter__(self): """ Iterates over paths returned by listdir method with default params. """ for f in self.listdir(): yield f def listdir(self, path="./", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False): """Lists the the files and directories under a given path. The directory contents are returned as a list of unicode paths. :param path: root of the path to list :type path: string :param wildcard: Only returns paths that match this wildcard :type wildcard: string :param full: returns full paths (relative to the root) :type full: bool :param absolute: returns absolute paths (paths begining with /) :type absolute: bool :param dirs_only: if True, only return directories :type dirs_only: bool :param files_only: if True, only return files :type files_only: bool :rtype: iterable of paths :raises ResourceNotFoundError: if the path is not found :raises ResourceInvalidError: if the path exists, but is not a directory """ raise UnsupportedError("list directory") def listdirinfo(self, path="./", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False): """Retrieves an iterable of paths and path info (as returned by getinfo) under a given path. :param path: root of the path to list :param wildcard: filter paths that match this wildcard :param dirs_only: only retrive directories :type dirs_only: bool :param files_only: only retrieve files :type files_only: bool :raises ResourceNotFoundError: If the path is not found :raises ResourceInvalidError: If the path exists, but is not a directory """ def get_path(p): if not full or absolute: return pathjoin(path, p) return [(p, self.getinfo(get_path(p))) for p in self._listdir( path, widcard=wildcard, full=full, absolute=absolute, dirs_only=dirs_only, files_only=files_only )] def _listdir_helper(self, path, entries, wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False): """A helper method called by listdir method that applies filtering. Given the path to a directory and a list of the names of entries within that directory, this method applies the semantics of the listdir() keyword arguments. An appropriately modified and filtered list of directory entries is returned. """ if dirs_only and files_only: raise ValueError("dirs_only and files_only can not both be True") if wildcard is not None: match = fnmatch.fnmatch entries = [p for p in entries if match(p, wildcard)] if dirs_only: entries = [p for p in entries if self.isdir(pathjoin(path, p))] elif files_only: entries = [p for p in entries if self.isfile(pathjoin(path, p))] if full: entries = [pathjoin(path, p) for p in entries] elif absolute: entries = [abspath(pathjoin(path, p)) for p in entries] return entries def makedir(self, path, recursive=False, allow_recreate=False): """Make a directory on the filesystem. :param path: path of directory :param recursive: if True, any intermediate directories will also be created :type recursive: bool :param allow_recreate: if True, re-creating a directory wont be an error :type allow_create: bool :raises DestinationExistsError: if the path is already a directory, and allow_recreate is False :raises ParentDirectoryMissingError: if a containing directory is missing and recursive is False :raises ResourceInvalidError: if a path is an existing file """ raise UnsupportedError("make directory") def remove(self, path): """Remove a file from the filesystem. :param path: Path of the resource to remove :raises ResourceNotFoundError: if the path does not exist :raises ResourceInvalidError: if the path is a directory """ raise UnsupportedError("remove resource") def removedir(self, path, recursive=False, force=False): """Remove a directory from the filesystem :param path: path of the directory to remove :param recursive: pf True, then empty parent directories will be removed :type recursive: bool :param force: if True, any directory contents will be removed :type force: bool :raises ResourceNotFoundError: If the path does not exist :raises ResourceInvalidError: If the path is not a directory :raises DirectoryNotEmptyError: If the directory is not empty and force is False """ raise UnsupportedError("remove directory") def rename(self, src, dst): """Renames a file or directory :param src: path to rename :param dst: new name """ raise UnsupportedError("rename resource") def settimes(self, path, accessed_time=None, modified_time=None): """Set the accessed time and modified time of a file :param path: path to a file :param accessed_time: a datetime object the file was accessed (defaults to current time) :param modified_time: a datetime object the file was modified (defaults to current time) """ sys_path = self.getsyspath(path, allow_none=True) if sys_path is not None: now = datetime.datetime.now() if accessed_time is None: accessed_time = now if modified_time is None: modified_time = now accessed_time = int(time.mktime(accessed_time.timetuple())) modified_time = int(time.mktime(modified_time.timetuple())) os.utime(sys_path, (accessed_time, modified_time)) return True else: raise UnsupportedError("settimes") def getinfo(self, path): """Returns information for a path as a dictionary. The exact content of this dictionary will vary depending on the implementation, but will likely include a few common values. :param path: a path to retrieve information for :rtype: dict """ raise UnsupportedError("get resource info") def desc(self, path): """Returns short descriptive text regarding a path. Intended mainly as a debugging aid :param path: A path to describe :rtype: str """ if not self.exists(path): return '' try: sys_path = self.getsyspath(path) except NoSysPathError: return "No description available" if self.isdir(path): return "OS dir, maps to %s" % sys_path else: return "OS file, maps to %s" % sys_path def getcontents(self, path): """Returns the contents of a file as a string. :param path: A path of file to read :rtype: str :returns: file contents """ f = None try: f = self.open(path, "rb") contents = f.read() return contents finally: if f is not None: f.close() def createfile(self, path, data=""): """A convenience method to create a new file from a string. :param path: a path of the file to create :param data: a string or a file-like object containing the contents for the new file """ f = None try: f = self.open(path, 'wb') if hasattr(data,"read"): chunk = data.read(1024*512) while chunk: f.write(chunk) chunk = data.read(1024*512) else: f.write(data) f.flush() finally: if f is not None: f.close() setcontents = createfile def opendir(self, path): """Opens a directory and returns a FS object representing its contents. :param path: path to directory to open :rtype: An FS object """ if not self.exists(path): raise ResourceNotFoundError(path) sub_fs = SubFS(self, path) return sub_fs def walk(self, path="/", wildcard=None, dir_wildcard=None, search="breadth", ignore_errors=False): """Walks a directory tree and yields the root path and contents. Yields a tuple of the path of each directory and a list of its file contents. :param path: root path to start walking :param wildcard: if given, only return files that match this wildcard :param dir_wildcard: if given, only walk directories that match the wildcard :param search: -- a string dentifying the method used to walk the directories. There are two such methods: * 'breadth' Yields paths in the top directories first * 'depth' Yields the deepest paths first :param ignore_errors: ignore any errors reading the directory """ def listdir(path, *args, **kwargs): if ignore_errors: try: return self.listdir(path, *args, **kwargs) except: return [] else: return self.listdir(path, *args, **kwargs) if search == "breadth": dirs = [path] while dirs: current_path = dirs.pop() paths = [] for filename in listdir(current_path): path = pathjoin(current_path, filename) if self.isdir(path): if dir_wildcard is not None: if fnmatch.fnmatch(path, dir_wilcard): dirs.append(path) else: dirs.append(path) else: if wildcard is not None: if fnmatch.fnmatch(path, wildcard): paths.append(filename) else: paths.append(filename) yield (current_path, paths) elif search == "depth": def recurse(recurse_path): for path in listdir(recurse_path, wildcard=dir_wildcard, full=True, dirs_only=True): for p in recurse(path): yield p yield (recurse_path, self.listdir(recurse_path, wildcard=wildcard, files_only=True)) for p in recurse(path): yield p else: raise ValueError("Search should be 'breadth' or 'depth'") def walkfiles(self, path="/", wildcard=None, dir_wildcard=None, search="breadth", ignore_errors=False ): """Like the 'walk' method, but just yields files. :param path: root path to start walking :param wildcard: if given, only return files that match this wildcard :param dir_wildcard: if given, only walk directories that match the wildcard :param search: same as walk method :param ignore_errors: ignore any errors reading the directory """ for path, files in self.walk(path, wildcard=wildcard, dir_wildcard=dir_wildcard, search=search, ignore_errors=ignore_errors): for f in files: yield pathjoin(path, f) def walkdirs(self, path="/", wildcard=None, search="breadth", ignore_errors=False): """Like the 'walk' method but yields directories. :param path: root path to start walking :param wildcard: if given, only return dictories that match this wildcard :param search: same as the walk method :param ignore_errors: ignore any errors reading the directory """ for p, files in self.walk(path, wildcard=wildcard, search=search, ignore_errors=ignore_errors): yield p def getsize(self, path): """Returns the size (in bytes) of a resource. :param path: a path to the resource :rtype: integer :returns: the size of the file """ info = self.getinfo(path) size = info.get('size', None) if size is None: raise OperationFailedError("get size of resource", path) return size def copy(self, src, dst, overwrite=False, chunk_size=16384): """Copies a file from src to dst. :param src: the source path :param dst: the destination path :param overwrite: if True, then an existing file at the destination may be overwritten; If False then DestinationExistsError will be raised. :param chunk_size: size of chunks to use if a simple copy is required (defaults to 16K). """ if not self.isfile(src): if self.isdir(src): raise ResourceInvalidError(src,msg="Source is not a file: %(path)s") raise ResourceNotFoundError(src) if not overwrite and self.exists(dst): raise DestinationExistsError(dst) src_syspath = self.getsyspath(src, allow_none=True) dst_syspath = self.getsyspath(dst, allow_none=True) if src_syspath is not None and dst_syspath is not None: self._shutil_copyfile(src_syspath, dst_syspath) else: src_file, dst_file = None, None try: src_file = self.open(src, "rb") dst_file = self.open(dst, "wb") while True: chunk = src_file.read(chunk_size) dst_file.write(chunk) if len(chunk) != chunk_size: break finally: if src_file is not None: src_file.close() if dst_file is not None: dst_file.close() @convert_os_errors def _shutil_copyfile(self, src_syspath, dst_syspath): try: shutil.copyfile(src_syspath, dst_syspath) except IOError, e: # shutil reports ENOENT when a parent directory is missing if getattr(e,"errno",None) == 2: if not os.path.exists(dirname(dst_syspath)): raise ParentDirectoryMissingError(dst_syspath) raise def move(self, src, dst, overwrite=False, chunk_size=16384): """moves a file from one location to another. :param src: source path :param dst: destination path :param overwrite: if True, then an existing file at the destination path will be silently overwritten; if False then an exception will be raised in this case. :type overwrite: bool :param chunk_size: Size of chunks to use when copying, if a simple copy is required :type chunk_size: integer """ src_syspath = self.getsyspath(src, allow_none=True) dst_syspath = self.getsyspath(dst, allow_none=True) # Try to do an os-level rename if possible. # Otherwise, fall back to copy-and-remove. if src_syspath is not None and dst_syspath is not None: if not os.path.isfile(src_syspath): if os.path.isdir(src_syspath): raise ResourceInvalidError(src, msg="Source is not a file: %(path)s") raise ResourceNotFoundError(src) if not overwrite and os.path.exists(dst_syspath): raise DestinationExistsError(dst) try: os.rename(src_syspath, dst_syspath) return except OSError: pass self.copy(src, dst, overwrite=overwrite, chunk_size=chunk_size) self.remove(src) def movedir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=16384): """moves a directory from one location to another. :param src: source directory path :param dst: destination directory path :param overwrite: if True then any existing files in the destination directory will be overwritten :param ignore_errors: if True then this method will ignore FSError exceptions when moving files :param chunk_size: size of chunks to use when copying, if a simple copy is required """ if not self.isdir(src): raise ResourceInvalidError(src, msg="Source is not a directory: %(path)s") if not overwrite and self.exists(dst): raise DestinationExistsError(dst) src_syspath = self.getsyspath(src, allow_none=True) dst_syspath = self.getsyspath(dst, allow_none=True) if src_syspath is not None and dst_syspath is not None: try: os.rename(src_syspath,dst_syspath) return except OSError: pass def movefile_noerrors(src, dst, **kwargs): try: return self.move(src, dst, **kwargs) except FSError: return if ignore_errors: movefile = movefile_noerrors else: movefile = self.move src = abspath(src) dst = abspath(dst) if dst: self.makedir(dst, allow_recreate=overwrite) for dirname, filenames in self.walk(src, search="depth"): dst_dirname = relpath(frombase(src, abspath(dirname))) dst_dirpath = pathjoin(dst, dst_dirname) self.makedir(dst_dirpath, allow_recreate=True, recursive=True) for filename in filenames: src_filename = pathjoin(dirname, filename) dst_filename = pathjoin(dst_dirpath, filename) movefile(src_filename, dst_filename, overwrite=overwrite, chunk_size=chunk_size) self.removedir(dirname) def copydir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=16384): """copies a directory from one location to another. :param src: source directory path :param dst: destination directory path :param overwrite: if True then any existing files in the destination directory will be overwritten :type overwrite: bool :param ignore_errors: if True, exceptions when copying will be ignored :type ignore_errors: bool :param chunk_size: size of chunks to use when copying, if a simple copy is required (defaults to 16K) """ if not self.isdir(src): raise ResourceInvalidError(src, msg="Source is not a directory: %(path)s") def copyfile_noerrors(src, dst, **kwargs): try: return self.copy(src, dst, **kwargs) except FSError: return if ignore_errors: copyfile = copyfile_noerrors else: copyfile = self.copy src = abspath(src) dst = abspath(dst) if not overwrite and self.exists(dst): raise DestinationExistsError(dst) if dst: self.makedir(dst, allow_recreate=overwrite) for dirname, filenames in self.walk(src): dst_dirname = relpath(frombase(src, abspath(dirname))) dst_dirpath = pathjoin(dst, dst_dirname) self.makedir(dst_dirpath, allow_recreate=True, recursive=True) for filename in filenames: src_filename = pathjoin(dirname, filename) dst_filename = pathjoin(dst_dirpath, filename) copyfile(src_filename, dst_filename, overwrite=overwrite, chunk_size=chunk_size) def isdirempty(self, path): """Check if a directory is empty (contains no files or sub-directories) :param path: a directory path :rtype: bool """ path = normpath(path) iter_dir = iter(self.listdir(path)) try: iter_dir.next() except StopIteration: return True return False def makeopendir(self, path, recursive=False): """makes a directory (if it doesn't exist) and returns an FS object for the newly created directory. :param path: path to the new directory :param recursive: if True any intermediate directories will be created """ self.makedir(path, allow_recreate=True, recursive=recursive) dir_fs = self.opendir(path) return dir_fs def printtree(self, max_levels=5): """Prints a tree structure of the FS object to the console :param max_levels: The maximum sub-directories to display, defaults to 5. Set to None for no limit """ from fs.utils import print_fs print_fs(self, max_levels=max_levels) tree = printtree def browse(self): """Displays the FS tree in a graphical window (requires wxWidgets)""" from fs.browsewin import browse browse(self) class SubFS(FS): """A SubFS represents a sub directory of another filesystem object. SubFS objects are returned by opendir, which effectively creates a 'sandbox' filesystem that can only access files/dirs under a root path within its 'parent' dir. """ def __init__(self, parent, sub_dir): self.parent = parent self.sub_dir = abspath(normpath(sub_dir)) FS.__init__(self, thread_synchronize=False) def __str__(self): return "" % (self.sub_dir, self.parent) def __unicode__(self): return u"" % (self.sub_dir, self.parent) def __repr__(self): return str(self) def desc(self, path): if self.isdir(path): return "Sub dir of %s" % str(self.parent) else: return "File in sub dir of %s" % str(self.parent) def _delegate(self, path): return pathjoin(self.sub_dir, relpath(normpath(path))) def getsyspath(self, path, allow_none=False): return self.parent.getsyspath(self._delegate(path), allow_none=allow_none) def open(self, path, mode="r", **kwargs): return self.parent.open(self._delegate(path), mode) def exists(self, path): return self.parent.exists(self._delegate(path)) def opendir(self, path): if not self.exists(path): raise ResourceNotFoundError(path) path = self._delegate(path) sub_fs = self.parent.opendir(path) return sub_fs def isdir(self, path): return self.parent.isdir(self._delegate(path)) def isfile(self, path): return self.parent.isfile(self._delegate(path)) def listdir(self, path="./", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False): paths = self.parent.listdir(self._delegate(path), wildcard=wildcard, full=False, absolute=False, dirs_only=dirs_only, files_only=files_only) if absolute: listpath = normpath(path) paths = [abspath(pathjoin(listpath, path)) for path in paths] elif full: listpath = normpath(path) paths = [relpath(pathjoin(listpath, path)) for path in paths] return paths def makedir(self, path, recursive=False, allow_recreate=False): return self.parent.makedir(self._delegate(path), recursive=recursive, allow_recreate=allow_recreate) def remove(self, path): return self.parent.remove(self._delegate(path)) def removedir(self, path, recursive=False,force=False): # Careful not to recurse outside the subdir if path in ("","/"): if force: for path2 in self.listdir(path,absolute=True,files_only=True): try: self.remove(path2) except ResourceNotFoundError: pass for path2 in self.listdir(path,absolute=True,dirs_only=True): try: self.removedir(path2,force=True) except ResourceNotFoundError: pass else: self.parent.removedir(self._delegate(path),force=force) if recursive: try: self.removedir(dirname(path),recursive=True) except DirectoryNotEmptyError: pass def settimes(self, path, accessed_time=None, modified_time=None): return self.parent.settimes(self._delegate(path), accessed_time, modified_time) def getinfo(self, path): return self.parent.getinfo(self._delegate(path)) def getsize(self, path): return self.parent.getsize(self._delegate(path)) def rename(self, src, dst): return self.parent.rename(self._delegate(src), self._delegate(dst)) def move(self, src, dst, **kwds): self.parent.move(self._delegate(src),self._delegate(dst),**kwds) def movedir(self, src, dst, **kwds): self.parent.movedir(self._delegate(src),self._delegate(dst),**kwds) def copy(self, src, dst, **kwds): self.parent.copy(self._delegate(src),self._delegate(dst),**kwds) def copydir(self, src, dst, **kwds): self.parent.copydir(self._delegate(src),self._delegate(dst),**kwds) def createfile(self, path, data=""): return self.parent.createfile(self._delegate(path),data) def setcontents(self, path, data=""): return self.parent.setcontents(self._delegate(path),data) def getcontents(self, path): return self.parent.getcontents(self._delegate(path)) def flags_to_mode(flags): """Convert an os.O_* flag bitmask into an FS mode string.""" if flags & os.O_EXCL: raise UnsupportedError("open",msg="O_EXCL is not supported") if flags & os.O_WRONLY: if flags & os.O_TRUNC: mode = "w" elif flags & os.O_APPEND: mode = "a" else: mode = "r+" elif flags & os.O_RDWR: if flags & os.O_TRUNC: mode = "w+" elif flags & os.O_APPEND: mode = "a+" else: mode = "r+" else: mode = "r" return mode fs-0.3.0/fs/multifs.py0000644000175000017500000001472111407421554013277 0ustar willwill""" fs.multifs ========== A MultiFS is a filesytem composed of a sequence of other filesystems, where the directory structure of each filesystem is overlaid over the previous filesystem. When you attempt to access a file from the MultiFS it will try each 'child' FS in order, until it either finds a path that exists or raises a ResourceNotFoundError. One use for such a filesystem would be to selectively override a set of files, to customize behaviour. For example, to create a filesystem that could be used to *theme* a web application. We start with the following directories:: `-- templates |-- snippets | `-- panel.html |-- index.html |-- profile.html `-- base.html `-- theme |-- snippets | |-- widget.html | `-- extra.html |-- index.html `-- theme.html And we want to create a single filesystem that looks for files in `templates` if they don't exist in `theme`. We can do this with the following code:: from fs.osfs import OSFS from fs.multifs import MultiFS themed_template_fs.addfs('templates', OSFS('templates')) themed_template_fs.addfs('theme', OSFS('themes')) Now we have a `themed_template_fs` FS object presents a single view of both directories:: |-- snippets | |-- panel.html | |-- widget.html | `-- extra.html |-- index.html |-- profile.html |-- base.html `-- theme.html """ from fs.base import FS, FSError, synchronize from fs.path import * from fs import _thread_synchronize_default from fs.errors import ResourceNotFoundError class MultiFS(FS): """A MultiFS is a filesystem that delegates to a sequence of other filesystems. Operations on the MultiFS will try each 'child' filesystem in order, until it succeeds. In effect, creating a filesystem that combines the files and dirs of its children. """ def __init__(self): super(MultiFS, self).__init__(thread_synchronize=_thread_synchronize_default) self.fs_sequence = [] self.fs_lookup = {} @synchronize def __str__(self): return "" % ", ".join(str(fs) for fs in self.fs_sequence) __repr__ = __str__ @synchronize def __unicode__(self): return u"" % ", ".join(unicode(fs) for fs in self.fs_sequence) @synchronize def addfs(self, name, fs): """Adds a filesystem to the MultiFS. :param name: A unique name to refer to the filesystem being added :param fs: The filesystem to add """ if name in self.fs_lookup: raise ValueError("Name already exists.") self.fs_sequence.append(fs) self.fs_lookup[name] = fs @synchronize def removefs(self, name): """Removes a filesystem from the sequence. :param name: The name of the filesystem, as used in addfs """ if name not in self.fs_lookup: raise ValueError("No filesystem called '%s'"%name) fs = self.fs_lookup[name] self.fs_sequence.remove(fs) del self.fs_lookup[name] @synchronize def __getitem__(self, name): return self.fs_lookup[name] @synchronize def __iter__(self): return reversed(self.fs_sequence[:]) def _delegate_search(self, path): for fs in self: if fs.exists(path): return fs return None @synchronize def which(self, path): """Retrieves the filesystem that a given path would delegate to. Returns a tuple of the filesystem's name and the filesystem object itself. :param path: A path in MultiFS """ for fs in self: if fs.exists(path): for fs_name, fs_object in self.fs_lookup.iteritems(): if fs is fs_object: return fs_name, fs raise ResourceNotFoundError(path, msg="Path does not map to any filesystem: %(path)s") @synchronize def getsyspath(self, path, allow_none=False): fs = self._delegate_search(path) if fs is not None: return fs.getsyspath(path, allow_none=allow_none) raise ResourceNotFoundError(path) @synchronize def desc(self, path): if not self.exists(path): raise ResourceNotFoundError(path) name, fs = self.which(path) if name is None: return "" return "%s, on %s (%s)" % (fs.desc(path), name, fs) @synchronize def open(self, path, mode="r", **kwargs): for fs in self: if fs.exists(path): fs_file = fs.open(path, mode, **kwargs) return fs_file raise ResourceNotFoundError(path) @synchronize def exists(self, path): return self._delegate_search(path) is not None @synchronize def isdir(self, path): fs = self._delegate_search(path) if fs is not None: return fs.isdir(path) return False @synchronize def isfile(self, path): fs = self._delegate_search(path) if fs is not None: return fs.isfile(path) return False @synchronize def listdir(self, path="./", *args, **kwargs): paths = [] for fs in self: try: paths += fs.listdir(path, *args, **kwargs) except FSError, e: pass return list(set(paths)) @synchronize def remove(self, path): for fs in self: if fs.exists(path): fs.remove(path) return raise ResourceNotFoundError(path) @synchronize def removedir(self, path, recursive=False): for fs in self: if fs.isdir(path): fs.removedir(path, recursive) return raise ResourceNotFoundError(path) @synchronize def rename(self, src, dst): for fs in self: if fs.exists(src): fs.rename(src, dst) return raise ResourceNotFoundError(path) @synchronize def settimes(self, path, accessed_time=None, modified_time=None): for fs in self: if fs.exists(path): return fs.settimes(path, accessed_time, modified_time) raise ResourceNotFoundError(path) @synchronize def getinfo(self, path): for fs in self: if fs.exists(path): return fs.getinfo(path) raise ResourceNotFoundError(path) fs-0.3.0/fs/mountfs.py0000644000175000017500000003221311407377715013314 0ustar willwill""" fs.mountfs ========== Contains MountFS class which is a virtual filesystem which can have other filesystems linked as branched directories. For example, lets say we have two filesystems containing config files and resource respectively:: [config_fs] |-- config.cfg `-- defaults.cfg [resources_fs] |-- images | |-- logo.jpg | `-- photo.jpg `-- data.dat We can combine these filesystems in to a single filesystem with the following code:: from fs.mountfs import MountFS combined_fs = MountFS combined_fs.mountdir('config', config_fs) combined_fs.mountdir('resources', resources_fs) This will create a single filesystem where paths under `config` map to `config_fs`, and paths under `resources` map to `resources_fs`:: [combined_fs] |-- config | |-- config.cfg | `-- defaults.cfg `-- resources |-- images | |-- logo.jpg | `-- photo.jpg `-- data.dat Now both filesystems can be accessed with the same path structure:: print combined_fs.getcontents('/config/defaults.cfg') read_jpg(combined_fs.open('/resources/images/logo.jpg') """ from fs.base import * from fs.objecttree import ObjectTree from fs import _thread_synchronize_default class DirMount(object): def __init__(self, path, fs): self.path = path self.fs = fs def __str__(self): return "Mount point: %s"%self.path class FileMount(object): def __init__(self, path, open_callable, info_callable=None): self.open_callable = open_callable def no_info_callable(path): return {} self.info_callable = info_callable or no_info_callable class MountFS(FS): """A filesystem that delegates to other filesystems.""" DirMount = DirMount FileMount = FileMount def __init__(self, thread_synchronize=_thread_synchronize_default): super(MountFS, self).__init__(thread_synchronize=thread_synchronize) self.mount_tree = ObjectTree() def __str__(self): return "" __repr__ = __str__ def __unicode__(self): return unicode(self.__str__()) def _delegate(self, path): path = normpath(path) head_path, object, tail_path = self.mount_tree.partialget(path) if type(object) is MountFS.DirMount: dirmount = object return dirmount.fs, head_path, tail_path if object is None: return None, None, None return self, head_path, tail_path def getsyspath(self, path, allow_none=False): fs, mount_path, delegate_path = self._delegate(path) if fs is self or fs is None: if allow_none: return None else: raise NoSysPathError(path=path) return fs.getsyspath(delegate_path, allow_none=allow_none) @synchronize def desc(self, path): fs, mount_path, delegate_path = self._delegate(path) if fs is self: if fs.isdir(path): return "Mount dir" else: return "Mounted file" return "Mounted dir, maps to path %s on %s" % (delegate_path, str(fs)) @synchronize def isdir(self, path): fs, mount_path, delegate_path = self._delegate(path) if fs is None: return False if fs is self: object = self.mount_tree.get(path, None) return isinstance(object, dict) else: return fs.isdir(delegate_path) @synchronize def isfile(self, path): fs, mount_path, delegate_path = self._delegate(path) if fs is None: return False if fs is self: object = self.mount_tree.get(path, None) return type(object) is MountFS.FileMount else: return fs.isfile(delegate_path) @synchronize def listdir(self, path="/", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False): path = normpath(path) fs, mount_path, delegate_path = self._delegate(path) if fs is None: raise ResourceNotFoundError(path) if fs is self: if files_only: return [] paths = self.mount_tree[path].keys() return self._listdir_helper(path, paths, wildcard, full, absolute, dirs_only, files_only) else: paths = fs.listdir(delegate_path, wildcard=wildcard, full=False, absolute=False, dirs_only=dirs_only, files_only=files_only) if full or absolute: if full: path = abspath(normpath(path)) else: path = relpath(normpath(path)) paths = [pathjoin(path, p) for p in paths] return paths @synchronize def makedir(self, path, recursive=False, allow_recreate=False): path = normpath(path) fs, mount_path, delegate_path = self._delegate(path) if fs is self: raise UnsupportedError("make directory", msg="Can only makedir for mounted paths" ) if not delegate_path: return True return fs.makedir(delegate_path, recursive=recursive, allow_recreate=allow_recreate) @synchronize def open(self, path, mode="r", **kwargs): path = normpath(path) object = self.mount_tree.get(path, None) if type(object) is MountFS.FileMount: callable = object.open_callable return callable(path, mode, **kwargs) fs, mount_path, delegate_path = self._delegate(path) if fs is None: raise ResourceNotFoundError(path) return fs.open(delegate_path, mode, **kwargs) @synchronize def setcontents(self, path, contents): path = normpath(path) object = self.mount_tree.get(path, None) if type(object) is MountFS.FileMount: return super(MountFS,self).setcontents(path,contents) fs, mount_path, delegate_path = self._delegate(path) if fs is None: raise ParentDirectoryMissingError(path) return fs.setcontents(delegate_path,contents) @synchronize def exists(self, path): path = normpath(path) fs, mount_path, delegate_path = self._delegate(path) if fs is None: return False if fs is self: return path in self.mount_tree return fs.exists(delegate_path) @synchronize def remove(self, path): path = normpath(path) fs, mount_path, delegate_path = self._delegate(path) if fs is None: raise ResourceNotFoundError(path) if fs is self: raise UnsupportedError("remove file", msg="Can only remove paths within a mounted dir") return fs.remove(delegate_path) @synchronize def removedir(self, path, recursive=False, force=False): path = normpath(path) fs, mount_path, delegate_path = self._delegate(path) if fs is None or fs is self: raise ResourceInvalidError(path, msg="Can not removedir for an un-mounted path") if not force and not fs.isdirempty(delegate_path): raise DirectoryNotEmptyError("Directory is not empty: %(path)s") return fs.removedir(delegate_path, recursive, force) @synchronize def rename(self, src, dst): fs1, mount_path1, delegate_path1 = self._delegate(src) fs2, mount_path2, delegate_path2 = self._delegate(dst) if fs1 is not fs2: raise OperationFailedError("rename resource", path=src) if fs1 is not self: return fs1.rename(delegate_path1, delegate_path2) path_src = normpath(src) path_dst = normpath(dst) object = self.mount_tree.get(path_src, None) object2 = self.mount_tree.get(path_dst, None) if object1 is None: raise ResourceNotFoundError(src) # TODO! raise UnsupportedError("rename resource", path=src) @synchronize def move(self,src,dst,**kwds): fs1, mount_path1, delegate_path1 = self._delegate(src) fs2, mount_path2, delegate_path2 = self._delegate(dst) if fs1 is fs2 and fs1 is not self: fs1.move(delegate_path1,delegate_path2,**kwds) else: super(MountFS,self).move(src,dst,**kwds) @synchronize def movedir(self,src,dst,**kwds): fs1, mount_path1, delegate_path1 = self._delegate(src) fs2, mount_path2, delegate_path2 = self._delegate(dst) if fs1 is fs2 and fs1 is not self: fs1.movedir(delegate_path1,delegate_path2,**kwds) else: super(MountFS,self).movedir(src,dst,**kwds) @synchronize def copy(self,src,dst,**kwds): fs1, mount_path1, delegate_path1 = self._delegate(src) fs2, mount_path2, delegate_path2 = self._delegate(dst) if fs1 is fs2 and fs1 is not self: fs1.copy(delegate_path1,delegate_path2,**kwds) else: super(MountFS,self).copy(src,dst,**kwds) @synchronize def copydir(self,src,dst,**kwds): fs1, mount_path1, delegate_path1 = self._delegate(src) fs2, mount_path2, delegate_path2 = self._delegate(dst) if fs1 is fs2 and fs1 is not self: fs1.copydir(delegate_path1,delegate_path2,**kwds) else: super(MountFS,self).copydir(src,dst,**kwds) @synchronize def mountdir(self, path, fs): """Mounts a host FS object on a given path. :param path: A path within the MountFS :param fs: A filesystem object to mount """ path = normpath(path) self.mount_tree[path] = MountFS.DirMount(path, fs) mount = mountdir @synchronize def mountfile(self, path, open_callable=None, info_callable=None): """Mounts a single file path. :param path: A path within the MountFS :param open_Callable: A callable that returns a file-like object :param info_callable: A callable that returns a dictionary with information regarding the file-like object """ path = normpath(path) self.mount_tree[path] = MountFS.FileMount(path, callable, info_callable) @synchronize def unmount(self, path): """Unmounts a path. :param path: Path to unmount """ path = normpath(path) del self.mount_tree[path] @synchronize def settimes(self, path, accessed_time=None, modified_time=None): path = normpath(path) fs, mount_path, delegate_path = self._delegate(path) if fs is None: raise ResourceNotFoundError(path) if fs is self: raise UnsupportedError("settimes") fs.settimes(delegate_path, accessed_time, modified_time) @synchronize def getinfo(self, path): path = normpath(path) fs, mount_path, delegate_path = self._delegate(path) if fs is None: raise ResourceNotFoundError(path) if fs is self: if self.isfile(path): return self.mount_tree[path].info_callable(path) return {} return fs.getinfo(delegate_path) @synchronize def getsize(self, path): path = normpath(path) fs, mount_path, delegate_path = self._delegate(path) if fs is None: raise ResourceNotFoundError(path) if fs is self: object = self.mount_tree.get(path, None) if object is None or isinstance(object, dict): raise ResourceNotFoundError(path) size = self.mount_tree[path].info_callable(path).get("size", None) return size return fs.getinfo(delegate_path).get("size", None) @synchronize def getxattr(self,path,name,default=None): path = normpath(path) fs, mount_path, delegate_path = self._delegate(path) if fs is None: raise ResourceNotFoundError(path) if fs is self: return default return fs.getxattr(delegate_path,name,default) @synchronize def setxattr(self,path,name,value): path = normpath(path) fs, mount_path, delegate_path = self._delegate(path) if fs is None: raise ResourceNotFoundError(path) if fs is self: raise UnsupportedError("setxattr") return fs.setxattr(delegate_path,name,value) @synchronize def delxattr(self,path,name): path = normpath(path) fs, mount_path, delegate_path = self._delegate(path) if fs is None: raise ResourceNotFoundError(path) if fs is self: return True return fs.delxattr(delegate_path,name) @synchronize def listxattrs(self,path): path = normpath(path) fs, mount_path, delegate_path = self._delegate(path) if fs is None: raise ResourceNotFoundError(path) if fs is self: return [] return fs.listxattrs(delegate_path) fs-0.3.0/fs/remote.py0000644000175000017500000003640311375762640013120 0ustar willwill""" fs.remote ========= Utilities for interfacing with remote filesystems This module provides reusable utility functions that can be used to construct FS subclasses interfacing with a remote filesystem. These include: * RemoteFileBuffer: a file-like object that locally buffers the contents of a remote file, writing them back on flush() or close(). * ConnectionManagerFS: a WrapFS subclass that tracks the connection state of a remote FS, and allows client code to wait for a connection to be re-established. * CacheFS: a WrapFS subclass that caches file and directory meta-data in memory, to speed access to a remote FS. """ import time import copy from fs.base import FS, threading from fs.wrapfs import WrapFS, wrap_fs_methods from fs.wrapfs.lazyfs import LazyFS from fs.path import * from fs.errors import * try: from tempfile import SpooledTemporaryFile except ImportError: from tempfile import NamedTemporaryFile class SpooledTemporaryFile(NamedTemporaryFile): def __init__(self,max_size=0,*args,**kwds): NamedTemporaryFile.__init__(self,*args,**kwds) class RemoteFileBuffer(object): """File-like object providing buffer for local file operations. Instances of this class manage a local tempfile buffer corresponding to the contents of a remote file. All reads and writes happen locally, with the content being copied to the remote file only on flush() or close(). Writes to the remote file are performed using the setcontents() method on the owning FS object. The intended use-case is for a remote filesystem (e.g. S3FS) to return instances of this class from its open() method, and to provide the file-uploading logic in its setcontents() method, as in the following pseudo-code:: def open(self,path,mode="r"): rf = self._get_remote_file(path) return RemoteFileBuffer(self,path,mode,rf) def setcontents(self,path,file): self._put_remote_file(path,file) The current implementation reads the entire contents of the file into the buffer before returning. Future implementations may pull data into the buffer on demand. """ max_size_in_memory = 1024 * 8 def __init__(self,fs,path,mode,rfile=None): """RemoteFileBuffer constructor. The owning filesystem, path and mode must be provided. If the optional argument 'rfile' is provided, it must be a read()-able object or a string containing the initial file contents. """ self.file = SpooledTemporaryFile(max_size=self.max_size_in_memory) self.fs = fs self.path = path self.mode = mode self.closed = False self._flushed = False if getattr(fs,"_lock",None) is not None: self._lock = fs._lock.__class__() else: self._lock = threading.RLock() if "r" in mode or "+" in mode or "a" in mode: if rfile is not None: if hasattr(rfile,"read"): data = rfile.read(1024*256) while data: self.file.write(data) data = rfile.read(1024*256) else: self.file.write(str(rfile)) if "a" not in mode: self.file.seek(0) def __del__(self): if not self.closed: self.close() def __getattr__(self,name): file = self.__dict__['file'] a = getattr(file, name) if not callable(a): return a @wraps(a) def call_with_lock(*args,**kwds): self._lock.acquire() try: if "write" in name: self._flushed = False return a(*args,**kwds) finally: self._lock.release() setattr(self, name, call_with_lock) return call_with_lock def __enter__(self): self.file.__enter__() return self def __exit__(self,exc,value,tb): self.close() return False def __iter__(self): return iter(self.file) def truncate(self,size=None): self._lock.acquire() try: self.file.truncate(size) self.flush() finally: self._lock.release() def flush(self): self._lock.acquire() try: self.file.flush() if "w" in self.mode or "a" in self.mode or "+" in self.mode: if not self._flushed: pos = self.file.tell() self.file.seek(0) self.fs.setcontents(self.path,self.file) self.file.seek(pos) self._flushed = True finally: self._lock.release() def close(self): self._lock.acquire() try: if not self.closed: self.closed = True if "w" in self.mode or "a" in self.mode or "+" in self.mode: if not self._flushed: self.file.seek(0) self.file.seek(0) self.fs.setcontents(self.path,self.file) self.file.close() finally: self._lock.release() class ConnectionManagerFS(LazyFS): """FS wrapper providing simple connection management of a remote FS. The ConnectionManagerFS class is designed to wrap a remote FS object and provide some convenience methods for dealing with its remote connection state. The boolean attribute 'connected' indicates whether the remote fileystem has an active connection, and is initially True. If any of the remote filesystem methods raises a RemoteConnectionError, 'connected' will switch to False and remain so until a successful remote method call. Application code can use the method 'wait_for_connection' to block until the connection is re-established. Currently this reconnection is checked by a simple polling loop; eventually more sophisticated operating-system integration may be added. Since some remote FS classes can raise RemoteConnectionError during initialisation, this class makes use of lazy initialization. The remote FS can be specified as an FS instance, an FS subclass, or a (class,args) or (class,args,kwds) tuple. For example:: >>> fs = ConnectionManagerFS(MyRemoteFS("http://www.example.com/")) Traceback (most recent call last): ... RemoteConnectionError: couldn't connect to "http://www.example.com/" >>> fs = ConnectionManagerFS((MyRemoteFS,["http://www.example.com/"])) >>> fs.connected False >>> """ poll_interval = 1 def __init__(self,wrapped_fs,poll_interval=None,connected=True): super(ConnectionManagerFS,self).__init__(wrapped_fs) if poll_interval is not None: self.poll_interval = poll_interval self._connection_cond = threading.Condition() self._poll_thread = None self._poll_sleeper = threading.Event() self.connected = connected def setcontents(self,path,data): return self.wrapped_fs.setcontents(path,data) def __getstate__(self): state = super(ConnectionManagerFS,self).__getstate__() del state["_connection_cond"] del state["_poll_sleeper"] state["_poll_thread"] = None return state def __setstate__(self,state): super(ConnectionManagerFS,self).__setstate__(state) self._connection_cond = threading.Condition() self._poll_sleeper = threading.Event() def wait_for_connection(self,timeout=None): self._connection_cond.acquire() try: if not self.connected: if not self._poll_thread: target = self._poll_connection self._poll_thread = threading.Thread(target=target) self._poll_thread.start() self._connection_cond.wait(timeout) finally: self._connection_cond.release() def _poll_connection(self): while not self.connected and not self.closed: try: self.wrapped_fs.isdir("") except RemoteConnectionError: self._poll_sleeper.wait(self.poll_interval) self._poll_sleeper.clear() except FSError: break else: break self._connection_cond.acquire() try: if not self.closed: self.connected = True self._poll_thread = None self._connection_cond.notifyAll() finally: self._connection_cond.release() def close(self): if not self.closed: try: super(ConnectionManagerFS,self).close() except (RemoteConnectionError,): pass if self._poll_thread: self.connected = True self._poll_sleeper.set() self._poll_thread.join() self._poll_thread = None def _ConnectionManagerFS_method_wrapper(func): """Method wrapper for ConnectionManagerFS. This method wrapper keeps an eye out for RemoteConnectionErrors and adjusts self.connected accordingly. """ @wraps(func) def wrapper(self,*args,**kwds): try: result = func(self,*args,**kwds) except RemoteConnectionError: self.connected = False raise except FSError: self.connected = True raise else: self.connected = True return result return wrapper wrap_fs_methods(_ConnectionManagerFS_method_wrapper,ConnectionManagerFS) def _cached_method(func): """Method decorator that caches results for CacheFS.""" @wraps(func) def wrapper(self,path="",*args,**kwds): try: (success,result) = self._cache_get(path,func.__name__,args,kwds) except KeyError: try: res = func(self,path,*args,**kwds) except Exception, e: self._cache_set(path,func.__name__,args,kwds,(False,e)) raise else: self._cache_set(path,func.__name__,args,kwds,(True,res)) return copy.copy(res) else: if not success: raise result else: return copy.copy(result) return wrapper class CacheFS(WrapFS): """Simple wrapper to cache meta-data of a remote filesystems. This FS wrapper implements a simplistic cache that can help speed up access to a remote filesystem. File and directory meta-data is cached but the actual file contents are not. """ def __init__(self,fs,timeout=1): """CacheFS constructor. The optional argument 'timeout' specifies the cache timeout in seconds. The default timeout is 1 second. To prevent cache entries from ever timing out, set it to None. """ self.timeout = timeout self._cache = {"":{}} super(CacheFS,self).__init__(fs) def _path_cache(self,path): cache = self._cache for name in iteratepath(path): cache = cache.setdefault(name,{"":{}}) return cache def _cache_get(self,path,func,args,kwds): now = time.time() cache = self._path_cache(path) key = (tuple(args),tuple(sorted(kwds.iteritems()))) (t,v) = cache[""][func][key] if self.timeout is not None: if t < now - self.timeout: raise KeyError return v def _cache_set(self,path,func,args,kwds,v): t = time.time() cache = self._path_cache(path) key = (tuple(args),tuple(sorted(kwds.iteritems()))) cache[""].setdefault(func,{})[key] = (t,v) def _uncache(self,path,added=False,removed=False,unmoved=False): cache = self._cache names = list(iteratepath(path)) # If it's not the root dir, also clear some items for ancestors if names: # Clear cached 'getinfo' and 'getsize' for all ancestors for name in names[:-1]: cache[""].pop("getinfo",None) cache[""].pop("getsize",None) cache = cache.get(name,None) if cache is None: return # Adjust cached 'listdir' for parent directory. # TODO: account for whether it was added, removed, or unmoved cache[""].pop("getinfo",None) cache[""].pop("getsize",None) cache[""].pop("listdir",None) # Clear all cached info for the path itself. cache[names[-1]] = {"":{}} @_cached_method def exists(self,path): return super(CacheFS,self).exists(path) @_cached_method def isdir(self,path): return super(CacheFS,self).isdir(path) @_cached_method def isfile(self,path): return super(CacheFS,self).isfile(path) @_cached_method def listdir(self,path="",**kwds): return super(CacheFS,self).listdir(path,**kwds) @_cached_method def getinfo(self,path): return super(CacheFS,self).getinfo(path) @_cached_method def getsize(self,path): return super(CacheFS,self).getsize(path) @_cached_method def getxattr(self,path,name,default=None): return super(CacheFS,self).getxattr(path,name,default) @_cached_method def listxattrs(self,path): return super(CacheFS,self).listxattrs(path) def open(self,path,mode="r"): f = super(CacheFS,self).open(path,mode) self._uncache(path,unmoved=True) return f def setcontents(self,path,contents): res = super(CacheFS,self).setcontents(path,contents) self._uncache(path,unmoved=True) return res def getcontents(self,path): res = super(CacheFS,self).getcontents(path) self._uncache(path,unmoved=True) return res def makedir(self,path,**kwds): super(CacheFS,self).makedir(path,**kwds) self._uncache(path,added=True) def remove(self,path): super(CacheFS,self).remove(path) self._uncache(path,removed=True) def removedir(self,path,**kwds): super(CacheFS,self).removedir(path,**kwds) self._uncache(path,removed=True) def rename(self,src,dst): super(CacheFS,self).rename(src,dst) self._uncache(src,removed=True) self._uncache(dst,added=True) def copy(self,src,dst,**kwds): super(CacheFS,self).copy(src,dst,**kwds) self._uncache(dst,added=True) def copydir(self,src,dst,**kwds): super(CacheFS,self).copydir(src,dst,**kwds) self._uncache(dst,added=True) def move(self,src,dst,**kwds): super(CacheFS,self).move(src,dst,**kwds) self._uncache(src,removed=True) self._uncache(dst,added=True) def movedir(self,src,dst,**kwds): super(CacheFS,self).movedir(src,dst,**kwds) self._uncache(src,removed=True) self._uncache(dst,added=True) def setxattr(self,path,name,value): self._uncache(path,unmoved=True) return super(CacheFS,self).setxattr(path,name,value) def delxattr(self,path,name): self._uncache(path,unmoved=True) return super(CacheFS,self).delxattr(path,name) fs-0.3.0/fs/s3fs.py0000644000175000017500000004446511321440534012474 0ustar willwill""" fs.s3fs ======= FS subclass accessing files in Amazon S3 This module provides the class 'S3FS', which implements the FS filesystem interface for objects stored in Amazon Simple Storage Service (S3). """ import time import datetime import tempfile import stat as statinfo import boto.s3.connection from boto.s3.prefix import Prefix from boto.exception import S3ResponseError from fs.base import * from fs.remote import * # Boto is not thread-safe, so we need to use a per-thread S3 connection. if hasattr(threading,"local"): thread_local = threading.local else: class thread_local(object): def __init__(self): self._map = {} def __getattr__(self,attr): try: return self._map[(threading.currentThread(),attr)] except KeyError: raise AttributeError, attr def __setattr__(self,attr,value): self._map[(threading.currentThread(),attr)] = value class S3FS(FS): """A filesystem stored in Amazon S3. This class provides the FS interface for files stored in Amazon's Simple Storage Service (S3). It should be instantiated with the name of the S3 bucket to use, and optionally a prefix under which the files should be stored. Local temporary files are used when opening files from this filesystem, and any changes are only pushed back into S3 when the files are closed or flushed. """ class meta: PATH_MAX = None NAME_MAX = None def __init__(self, bucket, prefix="", aws_access_key=None, aws_secret_key=None, separator="/", thread_synchronize=True,key_sync_timeout=1): """Constructor for S3FS objects. S3FS objects require the name of the S3 bucket in which to store files, and can optionally be given a prefix under which the files shoud be stored. The AWS public and private keys may be specified as additional arguments; if they are not specified they will be read from the two environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY. The keyword argument 'key_sync_timeout' specifies the maximum time in seconds that the filesystem will spend trying to confirm that a newly-uploaded S3 key is available for reading. For no timeout set it to zero. To disable these checks entirely (and thus reduce the filesystem's consistency guarantees to those of S3's "eventual consistency" model) set it to None. By default the path separator is "/", but this can be overridden by specifying the keyword 'separator' in the constructor. """ self._bucket_name = bucket self._access_keys = (aws_access_key,aws_secret_key) self._separator = separator self._key_sync_timeout = key_sync_timeout # Normalise prefix to this form: path/to/files/ prefix = normpath(prefix) while prefix.startswith(separator): prefix = prefix[1:] if not prefix.endswith(separator) and prefix != "": prefix = prefix + separator self._prefix = prefix self._tlocal = thread_local() super(S3FS, self).__init__(thread_synchronize=thread_synchronize) # Make _s3conn and _s3bukt properties that are created on demand, # since they cannot be stored during pickling. def _s3conn(self): try: return self._tlocal.s3conn except AttributeError: c = boto.s3.connection.S3Connection(*self._access_keys) self._tlocal.s3conn = c return c _s3conn = property(_s3conn) def _s3bukt(self): try: return self._tlocal.s3bukt except AttributeError: try: b = self._s3conn.get_bucket(self._bucket_name) except S3ResponseError, e: if "404 Not Found" not in str(e): raise e b = self._s3conn.create_bucket(self._bucket_name) self._tlocal.s3bukt = b return b _s3bukt = property(_s3bukt) def __getstate__(self): state = super(S3FS,self).__getstate__() del state['_tlocal'] return state def __setstate__(self,state): super(S3FS,self).__setstate__(state) self._tlocal = thread_local() def __str__(self): return '' % (self._bucket_name,self._prefix) __repr__ = __str__ def _s3path(self,path): """Get the absolute path to a file stored in S3.""" path = relpath(normpath(path)) path = self._separator.join(iteratepath(path)) s3path = self._prefix + path if s3path and s3path[-1] == self._separator: s3path = s3path[:-1] return s3path def _sync_key(self,k): """Synchronise on contents of the given key. Since S3 only offers "eventual consistency" of data, it is possible to create a key but be unable to read it back straight away. This method works around that limitation by polling the key until it reads back the value expected by the given key. Note that this could easily fail if the key is modified by another program, meaning the content will never be as specified in the given key. This is the reason for the timeout argument to the construtcor. """ timeout = self._key_sync_timeout if timeout is None: return k k2 = self._s3bukt.get_key(k.name) t = time.time() while k2 is None or k2.etag != k.etag: if timeout > 0: if t + timeout < time.time(): break time.sleep(0.1) k2 = self._s3bukt.get_key(k.name) return k2 def _sync_set_contents(self,key,contents): """Synchronously set the contents of a key.""" if isinstance(key,basestring): key = self._s3bukt.new_key(key) if isinstance(contents,basestring): key.set_contents_from_string(contents) else: try: contents.seek(0) except (AttributeError,EnvironmentError): tf = tempfile.TemporaryFile() data = contents.read(524288) while data: tf.write(data) data = contents.read(524288) tf.seek(0) contents = tf key.set_contents_from_file(contents) return self._sync_key(key) def setcontents(self,path,contents): s3path = self._s3path(path) self._sync_set_contents(s3path,contents) def open(self,path,mode="r"): """Open the named file in the given mode. This method downloads the file contents into a local temporary file so that it can be worked on efficiently. Any changes made to the file are only sent back to S3 when the file is flushed or closed. """ s3path = self._s3path(path) # Truncate the file if requested if "w" in mode: k = self._sync_set_contents(s3path,"") else: k = self._s3bukt.get_key(s3path) if k is None: # Create the file if it's missing if "w" not in mode and "a" not in mode: raise ResourceNotFoundError(path) if not self.isdir(dirname(path)): raise ParentDirectoryMissingError(path) k = self._sync_set_contents(s3path,"") return RemoteFileBuffer(self,path,mode,k) def exists(self,path): """Check whether a path exists.""" s3path = self._s3path(path) s3pathD = s3path + self._separator # The root directory always exists if self._prefix.startswith(s3path): return True ks = self._s3bukt.list(prefix=s3path,delimiter=self._separator) for k in ks: # A regular file if k.name == s3path: return True # A directory if k.name == s3pathD: return True return False def isdir(self,path): """Check whether a path exists and is a directory.""" s3path = self._s3path(path) + self._separator # Root is always a directory if s3path == "/" or s3path == self._prefix: return True # Use a list request so that we return true if there are any files # in that directory. This avoids requiring a special file for the # the directory itself, which other tools may not create. ks = self._s3bukt.list(prefix=s3path,delimiter=self._separator) try: iter(ks).next() except StopIteration: return False else: return True def isfile(self,path): """Check whether a path exists and is a regular file.""" s3path = self._s3path(path) # Root is never a file if self._prefix.startswith(s3path): return False k = self._s3bukt.get_key(s3path) if k is not None: return True return False def listdir(self,path="./",wildcard=None,full=False,absolute=False,info=False,dirs_only=False,files_only=False): """List contents of a directory.""" s3path = self._s3path(path) + self._separator if s3path == "/": s3path = "" i = len(s3path) keys = [] isDir = False for k in self._s3bukt.list(prefix=s3path,delimiter=self._separator): if not isDir: isDir = True # Skip over the entry for the directory itself, if it exists if k.name[i:] != "": k.name = k.name[i:] keys.append(k) if not isDir: if s3path != self._prefix: if self.isfile(path): raise ResourceInvalidError(path,msg="that's not a directory: %(path)s") raise ResourceNotFoundError(path) return self._listdir_helper(path,keys,wildcard,full,absolute,info,dirs_only,files_only) def _listdir_helper(self,path,keys,wildcard,full,absolute,info,dirs_only,files_only): """Modify listdir helper to avoid additional calls to the server.""" if dirs_only and files_only: raise ValueError("dirs_only and files_only can not both be True") if dirs_only: keys = [k for k in keys if k.name.endswith(self._separator)] elif files_only: keys = [k for k in keys if not k.name.endswith(self._separator)] for k in keys: if k.name.endswith(self._separator): k.name = k.name[:-1] if type(path) is not unicode: k.name = k.name.encode() if wildcard is not None: keys = [k for k in keys if fnmatch.fnmatch(k.name, wildcard)] if full: entries = [relpath(pathjoin(path, k.name)) for k in keys] elif absolute: entries = [abspath(pathjoin(path, k.name)) for k in keys] elif info: entries = [self._get_key_info(k) for k in keys] else: entries = [k.name for k in keys] return entries def makedir(self,path,recursive=False,allow_recreate=False): """Create a directory at the given path. The 'mode' argument is accepted for compatability with the standard FS interface, but is currently ignored. """ s3path = self._s3path(path) s3pathD = s3path + self._separator if s3pathD == self._prefix: if allow_recreate: return raise DestinationExistsError(path, msg="Can not create a directory that already exists (try allow_recreate=True): %(path)s") s3pathP = self._s3path(dirname(path)) if s3pathP: s3pathP = s3pathP + self._separator # Check various preconditions using list of parent dir ks = self._s3bukt.list(prefix=s3pathP,delimiter=self._separator) if s3pathP == self._prefix: parentExists = True else: parentExists = False for k in ks: if not parentExists: parentExists = True if k.name == s3path: # It's already a file raise ResourceInvalidError(path, msg="Destination exists as a regular file: %(path)s") if k.name == s3pathD: # It's already a directory if allow_recreate: return raise DestinationExistsError(path, msg="Can not create a directory that already exists (try allow_recreate=True): %(path)s") # Create parent if required if not parentExists: if recursive: self.makedir(dirname(path),recursive,allow_recreate) else: raise ParentDirectoryMissingError(path, msg="Parent directory does not exist: %(path)s") # Create an empty file representing the directory # TODO: is there some standard scheme for representing empty dirs? self._sync_set_contents(s3pathD,"") def remove(self,path): """Remove the file at the given path.""" s3path = self._s3path(path) ks = self._s3bukt.list(prefix=s3path,delimiter=self._separator) for k in ks: if k.name == s3path: break if k.name.startswith(s3path + "/"): raise ResourceInvalidError(path,msg="that's not a file: %(path)s") else: raise ResourceNotFoundError(path) self._s3bukt.delete_key(s3path) k = self._s3bukt.get_key(s3path) while k: k = self._s3bukt.get_key(s3path) def removedir(self,path,recursive=False,force=False): """Remove the directory at the given path.""" s3path = self._s3path(path) if s3path != self._prefix: s3path = s3path + self._separator if force: # If we will be forcibly removing any directory contents, we # might as well get the un-delimited list straight away. ks = self._s3bukt.list(prefix=s3path) else: ks = self._s3bukt.list(prefix=s3path,delimiter=self._separator) # Fail if the directory is not empty, or remove them if forced found = False for k in ks: found = True if k.name != s3path: if not force: raise DirectoryNotEmptyError(path) self._s3bukt.delete_key(k.name) if not found: if self.isfile(path): raise ResourceInvalidError(path,msg="removedir() called on a regular file: %(path)s") raise ResourceNotFoundError(path) self._s3bukt.delete_key(s3path) if recursive and path not in ("","/"): pdir = dirname(path) try: self.removedir(pdir,recursive=True,force=False) except DirectoryNotEmptyError: pass def rename(self,src,dst): """Rename the file at 'src' to 'dst'.""" # Actually, in S3 'rename' is exactly the same as 'move' self.move(src,dst) def getinfo(self,path): s3path = self._s3path(path) if path in ("","/"): k = Prefix(bucket=self._s3bukt,name="/") else: k = self._s3bukt.get_key(s3path) if k is None: k = self._s3bukt.get_key(s3path+"/") if k is None: raise ResourceNotFoundError(path) k = Prefix(bucket=self._s3bukt,name=k.name) return self._get_key_info(k) def _get_key_info(self,key): info = {} info["name"] = basename(key.name) if isinstance(key,Prefix): info["st_mode"] = 0700 | statinfo.S_IFDIR else: info["st_mode"] = 0700 | statinfo.S_IFREG if hasattr(key,"size"): info['size'] = int(key.size) if hasattr(key,"last_modified"): # TODO: does S3 use any other formats? fmt = "%a, %d %b %Y %H:%M:%S %Z" try: mtime = datetime.datetime.strptime(key.last_modified,fmt) info['modified_time'] = mtime except ValueError: pass return info def desc(self,path): return "No description available" def copy(self,src,dst,overwrite=False,chunk_size=16384): """Copy a file from 'src' to 'dst'. src -- The source path dst -- The destination path overwrite -- If True, then the destination may be overwritten (if a file exists at that location). If False then an exception will be thrown if the destination exists chunk_size -- Size of chunks to use in copy (ignored by S3) """ s3path_dst = self._s3path(dst) s3path_dstD = s3path_dst + self._separator # Check for various preconditions. ks = self._s3bukt.list(prefix=s3path_dst,delimiter=self._separator) dstOK = False for k in ks: # It exists as a regular file if k.name == s3path_dst: if not overwrite: raise DestinationExistsError(dst) dstOK = True break # Check if it refers to a directory. If so, we copy *into* it. # Since S3 lists in lexicographic order, subsequent iterations # of the loop will check for the existence of the new filename. if k.name == s3path_dstD: nm = basename(src) dst = pathjoin(dirname(dst),nm) s3path_dst = s3path_dstD + nm dstOK = True if not dstOK and not self.isdir(dirname(dst)): raise ParentDirectoryMissingError(dst,msg="Destination directory does not exist: %(path)s") # OK, now we can copy the file. s3path_src = self._s3path(src) try: self._s3bukt.copy_key(s3path_dst,self._bucket_name,s3path_src) except S3ResponseError, e: if "404 Not Found" in str(e): raise ResourceInvalidError(src, msg="Source is not a file: %(path)s") raise e else: k = self._s3bukt.get_key(s3path_dst) self._sync_key(k) def move(self,src,dst,overwrite=False,chunk_size=16384): """Move a file from one location to another.""" self.copy(src,dst,overwrite=overwrite) self._s3bukt.delete_key(self._s3path(src)) def get_total_size(self): """Get total size of all files in this FS.""" return sum(k.size for k in self._s3bukt.list(prefix=self._prefix)) fs-0.3.0/fs/tempfs.py0000644000175000017500000000610111321453762013104 0ustar willwill""" fs.tempfs ========= Make a temporary file system that exists in a folder provided by the OS. All files contained in a TempFS are removed when the `close` method is called (or when the TempFS is cleaned up by Python). """ import os import time import tempfile from fs.osfs import OSFS from fs.errors import * from fs import _thread_synchronize_default class TempFS(OSFS): """Create a Filesystem in a tempory directory (with tempfile.mkdtemp), and removes it when the TempFS object is cleaned up.""" def __init__(self, identifier=None, temp_dir=None, dir_mode=0700, thread_synchronize=_thread_synchronize_default): """Creates a temporary Filesystem identifier -- A string that is included in the name of the temporary directory, default uses "TempFS" """ self._temp_dir = tempfile.mkdtemp(identifier or "TempFS",dir=temp_dir) self._cleaned = False super(TempFS, self).__init__(self._temp_dir, dir_mode=dir_mode, thread_synchronize=thread_synchronize) def __str__(self): return '' % self._temp_dir __repr__ = __str__ def __unicode__(self): return u'' % self._temp_dir def close(self): """Removes the temporary directory. This will be called automatically when the object is cleaned up by Python, although it is advisable to call it manually. Note that once this method has been called, the FS object may no longer be used. """ # Depending on how resources are freed by the OS, there could # be some transient errors when freeing a TempFS soon after it # was used. If they occur, do a small sleep and try again. try: self._close() except (ResourceLockedError,ResourceInvalidError): time.sleep(0.5) self._close() @convert_os_errors def _close(self): """Actual implementation of close(). This is a separate method so it can be re-tried in the face of transient errors. """ os_remove = convert_os_errors(os.remove) os_rmdir = convert_os_errors(os.rmdir) if not self._cleaned and self.exists("/"): self._lock.acquire() try: # shutil.rmtree doesn't handle long paths on win32, # so we walk the tree by hand. entries = os.walk(self.root_path,topdown=False) for (dir,dirnames,filenames) in entries: for filename in filenames: try: os_remove(os.path.join(dir,filename)) except ResourceNotFoundError: pass for dirname in dirnames: try: os_rmdir(os.path.join(dir,dirname)) except ResourceNotFoundError: pass os.rmdir(self.root_path) self._cleaned = True finally: self._lock.release() super(TempFS,self).close() fs-0.3.0/fs/utils.py0000644000175000017500000003225711407352565012765 0ustar willwill""" The `utils` module provides a number of utility functions that don't belong in the Filesystem interface. Generally the functions in this module work with multiple Filesystems, for instance moving and copying between non-similar Filesystems. """ import shutil import os import sys from fs.mountfs import MountFS from fs.path import pathjoin, pathsplit from fs.errors import DestinationExistsError def copyfile(src_fs, src_path, dst_fs, dst_path, overwrite=True, chunk_size=16384): """Copy a file from one filesystem to another. Will use system copyfile, if both files have a syspath. Otherwise file will be copied a chunk at a time. :param src_fs: Source filesystem object :param src_path: -- Source path :param dst_fs: Destination filesystem object :param dst_path: Destination filesystem object :param chunk_size: Size of chunks to move if system copyfile is not available (default 16K) """ # If the src and dst fs objects are the same, then use a direct copy if src_fs is dst_fs: src_fs.copy(src_path, dst_path, overwrite=overwrite) return src_syspath = src_fs.getsyspath(src_path, allow_none=True) dst_syspath = dst_fs.getsyspath(dst_path, allow_none=True) if not overwrite and dst_fs.exists(dst_path): raise DestinationExistsError(dst_path) # System copy if there are two sys paths if src_syspath is not None and dst_syspath is not None: shutil.copyfile(src_syspath, dst_syspath) return src, dst = None, None try: # Chunk copy src = src_fs.open(src_path, 'rb') dst = dst_fs.open(dst_path, 'wb') while True: chunk = src.read(chunk_size) if not chunk: break dst.write(chunk) finally: if src is not None: src.close() if dst is not None: dst.close() def movefile(src_fs, src_path, dst_fs, dst_path, overwrite=True, chunk_size=16384): """Move a file from one filesystem to another. Will use system copyfile, if both files have a syspath. Otherwise file will be copied a chunk at a time. :param src_fs: Source filesystem object :param src_path: Source path :param dst_fs: Destination filesystem object :param dst_path: Destination filesystem object :param chunk_size: Size of chunks to move if system copyfile is not available (default 16K) """ src_syspath = src_fs.getsyspath(src_path, allow_none=True) dst_syspath = dst_fs.getsyspath(dst_path, allow_none=True) if not overwrite and dst_fs.exists(dst_path): raise DestinationExistsError(dst_path) if src_fs is dst_fs: src_fs.move(src_path, dst_path, overwrite=overwrite) return # System copy if there are two sys paths if src_syspath is not None and dst_syspath is not None: shutil.move(src_syspath, dst_syspath) return src, dst = None, None try: # Chunk copy src = src_fs.open(src_path, 'rb') dst = dst_fs.open(dst_path, 'wb') while True: chunk = src.read(chunk_size) if not chunk: break dst.write(chunk) src_fs.remove(src_path) finally: if src is not None: src.close() if dst is not None: dst.close() def movedir(fs1, fs2, overwrite=False, ignore_errors=False, chunk_size=16384): """Moves contents of a directory from one filesystem to another. :param fs1: Source filesystem, or a tuple of (, ) :param fs2: Destination filesystem, or a tuple of (, ) :param ignore_errors: If True, exceptions from file moves are ignored :param chunk_size: Size of chunks to move if a simple copy is used """ if isinstance(fs1, tuple): fs1, dir1 = fs1 fs1 = fs1.opendir(dir1) if isinstance(fs2, tuple): fs2, dir2 = fs2 fs2.makedir(dir2, allow_recreate=True) fs2 = fs2.opendir(dir2) mount_fs = MountFS() mount_fs.mount('src', fs1) mount_fs.mount('dst', fs2) mount_fs.movedir('src', 'dst', overwrite=True, ignore_errors=ignore_errors, chunk_size=chunk_size) def copydir(fs1, fs2, overwrite=False, ignore_errors=False, chunk_size=16384): """Copies contents of a directory from one filesystem to another. :param fs1: Source filesystem, or a tuple of (, ) :param fs2: Destination filesystem, or a tuple of (, ) :param ignore_errors: If True, exceptions from file moves are ignored :param chunk_size: Size of chunks to move if a simple copy is used """ if isinstance(fs1, tuple): fs1, dir1 = fs1 fs1 = fs1.opendir(dir1) if isinstance(fs2, tuple): fs2, dir2 = fs2 fs2.makedir(dir2, allow_recreate=True) fs2 = fs2.opendir(dir2) mount_fs = MountFS() mount_fs.mount('src', fs1) mount_fs.mount('dst', fs2) mount_fs.copydir('src', 'dst', overwrite=True, ignore_errors=ignore_errors, chunk_size=chunk_size) def countbytes(fs): """Returns the total number of bytes contained within files in a filesystem. :param fs: A filesystem object """ total = sum(fs.getsize(f) for f in fs.walkfiles()) return total def find_duplicates(fs, compare_paths=None, quick=False, signature_chunk_size=16*1024, signature_size=10*16*1024): """A generator that yields the paths of duplicate files in an FS object. Files are considered identical if the contents are the same (dates or other attributes not take in to account). :param fs: A filesystem object :param compare_paths: An iterable of paths within the FS object, or all files if omitted :param quick: If set to True, the quick method of finding duplicates will be used, which can potentially return false positives if the files have the same size and start with the same data. Do not use when deleting files! :param signature_chunk_size: The number of bytes to read before generating a signature checksum value :param signature_size: The total number of bytes read to generate a signature For example, the following will list all the duplicate .jpg files in "~/Pictures":: >>> from fs.utils import find_duplicates >>> from fs.osfs import OSFS >>> fs = OSFS('~/Pictures') >>> for dups in find_duplicates(fs, fs.walkfiles('*.jpg')): ... print list(dups) """ from collections import defaultdict from zlib import crc32 if compare_paths is None: compare_paths = fs.walkfiles() # Create a dictionary that maps file sizes on to the paths of files with # that filesize. So we can find files of the same size with a quick lookup file_sizes = defaultdict(list) for path in compare_paths: file_sizes[fs.getsize(path)].append(path) size_duplicates = [paths for paths in file_sizes.itervalues() if len(paths) > 1] signatures = defaultdict(list) # A signature is a tuple of CRC32s for each 4x16K of the file # This allows us to find potential duplicates with a dictionary lookup for paths in size_duplicates: for path in paths: signature = [] fread = None bytes_read = 0 try: fread = fs.open(path, 'rb') while signature_size is None or bytes_read < signature_size: data = fread.read(signature_chunk_size) if not data: break bytes_read += len(data) signature.append(crc32(data)) finally: if fread is not None: fread.close() signatures[tuple(signature)].append(path) # If 'quick' is True then the signature comparison is adequate (although # it may result in false positives) if quick: for paths in signatures.itervalues(): if len(paths) > 1: yield paths return def identical(p1, p2): """ Returns True if the contests of two files are identical. """ f1, f2 = None, None try: f1 = fs.open(p1, 'rb') f2 = fs.open(p2, 'rb') while True: chunk1 = f1.read(16384) if not chunk1: break chunk2 = f2.read(16384) if chunk1 != chunk2: return False return True finally: if f1 is not None: f1.close() if f2 is not None: f2.close() # If we want to be accurate then we need to compare suspected duplicates # byte by byte. # All path groups in this loop have the same size and same signature, so are # highly likely to be identical. for paths in signatures.itervalues(): while len(paths) > 1: test_p = paths.pop() dups = [test_p] for path in paths: if identical(test_p, path): dups.append(path) if len(dups) > 1: yield dups paths = list(set(paths).difference(dups)) def print_fs(fs, path='/', max_levels=5, file_out=None, terminal_colors=None): """Prints a filesystem listing to stdout (including sub dirs). Useful as a debugging aid. Be careful about printing a OSFS, or any other large filesystem. Without max_levels set, this function will traverse the entire directory tree. For example, the following will print a tree of the files under the current working directory:: >>> from fs.osfs import * >>> from fs.utils import * >>> fs = OSFS('.') >>> print_fs(fs) :param fs: A filesystem object :param path: Path of a directory to list (default "/") :param max_levels: Maximum levels of dirs to list (default 5), set to None for no maximum :param file_out: File object to write output to (defaults to sys.stdout) :param terminal_colors: If True, terminal color codes will be written, set to False for non-console output. The default (None) will select an appropriate setting for the platform. """ if file_out is None: file_out = sys.stdout if terminal_colors is None: if sys.platform == 'win32': terminal_colors = False else: terminal_colors = True def write(line): file_out.write(line.encode(file_out.encoding or 'utf-8')+'\n') def wrap_prefix(prefix): if not terminal_colors: return prefix return '\x1b[34m%s\x1b[0m' % prefix def wrap_dirname(dirname): if not terminal_colors: return dirname return '\x1b[1;32m%s\x1b[0m' % dirname def wrap_error(msg): if not terminal_colors: return msg return '\x1b[31m%s\x1b[0m' % msg def wrap_filename(fname): if not terminal_colors: return fname if '.' in fname: name, ext = os.path.splitext(fname) fname = '%s\x1b[36m%s\x1b[0m' % (name, ext) if fname.startswith('.'): fname = '\x1b[2m%s\x1b[0m' % fname return fname def print_dir(fs, path, levels=[]): try: dir_listing = [(fs.isdir(pathjoin(path,p)), p) for p in fs.listdir(path)] except Exception, e: prefix = ''.join([('| ', ' ')[last] for last in levels]) + ' ' write(wrap_prefix(prefix[:-1] + ' ') + wrap_error("unabled to retrieve directory list (%s) ..." % str(e))) return 0 dir_listing.sort(key = lambda (isdir, p):(not isdir, p.lower())) for i, (is_dir, item) in enumerate(dir_listing): is_last_item = (i == len(dir_listing) - 1) prefix = ''.join([('| ', ' ')[last] for last in levels]) if is_last_item: prefix += '`' else: prefix += '|' if is_dir: write('%s %s' % (wrap_prefix(prefix + '--'), wrap_dirname(item))) if max_levels is not None and len(levels) >= max_levels: write(wrap_prefix(prefix[:-1] + ' ') + wrap_error('max recursion levels reached')) else: print_dir(fs, pathjoin(path, item), levels[:] + [is_last_item]) else: write('%s %s' % (wrap_prefix(prefix + '--'), wrap_filename(item))) return len(dir_listing) print_dir(fs, path) if __name__ == "__main__": #from osfs import * #fs = OSFS('~/copytest') #from memoryfs import * #m = MemoryFS() #m.makedir('maps') #copydir((fs, 'maps'), (m, 'maps')) #from browsewin import browse #browse(m) from osfs import * f = OSFS('/home/will/projects') print_fs(f) fs-0.3.0/fs/objecttree.py0000644000175000017500000000557011317506512013742 0ustar willwill class _ObjectDict(dict): pass class ObjectTree(object): """A class to facilitate the creation of tree structures.""" def __init__(self): self.root = _ObjectDict() def _split(self, path): if '/' not in path: return "", path else: return path.rsplit('/', 1) def _splitpath(self, path): return [p for p in path.split('/') if p] def _locate(self, path): current = self.root for path_component in self._splitpath(path): if type(current) is not _ObjectDict: return None node = current.get(path_component, None) if node is None: return None current = node return current def __setitem__(self, path, object): if not path: raise IndexError("No path supplied") current = self.root path, name = self._split(path) for path_component in self._splitpath(path): node = current.get(path_component, None) if type(node) is not _ObjectDict: new_dict = _ObjectDict() current[path_component] = new_dict current = new_dict else: current = node current[name] = object def __getitem__(self, path): node = self._locate(path) if node is None: raise IndexError("Path does not exist") return node def __delitem__(self, path): path, name = self._split(path) node = self._locate(path) if node is None or type(node) is not _ObjectDict: raise IndexError("Path does not exist") del node[name] def get(self, path, default): node = self._locate(path) if node is None: return default return node def partialget(self, path, default=None): current = self.root partial_path = [] remaining_path = self._splitpath(path) for path_component in remaining_path[:]: if type(current) is not _ObjectDict: return "/".join(partial_path), current, "/".join(remaining_path) partial_path.append(path_component) remaining_path.pop(0) node = current.get(path_component, None) if node is None: return None, default, None current = node return path, current, "" def isobject(self, path): node = self._locate(path) return type(node) is not _ObjectDict def __contains__(self, path): node = self._locate(path) return node is not None def __iter__(self): return iter(self.root) def keys(self): return self.root.keys() def iterkeys(self): return self.root.iterkeys() def items(self): return self.root.items() def iteritems(self): return self.root.iteritems() fs-0.3.0/AUTHORS0000644000175000017500000000010211223657272011672 0ustar willwill Will McGugan (will@willmcgugan.com) Ryan Kelly (ryan@rfk.id.au) fs-0.3.0/PKG-INFO0000644000175000017500000000122311407431454011720 0ustar willwillMetadata-Version: 1.0 Name: fs Version: 0.3.0 Summary: Filesystem abstraction Home-page: http://code.google.com/p/pyfilesystem/ Author: Will McGugan Author-email: will@willmcgugan.com License: Python Software Foundation License Download-URL: http://code.google.com/p/pyfilesystem/downloads/list Description: Creates a common interface to filesystems Platform: any Classifier: Development Status :: 3 - Alpha Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: Python Software Foundation License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Topic :: System :: Filesystems fs-0.3.0/setup.py0000644000175000017500000000167211375762640012355 0ustar willwill#!/usr/bin/env python from distutils.core import setup from fs import __version__ as VERSION classifiers = [ 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'License :: OSI Approved :: Python Software Foundation License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: System :: Filesystems', ] setup(name='fs', version=VERSION, description="Filesystem abstraction", long_description="Creates a common interface to filesystems", license = "Python Software Foundation License", author="Will McGugan", author_email="will@willmcgugan.com", url="http://code.google.com/p/pyfilesystem/", download_url="http://code.google.com/p/pyfilesystem/downloads/list", platforms = ['any'], packages=['fs','fs.expose','fs.expose.fuse','fs.tests','fs.wrapfs', 'fs.osfs'], classifiers=classifiers, )