s3ql-2.26/0000775000175000017500000000000013246754372014026 5ustar nikrationikratio00000000000000s3ql-2.26/contrib/0000775000175000017500000000000013246754372015466 5ustar nikrationikratio00000000000000s3ql-2.26/contrib/remove_objects.py0000755000175000017500000000337712615000156021040 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' remove_objects.py - this file is part of S3QL. Copyright © 2014 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' import os import sys import argparse import atexit # We are running from the S3QL source directory, make sure # that we use modules from this directory basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..')) if (os.path.exists(os.path.join(basedir, 'setup.py')) and os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))): sys.path = [os.path.join(basedir, 'src')] + sys.path from s3ql.logging import logging, setup_logging from s3ql.common import get_backend from s3ql.parse_args import ArgumentParser, storage_url_type log = logging.getLogger(__name__) def parse_args(args): '''Parse command line''' parser = ArgumentParser(description='Batch remove objects from an S3QL backend') parser.add_authfile() parser.add_quiet() parser.add_debug() parser.add_backend_options() parser.add_version() parser.add_argument("storage_url", type=storage_url_type, help='Storage URL of the backend to delete from') parser.add_argument("file", type=argparse.FileType(mode='r', encoding='utf-8'), help='File with newline separated object keys to delete') return parser.parse_args(args) def main(args=None): if args is None: args = sys.argv[1:] options = parse_args(args) setup_logging(options) backend = get_backend(options, raw=True) atexit.register(backend.close) for line in options.file: key = line.rstrip() log.info('Deleting %s', key) backend.delete(key) if __name__ == '__main__': main(sys.argv[1:]) s3ql-2.26/contrib/clone_fs.py0000755000175000017500000001221412615000156017610 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' clone_fs.py - this file is part of S3QL. Clone an S3QL file system from one backend into another, without recompressing or reencrypting. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. 
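
A minimal usage sketch -- the storage URLs and thread count below are
placeholders, adjust them to your own setup:

    clone_fs.py --threads 8 s3://source-bucket/fsdata/ local:///var/backup/s3ql-clone

Both backends are opened in "raw" mode, so every object (data and
metadata) is copied verbatim to the destination.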
''' import os import sys import tempfile import time from queue import Queue, Full as QueueFull # We are running from the S3QL source directory, make sure # that we use modules from this directory basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..')) if (os.path.exists(os.path.join(basedir, 'setup.py')) and os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))): sys.path = [os.path.join(basedir, 'src')] + sys.path from s3ql.logging import logging, setup_logging, QuietError from s3ql.common import get_backend_factory, AsyncFn, handle_on_return from s3ql.backends.common import DanglingStorageURLError from s3ql import BUFSIZE from s3ql.parse_args import ArgumentParser, storage_url_type log = logging.getLogger(__name__) def parse_args(args): '''Parse command line''' parser = ArgumentParser( description='Clone an S3QL file system.') parser.add_authfile() parser.add_quiet() parser.add_debug() parser.add_backend_options() parser.add_version() parser.add_argument("src_storage_url", metavar='', type=storage_url_type, help='Storage URL of the source backend that contains the file system') parser.add_argument("dst_storage_url", metavar='', type=storage_url_type, help='Storage URL of the destination backend') parser.add_argument("--threads", type=int, default=3, help='Number of threads to use') return parser.parse_args(args) @handle_on_return def copy_loop(queue, src_backend_factory, dst_backend_factory, on_return): '''Copy keys arriving in *queue* from *src_backend* to *dst_backend* Terminate when None is received. ''' src_backend = on_return.enter_context(src_backend_factory()) dst_backend = on_return.enter_context(dst_backend_factory()) tmpfh = on_return.enter_context(tempfile.TemporaryFile()) while True: key = queue.get() if key is None: break log.debug('reading object %s', key) def do_read(fh): tmpfh.seek(0) tmpfh.truncate() while True: buf = fh.read(BUFSIZE) if not buf: break tmpfh.write(buf) return fh.metadata metadata = src_backend.perform_read(do_read, key) log.debug('writing object %s', key) def do_write(fh): tmpfh.seek(0) while True: buf = tmpfh.read(BUFSIZE) if not buf: break fh.write(buf) dst_backend.perform_write(do_write, key, metadata) def main(args=None): options = parse_args(args) setup_logging(options) try: options.storage_url = options.src_storage_url src_backend_factory = get_backend_factory(options.src_storage_url, options.backend_options, options.authfile, raw=True) options.storage_url = options.dst_storage_url dst_backend_factory = get_backend_factory(options.dst_storage_url, options.backend_options, options.authfile, raw=True) except DanglingStorageURLError as exc: raise QuietError(str(exc)) from None queue = Queue(maxsize=options.threads) threads = [] for _ in range(options.threads): t = AsyncFn(copy_loop, queue, src_backend_factory, dst_backend_factory) # Don't wait for worker threads, gives deadlock if main thread # terminates with exception t.daemon = True t.start() threads.append(t) with src_backend_factory() as backend: stamp1 = 0 for (i, key) in enumerate(backend): stamp2 = time.time() if stamp2 - stamp1 > 1: stamp1 = stamp2 sys.stdout.write('\rCopied %d objects so far...' 
% i) sys.stdout.flush() # Terminate early if any thread failed with an exception for t in threads: if not t.is_alive(): t.join_and_raise() # Avoid blocking if all threads terminated while True: try: queue.put(key, timeout=1) except QueueFull: pass else: break for t in threads: if not t.is_alive(): t.join_and_raise() sys.stdout.write('\n') queue.maxsize += len(threads) for t in threads: queue.put(None) for t in threads: t.join_and_raise() if __name__ == '__main__': main(sys.argv[1:]) s3ql-2.26/contrib/expire_backups.py0000755000175000017500000002222612742247106021041 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' expire_backups.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' import sys import os import re import textwrap import shutil import pickle from datetime import datetime, timedelta from collections import defaultdict # We are running from the S3QL source directory, make sure # that we use modules from this directory basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..')) if (os.path.exists(os.path.join(basedir, 'setup.py')) and os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))): sys.path = [os.path.join(basedir, 'src')] + sys.path from s3ql.logging import setup_logging, QuietError, logging from s3ql.common import thaw_basic_mapping, freeze_basic_mapping from s3ql.parse_args import ArgumentParser from s3ql.remove import main as s3qlrm log = logging.getLogger(__name__) def parse_args(args): '''Parse command line''' parser = ArgumentParser( description=textwrap.dedent('''\ ``expire_backups.py`` is a program to intelligently remove old backups that are no longer needed. To define what backups you want to keep for how long, you define a number of *age ranges*. ``expire_backups`` ensures that you will have at least one backup in each age range at all times. It will keep exactly as many backups as are required for that and delete any backups that become redundant. Age ranges are specified by giving a list of range boundaries in terms of backup cycles. Every time you create a new backup, the existing backups age by one cycle. Please refer to the S3QL documentation for details. ''')) parser.add_quiet() parser.add_debug() parser.add_version() parser.add_argument('cycles', nargs='+', type=int, metavar='', help='Age range boundaries in terms of backup cycles') parser.add_argument('--state', metavar='', type=str, default='.expire_backups.dat', # Add quotes around default to prevent groff # from choking on leading . generated by buggy # docutils man page generator. help='File to save state information in (default: "%(default)s")') parser.add_argument("-n", action="store_true", default=False, help="Dry run. 
Just show which backups would be deleted.") parser.add_argument('--reconstruct-state', action='store_true', default=False, help='Try to reconstruct a missing state file from backup dates.') parser.add_argument("--use-s3qlrm", action="store_true", help="Use `s3qlrm` command to delete backups.") options = parser.parse_args(args) if sorted(options.cycles) != options.cycles: parser.error('Age range boundaries must be in increasing order') return options def main(args=None): if args is None: args = sys.argv[1:] options = parse_args(args) setup_logging(options) # Determine available backups backup_list = set(x for x in os.listdir('.') if re.match(r'^\d{4}-\d\d-\d\d_\d\d:\d\d:\d\d$', x)) if not os.path.exists(options.state) and len(backup_list) > 1: if not options.reconstruct_state: raise QuietError('Found more than one backup but no state file! Aborting.') log.warning('Trying to reconstruct state file..') state = upgrade_to_state(backup_list) if not options.n: log.info('Saving reconstructed state..') with open(options.state, 'wb') as fh: fh.write(freeze_basic_mapping(state)) elif not os.path.exists(options.state): log.warning('Creating state file..') state = dict() else: log.info('Reading state...') with open(options.state, 'rb') as fh: state = thaw_basic_mapping(fh.read()) to_delete = process_backups(backup_list, state, options.cycles) for x in to_delete: log.info('Backup %s is no longer needed, removing...', x) if not options.n: if options.use_s3qlrm: s3qlrm([x]) else: shutil.rmtree(x) if options.n: log.info('Dry run, not saving state.') else: log.info('Saving state..') with open(options.state, 'wb') as fh: fh.write(freeze_basic_mapping(state)) def upgrade_to_state(backup_list): log.info('Several existing backups detected, trying to convert absolute ages to cycles') now = datetime.now() age = dict() for x in sorted(backup_list): age[x] = now - datetime.strptime(x, '%Y-%m-%d_%H:%M:%S') log.info('Backup %s is %s hours old', x, age[x]) deltas = [ abs(x - y) for x in age.values() for y in age.values() if x != y ] step = min(deltas) log.info('Assuming backup interval of %s hours', step) state = dict() for x in sorted(age): state[x] = 0 while age[x] > timedelta(0): state[x] += 1 age[x] -= step log.info('Backup %s is %d cycles old', x, state[x]) log.info('State construction complete.') return state def simulate(args): options = parse_args(args) setup_logging(options) state = dict() backup_list = set() for i in range(50): backup_list.add('backup-%2d' % i) delete = process_backups(backup_list, state, options.cycles) log.info('Deleting %s', delete) backup_list -= delete log.info('Available backups on day %d:', i) for x in sorted(backup_list): log.info(x) def process_backups(backup_list, state, cycles): # New backups new_backups = backup_list - set(state) for x in sorted(new_backups): log.info('Found new backup %s', x) for y in state: state[y] += 1 state[x] = 0 for x in state: log.debug('Backup %s has age %d', x, state[x]) # Missing backups missing_backups = set(state) - backup_list for x in missing_backups: log.warning('backup %s is missing. 
Did you delete it manually?', x) del state[x] # Ranges ranges = [ (0, cycles[0]) ] for i in range(1, len(cycles)): ranges.append((cycles[i - 1], cycles[i])) # Go forward in time to see what backups need to be kept simstate = dict() keep = set() missing = defaultdict(list) for step in range(max(cycles)): log.debug('Considering situation after %d more backups', step) for x in simstate: simstate[x] += 1 log.debug('Backup x now has simulated age %d', simstate[x]) # Add the hypothetical backup that has been made "just now" if step != 0: simstate[step] = 0 for (min_, max_) in ranges: log.debug('Looking for backup for age range %d to %d', min_, max_) # Look in simstate found = False for (backup, age) in simstate.items(): if min_ <= age < max_: found = True break if found: # backup and age will be defined #pylint: disable=W0631 log.debug('Using backup %s (age %d)', backup, age) continue # Look in state for (backup, age) in state.items(): age += step if min_ <= age < max_: log.info('Keeping backup %s (current age %d) for age range %d to %d%s', backup, state[backup], min_, max_, (' in %d cycles' % step) if step else '') simstate[backup] = age keep.add(backup) break else: if step == 0: log.info('Note: there is currently no backup available ' 'for age range %d to %d', min_, max_) else: missing['%d to %d' % (min_, max_)].append(step) for range_ in sorted(missing): log.info('Note: there will be no backup for age range %s ' 'in (forthcoming) cycle(s): %s', range_, format_list(missing[range_])) to_delete = set(state) - keep for x in to_delete: del state[x] return to_delete def format_list(l): if not l: return '' l = l[:] # Append bogus end element l.append(l[-1] + 2) range_start = l.pop(0) cur = range_start res = list() for n in l: if n == cur + 1: pass elif range_start == cur: res.append('%d' % cur) elif range_start == cur - 1: res.append('%d' % range_start) res.append('%d' % cur) else: res.append('%d-%d' % (range_start, cur)) if n != cur + 1: range_start = n cur = n if len(res) > 1: return ('%s and %s' % (', '.join(res[:-1]), res[-1])) else: return ', '.join(res) if __name__ == '__main__': #simulate(sys.argv[1:]) main(sys.argv[1:]) s3ql-2.26/contrib/fsck_db.py0000755000175000017500000000324312615000156017415 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' fsck_db.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. 
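
Usage sketch: pass the locally cached metadata database (the name below
is hypothetical -- use the ".db"/".params" file pair from your
~/.s3ql/ cache directory, with or without the ".db" suffix):

    fsck_db.py ~/.s3ql/myfs.db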
''' from argparse import ArgumentTypeError import os import sys # We are running from the S3QL source directory, make sure # that we use modules from this directory basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..')) if (os.path.exists(os.path.join(basedir, 'setup.py')) and os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))): sys.path = [os.path.join(basedir, 'src')] + sys.path from s3ql.logging import logging, setup_logging from s3ql.fsck import ROFsck from s3ql.parse_args import ArgumentParser log = logging.getLogger(__name__) def parse_args(args): parser = ArgumentParser( description="Checks S3QL file system metadata") parser.add_log('~/.s3ql/fsck_db.log') parser.add_debug() parser.add_quiet() parser.add_version() def db_path(s): s = os.path.splitext(s)[0] if not os.path.exists(s + '.db'): raise ArgumentTypeError('Unable to read %s.db' % s) if not os.path.exists(s + '.params'): raise ArgumentTypeError('Unable to read %s.params' % s) return s parser.add_argument("path", metavar='', type=db_path, help='Database to be checked') options = parser.parse_args(args) return options def main(args=None): if args is None: args = sys.argv[1:] options = parse_args(args) setup_logging(options) fsck = ROFsck(options.path) fsck.check() if __name__ == '__main__': main(sys.argv[1:]) s3ql-2.26/contrib/pcp.py0000755000175000017500000000545112615000156016607 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' pcp.py - this file is part of S3QL. Parallel, recursive copy of directory trees. --- Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' import sys import os import subprocess # We are running from the S3QL source directory, make sure # that we use modules from this directory basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..')) if (os.path.exists(os.path.join(basedir, 'setup.py')) and os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))): sys.path = [os.path.join(basedir, 'src')] + sys.path from s3ql.logging import logging, setup_logging from s3ql.parse_args import ArgumentParser log = logging.getLogger(__name__) def parse_args(args): '''Parse command line''' parser = ArgumentParser( description='Recursively copy source(s) to destination using multiple ' 'parallel rsync processes.') parser.add_quiet() parser.add_debug() parser.add_version() parser.add_argument("-a", action="store_true", help='Pass -aHAX option to rsync.') parser.add_argument("--processes", action="store", type=int, metavar='', default=10, help='Number of rsync processes to use (default: %(default)s).') parser.add_argument('source', metavar='', nargs='+', help='Directories to copy') parser.add_argument('dest', metavar='', help="Target directory") options = parser.parse_args(args) options.pps = options.source + [ options.dest ] return options def main(args=None): if args is None: args = sys.argv[1:] options = parse_args(args) setup_logging(options) pool = ('abcdefghijklmnopqrstuvwxyz', 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', '0123456789') steps = [ len(x) / (options.processes - 1) for x in pool ] prefixes = list() for i in range(options.processes - 1): parts = [ x[int(i * y):int((i + 1) * y)] for (x, y) in zip(pool, steps) ] prefixes.append(''.join(parts)) filters = [ '-! 
[%s]*' % x for x in prefixes ] # Catch all filters.append('- [%s]*' % ''.join(prefixes)) rsync_args = [ 'rsync', '-f', '+ */' ] if not options.quiet: rsync_args.append('--out-format') rsync_args.append('%n%L') if options.a: rsync_args.append('-aHAX') processes = list() for filter_ in filters: cmd = rsync_args + [ '-f', filter_ ] + options.pps log.debug('Calling %s', cmd) processes.append(subprocess.Popen(cmd)) if all([ c.wait() == 0 for c in processes]): sys.exit(0) else: sys.exit(1) if __name__ == '__main__': main(sys.argv[1:]) s3ql-2.26/contrib/pcp.10000664000175000017500000000477513246754370016345 0ustar nikrationikratio00000000000000.\" Man page generated from reStructuredText. . .TH "PCP" "1" "Mar 04, 2018" "2.26" "S3QL" .SH NAME pcp \- Recursive, parallel copy of directory trees . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C pcp [options] [ ...] .ft P .fi .UNINDENT .UNINDENT .SH DESCRIPTION .sp The \fBpcp\fP command is a is a wrapper that starts several \fBsync\fP processes to copy directory trees in parallel. This is allows much better copying performance on file system that have relatively high latency when retrieving individual files like S3QL. .sp \fBNote\fP: Using this program only improves performance when copying \fIfrom\fP an S3QL file system. When copying \fIto\fP an S3QL file system, using \fBpcp\fP is more likely to \fIdecrease\fP performance. .SH OPTIONS .sp The \fBpcp\fP command accepts the following options: .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .B \-\-quiet be really quiet .TP .BI \-\-debug\-modules \ Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-debug Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-version just print program version and exit .TP .B \-a Pass \-aHAX option to rsync. .TP .BI \-\-processes \ Number of rsync processes to use (default: 10). .UNINDENT .UNINDENT .UNINDENT .SH EXIT CODES .sp \fBpcp\fP may terminate with the following exit codes: .INDENT 0.0 .TP .B 0 Everything went well. .TP .B 1 An unexpected error occured. This may indicate a bug in the program. .TP .B 2 Invalid command line argument. .UNINDENT .SH SEE ALSO .sp \fBpcp\fP is shipped as part of S3QL, \fI\%https://bitbucket.org/nikratio/s3ql/\fP\&. .SH COPYRIGHT © 2008 Nikolaus Rath .\" Generated by docutils manpage writer. . s3ql-2.26/contrib/benchmark.py0000755000175000017500000001760212615000156017760 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' benchmark.py - this file is part of S3QL. Benchmark compression and upload performance and recommend compression algorithm that maximizes throughput. --- Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. 
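
Typical invocation -- the storage URL and test file are placeholders;
the file should be representative of the data you intend to store:

    benchmark.py s3://my-bucket/benchmark/ testdata.tar

The benchmark creates and mounts a temporary local file system and
uploads test data to the given backend, so it requires both FUSE and
network access and may run for several minutes.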
''' import argparse import atexit import os import shutil import subprocess import sys import tempfile import time # We are running from the S3QL source directory, make sure # that we use modules from this directory basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..')) if (os.path.exists(os.path.join(basedir, 'setup.py')) and os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))): sys.path = [os.path.join(basedir, 'src')] + sys.path exec_prefix = os.path.join(basedir, 'bin', '') else: exec_prefix = '' from s3ql.logging import logging, setup_logging, QuietError from s3ql.common import get_backend from s3ql.backends.common import DanglingStorageURLError from s3ql.backends.comprenc import ComprencBackend from s3ql.backends.local import Backend from s3ql import BUFSIZE from s3ql.parse_args import ArgumentParser ALGS = ('lzma', 'bzip2', 'zlib') log = logging.getLogger(__name__) def parse_args(args): '''Parse command line''' parser = ArgumentParser( description='Measure S3QL write performance, uplink bandwidth and ' 'compression speed and determine limiting factor.') parser.add_authfile() parser.add_quiet() parser.add_debug() parser.add_backend_options() parser.add_version() parser.add_storage_url() parser.add_argument('file', metavar='', type=argparse.FileType(mode='rb'), help='File to transfer') parser.add_argument('--threads', metavar='', type=int, default=None, help='Also include statistics for threads in results.') parser.add_cachedir() return parser.parse_args(args) def main(args=None): if args is None: args = sys.argv[1:] options = parse_args(args) setup_logging(options) # /dev/urandom may be slow, so we cache the data first log.info('Preparing test data...') rnd_fh = tempfile.TemporaryFile() with open('/dev/urandom', 'rb', 0) as src: copied = 0 while copied < 50 * 1024 * 1024: buf = src.read(BUFSIZE) rnd_fh.write(buf) copied += len(buf) log.info('Measuring throughput to cache...') backend_dir = tempfile.mkdtemp(prefix='s3ql-benchmark-') mnt_dir = tempfile.mkdtemp(prefix='s3ql-mnt') atexit.register(shutil.rmtree, backend_dir) atexit.register(shutil.rmtree, mnt_dir) block_sizes = [ 2**b for b in range(12, 18) ] for blocksize in block_sizes: write_time = 0 size = 50 * 1024 * 1024 while write_time < 3: log.debug('Write took %.3g seconds, retrying', write_time) subprocess.check_call([exec_prefix + 'mkfs.s3ql', '--plain', 'local://%s' % backend_dir, '--quiet', '--force', '--cachedir', options.cachedir]) subprocess.check_call([exec_prefix + 'mount.s3ql', '--threads', '1', '--quiet', '--cachesize', '%d' % (2 * size / 1024), '--log', '%s/mount.log' % backend_dir, '--cachedir', options.cachedir, 'local://%s' % backend_dir, mnt_dir]) try: size *= 2 with open('%s/bigfile' % mnt_dir, 'wb', 0) as dst: rnd_fh.seek(0) write_time = time.time() copied = 0 while copied < size: buf = rnd_fh.read(blocksize) if not buf: rnd_fh.seek(0) continue dst.write(buf) copied += len(buf) write_time = time.time() - write_time os.unlink('%s/bigfile' % mnt_dir) finally: subprocess.check_call([exec_prefix + 'umount.s3ql', mnt_dir]) fuse_speed = copied / write_time log.info('Cache throughput with %3d KiB blocks: %d KiB/sec', blocksize / 1024, fuse_speed / 1024) # Upload random data to prevent effects of compression # on the network layer log.info('Measuring raw backend throughput..') try: backend = get_backend(options, raw=True) except DanglingStorageURLError as exc: raise QuietError(str(exc)) from None upload_time = 0 size = 512 * 1024 while upload_time < 10: size *= 2 def do_write(dst): 
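            # Stream the cached random data into the backend, wrapping around to
            # the start of the buffer as needed, until *size* bytes have been written.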
rnd_fh.seek(0) stamp = time.time() copied = 0 while copied < size: buf = rnd_fh.read(BUFSIZE) if not buf: rnd_fh.seek(0) continue dst.write(buf) copied += len(buf) return (copied, stamp) (upload_size, upload_time) = backend.perform_write(do_write, 's3ql_testdata') upload_time = time.time() - upload_time backend_speed = upload_size / upload_time log.info('Backend throughput: %d KiB/sec', backend_speed / 1024) backend.delete('s3ql_testdata') src = options.file size = os.fstat(options.file.fileno()).st_size log.info('Test file size: %.2f MiB', (size / 1024 ** 2)) in_speed = dict() out_speed = dict() for alg in ALGS: log.info('compressing with %s-6...', alg) backend = ComprencBackend(b'pass', (alg, 6), Backend('local://' + backend_dir, None, None)) def do_write(dst): #pylint: disable=E0102 src.seek(0) stamp = time.time() while True: buf = src.read(BUFSIZE) if not buf: break dst.write(buf) return (dst, stamp) (dst_fh, stamp) = backend.perform_write(do_write, 's3ql_testdata') dt = time.time() - stamp in_speed[alg] = size / dt out_speed[alg] = dst_fh.get_obj_size() / dt log.info('%s compression speed: %d KiB/sec per thread (in)', alg, in_speed[alg] / 1024) log.info('%s compression speed: %d KiB/sec per thread (out)', alg, out_speed[alg] / 1024) print('') print('With %d KiB blocks, maximum performance for different compression' % (block_sizes[-1]/1024), 'algorithms and thread counts is:', '', sep='\n') threads = set([1,2,4,8]) cores = os.sysconf('SC_NPROCESSORS_ONLN') if cores != -1: threads.add(cores) if options.threads: threads.add(options.threads) print('%-26s' % 'Threads:', ('%12d' * len(threads)) % tuple(sorted(threads))) for alg in ALGS: speeds = [] limits = [] for t in sorted(threads): if fuse_speed > t * in_speed[alg]: limit = 'CPU' speed = t * in_speed[alg] else: limit = 'S3QL/FUSE' speed = fuse_speed if speed / in_speed[alg] * out_speed[alg] > backend_speed: limit = 'uplink' speed = backend_speed * in_speed[alg] / out_speed[alg] limits.append(limit) speeds.append(speed / 1024) print('%-26s' % ('Max FS throughput (%s):' % alg), ('%7d KiB/s' * len(threads)) % tuple(speeds)) print('%-26s' % '..limited by:', ('%12s' * len(threads)) % tuple(limits)) print('') print('All numbers assume that the test file is representative and that', 'there are enough processor cores to run all active threads in parallel.', 'To compensate for network latency, you should use about twice as', 'many upload threads as indicated by the above table.\n', sep='\n') if __name__ == '__main__': main(sys.argv[1:]) s3ql-2.26/contrib/scramble_db.py0000755000175000017500000000643412742247106020275 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' scramble_db.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. 
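
Usage sketch (the storage URL is a placeholder; the file system
metadata must already be cached locally, e.g. from a previous mount or
fsck.s3ql run):

    scramble_db.py s3://my-bucket/myfs/

The scrambled copy is written to "s3ql_metadata.dat" in the current
working directory.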
''' # Data can be restored with: #from s3ql.metadata import restore_metadata #from s3ql.database import Connection #restore_metadata(open('s3ql_metadata.dat', 'rb+'), 'data.sqlite') import os import shutil import sys import tempfile import hashlib # We are running from the S3QL source directory, make sure # that we use modules from this directory basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..')) if (os.path.exists(os.path.join(basedir, 'setup.py')) and os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))): sys.path = [os.path.join(basedir, 'src')] + sys.path from s3ql.logging import logging, setup_logging, QuietError from s3ql import CURRENT_FS_REV from s3ql.common import get_backend_cachedir, load_params from s3ql.database import Connection from s3ql.metadata import dump_metadata from s3ql.parse_args import ArgumentParser log = logging.getLogger(__name__) DBNAME = 's3ql_metadata.dat' def parse_args(args): parser = ArgumentParser( description="Create metadata copy where all file- and directory names, " "and extended attribute names and values have been scrambled. " "This is intended to preserve privacy when a metadata copy " "needs to be provided to the developers for debugging.") parser.add_debug() parser.add_quiet() parser.add_version() parser.add_cachedir() parser.add_storage_url() options = parser.parse_args(args) return options def main(args=None): if args is None: args = sys.argv[1:] options = parse_args(args) setup_logging(options) # Check for cached metadata cachepath = get_backend_cachedir(options.storage_url, options.cachedir) if not os.path.exists(cachepath + '.params'): raise QuietError("No local metadata found.") param = load_params(cachepath) # Check revision if param['revision'] < CURRENT_FS_REV: raise QuietError('File system revision too old.') elif param['revision'] > CURRENT_FS_REV: raise QuietError('File system revision too new.') if os.path.exists(DBNAME): raise QuietError('%s exists, aborting.' % DBNAME) log.info('Copying database...') dst = tempfile.NamedTemporaryFile() with open(cachepath + '.db', 'rb') as src: shutil.copyfileobj(src, dst) dst.flush() db = Connection(dst.name) log.info('Scrambling...') md5 = lambda x: hashlib.md5(x).hexdigest() for (id_, name) in db.query('SELECT id, name FROM names'): db.execute('UPDATE names SET name=? WHERE id=?', (md5(name), id_)) for (id_, name) in db.query('SELECT inode, target FROM symlink_targets'): db.execute('UPDATE symlink_targets SET target=? WHERE inode=?', (md5(name), id_)) for (id_, name) in db.query('SELECT rowid, value FROM ext_attributes'): db.execute('UPDATE ext_attributes SET value=? WHERE rowid=?', (md5(name), id_)) log.info('Saving...') with open(DBNAME, 'wb+') as fh: dump_metadata(db, fh) if __name__ == '__main__': main(sys.argv[1:]) s3ql-2.26/contrib/s3ql_backup.sh0000755000175000017500000000341712653773225020236 0ustar nikrationikratio00000000000000#!/bin/bash # Abort entire script if any command fails set -e # Backup destination (storage url) storage_url="s3://my_backup_bucket" # Recover cache if e.g. system was shut down while fs was mounted fsck.s3ql --batch "$storage_url" # Create a temporary mountpoint and mount file system mountpoint="/tmp/s3ql_backup_$$" mkdir "$mountpoint" mount.s3ql "$storage_url" "$mountpoint" # Make sure the file system is unmounted when we are done # Note that this overwrites the earlier trap, so we # also delete the lock file here. 
trap "cd /; umount.s3ql '$mountpoint'; rmdir '$mountpoint'" EXIT # Figure out the most recent backup cd "$mountpoint" last_backup=`python < [ ...] .ft P .fi .UNINDENT .UNINDENT .SH DESCRIPTION .sp The \fBexpire_backups\fP command intelligently remove old backups that are no longer needed. .sp To define what backups you want to keep for how long, you define a number of \fIage ranges\fP\&. \fBexpire_backups\fP ensures that you will have at least one backup in each age range at all times. It will keep exactly as many backups as are required for that and delete any backups that become redundant. .sp Age ranges are specified by giving a list of range boundaries in terms of backup cycles. Every time you create a new backup, the existing backups age by one cycle. .sp Example: when \fBexpire_backups\fP is called with the age range definition \fB1 3 7 14 31\fP, it will guarantee that you always have the following backups available: .INDENT 0.0 .IP 1. 3 A backup that is 0 to 1 cycles old (i.e, the most recent backup) .IP 2. 3 A backup that is 1 to 3 cycles old .IP 3. 3 A backup that is 3 to 7 cycles old .IP 4. 3 A backup that is 7 to 14 cycles old .IP 5. 3 A backup that is 14 to 31 cycles old .UNINDENT .sp \fBNOTE:\fP .INDENT 0.0 .INDENT 3.5 If you do backups in fixed intervals, then one cycle will be equivalent to the backup interval. The advantage of specifying the age ranges in terms of backup cycles rather than days or weeks is that it allows you to gracefully handle irregular backup intervals. Imagine that for some reason you do not turn on your computer for one month. Now all your backups are at least a month old, and if you had specified the above backup strategy in terms of absolute ages, they would all be deleted! Specifying age ranges in terms of backup cycles avoids these sort of problems. .UNINDENT .UNINDENT .sp \fBexpire_backups\fP usage is simple. It requires backups to be stored in directories of the form \fByear\-month\-day_hour:minute:seconds\fP (\fBYYYY\-MM\-DD_HH:mm:ss\fP) and works on all backups in the current directory. So for the above backup strategy, the correct invocation would be: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C expire_backups.py 1 3 7 14 31 .ft P .fi .UNINDENT .UNINDENT .sp When storing your backups on an S3QL file system, you probably want to specify the \fB\-\-use\-s3qlrm\fP option as well. This tells \fBexpire_backups\fP to use the s3qlrm command to delete directories. .sp \fBexpire_backups\fP uses a "state file" to keep track which backups are how many cycles old (since this cannot be inferred from the dates contained in the directory names). The standard name for this state file is \fB\&.expire_backups.dat\fP\&. If this file gets damaged or deleted, \fBexpire_backups\fP no longer knows the ages of the backups and refuses to work. In this case you can use the \fB\-\-reconstruct\-state\fP option to try to reconstruct the state from the backup dates. However, the accuracy of this reconstruction depends strongly on how rigorous you have been with making backups (it is only completely correct if the time between subsequent backups has always been exactly the same), so it\(aqs generally a good idea not to tamper with the state file. .SH OPTIONS .sp The \fBexpire_backups\fP command accepts the following options: .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .B \-\-quiet be really quiet .TP .BI \-\-debug\-modules \ Activate debugging output from specified modules (use commas to separate multiple modules). 
Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-debug Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-version just print program version and exit .TP .BI \-\-state \ File to save state information in (default: ".expire_backups.dat") .TP .B \-n Dry run. Just show which backups would be deleted. .TP .B \-\-reconstruct\-state Try to reconstruct a missing state file from backup dates. .TP .B \-\-use\-s3qlrm Use \fBs3qlrm\fP command to delete backups. .UNINDENT .UNINDENT .UNINDENT .SH EXIT CODES .sp \fBexpire_backups\fP may terminate with the following exit codes: .INDENT 0.0 .TP .B 0 Everything went well. .TP .B 1 An unexpected error occured. This may indicate a bug in the program. .TP .B 2 Invalid command line argument. .UNINDENT .SH SEE ALSO .sp \fBexpire_backups\fP is shipped as part of S3QL, \fI\%https://bitbucket.org/nikratio/s3ql/\fP\&. .SH COPYRIGHT © 2008 Nikolaus Rath .\" Generated by docutils manpage writer. . s3ql-2.26/util/0000775000175000017500000000000013246754372015003 5ustar nikrationikratio00000000000000s3ql-2.26/util/sphinx_pipeinclude.py0000644000175000017500000000327212615000156021230 0ustar nikrationikratio00000000000000''' sphinx_pipe.py - this file is part of S3QL. Implements a Sphinx extension that provides a `pipeinclude` directive to include the output of a program. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from docutils.parsers.rst.directives.misc import Include import subprocess import shlex from docutils.utils.error_reporting import SafeString import tempfile import os.path import sys class PipeInclude(Include): """ Include program output as ReST source. """ def run(self): # To maximize code reuse, we just write the output in a temporary # file and call the base class. Otherwise we'd have to copy & paste # all the code to handle start-line, end-line etc options. source = self.state_machine.input_lines.source( self.lineno - self.state_machine.input_offset - 1) source_dir = os.path.dirname(os.path.abspath(source)) command = self.arguments[0] command_list = shlex.split(command) if command_list[0] == 'python': command_list[0] = sys.executable with tempfile.NamedTemporaryFile() as fh: exitcode = subprocess.call(command_list, stdout=fh, cwd=source_dir) if exitcode != 0: raise self.severe('Problems with "%s" directive:\n' 'Command %s returned with exit code %d' % (self.name, SafeString(command), exitcode)) fh.flush() self.arguments[0] = fh.name return super().run() def setup(app): app.add_directive('pipeinclude', PipeInclude) s3ql-2.26/util/cmdline_lexer.py0000644000175000017500000000127612615000156020152 0ustar nikrationikratio00000000000000''' cmdline_lexer.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from pygments.token import Comment, Name, Generic, Literal from pygments.lexer import RegexLexer __all__ = [ 'CommandLineLexer' ] class CommandLineLexer(RegexLexer): """ A lexer that highlights a command line with variable parts """ name = 'CommandLine' aliases = ['commandline'] mimetypes = [] tokens = { 'root': [ (r'#.*\n', Comment), (r'[^[<]+', Literal), (r'\[[^[\]]+\]', Generic.Emph), (r'<[^>]+>', Name.Variable), ], } s3ql-2.26/README.rst0000644000175000017500000001375213223730045015505 0ustar nikrationikratio00000000000000.. 
NOTE: We cannot use sophisticated ReST syntax here because this file is rendered by Bitbucket. ====== S3QL ====== S3QL is a file system that stores all its data online using storage services like `Google Storage`_, `Amazon S3`_, or OpenStack_. S3QL effectively provides a hard disk of dynamic, infinite capacity that can be accessed from any computer with internet access. S3QL is a standard conforming, full featured UNIX file system that is conceptually indistinguishable from any local file system. Furthermore, S3QL has additional features like compression, encryption, data de-duplication, immutable trees and snapshotting which make it especially suitable for online backup and archival. S3QL is designed to favor simplicity and elegance over performance and feature-creep. Care has been taken to make the source code as readable and serviceable as possible. Solid error detection and error handling have been included from the very first line, and S3QL comes with extensive automated test cases for all its components. .. _`Google Storage`: http://code.google.com/apis/storage/ .. _`Amazon S3`: http://aws.amazon.com/s3 .. _OpenStack: http://openstack.org/projects/storage/ Features ======== * **Transparency.** Conceptually, S3QL is indistinguishable from a local file system. For example, it supports hardlinks, symlinks, standard unix permissions, extended attributes and file sizes up to 2 TB. * **Dynamic Size.** The size of an S3QL file system grows and shrinks dynamically as required. * **Compression.** Before storage, all data may compressed with the LZMA, bzip2 or deflate (gzip) algorithm. * **Encryption.** After compression (but before upload), all data can be AES encrypted with a 256 bit key. An additional SHA256 HMAC checksum is used to protect the data against manipulation. * **Data De-duplication.** If several files have identical contents, the redundant data will be stored only once. This works across all files stored in the file system, and also if only some parts of the files are identical while other parts differ. * **Immutable Trees.** Directory trees can be made immutable, so that their contents can no longer be changed in any way whatsoever. This can be used to ensure that backups can not be modified after they have been made. * **Copy-on-Write/Snapshotting.** S3QL can replicate entire directory trees without using any additional storage space. Only if one of the copies is modified, the part of the data that has been modified will take up additional storage space. This can be used to create intelligent snapshots that preserve the state of a directory at different points in time using a minimum amount of space. * **High Performance independent of network latency.** All operations that do not write or read file contents (like creating directories or moving, renaming, and changing permissions of files and directories) are very fast because they are carried out without any network transactions. S3QL achieves this by saving the entire file and directory structure in a database. This database is locally cached and the remote copy updated asynchronously. * **Support for low bandwidth connections.** S3QL splits file contents into smaller blocks and caches blocks locally. This minimizes both the number of network transactions required for reading and writing data, and the amount of data that has to be transferred when only parts of a file are read or written. Development Status ================== S3QL is considered stable and suitable for production use. 
Starting with version 2.17.1, S3QL uses semantic versioning. This means that backwards-incompatible versions (e.g., versions that require an upgrade of the file system revision) will be reflected in an increase of the major version number. Supported Platforms =================== S3QL is developed and tested under Linux. Users have also reported running S3QL successfully on OS-X, FreeBSD and NetBSD. We try to maintain compatibility with these systems, but (due to lack of pre-release testers) we cannot guarantee that every release will run on all non-Linux systems. Please report any bugs you find, and we will try to fix them. Typical Usage ============= Before a file system can be mounted, the backend which will hold the data has to be initialized. This is done with the *mkfs.s3ql* command. Here we are using the Amazon S3 backend, and *nikratio-s3ql-bucket* is the S3 bucket in which the file system will be stored. :: mkfs.s3ql s3://ap-south-1/nikratio-s3ql-bucket To mount the S3QL file system stored in the S3 bucket *nikratio_s3ql_bucket* in the directory ``/mnt/s3ql``, enter:: mount.s3ql s3://ap-south-1/nikratio-s3ql-bucket /mnt/s3ql Now you can instruct your favorite backup program to run a backup into the directory ``/mnt/s3ql`` and the data will be stored an Amazon S3. When you are done, the file system has to be unmounted with :: umount.s3ql /mnt/s3ql Need Help? ========== The following resources are available: * The `S3QL User's Guide`_. * The `S3QL Wiki`_, which also contains the `S3QL FAQ`_. * The `S3QL Mailing List`_. You can subscribe by sending a mail to `s3ql+subscribe@googlegroups.com `_. Please report any bugs you may encounter in the `Bitbucket Issue Tracker`_. Contributing ============ The S3QL source code is available both on GitHub_ and BitBucket_. Professional Support -------------------- Professional support is offered via `Rath Consulting`_. .. _`S3QL User's Guide`: http://www.rath.org/s3ql-docs/index.html .. _`S3QL Wiki`: https://bitbucket.org/nikratio/s3ql/wiki/ .. _`Installation Instructions`: https://bitbucket.org/nikratio/s3ql/wiki/Installation .. _`S3QL FAQ`: https://bitbucket.org/nikratio/s3ql/wiki/FAQ .. _`S3QL Mailing List`: http://groups.google.com/group/s3ql .. _`Bitbucket Issue Tracker`: https://bitbucket.org/nikratio/s3ql/issues .. _BitBucket: https://bitbucket.org/nikratio/s3ql/ .. _GitHub: https://github.com/s3ql/main .. _`Rath Consulting`: http://www.rath-consulting.biz/ s3ql-2.26/LICENSE0000644000175000017500000010550612433007261015021 0ustar nikrationikratio00000000000000This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License Version 3 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. For reference, the full text of the GNU General Public License Version 3 is included below: GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. 
By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. 
To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. 
The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. 
This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. 
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. 
When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. 
Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. 
Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. 
For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. 
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see . The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . s3ql-2.26/PKG-INFO0000664000175000017500000001774613246754372015142 0ustar nikrationikratio00000000000000Metadata-Version: 1.1 Name: s3ql Version: 2.26 Summary: a full-featured file system for online data storage Home-page: https://bitbucket.org/nikratio/s3ql/ Author: Nikolaus Rath Author-email: Nikolaus@rath.org License: GPLv3 Download-URL: https://bitbucket.org/nikratio/s3ql/downloads Description: .. NOTE: We cannot use sophisticated ReST syntax here because this file is rendered by Bitbucket. ====== S3QL ====== S3QL is a file system that stores all its data online using storage services like `Google Storage`_, `Amazon S3`_, or OpenStack_. S3QL effectively provides a hard disk of dynamic, infinite capacity that can be accessed from any computer with internet access. 
S3QL is a standard conforming, full featured UNIX file system that is conceptually indistinguishable from any local file system. Furthermore, S3QL has additional features like compression, encryption, data de-duplication, immutable trees and snapshotting which make it especially suitable for online backup and archival. S3QL is designed to favor simplicity and elegance over performance and feature-creep. Care has been taken to make the source code as readable and serviceable as possible. Solid error detection and error handling have been included from the very first line, and S3QL comes with extensive automated test cases for all its components. .. _`Google Storage`: http://code.google.com/apis/storage/ .. _`Amazon S3`: http://aws.amazon.com/s3 .. _OpenStack: http://openstack.org/projects/storage/ Features ======== * **Transparency.** Conceptually, S3QL is indistinguishable from a local file system. For example, it supports hardlinks, symlinks, standard unix permissions, extended attributes and file sizes up to 2 TB. * **Dynamic Size.** The size of an S3QL file system grows and shrinks dynamically as required. * **Compression.** Before storage, all data may compressed with the LZMA, bzip2 or deflate (gzip) algorithm. * **Encryption.** After compression (but before upload), all data can be AES encrypted with a 256 bit key. An additional SHA256 HMAC checksum is used to protect the data against manipulation. * **Data De-duplication.** If several files have identical contents, the redundant data will be stored only once. This works across all files stored in the file system, and also if only some parts of the files are identical while other parts differ. * **Immutable Trees.** Directory trees can be made immutable, so that their contents can no longer be changed in any way whatsoever. This can be used to ensure that backups can not be modified after they have been made. * **Copy-on-Write/Snapshotting.** S3QL can replicate entire directory trees without using any additional storage space. Only if one of the copies is modified, the part of the data that has been modified will take up additional storage space. This can be used to create intelligent snapshots that preserve the state of a directory at different points in time using a minimum amount of space. * **High Performance independent of network latency.** All operations that do not write or read file contents (like creating directories or moving, renaming, and changing permissions of files and directories) are very fast because they are carried out without any network transactions. S3QL achieves this by saving the entire file and directory structure in a database. This database is locally cached and the remote copy updated asynchronously. * **Support for low bandwidth connections.** S3QL splits file contents into smaller blocks and caches blocks locally. This minimizes both the number of network transactions required for reading and writing data, and the amount of data that has to be transferred when only parts of a file are read or written. Development Status ================== S3QL is considered stable and suitable for production use. Starting with version 2.17.1, S3QL uses semantic versioning. This means that backwards-incompatible versions (e.g., versions that require an upgrade of the file system revision) will be reflected in an increase of the major version number. Supported Platforms =================== S3QL is developed and tested under Linux. Users have also reported running S3QL successfully on OS-X, FreeBSD and NetBSD. 
We try to maintain compatibility with these systems, but (due to lack of pre-release testers) we cannot guarantee that every release will run on all non-Linux systems. Please report any bugs you find, and we will try to fix them. Typical Usage ============= Before a file system can be mounted, the backend which will hold the data has to be initialized. This is done with the *mkfs.s3ql* command. Here we are using the Amazon S3 backend, and *nikratio-s3ql-bucket* is the S3 bucket in which the file system will be stored. :: mkfs.s3ql s3://ap-south-1/nikratio-s3ql-bucket To mount the S3QL file system stored in the S3 bucket *nikratio_s3ql_bucket* in the directory ``/mnt/s3ql``, enter:: mount.s3ql s3://ap-south-1/nikratio-s3ql-bucket /mnt/s3ql Now you can instruct your favorite backup program to run a backup into the directory ``/mnt/s3ql`` and the data will be stored an Amazon S3. When you are done, the file system has to be unmounted with :: umount.s3ql /mnt/s3ql Need Help? ========== The following resources are available: * The `S3QL User's Guide`_. * The `S3QL Wiki`_, which also contains the `S3QL FAQ`_. * The `S3QL Mailing List`_. You can subscribe by sending a mail to `s3ql+subscribe@googlegroups.com `_. Please report any bugs you may encounter in the `Bitbucket Issue Tracker`_. Contributing ============ The S3QL source code is available both on GitHub_ and BitBucket_. Professional Support -------------------- Professional support is offered via `Rath Consulting`_. .. _`S3QL User's Guide`: http://www.rath.org/s3ql-docs/index.html .. _`S3QL Wiki`: https://bitbucket.org/nikratio/s3ql/wiki/ .. _`Installation Instructions`: https://bitbucket.org/nikratio/s3ql/wiki/Installation .. _`S3QL FAQ`: https://bitbucket.org/nikratio/s3ql/wiki/FAQ .. _`S3QL Mailing List`: http://groups.google.com/group/s3ql .. _`Bitbucket Issue Tracker`: https://bitbucket.org/nikratio/s3ql/issues .. _BitBucket: https://bitbucket.org/nikratio/s3ql/ .. _GitHub: https://github.com/s3ql/main .. _`Rath Consulting`: http://www.rath-consulting.biz/ Keywords: FUSE,backup,archival,compression,encryption,deduplication,aws,s3 Platform: POSIX Platform: UNIX Platform: Linux Classifier: Development Status :: 5 - Production/Stable Classifier: Environment :: No Input/Output (Daemon) Classifier: Environment :: Console Classifier: License :: OSI Approved :: GNU Library or Lesser General Public License (GPLv3) Classifier: Topic :: Internet Classifier: Operating System :: POSIX Classifier: Topic :: System :: Archiving Provides: s3ql s3ql-2.26/rst/0000775000175000017500000000000013246754372014636 5ustar nikrationikratio00000000000000s3ql-2.26/rst/contrib.rst0000644000175000017500000000523112615000156017006 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- ===================== Contributed Programs ===================== S3QL comes with a few contributed programs that are not part of the core distribution (and are therefore not installed automatically by default), but which may nevertheless be useful. These programs are in the `contrib` directory of the source distribution or in `/usr/share/doc/s3ql/contrib` if you installed S3QL from a package. benchmark.py ============ This program measures S3QL write performance, uplink bandwidth and compression speed to determine the limiting factor. It also gives recommendation for compression algorithm and number of upload threads to achieve maximum performance. 
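If you just want a rough feel for how the three supported compression algorithms compare on your machine, the underlying measurement idea can be sketched in a few lines of Python. This is only an illustration of the principle, not what ``benchmark.py`` actually does, and the 10 MiB of random test data is an arbitrary (and incompressible) choice::

    # Rough compression-throughput sketch -- illustration only, not benchmark.py.
    import bz2, lzma, os, time, zlib

    data = os.urandom(10 * 1024 * 1024)  # 10 MiB of (incompressible) test data
    for name, compress in (('lzma', lzma.compress),
                           ('bzip2', bz2.compress),
                           ('deflate', zlib.compress)):
        start = time.perf_counter()
        compress(data)
        print('%-8s %6.1f MiB/s' % (name, 10 / (time.perf_counter() - start)))

For meaningful numbers, run ``benchmark.py`` itself, since it also takes the uplink bandwidth into account.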
clone_fs.py =========== This program physically clones an S3QL file system from one backend into another, without recompressing or reencrypting. It can be used to migrate S3 buckets to a different storage region or storage class (standard or reduced redundancy). .. _pcp: pcp.py ====== ``pcp.py`` is a wrapper program that starts several rsync processes to copy directory trees in parallel. This is important because transferring files in parallel significantly enhances performance when copying data from an S3QL file system (see :ref:`copy_performance` for details). To recursively copy the directory ``/mnt/home-backup`` into ``/home/joe`` using 8 parallel processes and preserving permissions, you would execute :: pcp.py -a --processes=8 /mnt/home-backup/ /home/joe s3ql_backup.sh ============== This is an example script that demonstrates how to set up a simple but powerful backup solution using S3QL and `rsync `_. The `s3ql_backup.sh` script automates the following steps: #. Mount the file system #. Replicate the previous backup with :ref:`s3qlcp ` #. Update the new copy with the data from the backup source using rsync #. Make the new backup immutable with :ref:`s3qllock ` #. Delete old backups that are no longer needed #. Unmount the file system The backups are stored in directories of the form `YYYY-MM-DD_HH:mm:SS` and the `expire_backups.py`_ command is used to delete old backups. expire_backups.py ================= :program:`expire_backups.py` is a program to intelligently remove old backups that are no longer needed. .. include:: man/expire_backups.rst :start-after: begin_main_content :end-before: end_main_content For a full list of available options, run :program:`expire_backups.py --help`. .. _remove_objects: remove_objects.py ================= :program:`remove_objects.py` is a program to remove a list of objects from a storage backend. Since it acts on the backend-level, the backend need not contain an S3QL file system. s3ql-2.26/rst/_templates/0000775000175000017500000000000013246754372016773 5ustar nikrationikratio00000000000000s3ql-2.26/rst/_templates/layout.html0000644000175000017500000000537312433007262021166 0ustar nikrationikratio00000000000000{% extends "!layout.html" %} {# put the sidebar before the body #} {% block sidebar1 %}{{ sidebar() }}{% endblock %} {% block sidebar2 %}{% endblock %} {# - Do not mention Python staff in the search box instructions - Display complete TOC #} {%- macro sidebar() %} {%- if not embedded %}{% if not theme_nosidebar|tobool %}
{%- block sidebarlogo %} {%- if logo %} {%- endif %} {%- endblock %} {%- block sidebartoc %}

{{ _('Table Of Contents') }}

{{ toctree() }} {%- endblock %} {%- block sidebarrel %} {# {%- if prev %}

{{ _('Previous topic') }}

{{ prev.title }}

{%- endif %} {%- if next %}

{{ _('Next topic') }}

{{ next.title }}

{%- endif %} #} {%- endblock %} {%- block sidebarsourcelink %} {%- if show_source and has_source and sourcename %}

{{ _('This Page') }}

{%- endif %} {%- endblock %} {%- if customsidebar %} {% include customsidebar %} {%- endif %} {%- block sidebarsearch %} {%- if pagename != "search" %} {%- endif %} {%- endblock %}
{%- endif %}{% endif %} {%- endmacro %} s3ql-2.26/rst/installation.rst0000664000175000017500000001434413227212411020055 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- ============== Installation ============== S3QL depends on several other programs and libraries that have to be installed first. The best method to satisfy these dependencies depends on your distribution. The following instructions are for S3QL |version| and should be applicable to any system. The `S3QL Wiki `_ contains `additional help `_ help for specific distributions and operating systems. Note, however, that S3QL wiki is editable by anyone. The information there has thus not been vetted by the S3QL maintainers, and may be wrong, out-of-date, or even dangerous. Generally, you should only follow steps from the Wiki that you fully understand yourself, and fall back on the instructions below when in doubt. Dependencies ============ The following is a list of the programs and libraries required for running S3QL. Generally, you should first check if your distribution already provides a suitable packages and only install from source if that is not the case. * Kernel: Linux 2.6.9 or newer or FreeBSD with `FUSE4BSD `_. Starting with kernel 2.6.26 you will get significantly better write performance, so under Linux you should actually use *2.6.26 or newer whenever possible*. * The `psmisc `_ utilities. * `SQLite `_ version 3.7.0 or newer. SQLite has to be installed as a *shared library* with development headers. * `Python `_ 3.3.0 or newer. Make sure to also install the development headers. * The following Python modules: * `setuptools `_, version 1.0 or newer. * `pycrypto `_ * `defusedxml `_ * `requests `_ (optional, required for OAuth2 authentication with Google Storage) * `systemd `_ (optional, for enabling systemd support). * `apsw `_, version 3.7.0 or newer. * `llfuse `_, any version between 1.0 (inclusive) and 2.0 (exclusive) * `dugong `_, any version between 3.4 (inclusive) and 4.0 (exclusive) * `pytest `_, version 2.7 or newer (optional, to run unit tests) To check if a specific module :var:`` is installed, execute :samp:`python3 -c 'import {}; print({}.__version__)'`. This will result in an `ImportError` if the module is not installed, and will print the installed version if the module is installed. .. _inst-s3ql: Installing S3QL =============== To build and install S3QL itself, proceed as follows: 1. Download S3QL from https://bitbucket.org/nikratio/s3ql/downloads 2. Unpack it into a folder of your choice 3. Run `python3 setup.py build_ext --inplace` to build S3QL. 4. Run `python3 -m pytest tests/` to run a self-test. If this fails, ask for help on the `mailing list `_ or report a bug in the `issue tracker `_. Now you have three options: * You can run the S3QL commands from the `bin/` directory. * You can install S3QL system-wide for all users. To do that, you have to run `sudo python3 setup.py install`. * You can install S3QL into `~/.local` by executing `python3 setup.py install --user`. In this case you should make sure that `~/.local/bin` is in your `$PATH` environment variable. Development Version =================== If you have checked out the unstable development version from the Mercurial repository, a bit more effort is required. You'll also need: * Version 0.24 or newer of the Cython_ compiler. * Version 1.2b1 or newer of the Sphinx_ document processor. 
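Before building, it can be handy to verify that these modules (and the other Python dependencies listed above) are importable. A minimal sketch, assuming the usual import names (``Crypto`` for pycrypto, ``llfuse`` for python-llfuse)::

    # Quick dependency check -- convenience sketch, not part of S3QL.
    import importlib

    for name in ('apsw', 'llfuse', 'dugong', 'defusedxml', 'Crypto',
                 'cython', 'sphinx', 'pytest'):
        try:
            mod = importlib.import_module(name)
            print('%-12s %s' % (name, getattr(mod, '__version__', '(version unknown)')))
        except ImportError:
            print('%-12s NOT INSTALLED' % name)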
With these additional dependencies installed, S3QL can be build and tested with :: python3 setup.py build_cython python3 setup.py build_ext --inplace python3 -m pytest tests/ Note that when building from the Mercurial or Git repository, building and testing is done with several additional checks. This may cause compilation and/or tests to fail even though there are no problems with functionality. For example, any use of functions that are scheduled for deprecation in future Python version will cause tests to fail. If you would rather just check for functionality, you can delete the :file:`MANIFEST.in` file. In that case, the build system will behave as it does for a regular release. The HTML and PDF documentation can be generated with :: python3 setup.py build_sphinx and S3QL can be installed as usual with :: python3 setup.py install [--user] Running tests requiring remote servers ====================================== By default, tests requiring a connection to a remote storage backend are skipped. If you would like to run these tests too (which is always a good idea), you have to create additional entries in your `~/.s3ql/authinfo2` file that tell S3QL what server and credentials to use for these tests. These entries have the following form:: [-test] backend-login: backend-password: test-fs: Here ** specifies the backend that you want to test (e.g. *s3*, *s3c*, *gs*, or *swift*), ** and ** are the backend authentication credentials, and ** specifies the full storage URL that will be used for testing. **Any existing S3QL file system in this storage URL will be destroyed during testing**. For example, to run tests that need connection to a Google Storage server, you would add something like :: [gs-test] backend-login: GOOGIGWLONT238MD7HZ4 backend-password: rmEbstjscoeunt1249oes1298gauidbs3hl test-fs: gs://joes-gs-bucket/s3ql_tests/ On the next run of `runtest.py` (or `py.test` when using the development version), the additional tests will be run. If the tests are still skipped, you can get more information about why tests are being skipped by passing the :cmdopt:`-rs` argument to `runtest.py`/`py.test`. .. _Cython: http://www.cython.org/ .. _Sphinx: http://sphinx.pocoo.org/ s3ql-2.26/rst/_static/0000775000175000017500000000000013246754372016264 5ustar nikrationikratio00000000000000s3ql-2.26/rst/_static/sphinxdoc.css0000644000175000017500000001351512433007262020762 0ustar nikrationikratio00000000000000/** * Sphinx stylesheet -- sphinxdoc theme * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * Originally created by Armin Ronacher for Werkzeug, adapted by Georg Brandl. 
*/ @import url("basic.css"); /* -- page layout ----------------------------------------------------------- */ body { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'DejaVu Sans', 'Verdana', sans-serif; font-size: 14px; letter-spacing: -0.01em; line-height: 150%; text-align: center; background-color: #BFD1D4; color: black; padding: 0; border: 1px solid #aaa; margin: 0px 80px 0px 80px; min-width: 740px; } div.document { background-color: white; text-align: left; background-image: url(contents.png); background-repeat: repeat-x; } div.bodywrapper { margin: 0 240px 0 0; border-right: 1px solid #ccc; } div.body { margin: 0; padding: 0.5em 20px 20px 20px; } div.related { font-size: 1em; } div.related ul { background-image: url(navigation.png); height: 2em; border-top: 1px solid #ddd; border-bottom: 1px solid #ddd; } div.related ul li { margin: 0; padding: 0; height: 2em; float: left; } div.related ul li.right { float: right; margin-right: 5px; } div.related ul li a { margin: 0; padding: 0 5px 0 5px; line-height: 1.75em; color: #EE9816; } div.related ul li a:hover { color: #3CA8E7; } div.sphinxsidebarwrapper { padding: 0; } div.sphinxsidebar { margin: 0; padding: 0.5em 15px 15px 0; width: 210px; float: right; font-size: 1em; text-align: left; } div.sphinxsidebar h3, div.sphinxsidebar h4 { margin: 1em 0 0.5em 0; font-size: 1em; padding: 0.1em 0 0.1em 0.5em; color: white; border: 1px solid #86989B; background-color: #AFC1C4; } div.sphinxsidebar h3 a { color: white; } div.sphinxsidebar ul { padding-left: 1.5em; margin-top: 7px; padding: 0; line-height: 130%; } div.sphinxsidebar ul ul { margin-left: 20px; } div.footer { background-color: #E3EFF1; color: #86989B; padding: 3px 8px 3px 0; clear: both; font-size: 0.8em; text-align: right; } div.footer a { color: #86989B; text-decoration: underline; } /* -- body styles ----------------------------------------------------------- */ p { margin: 0.8em 0 0.5em 0; } a { color: #CA7900; text-decoration: none; } a:hover { color: #2491CF; } /* div.body a { text-decoration: underline; } */ h1 { margin: 0; padding: 0.7em 0 0.3em 0; font-size: 1.5em; color: #11557C; } h2 { margin: 1.3em 0 0.2em 0; font-size: 1.35em; padding: 0; } h3 { margin: 1em 0 -0.3em 0; font-size: 1.1em; } div.body h1 a, div.body h2 a, div.body h3 a, div.body h4 a, div.body h5 a, div.body h6 a { color: black!important; } h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor { display: none; margin: 0 0 0 0.3em; padding: 0 0.2em 0 0.2em; color: #aaa!important; } h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor, h5:hover a.anchor, h6:hover a.anchor { display: inline; } h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover, h5 a.anchor:hover, h6 a.anchor:hover { color: #777; background-color: #eee; } a.headerlink { color: #c60f0f!important; font-size: 1em; margin-left: 6px; padding: 0 4px 0 4px; text-decoration: none!important; } a.headerlink:hover { background-color: #ccc; color: white!important; } cite, code, tt { font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; font-size: 0.95em; letter-spacing: 0.01em; } tt { background-color: #f2f2f2; border-bottom: 1px solid #ddd; color: #333; } tt.descname, tt.descclassname, tt.xref { border: 0; } hr { border: 1px solid #abc; margin: 2em; } a tt { border: 0; color: #CA7900; } a tt:hover { color: #2491CF; } pre { font-family: 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; font-size: 0.95em; letter-spacing: 0.015em; 
line-height: 120%; padding: 0.5em; border: 1px solid #ccc; background-color: #f8f8f8; } pre a { color: inherit; text-decoration: underline; } td.linenos pre { padding: 0.5em 0; } div.quotebar { background-color: #f8f8f8; max-width: 250px; float: right; padding: 2px 7px; border: 1px solid #ccc; } div.topic { background-color: #f8f8f8; } table { border-collapse: collapse; margin: 0 -0.5em 0 -0.5em; } table td, table th { padding: 0.2em 0.5em 0.2em 0.5em; } div.admonition, div.warning { font-size: 0.9em; margin: 1em 0 1em 0; border: 1px solid #86989B; background-color: #f7f7f7; padding: 0; } div.admonition p, div.warning p { margin: 0.5em 1em 0.5em 1em; padding: 0; } div.admonition pre, div.warning pre { margin: 0.4em 1em 0.4em 1em; } div.admonition p.admonition-title, div.warning p.admonition-title { margin: 0; padding: 0.1em 0 0.1em 0.5em; color: white; border-bottom: 1px solid #86989B; font-weight: bold; background-color: #AFC1C4; } div.warning { border: 1px solid #940000; } div.warning p.admonition-title { background-color: #CF0000; border-bottom-color: #940000; } div.admonition ul, div.admonition ol, div.warning ul, div.warning ol { margin: 0.1em 0.5em 0.5em 3em; padding: 0; } div.versioninfo { margin: 1em 0 0 0; border: 1px solid #ccc; background-color: #DDEAF0; padding: 8px; line-height: 1.3em; font-size: 0.9em; } /* Niko's Styles */ div.body li p { margin-bottom: 0.8em; margin-top: 0.8em; } table.option-list td, table.option-list th { border: 0px; } strong.program { font-weight: normal; font-style: italic; } s3ql-2.26/rst/adm.rst0000644000175000017500000000434712557313377016137 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- Managing File Systems ===================== The `s3qladm` command performs various operations on *unmounted* S3QL file systems. The file system *must not be mounted* when using `s3qladm` or things will go wrong badly. The syntax is :: s3qladm [options] where :var:`action` may be either of :program:`passphrase`, :program:`upgrade`, :program:`clear` or :program:`download-metadata`. The :program:`s3qladm` accepts the following general options, no matter what specific action is being invoked: .. pipeinclude:: python ../bin/s3qladm --help :start-after: show this help message and exit Changing the Passphrase ----------------------- To change the passphrase of a file system, use the `passphrase` subcommand:: s3qladm passphrase Upgrading the file system ------------------------- If you have installed a new version of S3QL, it may sometimes be necessary to upgrade the file system metadata as well. Note that in this case the file system can no longer be accessed with older versions of S3QL after the upgrade. During the upgrade you have to make sure that the command is not interrupted, and that no one else tries to mount, check or upgrade the file system at the same time. To upgrade a file system from the previous to the current revision, execute :: s3qladm upgrade Deleting a file system ---------------------- A file system can be deleted with:: s3qladm clear This physically deletes all the data and file system structures. Restoring Metadata Backups -------------------------- If the most-recent copy of the file system metadata has been damaged irreparably, it is possible to restore one of the automatically created backup copies. The command :: s3qladm download-metadata will give you a list of the available metadata backups and allow you to download them. This will create two new files in the current directory, ending in ``.db`` and ``.params``. 
To actually use the downloaded backup, you need to move these files into the ``~/.s3ql/`` directory and run ``fsck.s3ql``. .. WARNING:: You should probably not use this functionality without having asked for help on the mailing list first (see :ref:`resources`). s3ql-2.26/rst/umount.rst0000644000175000017500000000214112557313377016713 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- ========== Unmounting ========== To unmount an S3QL file system, use the command:: umount.s3ql [options] This will block until all data has been written to the backend. Only the user who mounted the file system with :program:`mount.s3ql` is able to unmount it again. If you are root and want to unmount an S3QL file system mounted by an ordinary user, you have to use the :command:`fusermount -u` or :command:`umount` command instead. Note that these commands do not block until all data has been uploaded, so if you use them instead of `umount.s3ql` then you should manually wait for the `mount.s3ql` process to terminate before shutting down the system. The :program:`umount.s3ql` command accepts the following options: .. pipeinclude:: python ../bin/umount.s3ql --help :start-after: show this help message and exit If, for some reason, the `umount.s3ql` command does not work, the file system can also be unmounted with `fusermount -u -z`. Note that this command will return immediately and the file system may continue to upload data in the background for a while longer. s3ql-2.26/rst/impl_details.rst0000664000175000017500000001333613160156175020034 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- .. _impl_details: ======================== Implementation Details ======================== This section provides some background information on how S3QL works internally. Reading this section is not necessary to use S3QL. Metadata Storage ================ Like most Unix file systems, S3QL has a concept of inodes. The contents of directory inodes (i.e., the names and inodes of the files and subdirectories contained in a directory) are stored directly in an SQLite_ database. This database is stored in a special storage object that is downloaded when the file system is mounted and uploaded periodically in the background and when the file system is unmounted. This has two implications: #. The entire file system tree can be read from the database. Fetching/storing storage objects from/in the storage backend is only required to access the contents of files (or, more precisely, inodes). This makes most file system operations very fast because no data has to be sent over the network. #. An S3QL file system can only be mounted on one computer at a time, using a single :program:`mount.s3ql` process. Otherwise, changes made in one mount point will invariably be overwritten when the second mount point is unmounted. Sockets, FIFOs and character devices do not need any additional storage; all information about them is contained in the database. Data Storage ============ The contents of file inodes are split into individual blocks. The maximum size of a block is specified when the file system is created and cannot be changed afterwards. Every block is stored as an individual object in the backend, and the mapping from inodes to blocks and from blocks to objects is stored in the database. While the file system is mounted, blocks are cached locally. Blocks can also be compressed and encrypted before they are stored in the storage backend. This happens during upload, i.e. the cached data is kept unencrypted and uncompressed.
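To make the block/object mapping concrete: the block that a given byte of a file belongs to follows directly from the maximum block size chosen at file system creation time. A minimal sketch (the 10 MiB block size is just an example value, and this is not S3QL's actual code)::

    # Map a byte offset within a file to a block index and an offset inside
    # that block -- illustration only.
    max_block_size = 10 * 1024 * 1024   # example value chosen at mkfs time

    def locate(offset):
        return offset // max_block_size, offset % max_block_size

    print(locate(0))            # (0, 0)       -> first block of the file
    print(locate(25_000_000))   # (2, 4028480) -> third block of the file

Each such block index is then looked up in the database to find the backend object that stores the block's contents.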
If some files have blocks with identical contents, the blocks will be stored in the same backend object (i.e., the data is only stored once). Data De-Duplication =================== Instead of uploading every block, S3QL first computes a checksum (a SHA256 hash) to check if an identical block has already been stored in a backend object. If that is the case, the new block will be linked to the existing object instead of being uploaded. This procedure is invisible to the user, and the contents of the block can still be changed. If several blocks share a backend object and one of the blocks is changed, the changed block is automatically stored in a new object (so that the contents of the other blocks remain unchanged). Caching ======= When an application tries to read from or write to a file, S3QL determines the block that contains the required part of the file and retrieves it from the backend or creates it if it does not yet exist. The block is then held in the cache directory. It is committed to the backend when it has not been accessed for more than a few seconds. Blocks are removed from the cache only when the maximum cache size is reached. When the file system is unmounted, all modified blocks are written to the backend and the cache is cleaned. Eventual Consistency Handling ============================= S3QL has to take into account that with some storage providers, changes in objects do not propagate immediately. For example, when an Amazon S3 object is uploaded and immediately downloaded again, the downloaded data might not yet reflect the changes done in the upload (see also http://developer.amazonwebservices.com/connect/message.jspa?messageID=38538). For data blocks this is not a problem, because data blocks always get a new object ID when they are updated. For the metadata, however, S3QL has to make sure that it always downloads the most recent copy of the database when mounting the file system. To that end, metadata versions are numbered, and the most recent version number is stored as part of the object id of a very small "marker" object. When S3QL has downloaded the metadata, it checks the version number against the marker object and, if the two do not agree, waits for the most recent metadata to become available. Once the current metadata is available, the version number is increased and the marker object updated. Encryption ========== When the file system is created, :program:`mkfs.s3ql` generates a 256 bit master key by reading from :file:`/dev/random`. The master key is encrypted with the passphrase that is entered by the user, and then stored with the rest of the file system data. Since the passphrase is only used to access the master key (which is used to encrypt the actual file system data), the passphrase can easily be changed. Data is encrypted with a new session key for each object and each upload. The session key is generated by appending a nonce to the master key and then calculating the SHA256 hash. The nonce is generated by concatenating the object id and the current UTC time as a 32 bit float. The precision of the time is given by the Python `time() `_ function and usually at least 1 millisecond. The SHA256 implementation is included in the Python standard library. Once the session key has been calculated, a SHA256 HMAC is calculated over the data that is to be uploaded. Afterwards, the data is compressed (unless :cmdopt:`--compress none` was passed to :program:`mount.s3ql`) and the HMAC inserted at the beginning.
Both HMAC and compressed data are then encrypted using 256 bit AES in CTR mode using PyCrypto_. Finally, the nonce is inserted in front of the encrypted data and HMAC, and the packet is send to the backend as a new S3 object. .. _PyCrypto: http://www.pycrypto.org/ .. _SQLite: http://www.sqlite.org/ s3ql-2.26/rst/mkfs.rst0000644000175000017500000000267112577121514016324 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- ==================== File System Creation ==================== A S3QL file system is created with the :program:`mkfs.s3ql` command. It has the following syntax:: mkfs.s3ql [options] This command accepts the following options: .. pipeinclude:: python ../bin/mkfs.s3ql --help :start-after: show this help message and exit Unless you have specified the :cmdopt:`--plain` option, :program:`mkfs.s3ql` will ask you to enter an encryption password. This password will *not* be read from an authentication file specified with the :cmdopt:`--authfile` option to prevent accidental creation of an encrypted file system. Note that: * All data that is stored under the given storage url is assumed to managed exclusively by S3QL. Trying to manually save additional objects (or remove or manipulate existing objects) will lead to file system corruption, and :program:`fsck.s3ql` may delete objects that do not belong to the file system. * With most storage backends, slashes in the storage url prefix do not have special meaning. For example, the storage urls ``s3://mybucket/myprefix/`` and ``s3://mybucket/myprefix`` are distinct. In the first case, the prefix is ``myprefix/``, while in the second it is ``myprefix``. * S3QL file systems can not be "stacked", i.e. you cannot have one file system stored at ``s3://bucketname/outerprefix`` and a second one at ``s3://bucketname/outerprefix/innerprefix``. s3ql-2.26/rst/special.rst0000644000175000017500000000701113223730045016767 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- ======================== Advanced S3QL Features ======================== .. _s3qlcp: Snapshotting and Copy-on-Write ============================== The command `s3qlcp` can be used to duplicate a directory tree without physically copying the file contents. This is made possible by the data de-duplication feature of S3QL. The syntax of `s3qlcp` is:: s3qlcp [options] This will replicate the contents of the directory `` in the directory ``. `` has to be an existing directory and `` must not exist. Moreover, both directories have to be within the same S3QL file system. .. include:: man/cp.rst :start-after: begin_main_content :end-before: end_main_content .. _s3qlstat: Getting Statistics ================== You can get more information about a mounted S3QL file system with the `s3qlstat` command. It has the following syntax:: s3qlstat [options] This will print out something like this :: Directory entries: 1488068 Inodes: 1482991 Data blocks: 87948 Total data size: 400 GiB After de-duplication: 51 GiB (12.98% of total) After compression: 43 GiB (10.85% of total, 83.60% of de-duplicated) Database size: 172 MiB (uncompressed) (some values do not take into account not-yet-uploaded dirty blocks in cache) Probably the most interesting numbers are the total size of your data, the total size after duplication, and the final size after de-duplication and compression. `s3qlstat` can only be called by the user that mounted the file system and (if the file system was mounted with `--allow-other` or `--allow-root`) the root user. For a full list of available options, run `s3qlstat --help`. 
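If you want to feed these statistics into your own monitoring or reporting scripts, the output shown above is easy to parse. A rough sketch (it assumes the output format printed above and a file system mounted at ``/mnt/s3ql``)::

    # Parse `s3qlstat <mountpoint>` output into a dict -- rough sketch only.
    import re
    import subprocess

    out = subprocess.check_output(['s3qlstat', '/mnt/s3ql'],
                                  universal_newlines=True)
    stats = {}
    for line in out.splitlines():
        m = re.match(r'([^:]+):\s+(.+)', line)
        if m:
            stats[m.group(1).strip()] = m.group(2).strip()
    print(stats.get('Total data size'), '->', stats.get('After compression'))

The keys of ``stats`` are simply the field names exactly as printed by ``s3qlstat``.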
.. _s3qllock: Immutable Trees =============== The command :program:`s3qllock` can be used to make a directory tree immutable. Immutable trees can no longer be changed in any way whatsoever. You can not add new files or directories and you can not change or delete existing files and directories. The only way to get rid of an immutable tree is to use the :program:`s3qlrm` command (see below). For example, to make the directory tree beneath the directory ``2010-04-21`` immutable, execute :: s3qllock 2010-04-21 .. include:: man/lock.rst :start-after: begin_main_content :end-before: end_main_content .. _s3qlrm: Fast Recursive Removal ====================== The ``s3qlrm`` command can be used to recursively delete files and directories on an S3QL file system. Although ``s3qlrm`` is faster than using e.g. ``rm -r``, the main reason for its existence is that it allows you to delete immutable trees as well. The syntax is rather simple:: s3qlrm Be warned that there is no additional confirmation. The directory will be removed entirely and immediately. .. _s3qlctrl: Runtime Configuration ===================== The `s3qlctrl` can be used to control a mounted S3QL file system. Its syntax is :: s3qlctrl [options] ... `` must be the location of a mounted S3QL file system. For a list of valid options, run `s3qlctrl --help`. `` may be either of: :flushcache: Flush file system cache. The command blocks until the cache has been flushed. :dropcache: Flush, and then drop file system cache. The command blocks until the cache has been flushed and dropped. :log: Change log level. :cachesize: Change file system cache size. :upload-meta: Trigger a metadata upload. s3ql-2.26/rst/conf.py0000644000175000017500000002023012577121514016120 0ustar nikrationikratio00000000000000# -*- coding: utf-8 -*- # # This file containts the Sphinx configuration to generate the # HTML and PDF documentation from the plain text (RST) source. # # This file does not contain any S3QL documentation itself. # # Add a custom role for command line options that does not try to # reference anything. def add_literal_role(rolename): from docutils.parsers.rst import roles from docutils import nodes nodeclass = nodes.literal generic = roles.GenericRole(rolename, nodeclass) role = roles.CustomRole(rolename, generic, {'classes': [rolename]}) roles.register_local_role(rolename, role) add_literal_role('cmdopt') add_literal_role('var') # A variable defined in a :samp: role # Add our own Pygments Lexer import pygments.lexers._mapping as pmap pmap.LEXERS['CommandLineLexer'] = ('cmdline_lexer', 'CommandLine', ('CommandLine', 'commandline'), (), ()) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.append(os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.ifconfig', 'sphinx_pipeinclude' ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # Warn about unresolved references nitpicky = True # General information about the project. 
project = u'S3QL' copyright = u'© 2008 Nikolaus Rath ' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. #version = # The full version, including alpha/beta/rc tags. #release = # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = [ 'include' ] # The reST default role (used for this markup: `text`) to use for all documents. default_role = 'file' # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'colorful' highlight_language = 'commandline' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'sphinxdoc' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {'stickysidebar': 'true'} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. html_use_modindex = False # If false, no index is generated. 
html_use_index = False # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'S3QLdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'manual.tex', u'S3QL Documentation', u'Nikolaus Rath', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('man/mkfs', 'mkfs.s3ql', u'Create an S3QL file system', None, 1), ('man/mount', 'mount.s3ql', u'Mount an S3QL file system', None, 1), ('man/umount', 'umount.s3ql', u'Unmount an S3QL file system', None, 1), ('man/fsck', 'fsck.s3ql', u'Check an S3QL file system for errors', None, 1), ('man/ctrl', 's3qlctrl', u'Control a mounted S3QL file system', None, 1), ('man/adm', 's3qladm', u'Manage S3QL file systems', None, 1), ('man/cp', 's3qlcp', u'Copy-on-write replication on S3QL file systems', None, 1), ('man/lock', 's3qllock', u'Make trees on an S3QL file system immutable', None, 1), ('man/rm', 's3qlrm', u'Fast tree removal on S3QL file systems', None, 1), ('man/stat', 's3qlstat', u'Gather S3QL file system statistics', None, 1), ('man/expire_backups', 'expire_backups', u'Intelligently expire old backups', None, 1), ('man/pcp', 'pcp', u'Recursive, parallel copy of directory trees', None, 1), ('man/oauth_client', 's3ql_oauth_client', u'Obtain Google Storage OAuth2 tokens', None, 1), ('man/verify', 's3ql_verify', u'Verify data in an S3QL file system', None, 1), ] s3ql-2.26/rst/tips.rst0000644000175000017500000000673412557313377016357 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- ============= Tips & Tricks ============= .. 
_ssh_tipp: SSH Backend =========== By combining S3QL's local backend with `sshfs `_, it is possible to store an S3QL file system on arbitrary SSH servers: first mount the remote target directory into the local filesystem, :: sshfs user@my.server.com:/mnt/s3ql /mnt/sshfs and then give the mountpoint to S3QL as a local destination:: mount.s3ql local:///mnt/sshfs/myfsdata /mnt/s3ql Permanently mounted backup file system ====================================== If you use S3QL as a backup file system, it can be useful to mount the file system permanently (rather than just mounting it for a backup and unmounting it afterwards). Especially if your file system becomes large, this saves you long mount- and unmount times if you only want to restore a single file. If you decide to do so, you should make sure to * Use :ref:`s3qllock ` to ensure that backups are immutable after they have been made. * Call :ref:`s3qlctrl upload-meta ` right after a every backup to make sure that the newest metadata is stored safely (if you do backups often enough, this may also allow you to set the :cmdopt:`--metadata-upload-interval` option of :program:`mount.s3ql` to zero). .. _copy_performance: Improving copy performance ========================== .. NOTE:: The following applies only when copying data **from** an S3QL file system, **not** when copying data **to** an S3QL file system. If you want to copy a lot of smaller files *from* an S3QL file system (e.g. for a system restore) you will probably notice that the performance is rather bad. The reason for this is intrinsic to the way S3QL works. Whenever you read a file, S3QL first has to retrieve this file over the network from the backend. This takes a minimum amount of time (the network latency), no matter how big or small the file is. So when you copy lots of small files, 99% of the time is actually spend waiting for network data. Theoretically, this problem is easy to solve: you just have to copy several files at the same time. In practice, however, almost all unix utilities (``cp``, ``rsync``, ``tar`` and friends) insist on copying data one file at a time. This makes a lot of sense when copying data on the local hard disk, but in case of S3QL this is really unfortunate. The best workaround that has been found so far is to copy files by starting several rsync processes at once and use exclusion rules to make sure that they work on different sets of files. For example, the following script will start 3 rsync instances. The first instance handles all filenames starting with a-f, the second the filenames from g-l and the third covers the rest. The ``+ */`` rule ensures that every instance looks into all directories. :: #!/bin/bash RSYNC_ARGS="-aHv /mnt/s3ql/ /home/restore/" rsync -f "+ */" -f "-! [a-f]*" $RSYNC_ARGS & rsync -f "+ */" -f "-! [g-l]*" $RSYNC_ARGS & rsync -f "+ */" -f "- [a-l]*" $RSYNC_ARGS & wait The optimum number of parallel processes depends on your network connection and the size of the files that you want to transfer. However, starting about 10 processes seems to be a good compromise that increases performance dramatically in almost all situations. S3QL comes with a script named ``pcp.py`` in the ``contrib`` directory that can be used to transfer files in parallel without having to write an explicit script first. See the description of :ref:`pcp` for details. 
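For illustration, the same technique can also be written down in a few
lines of Python. The following sketch merely mirrors the shell script
above (same filter rules, same example paths) and is not a substitute
for ``pcp.py``::

   #!/usr/bin/env python3
   # Sketch: start one rsync process per group of file names (same
   # filter rules and example paths as the shell script above) and
   # wait for all of them to finish.
   import subprocess

   RSYNC_ARGS = ['-aHv', '/mnt/s3ql/', '/home/restore/']

   FILTER_SETS = [
       ['-f', '+ */', '-f', '-! [a-f]*'],   # names starting with a-f
       ['-f', '+ */', '-f', '-! [g-l]*'],   # names starting with g-l
       ['-f', '+ */', '-f', '- [a-l]*'],    # everything else
   ]

   procs = [subprocess.Popen(['rsync'] + rules + RSYNC_ARGS)
            for rules in FILTER_SETS]
   for proc in procs:
       proc.wait()
       if proc.returncode != 0:
           print('rsync exited with status', proc.returncode)
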
s3ql-2.26/rst/include/0000775000175000017500000000000013246754372016261 5ustar nikrationikratio00000000000000s3ql-2.26/rst/include/postman.rst0000644000175000017500000000041012557313377020465 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- See Also ======== The S3QL homepage is at https://bitbucket.org/nikratio/s3ql/. The full S3QL documentation should also be installed somewhere on your system, common locations are :file:`/usr/share/doc/s3ql` or :file:`/usr/local/doc/s3ql`. s3ql-2.26/rst/include/about.rst0000644000175000017500000000037312557313377020126 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- .. only:: man S3QL is a file system for online data storage. Before using S3QL, make sure to consult the full documentation (rather than just the man pages which only briefly document the available userspace commands). s3ql-2.26/rst/include/exitcodes.rst0000644000175000017500000000025212557313377020777 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- :0: Everything went well. :1: An unexpected error occured. This may indicate a bug in the program. :2: Invalid command line argument. s3ql-2.26/rst/about.rst0000644000175000017500000000041312615000156016455 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- .. include:: ../README.rst :end-before: Typical Usage Contributing ============ The S3QL source code is available both on GitHub_ and BitBucket_. .. _BitBucket: https://bitbucket.org/nikratio/s3ql/ .. _GitHub: https://github.com/s3ql/main s3ql-2.26/rst/mount.rst0000644000175000017500000001665112742247106016531 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- ========== Mounting ========== A S3QL file system is mounted with the :program:`mount.s3ql` command. It has the following syntax:: mount.s3ql [options] .. NOTE:: S3QL is not a network file system like `NFS `_ or `CIFS `_. It can only be mounted on one computer at a time. This command accepts the following options: .. pipeinclude:: python ../bin/mount.s3ql --help :start-after: show this help message and exit Permission Checking =================== If the file system is mounted with neither the :cmdopt:`allow-root` nor :cmdopt:`allow-other` option, the mounting user has full permissions on the S3QL file system (he is effectively root). If one (or both) of the options is used, standard unix permission checks apply, i.e. only the real root user has full access and all other users (including the mounting user) are subject to permission checks. Compression Algorithms ====================== S3QL supports three compression algorithms, LZMA, Bzip2 and zlib (with LZMA being the default). The compression algorithm can be specified freely whenever the file system is mounted, since it affects only the compression of new data blocks. Roughly speaking, LZMA is slower but achieves better compression ratios than Bzip2, while Bzip2 in turn is slower but achieves better compression ratios than zlib. For maximum file system performance, the best algorithm therefore depends on your network connection speed: the compression algorithm should be fast enough to saturate your network connection. To find the optimal algorithm and number of parallel compression threads for your system, S3QL ships with a program called `benchmark.py` in the `contrib` directory. You should run this program on a file that has a size that is roughly equal to the block size of your file system and has similar contents. 
It will then determine the compression speeds for the different algorithms and the upload speeds for the specified backend and recommend the best algorithm that is fast enough to saturate your network connection. Obviously you should make sure that there is little other system load when you run `benchmark.py` (i.e., don't compile software or encode videos at the same time). Notes about Caching =================== S3QL maintains a local cache of the file system data to speed up access. The cache is block based, so it is possible that only parts of a file are in the cache. Maximum Number of Cache Entries ------------------------------- The maximum size of the cache can be configured with the :cmdopt:`--cachesize` option. In addition to that, the maximum number of objects in the cache is limited by the :cmdopt:`--max-cache-entries` option, so it is possible that the cache does not grow up to the maximum cache size because the maximum number of cache elements has been reached. The reason for this limit is that each cache entry requires one open file descriptor, and Linux distributions usually limit the total number of file descriptors per process to about a thousand. If you specify a value for :cmdopt:`--max-cache-entries`, you should therefore make sure to also configure your system to increase the maximum number of open file handles. This can be done temporarily with the :program:`ulimit -n` command. The method to permanently change this limit system-wide depends on your distribution. Cache Flushing and Expiration ----------------------------- S3QL flushes changed blocks in the cache to the backend whenever a block has not been accessed for at least 10 seconds. Note that when a block is flushed, it still remains in the cache. Cache expiration (i.e., removal of blocks from the cache) is only done when the maximum cache size is reached. S3QL always expires the least recently used blocks first. Failure Modes ============= Once an S3QL file system has been mounted, there is a multitude of problems that can occur when communicating with the remote server. Generally, :program:`mount.s3ql` always tries to keep the file system as accessible as possible under the circumstances. That means that if network connectivity is lost, data can still be written as long as there is space in the local cache. Attempts to read data not already present in the cache, however, will block until connection is re-established. If any sort of data corruption is detected, the file system will switch to read-only mode. Attempting to read files that are affected by the corruption will return an input/output error (*errno* set to ``EIO``). In case of other unexpected or fatal problems, :program:`mount.s3ql` terminates, but does not unmount the file system. Any attempt to access the mountpoint will result in a "Transport endpoint not connected" error (*errno* set to ``ESHUTDOWN``). This ensures that a mountpoint whose :program:`mount.s3ql` process has terminated can not be confused with a mountpoint containing an empty file system (which would be fatal if e.g. the mountpoint is automatically mirrored). When this has happened, the mountpoint can be cleared by using the :program:`fusermount` command (provided by FUSE) with the ``-u`` parameter. :program:`mount.s3ql` will automatically try to re-establish the connection to the server if network connectivity is lost, and retry sending a request when the connection is established but the remote server signals a temporary problem. 
These attempts will be made at increasing intervals for a period up to 24 hours, with retry intervals starting at 20 ms and increasing up to 5 minutes. After 24 hours, :program:`mount.s3ql` will give up and terminate, leaving the mountpoint inaccessible as described above. Generally, :program:`mount.s3ql` will also emit log messages for any unusual conditions that it encounters. The destination for these messages can be set with the :cmdopt:`--log` parameter. It is highly recommended to periodically check these logs, for example with a tool like logcheck_. Many potential issues that :program:`mount.s3ql` may encounter do not justify restricting access to the file system, but should nevertheless be investigated if they occur. Checking the log messages is the only way to find out about them. .. _logcheck: http://sourceforge.net/projects/logcheck/ Automatic Mounting ================== If you want to mount and umount an S3QL file system automatically at system startup and shutdown, you should do so with a dedicated S3QL init job (instead of using :file:`/etc/fstab`. When using systemd, :program:`mount.s3ql` can be run as a service of type ``notify``. .. NOTE:: In principle, it is also possible to automatically mount an S3QL file system with an appropriate entry in `/etc/fstab`. However, this is not recommended for several reasons: * file systems mounted in :file:`/etc/fstab` will be unmounted with the :program:`umount` command, so your system will not wait until all data has been uploaded but shutdown (or restart) immediately (this is a FUSE limitation, see `issue #1 `_). * There is no way to tell the system that mounting S3QL requires a Python interpreter to be available, so it may attempt to run :program:`mount.s3ql` before it has mounted the volume containing the Python interpreter. * There is no standard way to tell the system that internet connection has to be up before the S3QL file system can be mounted. s3ql-2.26/rst/durability.rst0000644000175000017500000002663212577121514017537 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- .. _durability: ======================================= Important Rules to Avoid Losing Data ======================================= Most S3QL backends store data in distributed storage systems. These systems differ from a traditional, local hard disk in several important ways. In order to avoid losing data, this section should be read very carefully. Rules in a Nutshell =================== To avoid losing your data, obey the following rules: #. Know what durability you can expect from your chosen storage provider. The durability describes how likely it is that a stored object becomes damaged over time. Such data corruption can never be prevented completely, techniques like geographic replication and RAID storage just reduce the likelihood of it to happen (i.e., increase the durability). #. When choosing a backend and storage provider, keep in mind that when using S3QL, the effective durability of the file system data will be reduced because of S3QL's data de-duplication feature. #. Determine your storage service's consistency window. The consistency window that is important for S3QL is the smaller of the times for which: - a newly created object may not yet be included in the list of stored objects - an attempt to read a newly created object may fail with the storage service reporting that the object does not exist If *one* of the above times is zero, we say that as far as S3QL is concerned the storage service has *immediate* consistency. 
If your storage provider claims that *neither* of the above can ever happen, while at the same time promising high durability, you should choose a respectable provider instead. #. When mounting the same file system on different computers (or on the same computer but with different :cmdopt:`--cachedir` directories), the time that passes between the first and second of invocation of :program:`mount.s3ql` must be at least as long as your storage service's consistency window. If your storage service offers immediate consistency, you do not need to wait at all. #. Before running :program:`fsck.s3ql` or :program:`s3qladm`, the file system must have been left untouched for the length of the consistency window. If your storage service offers immediate consistency, you do not need to wait at all. The rest of this section explains the above rules and the reasons for them in more detail. It also contains a list of the consistency windows for a number of larger storage providers. Consistency Window List ======================= The following is a list of the consistency windows (as far as S3QL is concerned) for a number of storage providers. This list doesn't come with any guarantees and may be outdated. If your storage provider is not included, or if you need more reliable information, check with your storage provider. ======================================= =================== Storage Provider Consistency Window ======================================= =================== Amazon S3 in the US standard region No guarantees Amazon S3 in other regions Immediate Google Storage Immediate ======================================= =================== Data Consistency ================ In contrast to the typical hard disk, most storage providers do not guarantee *immediate consistency* of written data. This means that: * after an object has been stored, requests to read this object may still fail or return the prior contents for a little while. * after an object has been deleted, attempts to read it may still return the (old) data for some time, and it may still remain in the list of stored objects for some time. * after a new object has been created, it may still not be included when retrieving the list of stored objects for some time. Of course, none of this is acceptable for a file system, and S3QL generally handles any of the above situations internally so that it always provides a fully consistent file system to the user. However, there are some situations where an S3QL user nevertheless needs to be aware of the peculiarities of his chosen storage service. Suppose that you mount the file system, store some new data, delete some old data and unmount it. If you then mount the file system again right away on another computer, there is no guarantee that S3QL will see any of the changes that the first S3QL process has made. At least in theory it is therefore possible that on the second mount, S3QL does not see any of the changes that you have done and presents you an "old version" of the file system without them. Even worse, if you notice the problem and unmount the file system, S3QL will upload the old status (which S3QL necessarily has to consider as current) and thereby permanently override the newer version (even though this change may not become immediately visible either). S3QL uses several techniques to reduce the likelihood of this to happen (see :ref:`impl_details` for more information on this), but without support from the storage service, the possibility cannot be eliminated completely. 
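If you regularly move a file system between computers, it can help to
enforce the required waiting period (see the rules above) mechanically
rather than relying on memory. The following is only an illustrative
sketch of that idea; the timestamp file, its location and the chosen
window length are made up for the example and are not part of S3QL::

   #!/usr/bin/env python3
   # Sketch: wait until at least CONSISTENCY_WINDOW seconds have passed
   # since the last unmount (recorded in a shared timestamp file) before
   # invoking mount.s3ql. Purely illustrative.
   import os
   import sys
   import time
   import subprocess

   CONSISTENCY_WINDOW = 600          # seconds - pick a value you trust
   STAMP = '/var/lib/backup/last_s3ql_umount'   # hypothetical shared file

   if os.path.exists(STAMP):
       age = time.time() - os.path.getmtime(STAMP)
       if age < CONSISTENCY_WINDOW:
           wait = CONSISTENCY_WINDOW - age
           print('Last unmount %.0f s ago, sleeping %.0f s' % (age, wait))
           time.sleep(wait)

   sys.exit(subprocess.call(['mount.s3ql'] + sys.argv[1:]))
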
The same problem of course also applies when checking the file system. If the storage service provides S3QL with only partially updated data, S3QL has no way to find out if this a real consistency problem that needs to be fixed or if it is only a temporary problem that will resolve itself automatically (because there are still changes that have not become visible yet). This is where the so called *consistency window* comes in. The consistency window is the maximum time (after writing or deleting the object) for which any of the above "outdated responses" may be received. If the consistency window is zero, i.e. all changes are immediately effective, the storage service is said to have *immediate consistency*. If the window is infinite, i.e. there is no upper bound on the time it may take for changes to become effect, the storage service is said to be *eventually consistent*. Note that often there are different consistency windows for the different operations. For example, Google Storage offers immediate consistency when reading data, but only eventual consistency for the list of stored objects. To prevent the problem of S3QL working with an outdated copy of the file system data, it is therefore sufficient to simply wait for the consistency window to pass before mounting the file system again (or running a file system check). The length of the consistency window changes from storage service to storage service, and if your service is not included in the list below, you should check the web page or ask the technical support of your storage provider. The window that is important for S3QL is the smaller of the times for which - a newly created object may not yet be included in the list of stored objects - an attempt to read a newly created object may fail with the storage service reporting that the object does not exist Unfortunately, many storage providers are hesitant to guarantee anything but eventual consistency, i.e. the length of the consistency window is potentially infinite. In that case you simply have to pick a length that you consider "safe enough". For example, even though Amazon is only guaranteeing eventual consistency, the ordinary consistency window for data stored in S3 is just a few seconds, and only in exceptional circumstances (i.e., core network outages) it may rise up to hours (`source `_). .. _backend_reliability: Data Durability =============== The durability of a storage service a measure of the average probability of a storage object to become corrupted over time. The lower the chance of data loss, the higher the durability. Storage services like Amazon S3 claim to achieve a durability of up to 99.999999999% over a year, i.e. if you store 100000000 objects for 100 years, you can expect that at the end of that time one object will be corrupted or lost. S3QL is designed to reduce redundancy and store data in the smallest possible form. Therefore, S3QL is generally not able to compensate for any such losses, and when choosing a storage service you should carefully review if the offered durability matches your requirements. When doing this, there are two factors that should be kept in mind. Firstly, even though S3QL is not able to compensate for storage service failures, it is able to detect them: when trying to access data that has been lost or corrupted by the storage service, an IO error will be returned and the mount point will become inaccessible to ensure that the problem is noticed. 
Secondly, the consequences of a data loss by the storage service can be significantly more severe than you may expect because of S3QL's data de-duplication feature: a data loss in the storage service at time *x* may cause data that is written *after* time *x* to be lost as well. Consider the following scenario: #. You store an important file in the S3QL file system. #. The storage service loses the data blocks of this file. As long as you do not access the file or run :program:`fsck.s3ql`, S3QL is not aware that the data has been lost by the storage service. #. You save an additional copy of the important file in a different location on the same S3QL file system. #. S3QL detects that the contents of the new file are identical to the data blocks that have been stored earlier. Since at this point S3QL is not aware that these blocks have been lost by the storage service, it does not save another copy of the file contents in the storage service but relies on the (presumably) existing blocks instead. #. Therefore, even though you saved another copy, you still do not have a backup of the important file (since both copies refer to the same data blocks that have been lost by the storage service). For some storage services, :program:`fsck.s3ql` can mitigate this effect. When :program:`fsck.s3ql` runs, it asks the storage service for a list of all stored objects. If objects are missing, it can then mark the damaged files and prevent the problem from spreading forwards in time. Figuratively speaking, this establishes a "checkpoint": data loss that occurred before running :program:`fsck.s3ql` can not affect any file system operations that are performed after the check. Unfortunately, many storage services only "discover" that objects are missing or broken when the object actually needs to be retrieved. In this case, :program:`fsck.s3ql` will not learn anything by just querying the list of objects. This effect can be mitigated to some degree by using the :program:`s3ql_verify` command in additon to :program:`fsck.s3ql`. :program:`s3ql_verify` asks the storage service to look up every stored object and may therefore take much longer than running :program:`fsck.s3ql`, but can also offer a much stronger assurance that no data has been lost by the storage service. To "recover" from damaged storage objects in the backend, the damaged objects found by :program:`s3ql_verify` have to be explicitly deleted (so that a successive :program:`fsck.s3ql` is able detect them as missing, correct the file system metadata, and move any affected files to :file:`lost+found`). This procedure is currently not automated, so it is generally a good idea to choose a storage service where the expected data durability is high enough so that the possibility of a lost object (and thus the need to run any full checks) can be neglected over long periods of time. s3ql-2.26/rst/man/0000775000175000017500000000000013246754372015411 5ustar nikrationikratio00000000000000s3ql-2.26/rst/man/cp.rst0000644000175000017500000000604712557313377016552 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- ===================== The |command| command ===================== Synopsis ======== :: s3qlcp [options] Description =========== .. include:: ../include/about.rst The |command| command duplicates the directory tree :var:`source-dir` into :var:`dest-dir` without physically copying the file contents. Both source and destination must lie inside the same S3QL file system. .. begin_main_content The replication will not take any additional space. 
Only if one of directories is modified later on, the modified data will take additional storage space. `s3qlcp` can only be called by the user that mounted the file system and (if the file system was mounted with `--allow-other` or `--allow-root`) the root user. Note that: * After the replication, both source and target directory will still be completely ordinary directories. You can regard `` as a snapshot of `` or vice versa. However, the most common usage of `s3qlcp` is to regularly duplicate the same source directory, say `documents`, to different target directories. For a e.g. monthly replication, the target directories would typically be named something like `documents_January` for the replication in January, `documents_February` for the replication in February etc. In this case it is clear that the target directories should be regarded as snapshots of the source directory. * Exactly the same effect could be achieved by an ordinary copy program like `cp -a`. However, this procedure would be orders of magnitude slower, because `cp` would have to read every file completely (so that S3QL had to fetch all the data over the network from the backend) before writing them into the destination folder. Snapshotting vs Hardlinking --------------------------- Snapshot support in S3QL is inspired by the hardlinking feature that is offered by programs like `rsync `_ or `storeBackup `_. These programs can create a hardlink instead of copying a file if an identical file already exists in the backup. However, using hardlinks has two large disadvantages: * backups and restores always have to be made with a special program that takes care of the hardlinking. The backup must not be touched by any other programs (they may make changes that inadvertently affect other hardlinked files) * special care needs to be taken to handle files which are already hardlinked (the restore program needs to know that the hardlink was not just introduced by the backup program to safe space) S3QL snapshots do not have these problems, and they can be used with any backup program. .. end_main_content Options ======= The |command| command accepts the following options: .. pipeinclude:: python ../../bin/s3qlcp --help :start-after: show this help message and exit Exit Codes ========== |command| may terminate with the following exit codes: .. include:: ../include/exitcodes.rst .. include:: ../include/postman.rst .. |command| replace:: :program:`s3qlcp` s3ql-2.26/rst/man/oauth_client.rst0000644000175000017500000000211312557313377020614 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- .. _oauth_client: ===================== The |command| command ===================== Synopsis ======== :: s3ql_oauth_client [options] Description =========== .. include:: ../include/about.rst The |command| command may be used to obtain OAuth2 authentication tokens for use with Google Storage. It requests "user code" from Google which has to be pasted into the browser to complete the authentication process interactively. Once authentication in the browser has been completed, |command| displays the OAuth2 refresh token. When combined with the special username ``oauth2``, the refresh token can be used as a backend passphrase when using the Google Storage S3QL backend. Options ======= The |command| command accepts the following options: .. pipeinclude:: python ../../bin/s3ql_oauth_client --help :start-after: show this help message and exit Exit Codes ========== |command| may terminate with the following exit codes: .. 
include:: ../include/exitcodes.rst .. include:: ../include/postman.rst .. |command| replace:: :program:`s3ql_oauth_client` s3ql-2.26/rst/man/adm.rst0000644000175000017500000000364512577121514016702 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- ===================== The |command| command ===================== Synopsis ======== :: s3qladm [options] where :var:`action` may be either of :program:`passphrase`, :program:`upgrade`, :program:`delete` or :program:`download-metadata`. Description =========== .. include:: ../include/about.rst The |command| command performs various operations on *unmounted* S3QL file systems. The file system *must not be mounted* when using |command| or things will go wrong badly. The storage url depends on the backend that is used. The S3QL User's Guide should be consulted for a description of the available backends. Options ======= The |command| command accepts the following options. .. pipeinclude:: python ../../bin/s3qladm --help :start-after: show this help message and exit Actions ======= The following actions may be specified: passphrase Changes the encryption passphrase of the file system. upgrade Upgrade the file system to the newest revision. delete Delete the file system with all the stored data. download-metadata Interactively download backups of the file system metadata. Exit Codes ========== |command| may terminate with the following exit codes: .. include:: ../include/exitcodes.rst :3: Invalid backend option. :10: Could not open log file for writing. :11: No such backend. :12: Authentication file has insecure permissions. :13: Unable to parse proxy settings. :14: Invalid credentials (Authentication failed). :15: No permission to access backend (Authorization denied). :16: Invalid storage URL, specified location does not exist in backend. :17: Wrong file system passphrase. :18: No S3QL file system found at given storage URL. :19: Unable to connect to backend, can't resolve hostname. :45: Unable to access cache directory. .. include:: ../include/postman.rst .. |command| replace:: :program:`s3qladm` s3ql-2.26/rst/man/umount.rst0000644000175000017500000000236212557313377017473 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- ===================== The |command| command ===================== Synopsis ======== :: umount.s3ql [options] Description =========== .. include:: ../include/about.rst The |command| command unmounts the S3QL file system mounted in the directory *mount point* and blocks until all data has been uploaded to the storage backend. Only the user who mounted the file system with :program:`mount.s3ql` is able to unmount it with |command|. If you are root and want to unmount an S3QL file system mounted by an ordinary user, you have to use the :program:`fusermount -u` or :command:`umount` command instead. Note that these commands do not block until all data has been uploaded, so if you use them instead of :program:`umount.s3ql` then you should manually wait for the :program:`mount.s3ql` process to terminate before shutting down the system. Options ======= The |command| command accepts the following options. .. pipeinclude:: python ../../bin/umount.s3ql --help :start-after: show this help message and exit Exit Codes ========== |command| may terminate with the following exit codes: .. include:: ../include/exitcodes.rst .. include:: ../include/postman.rst .. |command| replace:: :program:`umount.s3ql` s3ql-2.26/rst/man/mkfs.rst0000644000175000017500000000301412577121514017067 0ustar nikrationikratio00000000000000.. 
-*- mode: rst -*- ===================== The |command| command ===================== Synopsis ======== :: mkfs.s3ql [options] Description =========== .. include:: ../include/about.rst The |command| command creates a new file system in the location specified by *storage url*. The storage url depends on the backend that is used. The S3QL User's Guide should be consulted for a description of the available backends. Unless you have specified the `--plain` option, `mkfs.s3ql` will ask you to enter an encryption password. This password will *not* be read from an authentication file specified with the :cmdopt:`--authfile` option to prevent accidental creation of an encrypted file system. Options ======= The |command| command accepts the following options. .. pipeinclude:: python ../../bin/mkfs.s3ql --help :start-after: show this help message and exit Exit Codes ========== |command| may terminate with the following exit codes: .. include:: ../include/exitcodes.rst :3: Invalid backend option. :11: No such backend. :12: Authentication file has insecure permissions. :13: Unable to parse proxy settings. :14: Invalid credentials (Authentication failed). :15: No permission to access backend (Authorization denied). :16: Invalid storage URL, specified location does not exist in backend. :19: Unable to connect to backend, can't resolve hostname. :45: Unable to access cache directory. .. include:: ../include/postman.rst .. |command| replace:: :program:`mkfs.s3ql` s3ql-2.26/rst/man/lock.rst0000644000175000017500000000534012557313377017073 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- ===================== The |command| command ===================== Synopsis ======== :: s3qllock [options] Description =========== .. include:: ../include/about.rst The :program:`s3qllock` command makes a directory tree in an S3QL file system immutable. Immutable trees can no longer be changed in any way whatsoever. You can not add new files or directories and you can not change or delete existing files and directories. The only way to get rid of an immutable tree is to use the :program:`s3qlrm` command. |command| can only be called by the user that mounted the file system and (if the file system was mounted with :cmdopt:`--allow-other` or :cmdopt:`--allow-root`) the root user. Rationale ========= .. begin_main_content Immutability is a feature designed for backups. Traditionally, backups have been made on external tape drives. Once a backup was made, the tape drive was removed and locked somewhere in a shelf. This has the great advantage that the contents of the backup are now permanently fixed. Nothing (short of physical destruction) can change or delete files in the backup. In contrast, when backing up into an online storage system like S3QL, all backups are available every time the file system is mounted. Nothing prevents a file in an old backup from being changed again later on. In the worst case, this may make your entire backup system worthless. Imagine that your system gets infected by a nasty virus that simply deletes all files it can find -- if the virus is active while the backup file system is mounted, the virus will destroy all your old backups as well! Even if the possibility of a malicious virus or trojan horse is excluded, being able to change a backup after it has been made is generally not a good idea. A common S3QL use case is to keep the file system mounted at all times and periodically create backups with :program:`rsync -a`. 
This allows every user to recover her files from a backup without having to call the system administrator. However, this also allows every user to accidentally change or delete files *in* one of the old backups. Making a backup immutable protects you against all these problems. Unless you happen to run into a virus that was specifically programmed to attack S3QL file systems, backups can be neither deleted nor changed after they have been made immutable. .. end_main_content Options ======= The |command| command accepts the following options: .. pipeinclude:: python ../../bin/s3qllock --help :start-after: show this help message and exit Exit Codes ========== |command| may terminate with the following exit codes: .. include:: ../include/exitcodes.rst .. include:: ../include/postman.rst .. |command| replace:: :program:`s3qllock` s3ql-2.26/rst/man/stat.rst0000644000175000017500000000153012557313377017113 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- ===================== The |command| command ===================== Synopsis ======== :: s3qlstat [options] Description =========== .. include:: ../include/about.rst The |command| command prints statistics about the S3QL file system mounted at :var:`mountpoint`. |command| can only be called by the user that mounted the file system and (if the file system was mounted with :cmdopt:`--allow-other` or :cmdopt:`--allow-root`) the root user. Options ======= The |command| command accepts the following options: .. pipeinclude:: python ../../bin/s3qlstat --help :start-after: show this help message and exit Exit Codes ========== |command| may terminate with the following exit codes: .. include:: ../include/exitcodes.rst .. include:: ../include/postman.rst .. |command| replace:: :program:`s3qlstat` s3ql-2.26/rst/man/expire_backups.rst0000644000175000017500000000673112615000156021133 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- ===================== The |command| command ===================== Synopsis ======== :: expire_backups [options] [ ...] Description =========== The |command| command intelligently remove old backups that are no longer needed. .. begin_main_content To define what backups you want to keep for how long, you define a number of *age ranges*. :program:`expire_backups` ensures that you will have at least one backup in each age range at all times. It will keep exactly as many backups as are required for that and delete any backups that become redundant. Age ranges are specified by giving a list of range boundaries in terms of backup cycles. Every time you create a new backup, the existing backups age by one cycle. Example: when :program:`expire_backups` is called with the age range definition ``1 3 7 14 31``, it will guarantee that you always have the following backups available: #. A backup that is 0 to 1 cycles old (i.e, the most recent backup) #. A backup that is 1 to 3 cycles old #. A backup that is 3 to 7 cycles old #. A backup that is 7 to 14 cycles old #. A backup that is 14 to 31 cycles old .. NOTE:: If you do backups in fixed intervals, then one cycle will be equivalent to the backup interval. The advantage of specifying the age ranges in terms of backup cycles rather than days or weeks is that it allows you to gracefully handle irregular backup intervals. Imagine that for some reason you do not turn on your computer for one month. Now all your backups are at least a month old, and if you had specified the above backup strategy in terms of absolute ages, they would all be deleted! 
Specifying age ranges in terms of backup cycles avoids these sort of problems. :program:`expire_backups` usage is simple. It requires backups to be stored in directories of the form ``year-month-day_hour:minute:seconds`` (``YYYY-MM-DD_HH:mm:ss``) and works on all backups in the current directory. So for the above backup strategy, the correct invocation would be:: expire_backups.py 1 3 7 14 31 When storing your backups on an S3QL file system, you probably want to specify the ``--use-s3qlrm`` option as well. This tells :program:`expire_backups` to use the :ref:`s3qlrm ` command to delete directories. :program:`expire_backups` uses a "state file" to keep track which backups are how many cycles old (since this cannot be inferred from the dates contained in the directory names). The standard name for this state file is :file:`.expire_backups.dat`. If this file gets damaged or deleted, :program:`expire_backups` no longer knows the ages of the backups and refuses to work. In this case you can use the :cmdopt:`--reconstruct-state` option to try to reconstruct the state from the backup dates. However, the accuracy of this reconstruction depends strongly on how rigorous you have been with making backups (it is only completely correct if the time between subsequent backups has always been exactly the same), so it's generally a good idea not to tamper with the state file. .. end_main_content Options ======= The |command| command accepts the following options: .. pipeinclude:: python ../../contrib/expire_backups.py --help :start-after: show this help message and exit Exit Codes ========== |command| may terminate with the following exit codes: .. include:: ../include/exitcodes.rst See Also ======== |command| is shipped as part of S3QL, https://bitbucket.org/nikratio/s3ql/. .. |command| replace:: :program:`expire_backups` s3ql-2.26/rst/man/mount.rst0000644000175000017500000000361312577121514017276 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- ===================== The |command| command ===================== Synopsis ======== :: mount.s3ql [options] Description =========== .. include:: ../include/about.rst The |command| command mounts the S3QL file system stored in *storage url* in the directory *mount point*. The storage url depends on the backend that is used. The S3QL User's Guide should be consulted for a description of the available backends. Options ======= The |command| command accepts the following options. .. pipeinclude:: python ../../bin/mount.s3ql --help --log none :start-after: show this help message and exit Exit Codes ========== |command| may terminate with the following exit codes: .. include:: ../include/exitcodes.rst :3: Invalid backend option. :10: Could not open log file for writing. :11: No such backend. :12: Authentication file has insecure permissions. :13: Unable to parse proxy settings. :14: Invalid credentials (Authentication failed). :15: No permission to access backend (Authorization denied). :16: Invalid storage URL, specified location does not exist in backend. :17: Wrong file system passphrase. :18: No S3QL file system found at given storage URL. :19: Unable to connect to backend, can't resolve hostname. :30: File system was not unmounted cleanly. :31: File system appears to be mounted elsewhere. :32: Unsupported file system revision (too old). :33: Unsupported file system revision (too new). :34: Insufficient free nodes, need to run :program:`fsck.s3ql`. :35: Attempted to mount read-only, this is not supported. :36: Mountpoint does not exist. 
:37: Not enough available file descriptors. :39: Unable to bind file system to mountpoint. :45: Unable to access cache directory. .. include:: ../include/postman.rst .. |command| replace:: :program:`mount.s3ql` s3ql-2.26/rst/man/pcp.rst0000644000175000017500000000207112557313377016723 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- ===================== The |command| command ===================== Synopsis ======== :: pcp [options] [ ...] Description =========== The |command| command is a is a wrapper that starts several :program:`sync` processes to copy directory trees in parallel. This is allows much better copying performance on file system that have relatively high latency when retrieving individual files like S3QL. **Note**: Using this program only improves performance when copying *from* an S3QL file system. When copying *to* an S3QL file system, using |command| is more likely to *decrease* performance. Options ======= The |command| command accepts the following options: .. pipeinclude:: python ../../contrib/pcp.py --help :start-after: show this help message and exit Exit Codes ========== |command| may terminate with the following exit codes: .. include:: ../include/exitcodes.rst See Also ======== |command| is shipped as part of S3QL, https://bitbucket.org/nikratio/s3ql/. .. |command| replace:: :program:`pcp` s3ql-2.26/rst/man/ctrl.rst0000644000175000017500000000370312557313377017110 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- ===================== The |command| command ===================== Synopsis ======== :: s3qlctrl [options] ... where :var:`action` may be either of :program:`flushcache`, :program:`upload-meta`, :program:`cachesize` or :program:`log-metadata`. Description =========== .. include:: ../include/about.rst The |command| command performs various actions on the S3QL file system mounted in :var:`mountpoint`. |command| can only be called by the user that mounted the file system and (if the file system was mounted with :cmdopt:`--allow-other` or :cmdopt:`--allow-root`) the root user. The following actions may be specified: flushcache Uploads all changed file data to the backend. upload-meta Upload metadata to the backend. All file system operations will block while a snapshot of the metadata is prepared for upload. cachesize Changes the cache size of the file system. This action requires an additional argument that specifies the new cache size in KiB, so the complete command line is:: s3qlctrl [options] cachesize log Change the amount of information that is logged into :file:`~/.s3ql/mount.log` file. The complete syntax is:: s3qlctrl [options] log [ [ ...]] here :var:`level` is the desired new log level and may be either of *debug*, *info* or *warn*. One or more :var:`module` may only be specified with the *debug* level and allow to restrict the debug output to just the listed modules. Options ======= The |command| command also accepts the following options, no matter what specific action is being invoked: .. pipeinclude:: python ../../bin/s3qlctrl --help :start-after: show this help message and exit Exit Codes ========== |command| may terminate with the following exit codes: .. include:: ../include/exitcodes.rst .. include:: ../include/postman.rst .. |command| replace:: :program:`s3qlctrl` s3ql-2.26/rst/man/index.rst0000644000175000017500000000053412577121514017242 0ustar nikrationikratio00000000000000 Manpages ======== The man pages are installed with S3QL on your system and can be viewed with the :command:`man` command. 
For reference, they are also included here in the User's Guide. .. toctree:: :maxdepth: 1 mkfs adm mount stat ctrl cp rm lock umount fsck oauth_client verify pcp expire_backups s3ql-2.26/rst/man/verify.rst0000644000175000017500000000342712577121514017443 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- ===================== The |command| command ===================== Synopsis ======== :: s3ql_verify [options] Description =========== .. include:: ../include/about.rst The |command| command verifies all data in the file system. In contrast to :program:`fsck.s3ql`, |command| does not trust the object listing returned by the backend, but actually attempts to retrieve every object. It therefore takes a lot longer. The format of :var:`` depends on the backend that is used. The S3QL User's Guide should be consulted for a description of the available backends. Options ======= The |command| command accepts the following options. .. pipeinclude:: python ../../bin/s3ql_verify --help --log none :start-after: show this help message and exit Exit Codes ========== |command| may terminate with the following exit codes: .. include:: ../include/exitcodes.rst :3: Invalid backend option. :10: Could not open log file for writing. :11: No such backend. :12: Authentication file has insecure permissions. :13: Unable to parse proxy settings. :14: Invalid credentials (Authentication failed). :15: No permission to access backend (Authorization denied). :16: Invalid storage URL, specified location does not exist in backend. :17: Wrong file system passphrase. :18: No S3QL file system found at given storage URL. :19: Unable to connect to backend, can't resolve hostname. :32: Unsupported file system revision (too old). :33: Unsupported file system revision (too new). :45: Unable to access cache directory. :46: The file system data was verified, and some objects were found to be missing or corrupted. .. include:: ../include/postman.rst .. |command| replace:: :program:`s3ql_verify` s3ql-2.26/rst/man/fsck.rst0000664000175000017500000000407613160156175017070 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- ===================== The |command| command ===================== Synopsis ======== :: fsck.s3ql [options] Description =========== .. include:: ../include/about.rst The |command| command checks the file system in the location specified by *storage url* for errors and attempts to repair any problems. The storage url depends on the backend that is used. The S3QL User's Guide should be consulted for a description of the available backends. Options ======= The |command| command accepts the following options. .. pipeinclude:: python ../../bin/fsck.s3ql --help --log none :start-after: show this help message and exit Exit Codes ========== If |command| found any file system errors (no matter if they were corrected or not), the exit code will be 128 plus one of the codes listed below. If no errors were found, the following exit codes are used as-is: .. include:: ../include/exitcodes.rst :3: Invalid backend option. :10: Could not open log file for writing. :11: No such backend. :12: Authentication file has insecure permissions. :13: Unable to parse proxy settings. :14: Invalid credentials (Authentication failed). :15: No permission to access backend (Authorization denied). :16: Invalid storage URL, specified location does not exist in backend. :17: Wrong file system passphrase. :18: No S3QL file system found at given storage URL. :19: Unable to connect to backend, can't resolve hostname. 
:32: Unsupported file system revision (too old). :33: Unsupported file system revision (too new). :40: Cannot check mounted file system. :41: User input required, but running in batch mode. :42: File system check aborted by user. :43: Local metadata is corrupted. :44: Uncorrectable errors found. :45: Unable to access cache directory. :128: This error code will be *added* to one of the codes above if any file system errors have been found (no matter if they were corrected or not). .. include:: ../include/postman.rst .. |command| replace:: :program:`fsck.s3ql` s3ql-2.26/rst/man/rm.rst0000644000175000017500000000220012557313377016551 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- ===================== The |command| command ===================== Synopsis ======== :: s3qlrm [options] Description =========== .. include:: ../include/about.rst The |command| command recursively deletes files and directories on an S3QL file system. Although |command| is faster than using e.g. :command:`rm -r``, the main reason for its existence is that it allows you to delete immutable trees (which can be created with :program:`s3qllock`) as well. Be warned that there is no additional confirmation. The directory will be removed entirely and immediately. |command| can only be called by the user that mounted the file system and (if the file system was mounted with :cmdopt:`--allow-other` or :cmdopt:`--allow-root`) the root user. Options ======= The |command| command accepts the following options: .. pipeinclude:: python ../../bin/s3qlrm --help :start-after: show this help message and exit Exit Codes ========== |command| may terminate with the following exit codes: .. include:: ../include/exitcodes.rst .. include:: ../include/postman.rst .. |command| replace:: :program:`s3qlrm` s3ql-2.26/rst/authinfo.rst0000644000175000017500000000430212557313377017202 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- .. _authinfo: ==================================== Storing Authentication Information ==================================== Normally, S3QL reads username and password for the backend as well as an encryption passphrase for the file system from the terminal. Most commands also accept an :cmdopt:`--authfile` parameter that can be used to read this information from a file instead. The authentication file consists of sections, led by a ``[section]`` header and followed by ``name: value`` entries. The section headers themselves are not used by S3QL but have to be unique within the file. In each section, the following entries can be defined: :storage-url: Specifies the storage url to which this section applies. If a storage url starts with the value of this entry, the section is considered applicable. :backend-login: Specifies the username to use for authentication with the backend. :backend-password: Specifies the password to use for authentication with the backend. :fs-passphrase: Specifies the passphrase to use to decrypt the file system (if it is encrypted). When reading the authentication file, S3QL considers every applicable section in order and uses the last value that it found for each entry. 
For example, consider the following authentication file:: [s3] storage-url: s3:// backend-login: joe backend-password: notquitesecret [fs1] storage-url: s3://joes-first-bucket fs-passphrase: neitheristhis [fs2] storage-url: s3://joes-second-bucket fs-passphrase: swordfish [fs3] storage-url: s3://joes-second-bucket/with-prefix backend-login: bill backend-password: bi23ll fs-passphrase: ll23bi With this authentication file, S3QL would try to log in as "joe" whenever the s3 backend is used, except when accessing a storage url that begins with "s3://joes-second-bucket/with-prefix". In that case, the last section becomes active and S3QL would use the "bill" credentials. Furthermore, file system encryption passphrases will be used for storage urls that start with "s3://joes-first-bucket" or "s3://joes-second-bucket". The authentication file is parsed by the `Python ConfigParser module `_. s3ql-2.26/rst/backends.rst0000664000175000017500000003472713160156175017143 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- .. _storage_backends: ================== Storage Backends ================== S3QL supports different *backends* to store data at different service providers and using different protocols. A *storage url* specifies a backend together with some backend-specific information and uniquely identifies an S3QL file system. The form of the storage url depends on the backend and is described for every backend below. Furthermore, every S3QL command that accepts a storage url also accepts a :cmdopt:`--backend-options` parameter that can be used to pass backend-specific options to the backend module. The available options are documented with the respective backends below. All storage backends respect the :envvar:`!http_proxy` (for plain HTTP connections) and :envvar:`!https_proxy` (for SSL connections) environment variables. .. note:: Storage backends are not necessarily compatible. Don't expect that you can e.g. copy the data stored by the local backend into Amazon S3 using some non-S3QL tool and then access it with S3QL's S3 backend. If you want to copy file systems from one backend to another, you need to use the :file:`clone_fs.py` script (from the :file:`contrib` directory in the S3QL tarball). Google Storage ============== .. program:: gs_backend `Google Storage `_ is an online storage service offered by Google. To use the Google Storage backend, you need to have (or sign up for) a Google account, and then `activate Google Storage `_ for your account. The account is free, you will pay only for the amount of storage and traffic that you actually use. There are two ways to access Google storage: #. Use S3-like authentication. To do this, first `set a default project `_. Then use the `key management tool `_ to retrieve your *Google Storage developer access key* and *Google Storage developer secret* and use that as backend login and backend password. #. Use OAuth2 authentication. In this case you need to use ``oauth2`` as the backend login, and a valid OAuth2 refresh token as the backend password. To obtain a refresh token, you can use the :ref:`s3ql_oauth_client ` program. It will instruct you to open a specific URL in your browser, enter a code and authenticate with your Google account. Once this procedure is complete, :ref:`s3ql_oauth_client ` will print out the refresh token. Note that you need to do this procedure only once, the refresh token will remain valid until you explicitly revoke it. To create a Google Storage bucket, you can use e.g. the `Google Storage Manager`_.
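Once the bucket exists, the credentials obtained with either method can also be stored in the authentication file described in :ref:`authinfo`, so that they do not have to be entered interactively. The following is only an illustrative sketch for the OAuth2 case; the section name, bucket name and refresh token value are placeholders, and the ``gs://`` storage URL format is explained below::

   [gs-backup]
   storage-url: gs://my-s3ql-bucket
   backend-login: oauth2
   backend-password: my-oauth2-refresh-token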
The storage URL for accessing the bucket in S3QL is then :: gs:/// Here *bucketname* is the name of the bucket, and *prefix* can be an arbitrary prefix that will be prepended to all object names used by S3QL. This allows you to store several S3QL file systems in the same Google Storage bucket. The Google Storage backend accepts the following backend options: .. option:: no-ssl Disable encrypted (https) connections and use plain HTTP instead. .. option:: ssl-ca-path= Instead of using the system's default certificate store, validate the server certificate against the specified CA certificates. :var:`` may be either a file containing multiple certificates, or a directory containing one certificate per file. .. option:: tcp-timeout Specifies the timeout used for TCP connections. If no data can be exchanged with the remote server for longer than this period, the TCP connection is closed and re-established (default: 20 seconds). .. _`Google Storage Manager`: https://sandbox.google.com/storage/ Amazon S3 ========= .. program:: s3_backend `Amazon S3 `_ is the online storage service offered by `Amazon Web Services (AWS) `_. To use the S3 backend, you first need to sign up for an AWS account. The account is free, you will pay only for the amount of storage and traffic that you actually use. After that, you need to create a bucket that will hold the S3QL file system, e.g. using the `AWS Management Console `_. For best performance, it is recommend to create the bucket in the geographically closest storage region, but not the US Standard region (see :ref:`durability` for the reason). The storage URL for accessing S3 buckets in S3QL has the form :: s3://// *prefix* can be an arbitrary prefix that will be prepended to all object names used by S3QL. This allows you to store several S3QL file systems in the same S3 bucket. For example, the storage URL :: s3://ap-south-1/foomart.net/data/s3ql_backup/ refers to the *foomart.net* bucket in the *ap-south-1* region. All storage objects that S3QL stores in this bucket will be prefixed with *data/s3ql_backup/*. Note that the backend login and password for accessing S3 are not the user id and password that you use to log into the Amazon Webpage, but the *AWS access key id* and *AWS secret access key* shown under `My Account/Access Identifiers `_. The Amazon S3 backend accepts the following backend options: .. option:: no-ssl Disable encrypted (https) connections and use plain HTTP instead. .. option:: ssl-ca-path= Instead of using the system's default certificate store, validate the server certificate against the specified CA certificates. :var:`` may be either a file containing multiple certificates, or a directory containing one certificate per file. .. option:: tcp-timeout Specifies the timeout used for TCP connections. If no data can be exchanged with the remote server for longer than this period, the TCP connection is closed and re-established (default: 20 seconds). .. option:: sse Enable server side encryption. Both costs & benefits of S3 server side encryption are probably rather small, and this option does *not* affect any client side encryption performed by S3QL itself. .. option:: ia Use infrequent access storage class for new objects. .. option:: rrs Enable reduced redundancy storage for newly created objects (overwrites the *ia* option). 
When enabling this option, it is strongly recommended to periodically run :ref:`s3ql_verify `, because objects that are lost by the storage backend may cause subsequent data loss even later in time due to the data de-duplication feature of S3QL (see :ref:`backend_reliability` for details). .. _openstack_backend: OpenStack/Swift =============== .. program:: swift_backend OpenStack_ is an open-source cloud server application suite. Swift_ is the cloud storage module of OpenStack. Swift/OpenStack storage is offered by many different companies. There are two different storage URLs for the OpenStack backend that make use of different authentication APIs. For legacy (v1) authentication, the storage URL is :: swift://[:]/[/] for Keystone (v2) authentication, the storage URL is :: swiftks://[:]/:[/] Note that when using Keystone authentication, you can (and have to) specify the storage region of the container as well. In both cases, *hostname* should be the name of the authentication server. The storage container must already exist (most OpenStack providers offer either a web frontend or a command line tool for creating containers). *prefix* can be an arbitrary prefix that will be prepended to all object names used by S3QL, which can be used to store multiple S3QL file systems in the same container. When using legacy authentication, the backend login and password correspond to the OpenStack username and API Access Key. When using Keystone authentication, the backend password is your regular OpenStack password and the backend login combines your OpenStack username and tenant name in the form `:`. If no tenant is required, the OpenStack username alone may be used as backend login. The OpenStack backend accepts the following backend options: .. option:: no-ssl Use plain HTTP to connect to the authentication server. This option does not directly affect the connection to the storage server. Whether HTTPS or plain HTTP is used to connect to the storage server is determined by the authentication server. .. option:: ssl-ca-path= Instead of using the system's default certificate store, validate the server certificate against the specified CA certificates. :var:`` may be either a file containing multiple certificates, or a directory containing one certificate per file. .. option:: tcp-timeout Specifies the timeout used for TCP connections. If no data can be exchanged with the remote server for longer than this period, the TCP connection is closed and re-established (default: 20 seconds). .. option:: disable-expect100 If this option is specified, S3QL does not use the ``Expect: continue`` header (cf. `RFC2616, section 8.2.3`__) when uploading data to the server. This can be used to work around broken storage servers that don't fully support HTTP 1.1, but may decrease performance as object data will be transmitted to the server more than once in some circumstances. .. option:: no-feature-detection If this option is specified, S3QL does not try to dynamically detect advanced features of the Swift backend. In this case S3QL can only use the least common denominator of supported Swift versions and configurations. .. __: http://tools.ietf.org/html/rfc2616#section-8.2.3 .. _OpenStack: http://www.openstack.org/ .. _Swift: http://openstack.org/projects/storage/ .. NOTE:: The Swift API unfortunately lacks a number of features that S3QL normally makes use of. S3QL works around these deficiencies as much as possible.
However, this means that storing data using the Swift backend generally requires more network round-trips and transfer volume than the other backends. Also, S3QL requires Swift storage servers to provide immediate consistency for newly created objects. Rackspace CloudFiles ==================== Rackspace_ CloudFiles uses OpenStack_ internally, so it is possible to just use the OpenStack/Swift backend (see above) with ``auth.api.rackspacecloud.com`` as the host name. For convenience, there is also a special ``rackspace`` backend that uses a storage URL of the form :: rackspace:///[/] The storage container must already exist in the selected region. *prefix* can be an arbitrary prefix that will be prepended to all object names used by S3QL and can be used to store several S3QL file systems in the same container. You can create a storage container for S3QL using the `Cloud Control Panel `_ (click on *Files* in the topmost menu bar). The Rackspace backend accepts the same backend options as the :ref:`OpenStack backend `. .. _Rackspace: http://www.rackspace.com/ S3 compatible ============= .. program:: s3c_backend The S3 compatible backend allows S3QL to access any storage service that uses the same protocol as Amazon S3. The storage URL has the form :: s3c://:// Here *bucketname* is the name of an (existing) bucket, and *prefix* can be an arbitrary prefix that will be prepended to all object names used by S3QL. This allows you to store several S3QL file systems in the same bucket. The S3 compatible backend accepts the following backend options: .. option:: no-ssl Disable encrypted (https) connections and use plain HTTP instead. .. option:: ssl-ca-path= Instead of using the system's default certificate store, validate the server certificate against the specified CA certificates. :var:`` may be either a file containing multiple certificates, or a directory containing one certificate per file. .. option:: tcp-timeout Specifies the timeout used for TCP connections. If no data can be exchanged with the remote server for longer than this period, the TCP connection is closed and re-established (default: 20 seconds). .. option:: disable-expect100 If this option is specified, S3QL does not use the ``Expect: continue`` header (cf. `RFC2616, section 8.2.3`__) when uploading data to the server. This can be used to work around broken storage servers that don't fully support HTTP 1.1, but may decrease performance as object data will be transmitted to the server more than once in some circumstances. .. __: http://tools.ietf.org/html/rfc2616#section-8.2.3 .. option:: dumb-copy If this option is specified, S3QL assumes that a COPY request to the storage server has succeeded as soon as the server returns a ``200 OK`` status. The `S3 COPY API`_ specifies that the storage server may still return an error in the request body (see the `copy proposal`__ for the rationale), so this option should only be used if you are certain that your storage server only returns ``200 OK`` when the copy operation has been completely and successfully carried out. Using this option may be necessary if your storage server does not return a valid response body for a successful copy operation. .. _`S3 COPY API`: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html .. __: https://doc.s3.amazonaws.com/proposals/copy.html Local ===== S3QL is also able to store its data on the local file system.
This can be used to back up data on external media, or to access external services that S3QL can not talk to directly (e.g., it is possible to store data over SSH by first mounting the remote system using sshfs_ and then using the local backend to store the data in the sshfs mountpoint). The storage URL for local storage is :: local:// Note that you have to write three consecutive slashes to specify an absolute path, e.g. `local:///var/archive`. Also, relative paths will automatically be converted to absolute paths before the authentication file (see :ref:`authinfo`) is read, i.e. if you are in the `/home/john` directory and try to mount `local://s3ql`, the corresponding section in the authentication file must match the storage url `local:///home/john/s3ql`. The local backend does not accept any backend options. .. _sshfs: http://fuse.sourceforge.net/sshfs.html s3ql-2.26/rst/index.rst0000644000175000017500000000045312557217667016474 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- =================== S3QL User's Guide =================== .. toctree:: :maxdepth: 2 about installation backends durability mkfs adm mount special umount fsck authinfo contrib tips issues man/index resources impl_details s3ql-2.26/rst/fsck.rst0000644000175000017500000000733212577121514016311 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- Checking for Errors =================== It is recommended to periodically run the :program:`fsck.s3ql` and :program:`s3ql_verify` commands (in this order) to ensure that the file system is consistent, and that there has been no data corruption or data loss in the storage backend. :program:`fsck.s3ql` is intended to detect and correct problems with the internal file system structure, caused by e.g. a file system crash or a bug in S3QL. It assumes that the storage backend can be fully trusted, i.e. if the backend reports that a specific storage object exists, :program:`fsck.s3ql` takes that as proof that the data is present and intact. In contrast to that, the :program:`s3ql_verify` command is intended to check the consistency of the storage backend. It assumes that the internal file system data is correct, and verifies that all data can actually be retrieved from the backend. Running :program:`s3ql_verify` may therefore take much longer than running :program:`fsck.s3ql`. Checking and repairing internal file system errors -------------------------------------------------- :program:`fsck.s3ql` checks that the internal file system structure is consistent and attempts to correct any problems it finds. If an S3QL file system has not been unmounted correctly for any reason, you need to run :program:`fsck.s3ql` before you can mount the file system again. The :program:`fsck.s3ql` command has the following syntax:: fsck.s3ql [options] This command accepts the following options: .. pipeinclude:: python ../bin/fsck.s3ql --help :start-after: show this help message and exit .. _s3ql_verify: Detecting and handling backend data corruption ---------------------------------------------- The :program:`s3ql_verify` command verifies all data in the file system. In contrast to :program:`fsck.s3ql`, :program:`s3ql_verify` does not trust the object listing returned by the backend, but actually attempts to retrieve every object. By default, :program:`s3ql_verify` will attempt to retrieve just the metadata for every object (for e.g.
the S3-compatible or Google Storage backends this corresponds to a ``HEAD`` request for each object), which is generally sufficient to determine if the object still exists. When specifying the :cmdopt:`--data` option, :program:`s3ql_verify` will instead read every object entirely. To determine how much data will be transmitted in total when using :cmdopt:`--data`, look at the *After compression* row in the :ref:`s3qlstat ` output. :program:`s3ql_verify` is not able to correct any data corruption that it finds. Instead, a list of the corrupted and/or missing objects is written to a file and the decision about the proper course of action is left to the user. If you have administrative access to the backend server, you may want to investigate the cause of the corruption or check if the missing/corrupted objects can be restored from backups. If you believe that the missing/corrupted objects are indeed lost irrevocably, you can use the :ref:`remove_objects` script (from the :file:`contrib` directory of the S3QL distribution) to explicitly delete the objects from the storage backend. After that, you should run :program:`fsck.s3ql`. Since the (now explicitly deleted) objects should now no longer be included in the object index reported by the backend, :program:`fsck.s3ql` will identify the objects as missing, update the internal file system structures accordingly, and move the affected files into the :file:`lost+found` directory. The :program:`s3ql_verify` command has the following syntax:: s3ql_verify [options] This command accepts the following options: .. pipeinclude:: python ../bin/s3ql_verify --help :start-after: show this help message and exit s3ql-2.26/rst/resources.rst0000644000175000017500000000132012557313377017374 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- .. _resources: ================================ Further Resources / Getting Help ================================ If you have questions or problems with S3QL that you weren't able to resolve with this manual, you might want to consider the following other resources: * The `S3QL Wiki `_ * The `S3QL FAQ `_ * The `S3QL Mailing List `_. You can subscribe by sending a mail to `s3ql+subscribe@googlegroups.com `_. Please report any bugs you may encounter in the `Issue Tracker `_. s3ql-2.26/rst/issues.rst0000644000175000017500000001162212742247106016673 0ustar nikrationikratio00000000000000.. -*- mode: rst -*- ============ Known Issues ============ * S3QL de-duplicates data blocks based solely on SHA256 checksums, without doing a byte-by-byte comparison of the blocks. Since it is possible for two data blocks to have the same checksum despite having different contents, this can lead to problems. If two such blocks are stored in an S3QL file system, the data in one block will be lost and replaced by the data in the other block. However, the chances of this occurring for any two blocks are about 1 in 10^77 (2^256). For a file system that holds a total of 10^34 blocks, the chances of a collision increase to about 1 in 10^9. Storing more than 10^34 blocks (or about 10^25 TB with an (extremely small) block size of 4 kB) is therefore not recommended. Being exceptionally unlucky may also be a disadvantage. * S3QL does not support Access Control Lists (ACLs). This is due to a bug in the FUSE library and will therefore hopefully be fixed at some point. See `issue #16 `_ for more details. * As of Linux kernel 3.5 S3QL file systems do not implement the "write protect" bit on directories.
In other words, even if a directory has the write protect bit set, the owner of the directory can delete any files and (empty) subdirectories inside it. This is a bug in the FUSE kernel module (cf. https://github.com/libfuse/libfuse/issues/23) and needs to be fixed in the kernel. Unfortunately it does not look as if this is going to be fixed anytime soon (as of 2016/2/28). * S3QL is rather slow when an application tries to write data in unreasonably small chunks. If a 1 MiB file is copied in chunks of 1 KB, this will take more than 10 times as long as when it's copied with the (recommended) chunk size of 128 KiB. This is a limitation of the FUSE library (which does not yet support write caching) which will hopefully be addressed in some future FUSE version. Most applications, including e.g. GNU `cp` and `rsync`, use reasonably large buffers and are therefore not affected by this problem and perform very efficiently on S3QL file systems. However, if you encounter unexpectedly slow performance with a specific program, this might be due to the program using very small write buffers. Although this is not really a bug in the program, it might be worth asking the program's authors for help. * S3QL always updates file and directory access times as if the ``relatime`` mount option has been specified: the access time ("atime") is only updated if it is currently earlier than either the status change time ("ctime") or modification time ("mtime"). * S3QL directories always have an `st_nlink` value of 1. This may confuse programs that rely on directories having `st_nlink` values of *(2 + number of sub directories)*. Note that this is not a bug in S3QL. Including sub directories in the `st_nlink` value is a Unix convention, but by no means a requirement. If an application blindly relies on this convention being followed, then this is a bug in the application. A prominent example is early versions of GNU find, which required the `--noleaf` option to work correctly on S3QL file systems. This bug has already been fixed in recent find versions. * The `umount` and `fusermount -u` commands will *not* block until all data has been uploaded to the backend. (this is a FUSE limitation that will hopefully be removed in the future, see `issue #1 `_). If you use either command to unmount an S3QL file system, you have to take care to explicitly wait for the `mount.s3ql` process to terminate before you shut down or restart the system. Therefore it is generally not a good idea to mount an S3QL file system in `/etc/fstab` (you should use a dedicated init script instead). * S3QL relies on the backends not to run out of space. This is a given for big storage providers like Amazon S3 or Google Storage, but you may stumble upon this if you use your own server or smaller providers. If there is no space left in the backend, attempts to write more data into the S3QL file system will fail and the file system will be in an inconsistent state and require a file system check (and you should make sure to make space available in the backend before running the check). Unfortunately, there is no way to handle insufficient space in the backend without leaving the file system inconsistent. Since S3QL first writes data into the cache, it can no longer return an error when it later turns out that the cache can not be committed to the backend. * When using python-dugong versions 3.3 or earlier, S3QL supports only CONNECT-style proxying, which may cause issues with some proxy servers when using plain HTTP.
Upgrading to python-dugong 3.4 or newer removes this limitation. s3ql-2.26/setup.cfg0000644000175000017500000000011113246754372015636 0ustar nikrationikratio00000000000000[easy_install] allow_hosts = None [egg_info] tag_build = tag_date = 0 s3ql-2.26/doc/0000775000175000017500000000000013246754372014573 5ustar nikrationikratio00000000000000s3ql-2.26/doc/latex/0000775000175000017500000000000013246754372015710 5ustar nikrationikratio00000000000000s3ql-2.26/doc/latex/python.ist0000664000175000017500000000033412761727673017757 0ustar nikrationikratio00000000000000line_max 100 headings_flag 1 heading_prefix " \\bigletter " preamble "\\begin{theindex} \\def\\bigletter#1{{\\Large\\sffamily#1}\\nopagebreak\\vspace{1mm}} " symhead_positive "{Symbols}" numhead_positive "{Numbers}" s3ql-2.26/doc/latex/manual.aux0000664000175000017500000011310213246754372017702 0ustar nikrationikratio00000000000000\relax \providecommand\hyper@newdestlabel[2]{} \providecommand\HyperFirstAtBeginDocument{\AtBeginDocument} \HyperFirstAtBeginDocument{\ifx\hyper@anchor\@undefined \global\let\oldcontentsline\contentsline \gdef\contentsline#1#2#3#4{\oldcontentsline{#1}{#2}{#3}} \global\let\oldnewlabel\newlabel \gdef\newlabel#1#2{\newlabelxx{#1}#2} \gdef\newlabelxx#1#2#3#4#5#6{\oldnewlabel{#1}{{#2}{#3}}} \AtEndDocument{\ifx\hyper@anchor\@undefined \let\contentsline\oldcontentsline \let\newlabel\oldnewlabel \fi} \fi} \global\let\hyper@last\relax \gdef\HyperFirstAtBeginDocument#1{#1} \providecommand\HyField@AuxAddToFields[1]{} \providecommand\HyField@AuxAddToCoFields[2]{} \select@language{english} \@writefile{toc}{\select@language{english}} \@writefile{lof}{\select@language{english}} \@writefile{lot}{\select@language{english}} \newlabel{index::doc}{{}{1}{}{section*.2}{}} \@writefile{toc}{\contentsline {chapter}{\numberline {1}S3QL}{1}{chapter.1}} \@writefile{lof}{\addvspace {10\p@ }} \@writefile{lot}{\addvspace {10\p@ }} \@writefile{loliteral-block}{\addvspace {10\p@ }} \newlabel{about:s3ql-user-s-guide}{{1}{1}{S3QL}{chapter.1}{}} \newlabel{about::doc}{{1}{1}{S3QL}{chapter.1}{}} \newlabel{about:s3ql}{{1}{1}{S3QL}{chapter.1}{}} \@writefile{toc}{\contentsline {section}{\numberline {1.1}Features}{1}{section.1.1}} \newlabel{about:openstack}{{1.1}{1}{Features}{section.1.1}{}} \newlabel{about:features}{{1.1}{1}{Features}{section.1.1}{}} \@writefile{toc}{\contentsline {section}{\numberline {1.2}Development Status}{2}{section.1.2}} \newlabel{about:development-status}{{1.2}{2}{Development Status}{section.1.2}{}} \@writefile{toc}{\contentsline {section}{\numberline {1.3}Supported Platforms}{2}{section.1.3}} \newlabel{about:supported-platforms}{{1.3}{2}{Supported Platforms}{section.1.3}{}} \@writefile{toc}{\contentsline {section}{\numberline {1.4}Contributing}{2}{section.1.4}} \newlabel{about:contributing}{{1.4}{2}{Contributing}{section.1.4}{}} \@writefile{toc}{\contentsline {chapter}{\numberline {2}Installation}{3}{chapter.2}} \@writefile{lof}{\addvspace {10\p@ }} \@writefile{lot}{\addvspace {10\p@ }} \@writefile{loliteral-block}{\addvspace {10\p@ }} \newlabel{installation:installation}{{2}{3}{Installation}{chapter.2}{}} \newlabel{installation::doc}{{2}{3}{Installation}{chapter.2}{}} \newlabel{installation:github}{{2}{3}{Installation}{chapter.2}{}} \@writefile{toc}{\contentsline {section}{\numberline {2.1}Dependencies}{3}{section.2.1}} \newlabel{installation:dependencies}{{2.1}{3}{Dependencies}{section.2.1}{}} \@writefile{toc}{\contentsline {section}{\numberline {2.2}Installing S3QL}{4}{section.2.2}} 
\newlabel{installation:inst-s3ql}{{2.2}{4}{Installing S3QL}{section.2.2}{}} \newlabel{installation:installing-s3ql}{{2.2}{4}{Installing S3QL}{section.2.2}{}} \@writefile{toc}{\contentsline {section}{\numberline {2.3}Development Version}{4}{section.2.3}} \newlabel{installation:development-version}{{2.3}{4}{Development Version}{section.2.3}{}} \@writefile{toc}{\contentsline {section}{\numberline {2.4}Running tests requiring remote servers}{4}{section.2.4}} \newlabel{installation:running-tests-requiring-remote-servers}{{2.4}{4}{Running tests requiring remote servers}{section.2.4}{}} \@writefile{toc}{\contentsline {chapter}{\numberline {3}Storage Backends}{7}{chapter.3}} \@writefile{lof}{\addvspace {10\p@ }} \@writefile{lot}{\addvspace {10\p@ }} \@writefile{loliteral-block}{\addvspace {10\p@ }} \newlabel{backends:id1}{{3}{7}{Storage Backends}{chapter.3}{}} \newlabel{backends::doc}{{3}{7}{Storage Backends}{chapter.3}{}} \newlabel{backends:storage-backends}{{3}{7}{Storage Backends}{chapter.3}{}} \newlabel{backends:sphinx}{{3}{7}{Storage Backends}{chapter.3}{}} \@writefile{toc}{\contentsline {section}{\numberline {3.1}Google Storage}{7}{section.3.1}} \newlabel{backends:google-storage}{{3.1}{7}{Google Storage}{section.3.1}{}} \newlabel{backends:cmdoption-gs_backend-arg-no-ssl}{{3.1}{7}{Google Storage}{section*.3}{}} \newlabel{backends:cmdoption-gs_backend-arg-ssl-ca-path}{{3.1}{8}{Google Storage}{section*.4}{}} \newlabel{backends:cmdoption-gs_backend-arg-tcp-timeout}{{3.1}{8}{Google Storage}{section*.5}{}} \@writefile{toc}{\contentsline {section}{\numberline {3.2}Amazon S3}{8}{section.3.2}} \newlabel{backends:amazon-s3}{{3.2}{8}{Amazon S3}{section.3.2}{}} \newlabel{backends:google-storage-manager}{{3.2}{8}{Amazon S3}{section.3.2}{}} \newlabel{backends:cmdoption-s3_backend-arg-no-ssl}{{3.2}{8}{Amazon S3}{section*.6}{}} \newlabel{backends:cmdoption-s3_backend-arg-ssl-ca-path}{{3.2}{8}{Amazon S3}{section*.7}{}} \newlabel{backends:cmdoption-s3_backend-arg-tcp-timeout}{{3.2}{8}{Amazon S3}{section*.8}{}} \newlabel{backends:cmdoption-s3_backend-arg-sse}{{3.2}{8}{Amazon S3}{section*.9}{}} \newlabel{backends:cmdoption-s3_backend-arg-ia}{{3.2}{8}{Amazon S3}{section*.10}{}} \newlabel{backends:cmdoption-s3_backend-arg-rrs}{{3.2}{8}{Amazon S3}{section*.11}{}} \@writefile{toc}{\contentsline {section}{\numberline {3.3}OpenStack/Swift}{9}{section.3.3}} \newlabel{backends:openstack-swift}{{3.3}{9}{OpenStack/Swift}{section.3.3}{}} \newlabel{backends:openstack-backend}{{3.3}{9}{OpenStack/Swift}{section.3.3}{}} \newlabel{backends:cmdoption-swift_backend-arg-no-ssl}{{3.3}{9}{OpenStack/Swift}{section*.12}{}} \newlabel{backends:cmdoption-swift_backend-arg-ssl-ca-path}{{3.3}{9}{OpenStack/Swift}{section*.13}{}} \newlabel{backends:cmdoption-swift_backend-arg-tcp-timeout}{{3.3}{9}{OpenStack/Swift}{section*.14}{}} \newlabel{backends:cmdoption-swift_backend-arg-disable-expect100}{{3.3}{9}{OpenStack/Swift}{section*.15}{}} \newlabel{backends:cmdoption-swift_backend-arg-no-feature-detection}{{3.3}{9}{OpenStack/Swift}{section*.16}{}} \@writefile{toc}{\contentsline {section}{\numberline {3.4}Rackspace CloudFiles}{10}{section.3.4}} \newlabel{backends:rackspace-cloudfiles}{{3.4}{10}{Rackspace CloudFiles}{section.3.4}{}} \@writefile{toc}{\contentsline {section}{\numberline {3.5}S3 compatible}{10}{section.3.5}} \newlabel{backends:s3-compatible}{{3.5}{10}{S3 compatible}{section.3.5}{}} \newlabel{backends:rackspace}{{3.5}{10}{S3 compatible}{section.3.5}{}} \newlabel{backends:cmdoption-s3c_backend-arg-no-ssl}{{3.5}{10}{S3 
compatible}{section*.17}{}} \newlabel{backends:cmdoption-s3c_backend-arg-ssl-ca-path}{{3.5}{10}{S3 compatible}{section*.18}{}} \newlabel{backends:cmdoption-s3c_backend-arg-tcp-timeout}{{3.5}{10}{S3 compatible}{section*.19}{}} \newlabel{backends:cmdoption-s3c_backend-arg-disable-expect100}{{3.5}{10}{S3 compatible}{section*.20}{}} \newlabel{backends:cmdoption-s3c_backend-arg-dumb-copy}{{3.5}{10}{S3 compatible}{section*.21}{}} \@writefile{toc}{\contentsline {section}{\numberline {3.6}Local}{11}{section.3.6}} \newlabel{backends:id6}{{3.6}{11}{Local}{section.3.6}{}} \newlabel{backends:local}{{3.6}{11}{Local}{section.3.6}{}} \@writefile{toc}{\contentsline {chapter}{\numberline {4}Important Rules to Avoid Losing Data}{13}{chapter.4}} \@writefile{lof}{\addvspace {10\p@ }} \@writefile{lot}{\addvspace {10\p@ }} \@writefile{loliteral-block}{\addvspace {10\p@ }} \newlabel{durability:durability}{{4}{13}{Important Rules to Avoid Losing Data}{chapter.4}{}} \newlabel{durability::doc}{{4}{13}{Important Rules to Avoid Losing Data}{chapter.4}{}} \newlabel{durability:sshfs}{{4}{13}{Important Rules to Avoid Losing Data}{chapter.4}{}} \newlabel{durability:important-rules-to-avoid-losing-data}{{4}{13}{Important Rules to Avoid Losing Data}{chapter.4}{}} \@writefile{toc}{\contentsline {section}{\numberline {4.1}Rules in a Nutshell}{13}{section.4.1}} \newlabel{durability:rules-in-a-nutshell}{{4.1}{13}{Rules in a Nutshell}{section.4.1}{}} \@writefile{toc}{\contentsline {section}{\numberline {4.2}Consistency Window List}{14}{section.4.2}} \newlabel{durability:consistency-window-list}{{4.2}{14}{Consistency Window List}{section.4.2}{}} \@writefile{toc}{\contentsline {section}{\numberline {4.3}Data Consistency}{14}{section.4.3}} \newlabel{durability:data-consistency}{{4.3}{14}{Data Consistency}{section.4.3}{}} \@writefile{toc}{\contentsline {section}{\numberline {4.4}Data Durability}{15}{section.4.4}} \newlabel{durability:backend-reliability}{{4.4}{15}{Data Durability}{section.4.4}{}} \newlabel{durability:data-durability}{{4.4}{15}{Data Durability}{section.4.4}{}} \@writefile{toc}{\contentsline {chapter}{\numberline {5}File System Creation}{17}{chapter.5}} \@writefile{lof}{\addvspace {10\p@ }} \@writefile{lot}{\addvspace {10\p@ }} \@writefile{loliteral-block}{\addvspace {10\p@ }} \newlabel{mkfs::doc}{{5}{17}{File System Creation}{chapter.5}{}} \newlabel{mkfs:file-system-creation}{{5}{17}{File System Creation}{chapter.5}{}} \@writefile{toc}{\contentsline {chapter}{\numberline {6}Managing File Systems}{19}{chapter.6}} \@writefile{lof}{\addvspace {10\p@ }} \@writefile{lot}{\addvspace {10\p@ }} \@writefile{loliteral-block}{\addvspace {10\p@ }} \newlabel{adm:managing-file-systems}{{6}{19}{Managing File Systems}{chapter.6}{}} \newlabel{adm::doc}{{6}{19}{Managing File Systems}{chapter.6}{}} \@writefile{toc}{\contentsline {section}{\numberline {6.1}Changing the Passphrase}{19}{section.6.1}} \newlabel{adm:changing-the-passphrase}{{6.1}{19}{Changing the Passphrase}{section.6.1}{}} \@writefile{toc}{\contentsline {section}{\numberline {6.2}Upgrading the file system}{20}{section.6.2}} \newlabel{adm:upgrading-the-file-system}{{6.2}{20}{Upgrading the file system}{section.6.2}{}} \@writefile{toc}{\contentsline {section}{\numberline {6.3}Deleting a file system}{20}{section.6.3}} \newlabel{adm:deleting-a-file-system}{{6.3}{20}{Deleting a file system}{section.6.3}{}} \@writefile{toc}{\contentsline {section}{\numberline {6.4}Restoring Metadata Backups}{20}{section.6.4}} \newlabel{adm:restoring-metadata-backups}{{6.4}{20}{Restoring Metadata 
Backups}{section.6.4}{}} \@writefile{toc}{\contentsline {chapter}{\numberline {7}Mounting}{21}{chapter.7}} \@writefile{lof}{\addvspace {10\p@ }} \@writefile{lot}{\addvspace {10\p@ }} \@writefile{loliteral-block}{\addvspace {10\p@ }} \newlabel{mount:mounting}{{7}{21}{Mounting}{chapter.7}{}} \newlabel{mount::doc}{{7}{21}{Mounting}{chapter.7}{}} \@writefile{toc}{\contentsline {section}{\numberline {7.1}Permission Checking}{22}{section.7.1}} \newlabel{mount:permission-checking}{{7.1}{22}{Permission Checking}{section.7.1}{}} \@writefile{toc}{\contentsline {section}{\numberline {7.2}Compression Algorithms}{22}{section.7.2}} \newlabel{mount:compression-algorithms}{{7.2}{22}{Compression Algorithms}{section.7.2}{}} \@writefile{toc}{\contentsline {section}{\numberline {7.3}Notes about Caching}{23}{section.7.3}} \newlabel{mount:notes-about-caching}{{7.3}{23}{Notes about Caching}{section.7.3}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {7.3.1}Maximum Number of Cache Entries}{23}{subsection.7.3.1}} \newlabel{mount:maximum-number-of-cache-entries}{{7.3.1}{23}{Maximum Number of Cache Entries}{subsection.7.3.1}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {7.3.2}Cache Flushing and Expiration}{23}{subsection.7.3.2}} \newlabel{mount:cache-flushing-and-expiration}{{7.3.2}{23}{Cache Flushing and Expiration}{subsection.7.3.2}{}} \@writefile{toc}{\contentsline {section}{\numberline {7.4}Failure Modes}{23}{section.7.4}} \newlabel{mount:failure-modes}{{7.4}{23}{Failure Modes}{section.7.4}{}} \@writefile{toc}{\contentsline {section}{\numberline {7.5}Automatic Mounting}{24}{section.7.5}} \newlabel{mount:logcheck}{{7.5}{24}{Automatic Mounting}{section.7.5}{}} \newlabel{mount:automatic-mounting}{{7.5}{24}{Automatic Mounting}{section.7.5}{}} \@writefile{toc}{\contentsline {chapter}{\numberline {8}Advanced S3QL Features}{25}{chapter.8}} \@writefile{lof}{\addvspace {10\p@ }} \@writefile{lot}{\addvspace {10\p@ }} \@writefile{loliteral-block}{\addvspace {10\p@ }} \newlabel{special::doc}{{8}{25}{Advanced S3QL Features}{chapter.8}{}} \newlabel{special:advanced-s3ql-features}{{8}{25}{Advanced S3QL Features}{chapter.8}{}} \@writefile{toc}{\contentsline {section}{\numberline {8.1}Snapshotting and Copy-on-Write}{25}{section.8.1}} \newlabel{special:snapshotting-and-copy-on-write}{{8.1}{25}{Snapshotting and Copy-on-Write}{section.8.1}{}} \newlabel{special:s3qlcp}{{8.1}{25}{Snapshotting and Copy-on-Write}{section.8.1}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {8.1.1}Snapshotting vs Hardlinking}{25}{subsection.8.1.1}} \newlabel{special:snapshotting-vs-hardlinking}{{8.1.1}{25}{Snapshotting vs Hardlinking}{subsection.8.1.1}{}} \@writefile{toc}{\contentsline {section}{\numberline {8.2}Getting Statistics}{26}{section.8.2}} \newlabel{special:s3qlstat}{{8.2}{26}{Getting Statistics}{section.8.2}{}} \newlabel{special:getting-statistics}{{8.2}{26}{Getting Statistics}{section.8.2}{}} \@writefile{toc}{\contentsline {section}{\numberline {8.3}Immutable Trees}{26}{section.8.3}} \newlabel{special:immutable-trees}{{8.3}{26}{Immutable Trees}{section.8.3}{}} \newlabel{special:s3qllock}{{8.3}{26}{Immutable Trees}{section.8.3}{}} \@writefile{toc}{\contentsline {section}{\numberline {8.4}Fast Recursive Removal}{27}{section.8.4}} \newlabel{special:fast-recursive-removal}{{8.4}{27}{Fast Recursive Removal}{section.8.4}{}} \newlabel{special:s3qlrm}{{8.4}{27}{Fast Recursive Removal}{section.8.4}{}} \@writefile{toc}{\contentsline {section}{\numberline {8.5}Runtime Configuration}{27}{section.8.5}} 
\newlabel{special:runtime-configuration}{{8.5}{27}{Runtime Configuration}{section.8.5}{}} \newlabel{special:s3qlctrl}{{8.5}{27}{Runtime Configuration}{section.8.5}{}} \@writefile{toc}{\contentsline {chapter}{\numberline {9}Unmounting}{29}{chapter.9}} \@writefile{lof}{\addvspace {10\p@ }} \@writefile{lot}{\addvspace {10\p@ }} \@writefile{loliteral-block}{\addvspace {10\p@ }} \newlabel{umount::doc}{{9}{29}{Unmounting}{chapter.9}{}} \newlabel{umount:unmounting}{{9}{29}{Unmounting}{chapter.9}{}} \@writefile{toc}{\contentsline {chapter}{\numberline {10}Checking for Errors}{31}{chapter.10}} \@writefile{lof}{\addvspace {10\p@ }} \@writefile{lot}{\addvspace {10\p@ }} \@writefile{loliteral-block}{\addvspace {10\p@ }} \newlabel{fsck:checking-for-errors}{{10}{31}{Checking for Errors}{chapter.10}{}} \newlabel{fsck::doc}{{10}{31}{Checking for Errors}{chapter.10}{}} \@writefile{toc}{\contentsline {section}{\numberline {10.1}Checking and repairing internal file system errors}{31}{section.10.1}} \newlabel{fsck:checking-and-repairing-internal-file-system-errors}{{10.1}{31}{Checking and repairing internal file system errors}{section.10.1}{}} \@writefile{toc}{\contentsline {section}{\numberline {10.2}Detecting and handling backend data corruption}{32}{section.10.2}} \newlabel{fsck:s3ql-verify}{{10.2}{32}{Detecting and handling backend data corruption}{section.10.2}{}} \newlabel{fsck:detecting-and-handling-backend-data-corruption}{{10.2}{32}{Detecting and handling backend data corruption}{section.10.2}{}} \@writefile{toc}{\contentsline {chapter}{\numberline {11}Storing Authentication Information}{35}{chapter.11}} \@writefile{lof}{\addvspace {10\p@ }} \@writefile{lot}{\addvspace {10\p@ }} \@writefile{loliteral-block}{\addvspace {10\p@ }} \newlabel{authinfo::doc}{{11}{35}{Storing Authentication Information}{chapter.11}{}} \newlabel{authinfo:authinfo}{{11}{35}{Storing Authentication Information}{chapter.11}{}} \newlabel{authinfo:storing-authentication-information}{{11}{35}{Storing Authentication Information}{chapter.11}{}} \@writefile{toc}{\contentsline {chapter}{\numberline {12}Contributed Programs}{37}{chapter.12}} \@writefile{lof}{\addvspace {10\p@ }} \@writefile{lot}{\addvspace {10\p@ }} \@writefile{loliteral-block}{\addvspace {10\p@ }} \newlabel{contrib::doc}{{12}{37}{Contributed Programs}{chapter.12}{}} \newlabel{contrib:contributed-programs}{{12}{37}{Contributed Programs}{chapter.12}{}} \@writefile{toc}{\contentsline {section}{\numberline {12.1}benchmark.py}{37}{section.12.1}} \newlabel{contrib:benchmark-py}{{12.1}{37}{benchmark.py}{section.12.1}{}} \@writefile{toc}{\contentsline {section}{\numberline {12.2}clone\_fs.py}{37}{section.12.2}} \newlabel{contrib:clone-fs-py}{{12.2}{37}{clone\_fs.py}{section.12.2}{}} \@writefile{toc}{\contentsline {section}{\numberline {12.3}pcp.py}{37}{section.12.3}} \newlabel{contrib:pcp}{{12.3}{37}{pcp.py}{section.12.3}{}} \newlabel{contrib:pcp-py}{{12.3}{37}{pcp.py}{section.12.3}{}} \@writefile{toc}{\contentsline {section}{\numberline {12.4}s3ql\_backup.sh}{37}{section.12.4}} \newlabel{contrib:s3ql-backup-sh}{{12.4}{37}{s3ql\_backup.sh}{section.12.4}{}} \@writefile{toc}{\contentsline {section}{\numberline {12.5}expire\_backups.py}{38}{section.12.5}} \newlabel{contrib:expire-backups-py}{{12.5}{38}{expire\_backups.py}{section.12.5}{}} \@writefile{toc}{\contentsline {section}{\numberline {12.6}remove\_objects.py}{39}{section.12.6}} \newlabel{contrib:remove-objects}{{12.6}{39}{remove\_objects.py}{section.12.6}{}} 
\newlabel{contrib:remove-objects-py}{{12.6}{39}{remove\_objects.py}{section.12.6}{}} \@writefile{toc}{\contentsline {chapter}{\numberline {13}Tips \& Tricks}{41}{chapter.13}} \@writefile{lof}{\addvspace {10\p@ }} \@writefile{lot}{\addvspace {10\p@ }} \@writefile{loliteral-block}{\addvspace {10\p@ }} \newlabel{tips:tips-tricks}{{13}{41}{Tips \& Tricks}{chapter.13}{}} \newlabel{tips::doc}{{13}{41}{Tips \& Tricks}{chapter.13}{}} \@writefile{toc}{\contentsline {section}{\numberline {13.1}SSH Backend}{41}{section.13.1}} \newlabel{tips:ssh-tipp}{{13.1}{41}{SSH Backend}{section.13.1}{}} \newlabel{tips:ssh-backend}{{13.1}{41}{SSH Backend}{section.13.1}{}} \@writefile{toc}{\contentsline {section}{\numberline {13.2}Permanently mounted backup file system}{41}{section.13.2}} \newlabel{tips:permanently-mounted-backup-file-system}{{13.2}{41}{Permanently mounted backup file system}{section.13.2}{}} \@writefile{toc}{\contentsline {section}{\numberline {13.3}Improving copy performance}{41}{section.13.3}} \newlabel{tips:improving-copy-performance}{{13.3}{41}{Improving copy performance}{section.13.3}{}} \newlabel{tips:copy-performance}{{13.3}{41}{Improving copy performance}{section.13.3}{}} \@writefile{toc}{\contentsline {chapter}{\numberline {14}Known Issues}{43}{chapter.14}} \@writefile{lof}{\addvspace {10\p@ }} \@writefile{lot}{\addvspace {10\p@ }} \@writefile{loliteral-block}{\addvspace {10\p@ }} \newlabel{issues:known-issues}{{14}{43}{Known Issues}{chapter.14}{}} \newlabel{issues::doc}{{14}{43}{Known Issues}{chapter.14}{}} \@writefile{toc}{\contentsline {chapter}{\numberline {15}Manpages}{45}{chapter.15}} \@writefile{lof}{\addvspace {10\p@ }} \@writefile{lot}{\addvspace {10\p@ }} \@writefile{loliteral-block}{\addvspace {10\p@ }} \newlabel{man/index:manpages}{{15}{45}{Manpages}{chapter.15}{}} \newlabel{man/index::doc}{{15}{45}{Manpages}{chapter.15}{}} \@writefile{toc}{\contentsline {section}{\numberline {15.1}The \textbf {\texttt {mkfs.s3ql}} command}{45}{section.15.1}} \newlabel{man/mkfs:the-command-command}{{15.1}{45}{The \textbf {\texttt {mkfs.s3ql}} command}{section.15.1}{}} \newlabel{man/mkfs::doc}{{15.1}{45}{The \textbf {\texttt {mkfs.s3ql}} command}{section.15.1}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.1.1}Synopsis}{45}{subsection.15.1.1}} \newlabel{man/mkfs:synopsis}{{15.1.1}{45}{Synopsis}{subsection.15.1.1}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.1.2}Description}{45}{subsection.15.1.2}} \newlabel{man/mkfs:description}{{15.1.2}{45}{Description}{subsection.15.1.2}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.1.3}Options}{45}{subsection.15.1.3}} \newlabel{man/mkfs:options}{{15.1.3}{45}{Options}{subsection.15.1.3}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.1.4}Exit Codes}{46}{subsection.15.1.4}} \newlabel{man/mkfs:exit-codes}{{15.1.4}{46}{Exit Codes}{subsection.15.1.4}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.1.5}See Also}{46}{subsection.15.1.5}} \newlabel{man/mkfs:see-also}{{15.1.5}{46}{See Also}{subsection.15.1.5}{}} \@writefile{toc}{\contentsline {section}{\numberline {15.2}The \textbf {\texttt {s3qladm}} command}{46}{section.15.2}} \newlabel{man/adm:the-command-command}{{15.2}{46}{The \textbf {\texttt {s3qladm}} command}{section.15.2}{}} \newlabel{man/adm::doc}{{15.2}{46}{The \textbf {\texttt {s3qladm}} command}{section.15.2}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.2.1}Synopsis}{46}{subsection.15.2.1}} 
\newlabel{man/adm:synopsis}{{15.2.1}{46}{Synopsis}{subsection.15.2.1}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.2.2}Description}{47}{subsection.15.2.2}} \newlabel{man/adm:description}{{15.2.2}{47}{Description}{subsection.15.2.2}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.2.3}Options}{47}{subsection.15.2.3}} \newlabel{man/adm:options}{{15.2.3}{47}{Options}{subsection.15.2.3}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.2.4}Actions}{47}{subsection.15.2.4}} \newlabel{man/adm:actions}{{15.2.4}{47}{Actions}{subsection.15.2.4}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.2.5}Exit Codes}{47}{subsection.15.2.5}} \newlabel{man/adm:exit-codes}{{15.2.5}{47}{Exit Codes}{subsection.15.2.5}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.2.6}See Also}{48}{subsection.15.2.6}} \newlabel{man/adm:see-also}{{15.2.6}{48}{See Also}{subsection.15.2.6}{}} \@writefile{toc}{\contentsline {section}{\numberline {15.3}The \textbf {\texttt {mount.s3ql}} command}{48}{section.15.3}} \newlabel{man/mount:the-command-command}{{15.3}{48}{The \textbf {\texttt {mount.s3ql}} command}{section.15.3}{}} \newlabel{man/mount::doc}{{15.3}{48}{The \textbf {\texttt {mount.s3ql}} command}{section.15.3}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.3.1}Synopsis}{48}{subsection.15.3.1}} \newlabel{man/mount:synopsis}{{15.3.1}{48}{Synopsis}{subsection.15.3.1}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.3.2}Description}{48}{subsection.15.3.2}} \newlabel{man/mount:description}{{15.3.2}{48}{Description}{subsection.15.3.2}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.3.3}Options}{48}{subsection.15.3.3}} \newlabel{man/mount:options}{{15.3.3}{48}{Options}{subsection.15.3.3}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.3.4}Exit Codes}{49}{subsection.15.3.4}} \newlabel{man/mount:exit-codes}{{15.3.4}{49}{Exit Codes}{subsection.15.3.4}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.3.5}See Also}{50}{subsection.15.3.5}} \newlabel{man/mount:see-also}{{15.3.5}{50}{See Also}{subsection.15.3.5}{}} \@writefile{toc}{\contentsline {section}{\numberline {15.4}The \textbf {\texttt {s3qlstat}} command}{50}{section.15.4}} \newlabel{man/stat:the-command-command}{{15.4}{50}{The \textbf {\texttt {s3qlstat}} command}{section.15.4}{}} \newlabel{man/stat::doc}{{15.4}{50}{The \textbf {\texttt {s3qlstat}} command}{section.15.4}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.4.1}Synopsis}{50}{subsection.15.4.1}} \newlabel{man/stat:synopsis}{{15.4.1}{50}{Synopsis}{subsection.15.4.1}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.4.2}Description}{50}{subsection.15.4.2}} \newlabel{man/stat:description}{{15.4.2}{50}{Description}{subsection.15.4.2}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.4.3}Options}{51}{subsection.15.4.3}} \newlabel{man/stat:options}{{15.4.3}{51}{Options}{subsection.15.4.3}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.4.4}Exit Codes}{51}{subsection.15.4.4}} \newlabel{man/stat:exit-codes}{{15.4.4}{51}{Exit Codes}{subsection.15.4.4}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.4.5}See Also}{51}{subsection.15.4.5}} \newlabel{man/stat:see-also}{{15.4.5}{51}{See Also}{subsection.15.4.5}{}} \@writefile{toc}{\contentsline {section}{\numberline {15.5}The \textbf {\texttt {s3qlctrl}} command}{51}{section.15.5}} \newlabel{man/ctrl:the-command-command}{{15.5}{51}{The \textbf {\texttt {s3qlctrl}} 
command}{section.15.5}{}} \newlabel{man/ctrl::doc}{{15.5}{51}{The \textbf {\texttt {s3qlctrl}} command}{section.15.5}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.5.1}Synopsis}{51}{subsection.15.5.1}} \newlabel{man/ctrl:synopsis}{{15.5.1}{51}{Synopsis}{subsection.15.5.1}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.5.2}Description}{51}{subsection.15.5.2}} \newlabel{man/ctrl:description}{{15.5.2}{51}{Description}{subsection.15.5.2}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.5.3}Options}{52}{subsection.15.5.3}} \newlabel{man/ctrl:options}{{15.5.3}{52}{Options}{subsection.15.5.3}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.5.4}Exit Codes}{52}{subsection.15.5.4}} \newlabel{man/ctrl:exit-codes}{{15.5.4}{52}{Exit Codes}{subsection.15.5.4}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.5.5}See Also}{52}{subsection.15.5.5}} \newlabel{man/ctrl:see-also}{{15.5.5}{52}{See Also}{subsection.15.5.5}{}} \@writefile{toc}{\contentsline {section}{\numberline {15.6}The \textbf {\texttt {s3qlcp}} command}{52}{section.15.6}} \newlabel{man/cp:the-command-command}{{15.6}{52}{The \textbf {\texttt {s3qlcp}} command}{section.15.6}{}} \newlabel{man/cp::doc}{{15.6}{52}{The \textbf {\texttt {s3qlcp}} command}{section.15.6}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.6.1}Synopsis}{52}{subsection.15.6.1}} \newlabel{man/cp:synopsis}{{15.6.1}{52}{Synopsis}{subsection.15.6.1}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.6.2}Description}{53}{subsection.15.6.2}} \newlabel{man/cp:description}{{15.6.2}{53}{Description}{subsection.15.6.2}{}} \@writefile{toc}{\contentsline {subsubsection}{Snapshotting vs Hardlinking}{53}{subsubsection*.22}} \newlabel{man/cp:snapshotting-vs-hardlinking}{{15.6.2}{53}{Snapshotting vs Hardlinking}{subsubsection*.22}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.6.3}Options}{53}{subsection.15.6.3}} \newlabel{man/cp:options}{{15.6.3}{53}{Options}{subsection.15.6.3}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.6.4}Exit Codes}{53}{subsection.15.6.4}} \newlabel{man/cp:exit-codes}{{15.6.4}{53}{Exit Codes}{subsection.15.6.4}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.6.5}See Also}{54}{subsection.15.6.5}} \newlabel{man/cp:see-also}{{15.6.5}{54}{See Also}{subsection.15.6.5}{}} \@writefile{toc}{\contentsline {section}{\numberline {15.7}The \textbf {\texttt {s3qlrm}} command}{54}{section.15.7}} \newlabel{man/rm:the-command-command}{{15.7}{54}{The \textbf {\texttt {s3qlrm}} command}{section.15.7}{}} \newlabel{man/rm::doc}{{15.7}{54}{The \textbf {\texttt {s3qlrm}} command}{section.15.7}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.7.1}Synopsis}{54}{subsection.15.7.1}} \newlabel{man/rm:synopsis}{{15.7.1}{54}{Synopsis}{subsection.15.7.1}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.7.2}Description}{54}{subsection.15.7.2}} \newlabel{man/rm:description}{{15.7.2}{54}{Description}{subsection.15.7.2}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.7.3}Options}{54}{subsection.15.7.3}} \newlabel{man/rm:options}{{15.7.3}{54}{Options}{subsection.15.7.3}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.7.4}Exit Codes}{54}{subsection.15.7.4}} \newlabel{man/rm:exit-codes}{{15.7.4}{54}{Exit Codes}{subsection.15.7.4}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.7.5}See Also}{55}{subsection.15.7.5}} \newlabel{man/rm:see-also}{{15.7.5}{55}{See 
Also}{subsection.15.7.5}{}} \@writefile{toc}{\contentsline {section}{\numberline {15.8}The \textbf {\texttt {s3qllock}} command}{55}{section.15.8}} \newlabel{man/lock:the-command-command}{{15.8}{55}{The \textbf {\texttt {s3qllock}} command}{section.15.8}{}} \newlabel{man/lock::doc}{{15.8}{55}{The \textbf {\texttt {s3qllock}} command}{section.15.8}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.8.1}Synopsis}{55}{subsection.15.8.1}} \newlabel{man/lock:synopsis}{{15.8.1}{55}{Synopsis}{subsection.15.8.1}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.8.2}Description}{55}{subsection.15.8.2}} \newlabel{man/lock:description}{{15.8.2}{55}{Description}{subsection.15.8.2}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.8.3}Rationale}{55}{subsection.15.8.3}} \newlabel{man/lock:rationale}{{15.8.3}{55}{Rationale}{subsection.15.8.3}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.8.4}Options}{55}{subsection.15.8.4}} \newlabel{man/lock:options}{{15.8.4}{55}{Options}{subsection.15.8.4}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.8.5}Exit Codes}{56}{subsection.15.8.5}} \newlabel{man/lock:exit-codes}{{15.8.5}{56}{Exit Codes}{subsection.15.8.5}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.8.6}See Also}{56}{subsection.15.8.6}} \newlabel{man/lock:see-also}{{15.8.6}{56}{See Also}{subsection.15.8.6}{}} \@writefile{toc}{\contentsline {section}{\numberline {15.9}The \textbf {\texttt {umount.s3ql}} command}{56}{section.15.9}} \newlabel{man/umount:the-command-command}{{15.9}{56}{The \textbf {\texttt {umount.s3ql}} command}{section.15.9}{}} \newlabel{man/umount::doc}{{15.9}{56}{The \textbf {\texttt {umount.s3ql}} command}{section.15.9}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.9.1}Synopsis}{56}{subsection.15.9.1}} \newlabel{man/umount:synopsis}{{15.9.1}{56}{Synopsis}{subsection.15.9.1}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.9.2}Description}{56}{subsection.15.9.2}} \newlabel{man/umount:description}{{15.9.2}{56}{Description}{subsection.15.9.2}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.9.3}Options}{56}{subsection.15.9.3}} \newlabel{man/umount:options}{{15.9.3}{56}{Options}{subsection.15.9.3}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.9.4}Exit Codes}{57}{subsection.15.9.4}} \newlabel{man/umount:exit-codes}{{15.9.4}{57}{Exit Codes}{subsection.15.9.4}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.9.5}See Also}{57}{subsection.15.9.5}} \newlabel{man/umount:see-also}{{15.9.5}{57}{See Also}{subsection.15.9.5}{}} \@writefile{toc}{\contentsline {section}{\numberline {15.10}The \textbf {\texttt {fsck.s3ql}} command}{57}{section.15.10}} \newlabel{man/fsck:the-command-command}{{15.10}{57}{The \textbf {\texttt {fsck.s3ql}} command}{section.15.10}{}} \newlabel{man/fsck::doc}{{15.10}{57}{The \textbf {\texttt {fsck.s3ql}} command}{section.15.10}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.10.1}Synopsis}{57}{subsection.15.10.1}} \newlabel{man/fsck:synopsis}{{15.10.1}{57}{Synopsis}{subsection.15.10.1}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.10.2}Description}{57}{subsection.15.10.2}} \newlabel{man/fsck:description}{{15.10.2}{57}{Description}{subsection.15.10.2}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.10.3}Options}{57}{subsection.15.10.3}} \newlabel{man/fsck:options}{{15.10.3}{57}{Options}{subsection.15.10.3}{}} \@writefile{toc}{\contentsline {subsection}{\numberline 
{15.10.4}Exit Codes}{58}{subsection.15.10.4}} \newlabel{man/fsck:exit-codes}{{15.10.4}{58}{Exit Codes}{subsection.15.10.4}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.10.5}See Also}{59}{subsection.15.10.5}} \newlabel{man/fsck:see-also}{{15.10.5}{59}{See Also}{subsection.15.10.5}{}} \@writefile{toc}{\contentsline {section}{\numberline {15.11}The \textbf {\texttt {s3ql\_oauth\_client}} command}{59}{section.15.11}} \newlabel{man/oauth_client:the-command-command}{{15.11}{59}{The \textbf {\texttt {s3ql\_oauth\_client}} command}{section.15.11}{}} \newlabel{man/oauth_client::doc}{{15.11}{59}{The \textbf {\texttt {s3ql\_oauth\_client}} command}{section.15.11}{}} \newlabel{man/oauth_client:oauth-client}{{15.11}{59}{The \textbf {\texttt {s3ql\_oauth\_client}} command}{section.15.11}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.11.1}Synopsis}{59}{subsection.15.11.1}} \newlabel{man/oauth_client:synopsis}{{15.11.1}{59}{Synopsis}{subsection.15.11.1}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.11.2}Description}{59}{subsection.15.11.2}} \newlabel{man/oauth_client:description}{{15.11.2}{59}{Description}{subsection.15.11.2}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.11.3}Options}{59}{subsection.15.11.3}} \newlabel{man/oauth_client:options}{{15.11.3}{59}{Options}{subsection.15.11.3}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.11.4}Exit Codes}{59}{subsection.15.11.4}} \newlabel{man/oauth_client:exit-codes}{{15.11.4}{59}{Exit Codes}{subsection.15.11.4}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.11.5}See Also}{60}{subsection.15.11.5}} \newlabel{man/oauth_client:see-also}{{15.11.5}{60}{See Also}{subsection.15.11.5}{}} \@writefile{toc}{\contentsline {section}{\numberline {15.12}The \textbf {\texttt {s3ql\_verify}} command}{60}{section.15.12}} \newlabel{man/verify:the-command-command}{{15.12}{60}{The \textbf {\texttt {s3ql\_verify}} command}{section.15.12}{}} \newlabel{man/verify::doc}{{15.12}{60}{The \textbf {\texttt {s3ql\_verify}} command}{section.15.12}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.12.1}Synopsis}{60}{subsection.15.12.1}} \newlabel{man/verify:synopsis}{{15.12.1}{60}{Synopsis}{subsection.15.12.1}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.12.2}Description}{60}{subsection.15.12.2}} \newlabel{man/verify:description}{{15.12.2}{60}{Description}{subsection.15.12.2}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.12.3}Options}{60}{subsection.15.12.3}} \newlabel{man/verify:options}{{15.12.3}{60}{Options}{subsection.15.12.3}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.12.4}Exit Codes}{61}{subsection.15.12.4}} \newlabel{man/verify:exit-codes}{{15.12.4}{61}{Exit Codes}{subsection.15.12.4}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.12.5}See Also}{61}{subsection.15.12.5}} \newlabel{man/verify:see-also}{{15.12.5}{61}{See Also}{subsection.15.12.5}{}} \@writefile{toc}{\contentsline {section}{\numberline {15.13}The \textbf {\texttt {pcp}} command}{61}{section.15.13}} \newlabel{man/pcp:the-command-command}{{15.13}{61}{The \textbf {\texttt {pcp}} command}{section.15.13}{}} \newlabel{man/pcp::doc}{{15.13}{61}{The \textbf {\texttt {pcp}} command}{section.15.13}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.13.1}Synopsis}{61}{subsection.15.13.1}} \newlabel{man/pcp:synopsis}{{15.13.1}{61}{Synopsis}{subsection.15.13.1}{}} \@writefile{toc}{\contentsline {subsection}{\numberline 
{15.13.2}Description}{61}{subsection.15.13.2}} \newlabel{man/pcp:description}{{15.13.2}{61}{Description}{subsection.15.13.2}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.13.3}Options}{62}{subsection.15.13.3}} \newlabel{man/pcp:options}{{15.13.3}{62}{Options}{subsection.15.13.3}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.13.4}Exit Codes}{62}{subsection.15.13.4}} \newlabel{man/pcp:exit-codes}{{15.13.4}{62}{Exit Codes}{subsection.15.13.4}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.13.5}See Also}{62}{subsection.15.13.5}} \newlabel{man/pcp:see-also}{{15.13.5}{62}{See Also}{subsection.15.13.5}{}} \@writefile{toc}{\contentsline {section}{\numberline {15.14}The \textbf {\texttt {expire\_backups}} command}{62}{section.15.14}} \newlabel{man/expire_backups:the-command-command}{{15.14}{62}{The \textbf {\texttt {expire\_backups}} command}{section.15.14}{}} \newlabel{man/expire_backups::doc}{{15.14}{62}{The \textbf {\texttt {expire\_backups}} command}{section.15.14}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.14.1}Synopsis}{62}{subsection.15.14.1}} \newlabel{man/expire_backups:synopsis}{{15.14.1}{62}{Synopsis}{subsection.15.14.1}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.14.2}Description}{62}{subsection.15.14.2}} \newlabel{man/expire_backups:description}{{15.14.2}{62}{Description}{subsection.15.14.2}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.14.3}Options}{63}{subsection.15.14.3}} \newlabel{man/expire_backups:options}{{15.14.3}{63}{Options}{subsection.15.14.3}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.14.4}Exit Codes}{64}{subsection.15.14.4}} \newlabel{man/expire_backups:exit-codes}{{15.14.4}{64}{Exit Codes}{subsection.15.14.4}{}} \@writefile{toc}{\contentsline {subsection}{\numberline {15.14.5}See Also}{64}{subsection.15.14.5}} \newlabel{man/expire_backups:see-also}{{15.14.5}{64}{See Also}{subsection.15.14.5}{}} \@writefile{toc}{\contentsline {chapter}{\numberline {16}Further Resources / Getting Help}{65}{chapter.16}} \@writefile{lof}{\addvspace {10\p@ }} \@writefile{lot}{\addvspace {10\p@ }} \@writefile{loliteral-block}{\addvspace {10\p@ }} \newlabel{resources::doc}{{16}{65}{Further Resources / Getting Help}{chapter.16}{}} \newlabel{resources:resources}{{16}{65}{Further Resources / Getting Help}{chapter.16}{}} \newlabel{resources:further-resources-getting-help}{{16}{65}{Further Resources / Getting Help}{chapter.16}{}} \@writefile{toc}{\contentsline {chapter}{\numberline {17}Implementation Details}{67}{chapter.17}} \@writefile{lof}{\addvspace {10\p@ }} \@writefile{lot}{\addvspace {10\p@ }} \@writefile{loliteral-block}{\addvspace {10\p@ }} \newlabel{impl_details:impl-details}{{17}{67}{Implementation Details}{chapter.17}{}} \newlabel{impl_details::doc}{{17}{67}{Implementation Details}{chapter.17}{}} \newlabel{impl_details:implementation-details}{{17}{67}{Implementation Details}{chapter.17}{}} \@writefile{toc}{\contentsline {section}{\numberline {17.1}Metadata Storage}{67}{section.17.1}} \newlabel{impl_details:metadata-storage}{{17.1}{67}{Metadata Storage}{section.17.1}{}} \@writefile{toc}{\contentsline {section}{\numberline {17.2}Data Storage}{67}{section.17.2}} \newlabel{impl_details:data-storage}{{17.2}{67}{Data Storage}{section.17.2}{}} \@writefile{toc}{\contentsline {section}{\numberline {17.3}Data De-Duplication}{68}{section.17.3}} \newlabel{impl_details:data-de-duplication}{{17.3}{68}{Data De-Duplication}{section.17.3}{}} \@writefile{toc}{\contentsline 
{section}{\numberline {17.4}Caching}{68}{section.17.4}} \newlabel{impl_details:caching}{{17.4}{68}{Caching}{section.17.4}{}} \@writefile{toc}{\contentsline {section}{\numberline {17.5}Eventual Consistency Handling}{68}{section.17.5}} \newlabel{impl_details:eventual-consistency-handling}{{17.5}{68}{Eventual Consistency Handling}{section.17.5}{}} \@writefile{toc}{\contentsline {section}{\numberline {17.6}Encryption}{68}{section.17.6}} \newlabel{impl_details:encryption}{{17.6}{68}{Encryption}{section.17.6}{}} s3ql-2.26/doc/latex/Makefile0000664000175000017500000000437313015321157017340 0ustar nikrationikratio00000000000000# Makefile for Sphinx LaTeX output ALLDOCS = $(basename $(wildcard *.tex)) ALLPDF = $(addsuffix .pdf,$(ALLDOCS)) ALLDVI = $(addsuffix .dvi,$(ALLDOCS)) # Prefix for archive names ARCHIVEPRREFIX = # Additional LaTeX options LATEXOPTS = # format: pdf or dvi FMT = pdf LATEX = latex PDFLATEX = pdflatex MAKEINDEX = makeindex all: $(ALLPDF) all-pdf: $(ALLPDF) all-dvi: $(ALLDVI) all-ps: all-dvi for f in *.dvi; do dvips $$f; done all-pdf-ja: for f in *.pdf *.png *.gif *.jpg *.jpeg; do extractbb $$f; done for f in *.tex; do platex -kanji=utf8 $(LATEXOPTS) $$f; done for f in *.tex; do platex -kanji=utf8 $(LATEXOPTS) $$f; done for f in *.tex; do platex -kanji=utf8 $(LATEXOPTS) $$f; done -for f in *.idx; do mendex -U -f -d "`basename $$f .idx`.dic" -s python.ist $$f; done for f in *.tex; do platex -kanji=utf8 $(LATEXOPTS) $$f; done for f in *.tex; do platex -kanji=utf8 $(LATEXOPTS) $$f; done for f in *.dvi; do dvipdfmx $$f; done zip: all-$(FMT) mkdir $(ARCHIVEPREFIX)docs-$(FMT) cp $(ALLPDF) $(ARCHIVEPREFIX)docs-$(FMT) zip -q -r -9 $(ARCHIVEPREFIX)docs-$(FMT).zip $(ARCHIVEPREFIX)docs-$(FMT) rm -r $(ARCHIVEPREFIX)docs-$(FMT) tar: all-$(FMT) mkdir $(ARCHIVEPREFIX)docs-$(FMT) cp $(ALLPDF) $(ARCHIVEPREFIX)docs-$(FMT) tar cf $(ARCHIVEPREFIX)docs-$(FMT).tar $(ARCHIVEPREFIX)docs-$(FMT) rm -r $(ARCHIVEPREFIX)docs-$(FMT) gz: tar gzip -9 < $(ARCHIVEPREFIX)docs-$(FMT).tar > $(ARCHIVEPREFIX)docs-$(FMT).tar.gz bz2: tar bzip2 -9 -k $(ARCHIVEPREFIX)docs-$(FMT).tar xz: tar xz -9 -k $(ARCHIVEPREFIX)docs-$(FMT).tar # The number of LaTeX runs is quite conservative, but I don't expect it # to get run often, so the little extra time won't hurt. %.dvi: %.tex $(LATEX) $(LATEXOPTS) '$<' $(LATEX) $(LATEXOPTS) '$<' $(LATEX) $(LATEXOPTS) '$<' -$(MAKEINDEX) -s python.ist '$(basename $<).idx' $(LATEX) $(LATEXOPTS) '$<' $(LATEX) $(LATEXOPTS) '$<' %.pdf: %.tex $(PDFLATEX) $(LATEXOPTS) '$<' $(PDFLATEX) $(LATEXOPTS) '$<' $(PDFLATEX) $(LATEXOPTS) '$<' -$(MAKEINDEX) -s python.ist '$(basename $<).idx' $(PDFLATEX) $(LATEXOPTS) '$<' $(PDFLATEX) $(LATEXOPTS) '$<' clean: rm -f *.log *.ind *.aux *.toc *.syn *.idx *.out *.ilg *.pla *.ps *.tar *.tar.gz *.tar.bz2 *.tar.xz $(ALLPDF) $(ALLDVI) .PHONY: all all-pdf all-dvi all-ps clean zip tar gz bz2 xz .PHONY: all-pdf-ja s3ql-2.26/doc/latex/fncychap.sty0000664000175000017500000004452712062620216020240 0ustar nikrationikratio00000000000000%%% Copyright Ulf A. Lindgren %%% %%% Note Premission is granted to modify this file under %%% the condition that it is saved using another %%% file and package name. %%% %%% Revision 1.1 (1997) %%% %%% Jan. 8th Modified package name base date option %%% Jan. 22th Modified FmN and FmTi for error in book.cls %%% \MakeUppercase{#}->{\MakeUppercase#} %%% Apr. 6th Modified Lenny option to prevent undesired %%% skip of line. %%% Nov. 8th Fixed \@chapapp for AMS %%% %%% Revision 1.2 (1998) %%% %%% Feb. 11th Fixed appendix problem related to Bjarne %%% Aug. 
11th Fixed problem related to 11pt and 12pt %%% suggested by Tomas Lundberg. THANKS! %%% %%% Revision 1.3 (2004) %%% Sep. 20th problem with frontmatter, mainmatter and %%% backmatter, pointed out by Lapo Mori %%% %%% Revision 1.31 (2004) %%% Sep. 21th problem with the Rejne definition streched text %%% caused ugly gaps in the vrule aligned with the title %%% text. Kindly pointed out to me by Hendri Adriaens %%% %%% Revision 1.32 (2005) %%% Jun. 23th compatibility problem with the KOMA class 'scrbook.cls' %%% a remedy is a redefinition of '\@schapter' in %%% line with that used in KOMA. The problem was pointed %%% out to me by Mikkel Holm Olsen %%% %%% Revision 1.33 (2005) %%% Aug. 9th misspelled ``TWELV'' corrected, the error was pointed %%% out to me by George Pearson %%% %%% Revision 1.34 (2007) %%% Added an alternative to Lenny provided by Peter %%% Osborne (2005-11-28) %%% Corrected front, main and back matter, based on input %%% from Bas van Gils (2006-04-24) %%% Jul. 30th Added Bjornstrup option provided by Jean-Marc %%% Francois (2007-01-05). %%% Reverted to \MakeUppercase{#} see rev 1.1, solved %%% problem with MakeUppercase and MakeLowercase pointed %%% out by Marco Feuerstein (2007-06-06) %%% Last modified Jul. 2007 \NeedsTeXFormat{LaTeX2e}[1995/12/01] \ProvidesPackage{fncychap} [2007/07/30 v1.34 LaTeX package (Revised chapters)] %%%% For conditional inclusion of color \newif\ifusecolor \usecolorfalse %%%% DEFINITION OF Chapapp variables \newcommand{\CNV}{\huge\bfseries} \newcommand{\ChNameVar}[1]{\renewcommand{\CNV}{#1}} %%%% DEFINITION OF TheChapter variables \newcommand{\CNoV}{\huge\bfseries} \newcommand{\ChNumVar}[1]{\renewcommand{\CNoV}{#1}} \newif\ifUCN \UCNfalse \newif\ifLCN \LCNfalse \def\ChNameLowerCase{\LCNtrue\UCNfalse} \def\ChNameUpperCase{\UCNtrue\LCNfalse} \def\ChNameAsIs{\UCNfalse\LCNfalse} %%%%% Fix for AMSBook 971008 \@ifundefined{@chapapp}{\let\@chapapp\chaptername}{} %%%%% Fix for Bjarne and appendix 980211 \newif\ifinapp \inappfalse \renewcommand\appendix{\par \setcounter{chapter}{0}% \setcounter{section}{0}% \inapptrue% \renewcommand\@chapapp{\appendixname}% \renewcommand\thechapter{\@Alph\c@chapter}} %%%%% Fix for frontmatter, mainmatter, and backmatter 040920 \@ifundefined{@mainmatter}{\newif\if@mainmatter \@mainmattertrue}{} %%%%% \newcommand{\FmN}[1]{% \ifUCN {\MakeUppercase{#1}}\LCNfalse \else \ifLCN {\MakeLowercase{#1}}\UCNfalse \else #1 \fi \fi} %%%% DEFINITION OF Title variables \newcommand{\CTV}{\Huge\bfseries} \newcommand{\ChTitleVar}[1]{\renewcommand{\CTV}{#1}} %%%% DEFINITION OF the basic rule width \newlength{\RW} \setlength{\RW}{1pt} \newcommand{\ChRuleWidth}[1]{\setlength{\RW}{#1}} \newif\ifUCT \UCTfalse \newif\ifLCT \LCTfalse \def\ChTitleLowerCase{\LCTtrue\UCTfalse} \def\ChTitleUpperCase{\UCTtrue\LCTfalse} \def\ChTitleAsIs{\UCTfalse\LCTfalse} \newcommand{\FmTi}[1]{% \ifUCT {\MakeUppercase{#1}}\LCTfalse \else \ifLCT {\MakeLowercase{#1}}\UCTfalse \else {#1} \fi \fi} \newlength{\mylen} \newlength{\myhi} \newlength{\px} \newlength{\py} \newlength{\pyy} \newlength{\pxx} \def\mghrulefill#1{\leavevmode\leaders\hrule\@height #1\hfill\kern\z@} \newcommand{\DOCH}{% \CNV\FmN{\@chapapp}\space \CNoV\thechapter \par\nobreak \vskip 20\p@ } \newcommand{\DOTI}[1]{% \CTV\FmTi{#1}\par\nobreak \vskip 40\p@ } \newcommand{\DOTIS}[1]{% \CTV\FmTi{#1}\par\nobreak \vskip 40\p@ } %%%%%% SONNY DEF \DeclareOption{Sonny}{% \ChNameVar{\Large\sf} \ChNumVar{\Huge} \ChTitleVar{\Large\sf} \ChRuleWidth{0.5pt} \ChNameUpperCase \renewcommand{\DOCH}{% \raggedleft 
\CNV\FmN{\@chapapp}\space \CNoV\thechapter \par\nobreak \vskip 40\p@} \renewcommand{\DOTI}[1]{% \CTV\raggedleft\mghrulefill{\RW}\par\nobreak \vskip 5\p@ \CTV\FmTi{#1}\par\nobreak \mghrulefill{\RW}\par\nobreak \vskip 40\p@} \renewcommand{\DOTIS}[1]{% \CTV\raggedleft\mghrulefill{\RW}\par\nobreak \vskip 5\p@ \CTV\FmTi{#1}\par\nobreak \mghrulefill{\RW}\par\nobreak \vskip 40\p@} } %%%%%% LENNY DEF \DeclareOption{Lenny}{% \ChNameVar{\fontsize{14}{16}\usefont{OT1}{phv}{m}{n}\selectfont} \ChNumVar{\fontsize{60}{62}\usefont{OT1}{ptm}{m}{n}\selectfont} \ChTitleVar{\Huge\bfseries\rm} \ChRuleWidth{1pt} \renewcommand{\DOCH}{% \settowidth{\px}{\CNV\FmN{\@chapapp}} \addtolength{\px}{2pt} \settoheight{\py}{\CNV\FmN{\@chapapp}} \addtolength{\py}{1pt} \settowidth{\mylen}{\CNV\FmN{\@chapapp}\space\CNoV\thechapter} \addtolength{\mylen}{1pt} \settowidth{\pxx}{\CNoV\thechapter} \addtolength{\pxx}{-1pt} \settoheight{\pyy}{\CNoV\thechapter} \addtolength{\pyy}{-2pt} \setlength{\myhi}{\pyy} \addtolength{\myhi}{-1\py} \par \parbox[b]{\textwidth}{% \rule[\py]{\RW}{\myhi}% \hskip -\RW% \rule[\pyy]{\px}{\RW}% \hskip -\px% \raggedright% \CNV\FmN{\@chapapp}\space\CNoV\thechapter% \hskip1pt% \mghrulefill{\RW}% \rule{\RW}{\pyy}\par\nobreak% \vskip -\baselineskip% \vskip -\pyy% \hskip \mylen% \mghrulefill{\RW}\par\nobreak% \vskip \pyy}% \vskip 20\p@} \renewcommand{\DOTI}[1]{% \raggedright \CTV\FmTi{#1}\par\nobreak \vskip 40\p@} \renewcommand{\DOTIS}[1]{% \raggedright \CTV\FmTi{#1}\par\nobreak \vskip 40\p@} } %%%%%% Peter Osbornes' version of LENNY DEF \DeclareOption{PetersLenny}{% % five new lengths \newlength{\bl} % bottom left : orig \space \setlength{\bl}{6pt} \newcommand{\BL}[1]{\setlength{\bl}{#1}} \newlength{\br} % bottom right : orig 1pt \setlength{\br}{1pt} \newcommand{\BR}[1]{\setlength{\br}{#1}} \newlength{\tl} % top left : orig 2pt \setlength{\tl}{2pt} \newcommand{\TL}[1]{\setlength{\tl}{#1}} \newlength{\trr} % top right :orig 1pt \setlength{\trr}{1pt} \newcommand{\TR}[1]{\setlength{\trr}{#1}} \newlength{\blrule} % top right :orig 1pt \setlength{\trr}{0pt} \newcommand{\BLrule}[1]{\setlength{\blrule}{#1}} \ChNameVar{\fontsize{14}{16}\usefont{OT1}{phv}{m}{n}\selectfont} \ChNumVar{\fontsize{60}{62}\usefont{OT1}{ptm}{m}{n}\selectfont} \ChTitleVar{\Huge\bfseries\rm} \ChRuleWidth{1pt} \renewcommand{\DOCH}{% %%%%%%% tweaks for 1--9 and A--Z \ifcase\c@chapter\relax% \or\BL{-3pt}\TL{-4pt}\BR{0pt}\TR{-6pt}%1 \or\BL{0pt}\TL{-4pt}\BR{2pt}\TR{-4pt}%2 \or\BL{0pt}\TL{-4pt}\BR{2pt}\TR{-4pt}%3 \or\BL{0pt}\TL{5pt}\BR{2pt}\TR{-4pt}%4 \or\BL{0pt}\TL{3pt}\BR{2pt}\TR{-4pt}%5 \or\BL{-1pt}\TL{0pt}\BR{2pt}\TR{-2pt}%6 \or\BL{0pt}\TL{-3pt}\BR{2pt}\TR{-2pt}%7 \or\BL{0pt}\TL{-3pt}\BR{2pt}\TR{-2pt}%8 \or\BL{0pt}\TL{-3pt}\BR{-4pt}\TR{-2pt}%9 \or\BL{-3pt}\TL{-3pt}\BR{2pt}\TR{-7pt}%10 \or\BL{-6pt}\TL{-6pt}\BR{0pt}\TR{-9pt}%11 \or\BL{-6pt}\TL{-6pt}\BR{2pt}\TR{-7pt}%12 \or\BL{-5pt}\TL{-5pt}\BR{0pt}\TR{-9pt}%13 \or\BL{-6pt}\TL{-6pt}\BR{0pt}\TR{-9pt}%14 \or\BL{-3pt}\TL{-3pt}\BR{3pt}\TR{-6pt}%15 \or\BL{-3pt}\TL{-3pt}\BR{3pt}\TR{-6pt}%16 \or\BL{-5pt}\TL{-3pt}\BR{-8pt}\TR{-6pt}%17 \or\BL{-5pt}\TL{-5pt}\BR{0pt}\TR{-9pt}%18 \or\BL{-3pt}\TL{-3pt}\BR{-6pt}\TR{-9pt}%19 \or\BL{0pt}\TL{0pt}\BR{0pt}\TR{-5pt}%20 \fi \ifinapp\ifcase\c@chapter\relax% \or\BL{0pt}\TL{14pt}\BR{5pt}\TR{-19pt}%A \or\BL{0pt}\TL{-5pt}\BR{-3pt}\TR{-8pt}%B \or\BL{-3pt}\TL{-2pt}\BR{1pt}\TR{-6pt}\BLrule{0pt}%C \or\BL{0pt}\TL{-5pt}\BR{-3pt}\TR{-8pt}\BLrule{0pt}%D \or\BL{0pt}\TL{-5pt}\BR{2pt}\TR{-3pt}%E \or\BL{0pt}\TL{-5pt}\BR{-10pt}\TR{-1pt}%F \or\BL{-3pt}\TL{0pt}\BR{0pt}\TR{-7pt}%G 
\or\BL{0pt}\TL{-5pt}\BR{3pt}\TR{-1pt}%H \or\BL{0pt}\TL{-5pt}\BR{3pt}\TR{-1pt}%I \or\BL{2pt}\TL{0pt}\BR{-3pt}\TR{1pt}%J \or\BL{0pt}\TL{-5pt}\BR{3pt}\TR{-1pt}%K \or\BL{0pt}\TL{-5pt}\BR{2pt}\TR{-19pt}%L \or\BL{0pt}\TL{-5pt}\BR{3pt}\TR{-1pt}%M \or\BL{0pt}\TL{-5pt}\BR{-2pt}\TR{-1pt}%N \or\BL{-3pt}\TL{-2pt}\BR{-3pt}\TR{-11pt}%O \or\BL{0pt}\TL{-5pt}\BR{-9pt}\TR{-3pt}%P \or\BL{-3pt}\TL{-2pt}\BR{-3pt}\TR{-11pt}%Q \or\BL{0pt}\TL{-5pt}\BR{4pt}\TR{-8pt}%R \or\BL{-2pt}\TL{-2pt}\BR{-2pt}\TR{-7pt}%S \or\BL{-3pt}\TL{0pt}\BR{-5pt}\TR{4pt}\BLrule{8pt}%T \or\BL{-7pt}\TL{-11pt}\BR{-5pt}\TR{-7pt}\BLrule{0pt}%U \or\BL{-14pt}\TL{-5pt}\BR{-14pt}\TR{-1pt}\BLrule{14pt}%V \or\BL{-10pt}\TL{-9pt}\BR{-13pt}\TR{-3pt}\BLrule{7pt}%W \or\BL{0pt}\TL{-5pt}\BR{3pt}\TR{-1pt}\BLrule{0pt}%X \or\BL{-6pt}\TL{-4pt}\BR{-7pt}\TR{1pt}\BLrule{7pt}%Y \or\BL{0pt}\TL{-5pt}\BR{3pt}\TR{-1pt}\BLrule{0pt}%Z \fi\fi %%%%%%% \settowidth{\px}{\CNV\FmN{\@chapapp}} \addtolength{\px}{\tl} %MOD change 2pt to \tl \settoheight{\py}{\CNV\FmN{\@chapapp}} \addtolength{\py}{1pt} \settowidth{\mylen}{\CNV\FmN{\@chapapp}\space\CNoV\thechapter} \addtolength{\mylen}{\trr}% MOD change 1pt to \tr \settowidth{\pxx}{\CNoV\thechapter} \addtolength{\pxx}{-1pt} \settoheight{\pyy}{\CNoV\thechapter} \addtolength{\pyy}{-2pt} \setlength{\myhi}{\pyy} \addtolength{\myhi}{-1\py} \par \parbox[b]{\textwidth}{% \rule[\py]{\RW}{\myhi}% \hskip -\RW% \rule[\pyy]{\px}{\RW}% \hskip -\px% \raggedright% \CNV\FmN{\@chapapp}\rule{\blrule}{\RW}\hskip\bl\CNoV\thechapter%MOD % \CNV\FmN{\@chapapp}\space\CNoV\thechapter %ORIGINAL \hskip\br% %MOD 1pt to \br \mghrulefill{\RW}% \rule{\RW}{\pyy}\par\nobreak% \vskip -\baselineskip% \vskip -\pyy% \hskip \mylen% \mghrulefill{\RW}\par\nobreak% \vskip \pyy}% \vskip 20\p@} \renewcommand{\DOTI}[1]{% \raggedright \CTV\FmTi{#1}\par\nobreak \vskip 40\p@} \renewcommand{\DOTIS}[1]{% \raggedright \CTV\FmTi{#1}\par\nobreak \vskip 40\p@} } % %%%%%% BJORNSTRUP DEF \DeclareOption{Bjornstrup}{% \usecolortrue % pzc (Zapf Chancelery) is nice. ppl (Palatino) is cool too. 
\ChNumVar{\fontsize{76}{80}\usefont{OT1}{pzc}{m}{n}\selectfont} \ChTitleVar{\raggedleft\Large\sffamily\bfseries} \setlength{\myhi}{10pt} % Space between grey box border and text \setlength{\mylen}{\textwidth} \addtolength{\mylen}{-2\myhi} \renewcommand{\DOCH}{% \settowidth{\py}{\CNoV\thechapter} \addtolength{\py}{-10pt} % Amount of space by which the % % number is shifted right \fboxsep=0pt% \colorbox[gray]{.85}{\rule{0pt}{40pt}\parbox[b]{\textwidth}{\hfill}}% \kern-\py\raise20pt% \hbox{\color[gray]{.5}\CNoV\thechapter}\\% } \renewcommand{\DOTI}[1]{% \nointerlineskip\raggedright% \fboxsep=\myhi% \vskip-1ex% \colorbox[gray]{.85}{\parbox[t]{\mylen}{\CTV\FmTi{#1}}}\par\nobreak% \vskip 40\p@% } \renewcommand{\DOTIS}[1]{% \fboxsep=0pt \colorbox[gray]{.85}{\rule{0pt}{40pt}\parbox[b]{\textwidth}{\hfill}}\\% \nointerlineskip\raggedright% \fboxsep=\myhi% \colorbox[gray]{.85}{\parbox[t]{\mylen}{\CTV\FmTi{#1}}}\par\nobreak% \vskip 40\p@% } } %%%%%%% GLENN DEF \DeclareOption{Glenn}{% \ChNameVar{\bfseries\Large\sf} \ChNumVar{\Huge} \ChTitleVar{\bfseries\Large\rm} \ChRuleWidth{1pt} \ChNameUpperCase \ChTitleUpperCase \renewcommand{\DOCH}{% \settoheight{\myhi}{\CTV\FmTi{Test}} \setlength{\py}{\baselineskip} \addtolength{\py}{\RW} \addtolength{\py}{\myhi} \setlength{\pyy}{\py} \addtolength{\pyy}{-1\RW} \raggedright \CNV\FmN{\@chapapp}\space\CNoV\thechapter \hskip 3pt\mghrulefill{\RW}\rule[-1\pyy]{2\RW}{\py}\par\nobreak} \renewcommand{\DOTI}[1]{% \addtolength{\pyy}{-4pt} \settoheight{\myhi}{\CTV\FmTi{#1}} \addtolength{\myhi}{\py} \addtolength{\myhi}{-1\RW} \vskip -1\pyy \rule{2\RW}{\myhi}\mghrulefill{\RW}\hskip 2pt \raggedleft\CTV\FmTi{#1}\par\nobreak \vskip 80\p@} \newlength{\backskip} \renewcommand{\DOTIS}[1]{% % \setlength{\py}{10pt} % \setlength{\pyy}{\py} % \addtolength{\pyy}{\RW} % \setlength{\myhi}{\baselineskip} % \addtolength{\myhi}{\pyy} % \mghrulefill{\RW}\rule[-1\py]{2\RW}{\pyy}\par\nobreak % \addtolength{}{} %\vskip -1\baselineskip % \rule{2\RW}{\myhi}\mghrulefill{\RW}\hskip 2pt % \raggedleft\CTV\FmTi{#1}\par\nobreak % \vskip 60\p@} %% Fix suggested by Tomas Lundberg \setlength{\py}{25pt} % eller vad man vill \setlength{\pyy}{\py} \setlength{\backskip}{\py} \addtolength{\backskip}{2pt} \addtolength{\pyy}{\RW} \setlength{\myhi}{\baselineskip} \addtolength{\myhi}{\pyy} \mghrulefill{\RW}\rule[-1\py]{2\RW}{\pyy}\par\nobreak \vskip -1\backskip \rule{2\RW}{\myhi}\mghrulefill{\RW}\hskip 3pt % \raggedleft\CTV\FmTi{#1}\par\nobreak \vskip 40\p@} } %%%%%%% CONNY DEF \DeclareOption{Conny}{% \ChNameUpperCase \ChTitleUpperCase \ChNameVar{\centering\Huge\rm\bfseries} \ChNumVar{\Huge} \ChTitleVar{\centering\Huge\rm} \ChRuleWidth{2pt} \renewcommand{\DOCH}{% \mghrulefill{3\RW}\par\nobreak \vskip -0.5\baselineskip \mghrulefill{\RW}\par\nobreak \CNV\FmN{\@chapapp}\space \CNoV\thechapter \par\nobreak \vskip -0.5\baselineskip } \renewcommand{\DOTI}[1]{% \mghrulefill{\RW}\par\nobreak \CTV\FmTi{#1}\par\nobreak \vskip 60\p@ } \renewcommand{\DOTIS}[1]{% \mghrulefill{\RW}\par\nobreak \CTV\FmTi{#1}\par\nobreak \vskip 60\p@ } } %%%%%%% REJNE DEF \DeclareOption{Rejne}{% \ChNameUpperCase \ChTitleUpperCase \ChNameVar{\centering\Large\rm} \ChNumVar{\Huge} \ChTitleVar{\centering\Huge\rm} \ChRuleWidth{1pt} \renewcommand{\DOCH}{% \settoheight{\py}{\CNoV\thechapter} \parskip=0pt plus 1pt % Set parskip to default, just in case v1.31 \addtolength{\py}{-1pt} \CNV\FmN{\@chapapp}\par\nobreak \vskip 20\p@ \setlength{\myhi}{2\baselineskip} \setlength{\px}{\myhi} \addtolength{\px}{-1\RW} \rule[-1\px]{\RW}{\myhi}\mghrulefill{\RW}\hskip 
10pt\raisebox{-0.5\py}{\CNoV\thechapter}\hskip 10pt\mghrulefill{\RW}\rule[-1\px]{\RW}{\myhi}\par\nobreak \vskip -3\p@% Added -2pt vskip to correct for streched text v1.31 } \renewcommand{\DOTI}[1]{% \setlength{\mylen}{\textwidth} \parskip=0pt plus 1pt % Set parskip to default, just in case v1.31 \addtolength{\mylen}{-2\RW} {\vrule width\RW}\parbox{\mylen}{\CTV\FmTi{#1}}{\vrule width\RW}\par\nobreak% \vskip -3pt\rule{\RW}{2\baselineskip}\mghrulefill{\RW}\rule{\RW}{2\baselineskip}% \vskip 60\p@% Added -2pt in vskip to correct for streched text v1.31 } \renewcommand{\DOTIS}[1]{% \setlength{\py}{\fboxrule} \setlength{\fboxrule}{\RW} \setlength{\mylen}{\textwidth} \addtolength{\mylen}{-2\RW} \fbox{\parbox{\mylen}{\vskip 2\baselineskip\CTV\FmTi{#1}\par\nobreak\vskip \baselineskip}} \setlength{\fboxrule}{\py} \vskip 60\p@ } } %%%%%%% BJARNE DEF \DeclareOption{Bjarne}{% \ChNameUpperCase \ChTitleUpperCase \ChNameVar{\raggedleft\normalsize\rm} \ChNumVar{\raggedleft \bfseries\Large} \ChTitleVar{\raggedleft \Large\rm} \ChRuleWidth{1pt} %% Note thechapter -> c@chapter fix appendix bug %% Fixed misspelled 12 \newcounter{AlphaCnt} \newcounter{AlphaDecCnt} \newcommand{\AlphaNo}{% \ifcase\number\theAlphaCnt \ifnum\c@chapter=0 ZERO\else{}\fi \or ONE\or TWO\or THREE\or FOUR\or FIVE \or SIX\or SEVEN\or EIGHT\or NINE\or TEN \or ELEVEN\or TWELVE\or THIRTEEN\or FOURTEEN\or FIFTEEN \or SIXTEEN\or SEVENTEEN\or EIGHTEEN\or NINETEEN\fi } \newcommand{\AlphaDecNo}{% \setcounter{AlphaDecCnt}{0} \@whilenum\number\theAlphaCnt>0\do {\addtocounter{AlphaCnt}{-10} \addtocounter{AlphaDecCnt}{1}} \ifnum\number\theAlphaCnt=0 \else \addtocounter{AlphaDecCnt}{-1} \addtocounter{AlphaCnt}{10} \fi \ifcase\number\theAlphaDecCnt\or TEN\or TWENTY\or THIRTY\or FORTY\or FIFTY\or SIXTY\or SEVENTY\or EIGHTY\or NINETY\fi } \newcommand{\TheAlphaChapter}{% \ifinapp \thechapter \else \setcounter{AlphaCnt}{\c@chapter} \ifnum\c@chapter<20 \AlphaNo \else \AlphaDecNo\AlphaNo \fi \fi } \renewcommand{\DOCH}{% \mghrulefill{\RW}\par\nobreak \CNV\FmN{\@chapapp}\par\nobreak \CNoV\TheAlphaChapter\par\nobreak \vskip -1\baselineskip\vskip 5pt\mghrulefill{\RW}\par\nobreak \vskip 20\p@ } \renewcommand{\DOTI}[1]{% \CTV\FmTi{#1}\par\nobreak \vskip 40\p@ } \renewcommand{\DOTIS}[1]{% \CTV\FmTi{#1}\par\nobreak \vskip 40\p@ } } \DeclareOption*{% \PackageWarning{fancychapter}{unknown style option} } \ProcessOptions* \relax \ifusecolor \RequirePackage{color} \fi \def\@makechapterhead#1{% \vspace*{50\p@}% {\parindent \z@ \raggedright \normalfont \ifnum \c@secnumdepth >\m@ne \if@mainmatter%%%%% Fix for frontmatter, mainmatter, and backmatter 040920 \DOCH \fi \fi \interlinepenalty\@M \if@mainmatter%%%%% Fix for frontmatter, mainmatter, and backmatter 060424 \DOTI{#1}% \else% \DOTIS{#1}% \fi }} %%% Begin: To avoid problem with scrbook.cls (fncychap version 1.32) %%OUT: %\def\@schapter#1{\if@twocolumn % \@topnewpage[\@makeschapterhead{#1}]% % \else % \@makeschapterhead{#1}% % \@afterheading % \fi} %%IN: \def\@schapter#1{% \if@twocolumn% \@makeschapterhead{#1}% \else% \@makeschapterhead{#1}% \@afterheading% \fi} %%% End: To avoid problem with scrbook.cls (fncychap version 1.32) \def\@makeschapterhead#1{% \vspace*{50\p@}% {\parindent \z@ \raggedright \normalfont \interlinepenalty\@M \DOTIS{#1} \vskip 40\p@ }} \endinput s3ql-2.26/doc/latex/sphinx.sty0000664000175000017500000010605213015321157017747 0ustar nikrationikratio00000000000000% % sphinx.sty % % Adapted from the old python.sty, mostly written by Fred Drake, % by Georg Brandl. 
% \NeedsTeXFormat{LaTeX2e}[1995/12/01] \ProvidesPackage{sphinx}[2010/01/15 LaTeX package (Sphinx markup)] % this is the \ltx@ifundefined of ltxcmds.sty, which is loaded by % hyperref.sty, but we need it before, and initial ltxcmds.sty % as in TL2009/Debian had wrong definition. \newcommand{\spx@ifundefined}[1]{% \ifcsname #1\endcsname \expandafter\ifx\csname #1\endcsname\relax \expandafter\expandafter\expandafter\@firstoftwo \else \expandafter\expandafter\expandafter\@secondoftwo \fi \else \expandafter\@firstoftwo \fi } \@ifclassloaded{memoir}{}{\RequirePackage{fancyhdr}} % for \text macro and \iffirstchoice@ conditional even if amsmath not loaded \RequirePackage{amstext} \RequirePackage{textcomp} % fancybox not used anymore and will be removed at Sphinx-1.5 \RequirePackage{fancybox} \RequirePackage{titlesec} \RequirePackage{tabulary} \RequirePackage{makeidx} % For framing code-blocks and warning type notices, and shadowing topics \RequirePackage{framed} \newif\ifspx@inframed % flag set if we are in a framed environment % ifthen not used anymore and will be removed at Sphinx-1.5 \RequirePackage{ifthen} % The xcolor package draws better fcolorboxes around verbatim code \IfFileExists{xcolor.sty}{ \RequirePackage{xcolor} }{ \RequirePackage{color} } % For highlighted code. \RequirePackage{fancyvrb} % For table captions. \RequirePackage{threeparttable} % Handle footnotes in tables. \RequirePackage{footnote} \makesavenoteenv{tabulary} % For floating figures in the text. \RequirePackage{wrapfig} % Separate paragraphs by space by default. \RequirePackage{parskip} % For parsed-literal blocks. \RequirePackage{alltt} % Display "real" single quotes in literal blocks. \RequirePackage{upquote} % For the H specifier. Do not \restylefloat{figure}, it breaks Sphinx code % for allowing figures in tables. \RequirePackage{float} % Redefine these colors to your liking in the preamble. \definecolor{TitleColor}{rgb}{0.126,0.263,0.361} \definecolor{InnerLinkColor}{rgb}{0.208,0.374,0.486} \definecolor{OuterLinkColor}{rgb}{0.216,0.439,0.388} % Redefine these colors to something if you want to have colored % background and border for code examples. \definecolor{VerbatimColor}{rgb}{1,1,1} \definecolor{VerbatimBorderColor}{rgb}{0,0,0} % Uncomment these two lines to ignore the paper size and make the page % size more like a typical published manual. 
%\renewcommand{\paperheight}{9in} %\renewcommand{\paperwidth}{8.5in} % typical squarish manual %\renewcommand{\paperwidth}{7in} % O'Reilly ``Programmming Python'' % use pdfoutput for pTeX and dvipdfmx % when pTeX (\kanjiskip is defined), set pdfoutput to evade \include{pdfcolor} \ifx\kanjiskip\undefined\else \newcount\pdfoutput\pdfoutput=0 \fi \RequirePackage{graphicx} % for PDF output, use colors and maximal compression \newif\ifsphinxpdfoutput % used in \maketitle \ifx\pdfoutput\undefined\else \ifnum\pdfoutput=\z@ \let\py@NormalColor\relax \let\py@TitleColor\relax \else \sphinxpdfoutputtrue \input{pdfcolor} \def\py@NormalColor{\color[rgb]{0.0,0.0,0.0}} \def\py@TitleColor{\color{TitleColor}} \pdfcompresslevel=9 \fi \fi % XeLaTeX can do colors, too \ifx\XeTeXrevision\undefined\else \def\py@NormalColor{\color[rgb]{0.0,0.0,0.0}} \def\py@TitleColor{\color{TitleColor}} \fi % Increase printable page size (copied from fullpage.sty) \topmargin 0pt \advance \topmargin by -\headheight \advance \topmargin by -\headsep % attempt to work a little better for A4 users \textheight \paperheight \advance\textheight by -2in \oddsidemargin 0pt \evensidemargin 0pt %\evensidemargin -.25in % for ``manual size'' documents \marginparwidth 0.5in \textwidth \paperwidth \advance\textwidth by -2in % Style parameters and macros used by most documents here \raggedbottom \sloppy \hbadness = 5000 % don't print trivial gripes \pagestyle{empty} % start this way % Use this to set the font family for headers and other decor: \newcommand{\py@HeaderFamily}{\sffamily\bfseries} \newcommand{\sphinxSetHeaderFamily}[1]{\renewcommand{\py@HeaderFamily}{#1}} % Redefine the 'normal' header/footer style when using "fancyhdr" package: \spx@ifundefined{fancyhf}{}{ % Use \pagestyle{normal} as the primary pagestyle for text. \fancypagestyle{normal}{ \fancyhf{} \fancyfoot[LE,RO]{{\py@HeaderFamily\thepage}} \fancyfoot[LO]{{\py@HeaderFamily\nouppercase{\rightmark}}} \fancyfoot[RE]{{\py@HeaderFamily\nouppercase{\leftmark}}} \fancyhead[LE,RO]{{\py@HeaderFamily \@title, \py@release}} \renewcommand{\headrulewidth}{0.4pt} \renewcommand{\footrulewidth}{0.4pt} % define chaptermark with \@chappos when \@chappos is available for Japanese \spx@ifundefined{@chappos}{} {\def\chaptermark##1{\markboth{\@chapapp\space\thechapter\space\@chappos\space ##1}{}}} } % Update the plain style so we get the page number & footer line, % but not a chapter or section title. This is to keep the first % page of a chapter and the blank page between chapters `clean.' \fancypagestyle{plain}{ \fancyhf{} \fancyfoot[LE,RO]{{\py@HeaderFamily\thepage}} \renewcommand{\headrulewidth}{0pt} \renewcommand{\footrulewidth}{0.4pt} } } % Some custom font markup commands. % *** the macros without \sphinx prefix are still defined at bottom of file *** \newcommand{\sphinxstrong}[1]{{\textbf{#1}}} % let \sphinxcode and \sphinxbfcode use straight quotes. \@noligs patched by upquote, % but needs protection in "moving arguments" such as for captions. % Use \scantokens to handle e.g. 
\item[{\sphinxcode{'fontenc'}}] \DeclareRobustCommand{\sphinxcode}[1]{{\@noligs\scantokens{\texttt{#1}\relax}}} \newcommand{\sphinxbfcode}[1]{\sphinxcode{\bfseries#1}} \newcommand{\sphinxemail}[1]{\textsf{#1}} \newcommand{\sphinxtablecontinued}[1]{\textsf{#1}} \newcommand{\sphinxtitleref}[1]{\emph{#1}} \newcommand{\sphinxmenuselection}[1]{\emph{#1}} \newcommand{\sphinxaccelerator}[1]{\underline{#1}} \newcommand{\sphinxcrossref}[1]{\emph{#1}} \newcommand{\sphinxtermref}[1]{\emph{#1}} % miscellaneous related to footnotes \newcommand*{\sphinxAtStartFootnote}{\mbox{ }} % Support large numbered footnotes in minipage (cf. admonitions) \def\thempfootnote{\arabic{mpfootnote}} % Redefine the Verbatim environment to allow border and background colors % and to handle the top caption in a non separable by pagebreak way. % The original environment is still used for verbatims within tables. \let\OriginalVerbatim=\Verbatim \let\endOriginalVerbatim=\endVerbatim \newcommand\spx@colorbox [2]{% % #1 will be \fcolorbox or, for first part of frame: \spx@fcolorbox % let the framing obey the current indentation (adapted from framed.sty's code). \hskip\@totalleftmargin \hskip-\fboxsep\hskip-\fboxrule #1{VerbatimBorderColor}{VerbatimColor}{#2}% \hskip-\fboxsep\hskip-\fboxrule \hskip-\linewidth \hskip-\@totalleftmargin \hskip\columnwidth } % use of \color@b@x here is compatible with both xcolor.sty and color.sty \def\spx@fcolorbox #1#2% {\color@b@x {\fboxsep\z@\color{#1}\spx@VerbatimFBox}{\color{#2}}}% % The title is specified from outside as macro \sphinxVerbatimTitle. % \sphinxVerbatimTitle is reset to empty after each use of Verbatim. \newcommand*\sphinxVerbatimTitle {} % Holder macro for labels of literal blocks. Set-up by LaTeX writer. \newcommand*\sphinxLiteralBlockLabel {} \newcommand*\sphinxSetupCaptionForVerbatim [2] {% \needspace{\sphinxliteralblockneedspace}% % insert a \label via \sphinxLiteralBlockLabel % reset to normal the color for the literal block caption % the caption inserts \abovecaptionskip whitespace above itself (usually 10pt) % there is also \belowcaptionskip but it is usually zero, hence the \smallskip \def\sphinxVerbatimTitle {\py@NormalColor\captionof{#1}{\sphinxLiteralBlockLabel #2}\smallskip }% } % Inspired and adapted from framed.sty's \CustomFBox with extra handling % of a non separable by pagebreak caption, and controlled counter stepping. 
\newif\ifspx@myfirstframedpass \long\def\spx@VerbatimFBox#1{% \leavevmode \begingroup % framed.sty does some measuring but this macro adds possibly a caption % use amsmath conditional to inhibit the caption counter stepping after % first pass \ifspx@myfirstframedpass\else\firstchoice@false\fi \setbox\@tempboxa\hbox{\kern\fboxsep{#1}\kern\fboxsep}% \hbox {\lower\dimexpr\fboxrule+\fboxsep+\dp\@tempboxa \hbox{% \vbox{\ifx\sphinxVerbatimTitle\empty\else % add the caption in a centered way above possibly indented frame % hide its width from framed.sty's measuring step % note that the caption brings \abovecaptionskip top vertical space \moveright\dimexpr\fboxrule+.5\wd\@tempboxa \hb@xt@\z@{\hss\begin{minipage}{\wd\@tempboxa}% \sphinxVerbatimTitle \end{minipage}\hss}\fi % draw frame border _latest_ to avoid pdf viewer issue \kern\fboxrule \hbox{\kern\fboxrule \vbox{\vskip\fboxsep\copy\@tempboxa\vskip\fboxsep}% \kern-\wd\@tempboxa\kern-\fboxrule \vrule\@width\fboxrule \kern\wd\@tempboxa \vrule\@width\fboxrule}% \kern-\dimexpr\fboxsep+\ht\@tempboxa+\dp\@tempboxa +\fboxsep+\fboxrule\relax \hrule\@height\fboxrule \kern\dimexpr\fboxsep+\ht\@tempboxa+\dp\@tempboxa+\fboxsep\relax \hrule\@height\fboxrule}% }}% \endgroup \global\spx@myfirstframedpassfalse } % For linebreaks inside Verbatim environment from package fancyvrb. \newbox\sphinxcontinuationbox \newbox\sphinxvisiblespacebox % These are user customizable e.g. from latex_elements's preamble key. % Use of \textvisiblespace for compatibility with XeTeX/LuaTeX/fontspec. \newcommand*\sphinxvisiblespace {\textcolor{red}{\textvisiblespace}} \newcommand*\sphinxcontinuationsymbol {\textcolor{red}{\llap{\tiny$\m@th\hookrightarrow$}}} \newcommand*\sphinxcontinuationindent {3ex } \newcommand*\sphinxafterbreak {\kern\sphinxcontinuationindent\copy\sphinxcontinuationbox} % Take advantage of the already applied Pygments mark-up to insert % potential linebreaks for TeX processing. % {, <, #, %, $, ' and ": go to next line. % _, }, ^, &, >, - and ~: stay at end of broken line. % Use of \textquotesingle for straight quote. \newcommand*\sphinxbreaksatspecials {% \def\PYGZus{\discretionary{\char`\_}{\sphinxafterbreak}{\char`\_}}% \def\PYGZob{\discretionary{}{\sphinxafterbreak\char`\{}{\char`\{}}% \def\PYGZcb{\discretionary{\char`\}}{\sphinxafterbreak}{\char`\}}}% \def\PYGZca{\discretionary{\char`\^}{\sphinxafterbreak}{\char`\^}}% \def\PYGZam{\discretionary{\char`\&}{\sphinxafterbreak}{\char`\&}}% \def\PYGZlt{\discretionary{}{\sphinxafterbreak\char`\<}{\char`\<}}% \def\PYGZgt{\discretionary{\char`\>}{\sphinxafterbreak}{\char`\>}}% \def\PYGZsh{\discretionary{}{\sphinxafterbreak\char`\#}{\char`\#}}% \def\PYGZpc{\discretionary{}{\sphinxafterbreak\char`\%}{\char`\%}}% \def\PYGZdl{\discretionary{}{\sphinxafterbreak\char`\$}{\char`\$}}% \def\PYGZhy{\discretionary{\char`\-}{\sphinxafterbreak}{\char`\-}}% \def\PYGZsq{\discretionary{}{\sphinxafterbreak\textquotesingle}{\textquotesingle}}% \def\PYGZdq{\discretionary{}{\sphinxafterbreak\char`\"}{\char`\"}}% \def\PYGZti{\discretionary{\char`\~}{\sphinxafterbreak}{\char`\~}}% } \def\sphinx@verbatim@nolig@list {\do \`}% % Some characters . , ; ? ! / are not pygmentized. 
% This macro makes them "active" and they will insert potential linebreaks \newcommand*\sphinxbreaksatpunct {% \lccode`\~`\.\lowercase{\def~}{\discretionary{\char`\.}{\sphinxafterbreak}{\char`\.}}% \lccode`\~`\,\lowercase{\def~}{\discretionary{\char`\,}{\sphinxafterbreak}{\char`\,}}% \lccode`\~`\;\lowercase{\def~}{\discretionary{\char`\;}{\sphinxafterbreak}{\char`\;}}% \lccode`\~`\:\lowercase{\def~}{\discretionary{\char`\:}{\sphinxafterbreak}{\char`\:}}% \lccode`\~`\?\lowercase{\def~}{\discretionary{\char`\?}{\sphinxafterbreak}{\char`\?}}% \lccode`\~`\!\lowercase{\def~}{\discretionary{\char`\!}{\sphinxafterbreak}{\char`\!}}% \lccode`\~`\/\lowercase{\def~}{\discretionary{\char`\/}{\sphinxafterbreak}{\char`\/}}% \catcode`\.\active \catcode`\,\active \catcode`\;\active \catcode`\:\active \catcode`\?\active \catcode`\!\active \catcode`\/\active \lccode`\~`\~ } \renewcommand{\Verbatim}[1][1]{% % quit horizontal mode if we are still in a paragraph \par % list starts new par, but we don't want it to be set apart vertically \parskip\z@skip % first, let's check if there is a caption \ifx\sphinxVerbatimTitle\empty \addvspace\z@% counteract possible previous negative skip (French lists!) \smallskip % there was no caption. Check if nevertheless a label was set. \ifx\sphinxLiteralBlockLabel\empty\else % we require some space to be sure hyperlink target from \phantomsection % will not be separated from upcoming verbatim by a page break \needspace{\sphinxliteralblockwithoutcaptionneedspace}% \phantomsection\sphinxLiteralBlockLabel \fi \fi % non-empty \sphinxVerbatimTitle has label inside it (in case there is one) % Customize framed.sty \MakeFramed to glue caption to literal block \global\spx@myfirstframedpasstrue % via \spx@fcolorbox, will use \spx@VerbatimFBox which inserts title \def\FrameCommand {\spx@colorbox\spx@fcolorbox }% \let\FirstFrameCommand\FrameCommand % for mid pages and last page portion of (long) split frame: \def\MidFrameCommand{\spx@colorbox\fcolorbox }% \let\LastFrameCommand\MidFrameCommand % fancyvrb's Verbatim puts each input line in (unbreakable) horizontal boxes. % This customization wraps each line from the input in a \vtop, thus % allowing it to wrap and display on two or more lines in the latex output. % - The codeline counter will be increased only once. % - The wrapped material will not break across pages, it is impossible % to achieve this without extensive rewrite of fancyvrb. % - The (not used in sphinx) obeytabs option to Verbatim is % broken by this change (showtabs and tabspace work). \sbox\sphinxcontinuationbox {\sphinxcontinuationsymbol}% \sbox\sphinxvisiblespacebox {\FV@SetupFont\sphinxvisiblespace}% \def\FancyVerbFormatLine ##1{\hsize\linewidth \vtop{\raggedright\hyphenpenalty\z@\exhyphenpenalty\z@ \doublehyphendemerits\z@\finalhyphendemerits\z@ \strut ##1\strut}% }% % If the linebreak is at a space, the latter will be displayed as visible % space at end of first line, and a continuation symbol starts next line. % Stretch/shrink are however usually zero for typewriter font. \def\FV@Space {% \nobreak\hskip\z@ plus\fontdimen3\font minus\fontdimen4\font \discretionary{\copy\sphinxvisiblespacebox}{\sphinxafterbreak} {\kern\fontdimen2\font}% }% % go around fancyvrb's check of @currenvir (for case of minipage below) \renewcommand*{\VerbatimEnvironment}{\gdef\FV@EnvironName{Verbatim}}% % go around fancyvrb's check of current list depth \def\@toodeep {\advance\@listdepth\@ne}% % Allow breaks at special characters using \PYG... macros. 
\sphinxbreaksatspecials % The list environment is needed to control perfectly the vertical space. % Note: \OuterFrameSep used by framed.sty is later set to \topsep hence 0pt. % - if caption: vertical space above caption = (\abovecaptionskip + D) with % D = \baselineskip-\FrameHeightAdjust, and then \smallskip above frame. % - if no caption: (\smallskip + D) above frame. By default D=6pt. % Use trivlist rather than list to avoid possible "too deeply nested" error. \itemsep \z@skip \topsep \z@skip \partopsep \z@skip% trivlist will set \parsep to \parskip = zero (see above) % \leftmargin will be set to zero by trivlist \rightmargin\z@ \parindent \z@% becomes \itemindent. Default zero, but perhaps overwritten. \trivlist\item\relax % use a minipage if we are already inside a framed environment \ifspx@inframed\noindent\begin{minipage}{\linewidth}\fi \MakeFramed {% adapted over from framed.sty's snugshade environment \advance\hsize-\width\@totalleftmargin\z@\linewidth\hsize \@setminipage }% \small % For grid placement from \strut's in \FancyVerbFormatLine \lineskip\z@skip % Breaks at punctuation characters . , ; ? ! and / need catcode=\active % and the active comma should not be overwritten by \@noligs \let\verbatim@nolig@list \sphinx@verbatim@nolig@list \OriginalVerbatim[#1,codes*=\sphinxbreaksatpunct]% } \renewcommand{\endVerbatim}{% \endOriginalVerbatim \par\unskip\@minipagefalse\endMakeFramed \ifspx@inframed\end{minipage}\fi \endtrivlist } % define macro to frame contents and add shadow on right and bottom % use public names for customizable lengths \newlength\sphinxshadowsep \setlength\sphinxshadowsep {5pt} \newlength\sphinxshadowsize \setlength\sphinxshadowsize {4pt} \newlength\sphinxshadowrule % this uses \fboxrule value at loading time of sphinx.sty (0.4pt normally) \setlength\sphinxshadowrule {\fboxrule} \long\def\spx@ShadowFBox#1{% \leavevmode\begingroup % first we frame the box #1 \setbox\@tempboxa \hbox{\vrule\@width\sphinxshadowrule \vbox{\hrule\@height\sphinxshadowrule \kern\sphinxshadowsep \hbox{\kern\sphinxshadowsep #1\kern\sphinxshadowsep}% \kern\sphinxshadowsep \hrule\@height\sphinxshadowrule}% \vrule\@width\sphinxshadowrule}% % Now we add the shadow, like \shadowbox from fancybox.sty would do \dimen@\dimexpr.5\sphinxshadowrule+\sphinxshadowsize\relax \hbox{\vbox{\offinterlineskip \hbox{\copy\@tempboxa\kern-.5\sphinxshadowrule % add shadow on right side \lower\sphinxshadowsize \hbox{\vrule\@height\ht\@tempboxa \@width\dimen@}% }% \kern-\dimen@ % shift back vertically to bottom of frame % and add shadow at bottom \moveright\sphinxshadowsize \vbox{\hrule\@width\wd\@tempboxa \@height\dimen@}% }% % move left by the size of right shadow so shadow adds no width \kern-\sphinxshadowsize }% \endgroup } % use framed.sty to allow page breaks in frame+shadow % works well inside Lists and Quote-like environments % produced by ``topic'' directive (or local contents) % could nest if LaTeX writer authorized it \newenvironment{sphinxShadowBox} {\def\FrameCommand {\spx@ShadowFBox }% % configure framed.sty not to add extra vertical spacing \spx@ifundefined{OuterFrameSep}{}{\OuterFrameSep\z@skip}% % the \trivlist will add the vertical spacing on top and bottom which is % typical of center environment as used in Sphinx <= 1.4.1 % the \noindent has the effet of an extra blank line on top, to % imitate closely the layout from Sphinx <= 1.4.1; the \FrameHeightAdjust % will put top part of frame on this baseline. 
\def\FrameHeightAdjust {\baselineskip}% \trivlist\item\noindent % use a minipage if we are already inside a framed environment \ifspx@inframed\begin{minipage}{\linewidth}\fi \MakeFramed {\spx@inframedtrue % framed.sty puts into "\width" the added width (=2shadowsep+2shadowrule) % adjust \hsize to what the contents must use \advance\hsize-\width % adjust LaTeX parameters to behave properly in indented/quoted contexts \FrameRestore % typeset the contents as in a minipage (Sphinx <= 1.4.1 used a minipage and % itemize/enumerate are therein typeset more tightly, we want to keep % that). We copy-paste from LaTeX source code but don't do a real minipage. \@pboxswfalse % for footnotes, but Sphinx inactivates footnotes in topics \def\@mpfn{mpfootnote}\def\thempfn{\thempfootnote}\c@mpfootnote\z@ \let\@footnotetext\@mpfootnotetext \let\@listdepth\@mplistdepth \@mplistdepth\z@ \@minipagerestore \@setminipage }% }% {% insert the "endminipage" code \par\unskip % handle (currently non existing) minipage style footnotes \ifvoid\@mpfootins\else \vskip\skip\@mpfootins\normalcolor\footnoterule\unvbox\@mpfootins \fi \@minipagefalse \endMakeFramed \ifspx@inframed\end{minipage}\fi \endtrivlist } % \moduleauthor{name}{email} \newcommand{\moduleauthor}[2]{} % \sectionauthor{name}{email} \newcommand{\sectionauthor}[2]{} % Augment the sectioning commands used to get our own font family in place, % and reset some internal data items: \titleformat{\section}{\Large\py@HeaderFamily}% {\py@TitleColor\thesection}{0.5em}{\py@TitleColor}{\py@NormalColor} \titleformat{\subsection}{\large\py@HeaderFamily}% {\py@TitleColor\thesubsection}{0.5em}{\py@TitleColor}{\py@NormalColor} \titleformat{\subsubsection}{\py@HeaderFamily}% {\py@TitleColor\thesubsubsection}{0.5em}{\py@TitleColor}{\py@NormalColor} % By default paragraphs (and subsubsections) will not be numbered because % sphinxmanual.cls and sphinxhowto.cls set secnumdepth to 2 \titleformat{\paragraph}{\py@HeaderFamily}% {\py@TitleColor\theparagraph}{0.5em}{\py@TitleColor}{\py@NormalColor} \titleformat{\subparagraph}{\py@HeaderFamily}% {\py@TitleColor\thesubparagraph}{0.5em}{\py@TitleColor}{\py@NormalColor} % {fulllineitems} is the main environment for object descriptions. % \newcommand{\py@itemnewline}[1]{% \@tempdima\linewidth% \advance\@tempdima \leftmargin\makebox[\@tempdima][l]{#1}% } \newenvironment{fulllineitems}{ \begin{list}{}{\labelwidth \leftmargin \labelsep 0pt \rightmargin 0pt \topsep -\parskip \partopsep \parskip \itemsep -\parsep \let\makelabel=\py@itemnewline} }{\end{list}} % \optional is used for ``[, arg]``, i.e. desc_optional nodes. \newcommand{\sphinxoptional}[1]{% {\textnormal{\Large[}}{#1}\hspace{0.5mm}{\textnormal{\Large]}}} \newlength{\py@argswidth} \newcommand{\py@sigparams}[2]{% \parbox[t]{\py@argswidth}{#1\sphinxcode{)}#2}} \newcommand{\pysigline}[1]{\item[#1]\nopagebreak} \newcommand{\pysiglinewithargsret}[3]{% \settowidth{\py@argswidth}{#1\sphinxcode{(}}% \addtolength{\py@argswidth}{-2\py@argswidth}% \addtolength{\py@argswidth}{\linewidth}% \item[#1\sphinxcode{(}\py@sigparams{#2}{#3}]} % Production lists % \newenvironment{productionlist}{ % \def\sphinxoptional##1{{\Large[}##1{\Large]}} \def\production##1##2{\\\sphinxcode{##1}&::=&\sphinxcode{##2}} \def\productioncont##1{\\& &\sphinxcode{##1}} \parindent=2em \indent \setlength{\LTpre}{0pt} \setlength{\LTpost}{0pt} \begin{longtable}[l]{lcl} }{% \end{longtable} } % Notices / Admonitions % % Code adapted from framed.sty's "snugshade" environment. 
% Nesting works (inner frames do not allow page breaks). \newcommand{\py@heavybox}{\par \setlength{\FrameRule}{\p@}% 1pt \setlength{\FrameSep}{\dimexpr.6\baselineskip-\FrameRule\relax} % configure framed.sty's parameters to obtain same vertical spacing % as for "light" boxes. We need for this to manually insert parskip glue and % revert a skip done by framed before the frame. \spx@ifundefined{OuterFrameSep}{}{\OuterFrameSep\z@skip}% \vspace{\FrameHeightAdjust} % copied/adapted from framed.sty's snugshade \def\FrameCommand##1{\hskip\@totalleftmargin \fboxsep\FrameSep \fboxrule\FrameRule\fbox{##1}% \hskip-\linewidth \hskip-\@totalleftmargin \hskip\columnwidth}% % use a minipage if we are already inside a framed environment \ifspx@inframed \noindent\begin{minipage}{\linewidth} \else % handle case where notice is first thing in a list item (or is quoted) \if@inlabel \noindent\par\vspace{-\baselineskip} \else \vspace{\parskip} \fi \fi \MakeFramed {\spx@inframedtrue \advance\hsize-\width \@totalleftmargin\z@ \linewidth\hsize % minipage initialization copied from LaTeX source code. \@pboxswfalse % for footnotes \def\@mpfn{mpfootnote}\def\thempfn{\thempfootnote}\c@mpfootnote\z@ \let\@footnotetext\@mpfootnotetext \let\@listdepth\@mplistdepth \@mplistdepth\z@ \@minipagerestore \@setminipage }% } \newcommand{\py@endheavybox}{% \par\unskip % handles footnotes \ifvoid\@mpfootins\else \vskip\skip\@mpfootins\normalcolor\footnoterule\unvbox\@mpfootins \fi \@minipagefalse \endMakeFramed \ifspx@inframed\end{minipage}\fi % arrange for similar spacing below frame as for "light" boxes. \vskip .4\baselineskip } \newcommand{\py@lightbox}{% \par\allowbreak \noindent\rule{\linewidth}{0.5pt}\par\nobreak {\parskip\z@skip\noindent}% } \newcommand{\py@endlightbox}{% \par % counteract previous possible negative skip (French lists!): % (we can't cancel that any earlier \vskip introduced a potential pagebreak) \ifdim\lastskip<\z@\vskip-\lastskip\fi \nobreak\vbox{\noindent\kern\@totalleftmargin \rule[.4\baselineskip]{\linewidth}{0.5pt}\hss}\allowbreak } % Some are quite plain: \newcommand{\py@noticestart@note}{\py@lightbox} \newcommand{\py@noticeend@note}{\py@endlightbox} \newcommand{\py@noticestart@hint}{\py@lightbox} \newcommand{\py@noticeend@hint}{\py@endlightbox} \newcommand{\py@noticestart@important}{\py@lightbox} \newcommand{\py@noticeend@important}{\py@endlightbox} \newcommand{\py@noticestart@tip}{\py@lightbox} \newcommand{\py@noticeend@tip}{\py@endlightbox} % Others gets more visible distinction: \newcommand{\py@noticestart@warning}{\py@heavybox} \newcommand{\py@noticeend@warning}{\py@endheavybox} \newcommand{\py@noticestart@caution}{\py@heavybox} \newcommand{\py@noticeend@caution}{\py@endheavybox} \newcommand{\py@noticestart@attention}{\py@heavybox} \newcommand{\py@noticeend@attention}{\py@endheavybox} \newcommand{\py@noticestart@danger}{\py@heavybox} \newcommand{\py@noticeend@danger}{\py@endheavybox} \newcommand{\py@noticestart@error}{\py@heavybox} \newcommand{\py@noticeend@error}{\py@endheavybox} \newenvironment{notice}[2]{ \def\py@noticetype{#1} \csname py@noticestart@#1\endcsname \sphinxstrong{#2} % <- legacy code creates a space after {#2} }{\csname py@noticeend@\py@noticetype\endcsname} % Allow the release number to be specified independently of the % \date{}. This allows the date to reflect the document's date and % release to specify the release that is documented. 
% \newcommand{\py@release}{} \newcommand{\version}{} \newcommand{\shortversion}{} \newcommand{\releaseinfo}{} \newcommand{\releasename}{Release} \newcommand{\release}[1]{% \renewcommand{\py@release}{\releasename\space\version}% \renewcommand{\version}{#1}} \newcommand{\setshortversion}[1]{% \renewcommand{\shortversion}{#1}} \newcommand{\setreleaseinfo}[1]{% \renewcommand{\releaseinfo}{#1}} % Allow specification of the author's address separately from the % author's name. This can be used to format them differently, which % is a good thing. % \newcommand{\py@authoraddress}{} \newcommand{\authoraddress}[1]{\renewcommand{\py@authoraddress}{#1}} % This sets up the fancy chapter headings that make the documents look % at least a little better than the usual LaTeX output. % \spx@ifundefined{ChTitleVar}{}{ \ChNameVar{\raggedleft\normalsize\py@HeaderFamily} \ChNumVar{\raggedleft \bfseries\Large\py@HeaderFamily} \ChTitleVar{\raggedleft \textrm{\Huge\py@HeaderFamily}} % This creates chapter heads without the leading \vspace*{}: \def\@makechapterhead#1{% {\parindent \z@ \raggedright \normalfont \ifnum \c@secnumdepth >\m@ne \DOCH \fi \interlinepenalty\@M \DOTI{#1} } } } % Redefine description environment so that it is usable inside fulllineitems. % \renewcommand{\description}{% \list{}{\labelwidth\z@% \itemindent-\leftmargin% \labelsep5pt% \let\makelabel=\descriptionlabel}} % Definition lists; requested by AMK for HOWTO documents. Probably useful % elsewhere as well, so keep in in the general style support. % \newenvironment{definitions}{% \begin{description}% \def\term##1{\item[##1]\mbox{}\\*[0mm]} }{% \end{description}% } % Tell TeX about pathological hyphenation cases: \hyphenation{Base-HTTP-Re-quest-Hand-ler} % The following is stuff copied from docutils' latex writer. % \newcommand{\optionlistlabel}[1]{\normalfont\bfseries #1 \hfill}% \bf deprecated \newenvironment{optionlist}[1] {\begin{list}{} {\setlength{\labelwidth}{#1} \setlength{\rightmargin}{1cm} \setlength{\leftmargin}{\rightmargin} \addtolength{\leftmargin}{\labelwidth} \addtolength{\leftmargin}{\labelsep} \renewcommand{\makelabel}{\optionlistlabel}} }{\end{list}} \newlength{\lineblockindentation} \setlength{\lineblockindentation}{2.5em} \newenvironment{lineblock}[1] {\begin{list}{} {\setlength{\partopsep}{\parskip} \addtolength{\partopsep}{\baselineskip} \topsep0pt\itemsep0.15\baselineskip\parsep0pt \leftmargin#1} \raggedright} {\end{list}} % Re-define \includegraphics to resize images larger than the line width % if the size is not specified. % Warning: future version of Sphinx will not modify original \includegraphics, % Below custom code will be direct definition of \sphinxincludegraphics, with % \py@Oldincludegraphics replaced by direct use of original \includegraphics. \let\py@Oldincludegraphics\includegraphics \newbox\spx@image@box \renewcommand*{\includegraphics}[2][\@empty]{% \ifx\@empty #1% attention, #1 could be bb.., bad if first after \ifx \setbox\spx@image@box=\hbox{\py@Oldincludegraphics{#2}}% \ifdim \wd\spx@image@box>\linewidth \py@Oldincludegraphics[width=\linewidth]{#2}% \else \leavevmode\box\spx@image@box \fi \else \py@Oldincludegraphics[#1]{#2}% \fi } % Writer will put \sphinxincludegraphics in LaTeX source, and with this, % documents which used their own modified \includegraphics will compile % as before. But see warning above. 
\newcommand*{\sphinxincludegraphics}{\includegraphics} % to make pdf with correct encoded bookmarks in Japanese % this should precede the hyperref package \ifx\kanjiskip\undefined % for non-Japanese: make sure bookmarks are ok also with lualatex \PassOptionsToPackage{pdfencoding=unicode}{hyperref} \else \usepackage{atbegshi} \ifx\ucs\undefined \ifnum 42146=\euc"A4A2 \AtBeginShipoutFirst{\special{pdf:tounicode EUC-UCS2}} \else \AtBeginShipoutFirst{\special{pdf:tounicode 90ms-RKSJ-UCS2}} \fi \else \AtBeginShipoutFirst{\special{pdf:tounicode UTF8-UCS2}} \fi \fi % Include hyperref last. \RequirePackage[colorlinks,breaklinks, linkcolor=InnerLinkColor,filecolor=OuterLinkColor, menucolor=OuterLinkColor,urlcolor=OuterLinkColor, citecolor=InnerLinkColor]{hyperref} % Fix anchor placement for figures with captions. % (Note: we don't use a package option here; instead, we give an explicit % \capstart for figures that actually have a caption.) \RequirePackage{hypcap} % Set up styles of URL: it should be placed after hyperref \urlstyle{same} % From docutils.writers.latex2e % inline markup (custom roles) % \DUrole{#1}{#2} tries \DUrole#1{#2} \providecommand*{\DUrole}[2]{% \ifcsname DUrole#1\endcsname% \csname DUrole#1\endcsname{#2}% \else% backwards compatibility: try \docutilsrole#1{#2} \ifcsname docutilsrole#1\endcsname% \csname docutilsrole#1\endcsname{#2}% \else% #2% \fi% \fi% } \providecommand*{\DUprovidelength}[2]{% \ifdefined#1\else\newlength{#1}\setlength{#1}{#2}\fi } \DUprovidelength{\DUlineblockindent}{2.5em} \ifdefined\DUlineblock\else \newenvironment{DUlineblock}[1]{% \list{}{\setlength{\partopsep}{\parskip} \addtolength{\partopsep}{\baselineskip} \setlength{\topsep}{0pt} \setlength{\itemsep}{0.15\baselineskip} \setlength{\parsep}{0pt} \setlength{\leftmargin}{#1}} \raggedright } {\endlist} \fi % From footmisc.sty: allows footnotes in titles \let\FN@sf@@footnote\footnote \def\footnote{\ifx\protect\@typeset@protect \expandafter\FN@sf@@footnote \else \expandafter\FN@sf@gobble@opt \fi } \edef\FN@sf@gobble@opt{\noexpand\protect \expandafter\noexpand\csname FN@sf@gobble@opt \endcsname} \expandafter\def\csname FN@sf@gobble@opt \endcsname{% \@ifnextchar[%] \FN@sf@gobble@twobracket \@gobble } \def\FN@sf@gobble@twobracket[#1]#2{} % adjust the margins for footer, % this works with the jsclasses only (Japanese standard document classes) \ifx\@jsc@uplatextrue\undefined\else \hypersetup{setpagesize=false} \setlength\footskip{2\baselineskip} \addtolength{\textheight}{-2\baselineskip} \fi % fix the double index and bibliography on the table of contents % in jsclasses (Japanese standard document classes) \ifx\@jsc@uplatextrue\undefined\else \renewcommand{\theindex}{ \cleardoublepage \phantomsection \py@OldTheindex } \renewcommand{\thebibliography}[1]{ \cleardoublepage \phantomsection \py@OldThebibliography{1} } \fi % disable \@chappos in Appendix in pTeX \ifx\kanjiskip\undefined\else \let\py@OldAppendix=\appendix \renewcommand{\appendix}{ \py@OldAppendix \gdef\@chappos{} } \fi % Define literal-block environment \RequirePackage{newfloat} \DeclareFloatingEnvironment{literal-block} \spx@ifundefined{c@chapter} {\SetupFloatingEnvironment{literal-block}{within=section,placement=h}} {\SetupFloatingEnvironment{literal-block}{within=chapter,placement=h}} \SetupFloatingEnvironment{literal-block}{name=List} % control caption around literal-block \RequirePackage{capt-of} \RequirePackage{needspace} % if the left page space is less than \literalblockneedspace, insert page-break 
\newcommand{\sphinxliteralblockneedspace}{5\baselineskip} \newcommand{\sphinxliteralblockwithoutcaptionneedspace}{1.5\baselineskip} % figure in table \newenvironment{sphinxfigure-in-table}[1][\linewidth]{% \def\@captype{figure}% \begin{minipage}{#1}% }{\end{minipage}} % store original \caption macro for use with figures in longtable and tabulary \AtBeginDocument{\let\spx@originalcaption\caption} \newcommand*\sphinxfigcaption {\ifx\equation$%$% this is trick to identify tabulary first pass \firstchoice@false\else\firstchoice@true\fi \spx@originalcaption } % by default, also define macros with the no-prefix names \ifsphinxKeepOldNames \typeout{** (sphinx) defining (legacy) text style macros without \string\sphinx\space prefix} \typeout{** if clashes with packages, set latex_keep_old_macro_names=False in conf.py} \@for\@tempa:=strong,bfcode,email,tablecontinued,titleref,% menuselection,accelerator,crossref,termref,optional\do {% first, check if command with no prefix already exists \expandafter\newcommand\csname\@tempa\endcsname{}% % if no error give it the meaning defined so far with \sphinx prefix \expandafter\let\csname\@tempa\expandafter\endcsname \csname sphinx\@tempa\endcsname % redefine the \sphinx prefixed macro to expand to non-prefixed one \expandafter\def\csname sphinx\@tempa\expandafter\endcsname \expandafter{\csname\@tempa\endcsname}% } % robustified case needs special treatment \newcommand\code{}\let\code\relax \DeclareRobustCommand{\code}[1]{{\@noligs\scantokens{\texttt{#1}\relax}}} \def\sphinxcode{\code}% \fi s3ql-2.26/doc/latex/manual.tex0000664000175000017500000051726213246754365017726 0ustar nikrationikratio00000000000000% Generated by Sphinx. \def\sphinxdocclass{report} \newif\ifsphinxKeepOldNames \sphinxKeepOldNamestrue \documentclass[letterpaper,10pt,english]{sphinxmanual} \usepackage{iftex} \ifPDFTeX \usepackage[utf8]{inputenc} \fi \ifdefined\DeclareUnicodeCharacter \DeclareUnicodeCharacter{00A0}{\nobreakspace} \fi \usepackage{cmap} \usepackage[T1]{fontenc} \usepackage{amsmath,amssymb,amstext} \usepackage{babel} \usepackage{times} \usepackage[Bjarne]{fncychap} \usepackage{longtable} \usepackage{sphinx} \usepackage{multirow} \usepackage{eqparbox} \addto\captionsenglish{\renewcommand{\figurename}{Fig.\@ }} \addto\captionsenglish{\renewcommand{\tablename}{Table }} \SetupFloatingEnvironment{literal-block}{name=Listing } \addto\extrasenglish{\def\pageautorefname{page}} \setcounter{tocdepth}{1} \title{S3QL Documentation} \date{Mar 04, 2018} \release{2.26} \author{Nikolaus Rath} \newcommand{\sphinxlogo}{} \renewcommand{\releasename}{Release} \makeindex \makeatletter \def\PYG@reset{\let\PYG@it=\relax \let\PYG@bf=\relax% \let\PYG@ul=\relax \let\PYG@tc=\relax% \let\PYG@bc=\relax \let\PYG@ff=\relax} \def\PYG@tok#1{\csname PYG@tok@#1\endcsname} \def\PYG@toks#1+{\ifx\relax#1\empty\else% \PYG@tok{#1}\expandafter\PYG@toks\fi} \def\PYG@do#1{\PYG@bc{\PYG@tc{\PYG@ul{% \PYG@it{\PYG@bf{\PYG@ff{#1}}}}}}} \def\PYG#1#2{\PYG@reset\PYG@toks#1+\relax+\PYG@do{#2}} \expandafter\def\csname PYG@tok@kn\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.53,0.00}{##1}}} \expandafter\def\csname PYG@tok@w\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.73,0.73,0.73}{##1}}} \expandafter\def\csname PYG@tok@sd\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.87,0.27,0.13}{##1}}} \expandafter\def\csname PYG@tok@vi\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.20,0.20,0.73}{##1}}} \expandafter\def\csname PYG@tok@bp\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}} \expandafter\def\csname 
PYG@tok@mb\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.40,0.00,0.93}{##1}}} \expandafter\def\csname PYG@tok@gs\endcsname{\let\PYG@bf=\textbf} \expandafter\def\csname PYG@tok@ge\endcsname{\let\PYG@it=\textit} \expandafter\def\csname PYG@tok@mi\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.87}{##1}}} \expandafter\def\csname PYG@tok@nn\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.05,0.52,0.71}{##1}}} \expandafter\def\csname PYG@tok@cpf\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}} \expandafter\def\csname PYG@tok@mf\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.40,0.00,0.93}{##1}}} \expandafter\def\csname PYG@tok@sb\endcsname{\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}} \expandafter\def\csname PYG@tok@cs\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.80,0.00,0.00}{##1}}} \expandafter\def\csname PYG@tok@go\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}} \expandafter\def\csname PYG@tok@m\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.40,0.00,0.93}{##1}}} \expandafter\def\csname PYG@tok@cp\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.33,0.47,0.60}{##1}}} \expandafter\def\csname PYG@tok@kc\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.53,0.00}{##1}}} \expandafter\def\csname PYG@tok@vm\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.60,0.40,0.20}{##1}}} \expandafter\def\csname PYG@tok@sa\endcsname{\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}} \expandafter\def\csname PYG@tok@si\endcsname{\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{0.93,0.93,0.93}{\strut ##1}}} \expandafter\def\csname PYG@tok@s\endcsname{\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}} \expandafter\def\csname PYG@tok@na\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.80}{##1}}} \expandafter\def\csname PYG@tok@sh\endcsname{\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}} \expandafter\def\csname PYG@tok@nv\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.60,0.40,0.20}{##1}}} \expandafter\def\csname PYG@tok@gh\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}} \expandafter\def\csname PYG@tok@mh\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.33,0.53}{##1}}} \expandafter\def\csname PYG@tok@gt\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.27,0.87}{##1}}} \expandafter\def\csname PYG@tok@no\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.20,0.40}{##1}}} \expandafter\def\csname PYG@tok@o\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.20,0.20,0.20}{##1}}} \expandafter\def\csname PYG@tok@mo\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.27,0.00,0.93}{##1}}} \expandafter\def\csname PYG@tok@vc\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.20,0.40,0.60}{##1}}} \expandafter\def\csname PYG@tok@nb\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}} \expandafter\def\csname PYG@tok@vg\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.87,0.47,0.00}{##1}}} \expandafter\def\csname PYG@tok@gu\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.50,0.00,0.50}{##1}}} \expandafter\def\csname PYG@tok@ni\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.53,0.00,0.00}{##1}}} \expandafter\def\csname PYG@tok@nc\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.73,0.00,0.40}{##1}}} \expandafter\def\csname 
PYG@tok@nl\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.60,0.47,0.00}{##1}}} \expandafter\def\csname PYG@tok@kt\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.20,0.20,0.60}{##1}}} \expandafter\def\csname PYG@tok@ow\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.00}{##1}}} \expandafter\def\csname PYG@tok@sr\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.00}{##1}}\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,1.00}{\strut ##1}}} \expandafter\def\csname PYG@tok@gp\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.78,0.36,0.04}{##1}}} \expandafter\def\csname PYG@tok@nt\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.47,0.00}{##1}}} \expandafter\def\csname PYG@tok@s2\endcsname{\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}} \expandafter\def\csname PYG@tok@ne\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{1.00,0.00,0.00}{##1}}} \expandafter\def\csname PYG@tok@ss\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.67,0.40,0.00}{##1}}} \expandafter\def\csname PYG@tok@gi\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.63,0.00}{##1}}} \expandafter\def\csname PYG@tok@nf\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.40,0.73}{##1}}} \expandafter\def\csname PYG@tok@kr\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.53,0.00}{##1}}} \expandafter\def\csname PYG@tok@il\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.87}{##1}}} \expandafter\def\csname PYG@tok@se\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}} \expandafter\def\csname PYG@tok@ch\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}} \expandafter\def\csname PYG@tok@sc\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.27,0.87}{##1}}} \expandafter\def\csname PYG@tok@gd\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.63,0.00,0.00}{##1}}} \expandafter\def\csname PYG@tok@kd\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.53,0.00}{##1}}} \expandafter\def\csname PYG@tok@kp\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.20,0.53}{##1}}} \expandafter\def\csname PYG@tok@c1\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}} \expandafter\def\csname PYG@tok@cm\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}} \expandafter\def\csname PYG@tok@err\endcsname{\def\PYG@tc##1{\textcolor[rgb]{1.00,0.00,0.00}{##1}}\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.67,0.67}{\strut ##1}}} \expandafter\def\csname PYG@tok@s1\endcsname{\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}} \expandafter\def\csname PYG@tok@fm\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.40,0.73}{##1}}} \expandafter\def\csname PYG@tok@c\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}} \expandafter\def\csname PYG@tok@k\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.53,0.00}{##1}}} \expandafter\def\csname PYG@tok@dl\endcsname{\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}} \expandafter\def\csname PYG@tok@nd\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.33,0.33,0.33}{##1}}} \expandafter\def\csname PYG@tok@gr\endcsname{\def\PYG@tc##1{\textcolor[rgb]{1.00,0.00,0.00}{##1}}} \expandafter\def\csname 
PYG@tok@sx\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.87,0.13,0.00}{##1}}\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}} \def\PYGZbs{\char`\\} \def\PYGZus{\char`\_} \def\PYGZob{\char`\{} \def\PYGZcb{\char`\}} \def\PYGZca{\char`\^} \def\PYGZam{\char`\&} \def\PYGZlt{\char`\<} \def\PYGZgt{\char`\>} \def\PYGZsh{\char`\#} \def\PYGZpc{\char`\%} \def\PYGZdl{\char`\$} \def\PYGZhy{\char`\-} \def\PYGZsq{\char`\'} \def\PYGZdq{\char`\"} \def\PYGZti{\char`\~} % for compatibility with earlier versions \def\PYGZat{@} \def\PYGZlb{[} \def\PYGZrb{]} \makeatother \renewcommand\PYGZsq{\textquotesingle} \begin{document} \maketitle \tableofcontents \phantomsection\label{index::doc} \chapter{S3QL} \label{about:s3ql-user-s-guide}\label{about::doc}\label{about:s3ql} S3QL is a file system that stores all its data online using storage services like \href{http://code.google.com/apis/storage/}{Google Storage}, \href{http://aws.amazon.com/s3}{Amazon S3}, or \href{http://openstack.org/projects/storage/}{OpenStack}. S3QL effectively provides a hard disk of dynamic, infinite capacity that can be accessed from any computer with internet access. S3QL is a standard conforming, full featured UNIX file system that is conceptually indistinguishable from any local file system. Furthermore, S3QL has additional features like compression, encryption, data de-duplication, immutable trees and snapshotting which make it especially suitable for online backup and archival. S3QL is designed to favor simplicity and elegance over performance and feature-creep. Care has been taken to make the source code as readable and serviceable as possible. Solid error detection and error handling have been included from the very first line, and S3QL comes with extensive automated test cases for all its components. \section{Features} \label{about:openstack}\label{about:features}\begin{itemize} \item {} \textbf{Transparency.} Conceptually, S3QL is indistinguishable from a local file system. For example, it supports hardlinks, symlinks, standard unix permissions, extended attributes and file sizes up to 2 TB. \item {} \textbf{Dynamic Size.} The size of an S3QL file system grows and shrinks dynamically as required. \item {} \textbf{Compression.} Before storage, all data may compressed with the LZMA, bzip2 or deflate (gzip) algorithm. \item {} \textbf{Encryption.} After compression (but before upload), all data can be AES encrypted with a 256 bit key. An additional SHA256 HMAC checksum is used to protect the data against manipulation. \item {} \textbf{Data De-duplication.} If several files have identical contents, the redundant data will be stored only once. This works across all files stored in the file system, and also if only some parts of the files are identical while other parts differ. \item {} \textbf{Immutable Trees.} Directory trees can be made immutable, so that their contents can no longer be changed in any way whatsoever. This can be used to ensure that backups can not be modified after they have been made. \item {} \textbf{Copy-on-Write/Snapshotting.} S3QL can replicate entire directory trees without using any additional storage space. Only if one of the copies is modified, the part of the data that has been modified will take up additional storage space. This can be used to create intelligent snapshots that preserve the state of a directory at different points in time using a minimum amount of space. 
\item {} \textbf{High Performance independent of network latency.} All operations that do not write or read file contents (like creating directories or moving, renaming, and changing permissions of files and directories) are very fast because they are carried out without any network transactions.
S3QL achieves this by saving the entire file and directory structure in a database. This database is locally cached and the remote copy updated asynchronously.
\item {} \textbf{Support for low bandwidth connections.} S3QL splits file contents into smaller blocks and caches blocks locally. This minimizes both the number of network transactions required for reading and writing data, and the amount of data that has to be transferred when only parts of a file are read or written.
\end{itemize}
\section{Development Status}
\label{about:development-status}
S3QL is considered stable and suitable for production use. Starting with version 2.17.1, S3QL uses semantic versioning. This means that backwards-incompatible versions (e.g., versions that require an upgrade of the file system revision) will be reflected in an increase of the major version number.
\section{Supported Platforms}
\label{about:supported-platforms}
S3QL is developed and tested under Linux. Users have also reported running S3QL successfully on OS-X, FreeBSD and NetBSD. We try to maintain compatibility with these systems, but (due to lack of pre-release testers) we cannot guarantee that every release will run on all non-Linux systems. Please report any bugs you find, and we will try to fix them.
\section{Contributing}
\label{about:contributing}
The S3QL source code is available both on \href{https://github.com/s3ql/main}{GitHub} and \href{https://bitbucket.org/nikratio/s3ql/}{BitBucket}.
\chapter{Installation}
\label{installation:installation}\label{installation::doc}\label{installation:github}
S3QL depends on several other programs and libraries that have to be installed first. The best method to satisfy these dependencies depends on your distribution.
The following instructions are for S3QL 2.26 and should be applicable to any system. The \href{https://bitbucket.org/nikratio/s3ql/wiki/Home}{S3QL Wiki} contains \href{https://bitbucket.org/nikratio/s3ql/wiki/Installation}{additional help} for specific distributions and operating systems. Note, however, that the S3QL wiki is editable by anyone. The information there has thus not been vetted by the S3QL maintainers, and may be wrong, out-of-date, or even dangerous. Generally, you should only follow steps from the Wiki that you fully understand yourself, and fall back on the instructions below when in doubt.
\section{Dependencies}
\label{installation:dependencies}
The following is a list of the programs and libraries required for running S3QL. Generally, you should first check if your distribution already provides suitable packages and only install from source if that is not the case.
\begin{itemize}
\item {} Kernel: Linux 2.6.9 or newer or FreeBSD with \href{http://www.freshports.org/sysutils/fusefs-kmod/}{FUSE4BSD}. Starting with kernel 2.6.26 you will get significantly better write performance, so under Linux you should actually use \emph{2.6.26 or newer whenever possible}.
\item {} The \href{http://psmisc.sf.net/}{psmisc} utilities.
\item {} \href{http://www.sqlite.org/}{SQLite} version 3.7.0 or newer. SQLite has to be installed as a \emph{shared library} with development headers.
\item {} \href{http://www.python.org/}{Python} 3.3.0 or newer.
Make sure to also install the development headers. \item {} The following Python modules: \begin{itemize} \item {} \href{https://pypi.python.org/pypi/setuptools}{setuptools}, version 1.0 or newer. \item {} \href{https://www.dlitz.net/software/pycrypto/}{pycrypto} \item {} \href{https://pypi.python.org/pypi/defusedxml/}{defusedxml} \item {} \href{https://pypi.python.org/pypi/requests/}{requests} (optional, required for OAuth2 authentication with Google Storage) \item {} \href{https://github.com/systemd/python-systemd}{systemd} (optional, for enabling systemd support). \item {} \href{https://github.com/rogerbinns/apsw}{apsw}, version 3.7.0 or newer. \item {} \href{https://bitbucket.org/nikratio/python-llfuse/}{llfuse}, any version between 1.0 (inclusive) and 2.0 (exclusive) \item {} \href{https://bitbucket.org/nikratio/python-dugong/}{dugong}, any version between 3.4 (inclusive) and 4.0 (exclusive) \item {} \href{http://pytest.org/}{pytest}, version 2.7 or newer (optional, to run unit tests) \end{itemize} To check if a specific module \sphinxcode{\textless{}module\textgreater{}} is installed, execute \sphinxcode{python3 -c 'import \emph{\textless{}module\textgreater{}}; print(\emph{\textless{}module\textgreater{}}.\_\_version\_\_)'}. This will result in an \sphinxcode{ImportError} if the module is not installed, and will print the installed version if the module is installed. \end{itemize} \section{Installing S3QL} \label{installation:inst-s3ql}\label{installation:installing-s3ql} To build and install S3QL itself, proceed as follows: \begin{enumerate} \item {} Download S3QL from \url{https://bitbucket.org/nikratio/s3ql/downloads} \item {} Unpack it into a folder of your choice \item {} Run \sphinxcode{python3 setup.py build\_ext -{-}inplace} to build S3QL. \item {} Run \sphinxcode{python3 -m pytest tests/} to run a self-test. If this fails, ask for help on the \href{http://groups.google.com/group/s3ql}{mailing list} or report a bug in the \href{https://bitbucket.org/nikratio/s3ql/issues}{issue tracker}. \end{enumerate} Now you have three options: \begin{itemize} \item {} You can run the S3QL commands from the \sphinxcode{bin/} directory. \item {} You can install S3QL system-wide for all users. To do that, you have to run \sphinxcode{sudo python3 setup.py install}. \item {} You can install S3QL into \sphinxcode{\textasciitilde{}/.local} by executing \sphinxcode{python3 setup.py install -{-}user}. In this case you should make sure that \sphinxcode{\textasciitilde{}/.local/bin} is in your \sphinxcode{\$PATH} environment variable. \end{itemize} \section{Development Version} \label{installation:development-version} If you have checked out the unstable development version from the Mercurial repository, a bit more effort is required. You'll also need: \begin{itemize} \item {} Version 0.24 or newer of the \href{http://www.cython.org/}{Cython} compiler. \item {} Version 1.2b1 or newer of the \href{http://sphinx.pocoo.org/}{Sphinx} document processor. \end{itemize} With these additional dependencies installed, S3QL can be build and tested with \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{python3 setup.py build\PYGZus{}cython} \PYG{l}{python3 setup.py build\PYGZus{}ext \PYGZhy{}\PYGZhy{}inplace} \PYG{l}{python3 \PYGZhy{}m pytest tests/} \end{Verbatim} Note that when building from the Mercurial or Git repository, building and testing is done with several additional checks. This may cause compilation and/or tests to fail even though there are no problems with functionality. 
For example, any use of functions that are scheduled for deprecation in future Python version will cause tests to fail. If you would rather just check for functionality, you can delete the \sphinxcode{MANIFEST.in} file. In that case, the build system will behave as it does for a regular release. The HTML and PDF documentation can be generated with \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{python3 setup.py build\PYGZus{}sphinx} \end{Verbatim} and S3QL can be installed as usual with \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{python3 setup.py install }\PYG{g+ge}{[\PYGZhy{}\PYGZhy{}user]} \end{Verbatim} \section{Running tests requiring remote servers} \label{installation:running-tests-requiring-remote-servers} By default, tests requiring a connection to a remote storage backend are skipped. If you would like to run these tests too (which is always a good idea), you have to create additional entries in your \sphinxcode{\textasciitilde{}/.s3ql/authinfo2} file that tell S3QL what server and credentials to use for these tests. These entries have the following form: \begin{Verbatim}[commandchars=\\\{\}] \PYG{g+ge}{[\PYGZlt{}BACKEND\PYGZgt{}\PYGZhy{}test]} \PYG{l}{backend\PYGZhy{}login: }\PYG{n+nv}{\PYGZlt{}user\PYGZgt{}} \PYG{l}{backend\PYGZhy{}password: }\PYG{n+nv}{\PYGZlt{}password\PYGZgt{}} \PYG{l}{test\PYGZhy{}fs: }\PYG{n+nv}{\PYGZlt{}storage\PYGZhy{}url\PYGZgt{}} \end{Verbatim} Here \emph{\textless{}BACKEND\textgreater{}} specifies the backend that you want to test (e.g. \emph{s3}, \emph{s3c}, \emph{gs}, or \emph{swift}), \emph{\textless{}user\textgreater{}} and \emph{\textless{}password\textgreater{}} are the backend authentication credentials, and \emph{\textless{}storage-url\textgreater{}} specifies the full storage URL that will be used for testing. \textbf{Any existing S3QL file system in this storage URL will be destroyed during testing}. For example, to run tests that need connection to a Google Storage server, you would add something like \begin{Verbatim}[commandchars=\\\{\}] \PYG{g+ge}{[gs\PYGZhy{}test]} \PYG{l}{backend\PYGZhy{}login: GOOGIGWLONT238MD7HZ4} \PYG{l}{backend\PYGZhy{}password: rmEbstjscoeunt1249oes1298gauidbs3hl} \PYG{l}{test\PYGZhy{}fs: gs://joes\PYGZhy{}gs\PYGZhy{}bucket/s3ql\PYGZus{}tests/} \end{Verbatim} On the next run of \sphinxcode{runtest.py} (or \sphinxcode{py.test} when using the development version), the additional tests will be run. If the tests are still skipped, you can get more information about why tests are being skipped by passing the \sphinxcode{-rs} argument to \sphinxcode{runtest.py}/\sphinxcode{py.test}. \chapter{Storage Backends} \label{backends:id1}\label{backends::doc}\label{backends:storage-backends}\label{backends:sphinx} S3QL supports different \emph{backends} to store data at different service providers and using different protocols. A \emph{storage url} specifies a backend together with some backend-specific information and uniquely identifies an S3QL file system. The form of the storage url depends on the backend and is described for every backend below. Furthermore, every S3QL commands that accepts a storage url also accepts a \sphinxcode{-{-}backend-options} parameter than can be used to pass backend-specific options to the backend module. The available options are documented with the respective backends below. All storage backends respect the \sphinxcode{http\_proxy} (for plain HTTP connections) and \sphinxcode{https\_proxy} (for SSL connections) environment variables. 
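For example, to route all backend connections through a proxy, these variables can be set in the
shell before running any S3QL command. The proxy address, storage URL and mount point below are
only placeholders and have to be adapted to your setup:
\begin{Verbatim}[commandchars=\\\{\}]
export http_proxy=http://proxy.example.com:8080
export https_proxy=http://proxy.example.com:8080
mount.s3ql gs://mybucket/prefix /mnt/s3ql
\end{Verbatim}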
\begin{notice}{note}{Note:} Storage backends are not necessarily compatible. Don't expect that you can e.g. copy the data stored by the local backend into Amazon S3 using some non-S3QL tool and then access it with S3QL's S3 backend. If you want to copy file systems from one backend to another, you need to use the \sphinxcode{clone\_fs.py} script (from the \sphinxcode{contrib} directory in the S3QL tarball). \end{notice} \section{Google Storage} \label{backends:google-storage} \href{http://code.google.com/apis/storage/}{Google Storage} is an online storage service offered by Google. To use the Google Storage backend, you need to have (or sign up for) a Google account, and then \href{http://code.google.com/apis/storage/docs/signup.html}{activate Google Storage} for your account. The account is free, you will pay only for the amount of storage and traffic that you actually use. There are two ways to access Google storage: \begin{enumerate} \item {} Use S3-like authentication. To do this, first \href{https://developers.google.com/storage/docs/migrating\#defaultproj}{set a default project}. Then use the \href{https://code.google.com/apis/console/\#:storage:legacy}{key management tool} to retrieve your \emph{Google Storage developer access key} and \emph{Google Storage developer secret} and use that as backend login and backend password. \item {} Use OAuth2 authentication. In this case you need to use \sphinxcode{oauth2} as the backend login, and a valid OAuth2 refresh token as the backend password. To obtain a refresh token, you can use the {\hyperref[man/oauth_client:oauth\string-client]{\sphinxcrossref{\DUrole{std,std-ref}{s3ql\_oauth\_client}}}} program. It will instruct you to open a specific URL in your browser, enter a code and authenticate with your Google account. Once this procedure is complete, {\hyperref[man/oauth_client:oauth\string-client]{\sphinxcrossref{\DUrole{std,std-ref}{s3ql\_oauth\_client}}}} will print out the refresh token. Note that you need to do this procedure only once, the refresh token will remain valid until you explicitly revoke it. \end{enumerate} To create a Google Storage bucket, you can use e.g. the \href{https://sandbox.google.com/storage/}{Google Storage Manager}. The storage URL for accessing the bucket in S3QL is then \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{gs://}\PYG{n+nv}{\PYGZlt{}bucketname\PYGZgt{}}\PYG{l}{/}\PYG{n+nv}{\PYGZlt{}prefix\PYGZgt{}} \end{Verbatim} Here \emph{bucketname} is the name of the bucket, and \emph{prefix} can be an arbitrary prefix that will be prepended to all object names used by S3QL. This allows you to store several S3QL file systems in the same Google Storage bucket. The Google Storage backend accepts the following backend options: \index{gs\_backend command line option!no-ssl}\index{no-ssl!gs\_backend command line option} \begin{fulllineitems} \phantomsection\label{backends:cmdoption-gs_backend-arg-no-ssl}\pysigline{\sphinxbfcode{no-ssl}\sphinxcode{}} Disable encrypted (https) connections and use plain HTTP instead. \end{fulllineitems} \index{gs\_backend command line option!ssl-ca-path=\textless{}path\textgreater{}}\index{ssl-ca-path=\textless{}path\textgreater{}!gs\_backend command line option} \begin{fulllineitems} \phantomsection\label{backends:cmdoption-gs_backend-arg-ssl-ca-path}\pysigline{\sphinxbfcode{ssl-ca-path}\sphinxcode{=\textless{}path\textgreater{}}} Instead of using the system's default certificate store, validate the server certificate against the specified CA certificates. 
\sphinxcode{\textless{}path\textgreater{}} may be either a file containing multiple certificates, or a directory containing one certificate per file. \end{fulllineitems} \index{gs\_backend command line option!tcp-timeout}\index{tcp-timeout!gs\_backend command line option} \begin{fulllineitems} \phantomsection\label{backends:cmdoption-gs_backend-arg-tcp-timeout}\pysigline{\sphinxbfcode{tcp-timeout}\sphinxcode{}} Specifies the timeout used for TCP connections. If no data can be exchanged with the remote server for longer than this period, the TCP connection is closed and re-established (default: 20 seconds). \end{fulllineitems} \section{Amazon S3} \label{backends:amazon-s3}\label{backends:google-storage-manager} \href{http://aws.amazon.com/s3}{Amazon S3} is the online storage service offered by \href{http://aws.amazon.com/}{Amazon Web Services (AWS)}. To use the S3 backend, you first need to sign up for an AWS account. The account is free, you will pay only for the amount of storage and traffic that you actually use. After that, you need to create a bucket that will hold the S3QL file system, e.g. using the \href{https://console.aws.amazon.com/s3/home}{AWS Management Console}. For best performance, it is recommend to create the bucket in the geographically closest storage region, but not the US Standard region (see {\hyperref[durability:durability]{\sphinxcrossref{\DUrole{std,std-ref}{Important Rules to Avoid Losing Data}}}} for the reason). The storage URL for accessing S3 buckets in S3QL has the form \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{s3://}\PYG{n+nv}{\PYGZlt{}region\PYGZgt{}}\PYG{l}{/}\PYG{n+nv}{\PYGZlt{}bucket\PYGZgt{}}\PYG{l}{/}\PYG{n+nv}{\PYGZlt{}prefix\PYGZgt{}} \end{Verbatim} \emph{prefix} can be an arbitrary prefix that will be prepended to all object names used by S3QL. This allows you to store several S3QL file systems in the same S3 bucket. For example, the storage URL \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{s3://ap\PYGZhy{}south\PYGZhy{}1/foomart.net/data/s3ql\PYGZus{}backup/} \end{Verbatim} refers to the \emph{foomart.net} bucket in the \emph{ap-south-1} region. All storage objects that S3QL stores in this bucket will be prefixed with \emph{data/s3ql\_backup/}. Note that the backend login and password for accessing S3 are not the user id and password that you use to log into the Amazon Webpage, but the \emph{AWS access key id} and \emph{AWS secret access key} shown under \href{https://aws-portal.amazon.com/gp/aws/developer/account/index.html?ie=UTF8\&action=access-key}{My Account/Access Identifiers}. The Amazon S3 backend accepts the following backend options: \index{s3\_backend command line option!no-ssl}\index{no-ssl!s3\_backend command line option} \begin{fulllineitems} \phantomsection\label{backends:cmdoption-s3_backend-arg-no-ssl}\pysigline{\sphinxbfcode{no-ssl}\sphinxcode{}} Disable encrypted (https) connections and use plain HTTP instead. \end{fulllineitems} \index{s3\_backend command line option!ssl-ca-path=\textless{}path\textgreater{}}\index{ssl-ca-path=\textless{}path\textgreater{}!s3\_backend command line option} \begin{fulllineitems} \phantomsection\label{backends:cmdoption-s3_backend-arg-ssl-ca-path}\pysigline{\sphinxbfcode{ssl-ca-path}\sphinxcode{=\textless{}path\textgreater{}}} Instead of using the system's default certificate store, validate the server certificate against the specified CA certificates. 
\sphinxcode{\textless{}path\textgreater{}} may be either a file containing multiple certificates, or a directory containing one certificate per file. \end{fulllineitems} \index{s3\_backend command line option!tcp-timeout}\index{tcp-timeout!s3\_backend command line option} \begin{fulllineitems} \phantomsection\label{backends:cmdoption-s3_backend-arg-tcp-timeout}\pysigline{\sphinxbfcode{tcp-timeout}\sphinxcode{}} Specifies the timeout used for TCP connections. If no data can be exchanged with the remote server for longer than this period, the TCP connection is closed and re-established (default: 20 seconds). \end{fulllineitems} \index{s3\_backend command line option!sse}\index{sse!s3\_backend command line option} \begin{fulllineitems} \phantomsection\label{backends:cmdoption-s3_backend-arg-sse}\pysigline{\sphinxbfcode{sse}\sphinxcode{}} Enable server side encryption. Both costs \& benefits of S3 server side encryption are probably rather small, and this option does \emph{not} affect any client side encryption performed by S3QL itself. \end{fulllineitems} \index{s3\_backend command line option!ia}\index{ia!s3\_backend command line option} \begin{fulllineitems} \phantomsection\label{backends:cmdoption-s3_backend-arg-ia}\pysigline{\sphinxbfcode{ia}\sphinxcode{}} Use infrequent access storage class for new objects. \end{fulllineitems} \index{s3\_backend command line option!rrs}\index{rrs!s3\_backend command line option} \begin{fulllineitems} \phantomsection\label{backends:cmdoption-s3_backend-arg-rrs}\pysigline{\sphinxbfcode{rrs}\sphinxcode{}} Enable reduced redundancy storage for newly created objects (overwrites the \emph{ia} option). When enabling this option, it is strongly recommended to periodically run {\hyperref[fsck:s3ql\string-verify]{\sphinxcrossref{\DUrole{std,std-ref}{s3ql\_verify}}}}, because objects that are lost by the storage backend may cause subsequent data loss even later in time due to the data de-duplication feature of S3QL (see {\hyperref[durability:backend\string-reliability]{\sphinxcrossref{\DUrole{std,std-ref}{Data Durability}}}} for details). \end{fulllineitems} \section{OpenStack/Swift} \label{backends:openstack-swift}\label{backends:openstack-backend} \href{http://www.openstack.org/}{OpenStack} is an open-source cloud server application suite. \href{http://openstack.org/projects/storage/}{Swift} is the cloud storage module of OpenStack. Swift/OpenStack storage is offered by many different companies. There are two different storage URL for the OpenStack backend that make use of different authentication APIs. For legacy (v1) authentication, the storage URL is \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{swift://}\PYG{n+nv}{\PYGZlt{}hostname\PYGZgt{}}\PYG{g+ge}{[:\PYGZlt{}port\PYGZgt{}]}\PYG{l}{/}\PYG{n+nv}{\PYGZlt{}container\PYGZgt{}}\PYG{g+ge}{[/\PYGZlt{}prefix\PYGZgt{}]} \end{Verbatim} for Keystone (v2) authentication, the storage URL is \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{swiftks://}\PYG{n+nv}{\PYGZlt{}hostname\PYGZgt{}}\PYG{g+ge}{[:\PYGZlt{}port\PYGZgt{}]}\PYG{l}{/}\PYG{n+nv}{\PYGZlt{}region\PYGZgt{}}\PYG{l}{:}\PYG{n+nv}{\PYGZlt{}container\PYGZgt{}}\PYG{g+ge}{[/\PYGZlt{}prefix\PYGZgt{}]} \end{Verbatim} Note that when using Keystone authentication, you can (and have to) specify the storage region of the container as well. In both cases, \emph{hostname} name should be the name of the authentication server. The storage container must already exist (most OpenStack providers offer either a web frontend or a command line tool for creating containers). 
\emph{prefix} can be an arbitrary prefix that will be prepended to all object names used by S3QL, which can be used to store multiple S3QL file systems in the same container. When using legacy authentication, the backend login and password correspond to the OpenStack username and API Access Key. When using Keystone authentication, the backend password is your regular OpenStack password and the backend login combines you OpenStack username and tenant name in the form \sphinxcode{\textless{}tenant\textgreater{}:\textless{}user\textgreater{}}. If no tenant is required, the OpenStack username alone may be used as backend login. The OpenStack backend accepts the following backend options: \index{swift\_backend command line option!no-ssl}\index{no-ssl!swift\_backend command line option} \begin{fulllineitems} \phantomsection\label{backends:cmdoption-swift_backend-arg-no-ssl}\pysigline{\sphinxbfcode{no-ssl}\sphinxcode{}} Use plain HTTP to connect to the authentication server. This option does not directly affect the connection to the storage server. Whether HTTPS or plain HTTP is used to connect to the storage server is determined by the authentication server. \end{fulllineitems} \index{swift\_backend command line option!ssl-ca-path=\textless{}path\textgreater{}}\index{ssl-ca-path=\textless{}path\textgreater{}!swift\_backend command line option} \begin{fulllineitems} \phantomsection\label{backends:cmdoption-swift_backend-arg-ssl-ca-path}\pysigline{\sphinxbfcode{ssl-ca-path}\sphinxcode{=\textless{}path\textgreater{}}} Instead of using the system's default certificate store, validate the server certificate against the specified CA certificates. \sphinxcode{\textless{}path\textgreater{}} may be either a file containing multiple certificates, or a directory containing one certificate per file. \end{fulllineitems} \index{swift\_backend command line option!tcp-timeout}\index{tcp-timeout!swift\_backend command line option} \begin{fulllineitems} \phantomsection\label{backends:cmdoption-swift_backend-arg-tcp-timeout}\pysigline{\sphinxbfcode{tcp-timeout}\sphinxcode{}} Specifies the timeout used for TCP connections. If no data can be exchanged with the remote server for longer than this period, the TCP connection is closed and re-established (default: 20 seconds). \end{fulllineitems} \index{swift\_backend command line option!disable-expect100}\index{disable-expect100!swift\_backend command line option} \begin{fulllineitems} \phantomsection\label{backends:cmdoption-swift_backend-arg-disable-expect100}\pysigline{\sphinxbfcode{disable-expect100}\sphinxcode{}} If this option is specified, S3QL does not use the \sphinxcode{Expect: continue} header (cf. \href{http://tools.ietf.org/html/rfc2616\#section-8.2.3}{RFC2616, section 8.2.3}) when uploading data to the server. This can be used to work around broken storage servers that don't fully support HTTP 1.1, but may decrease performance as object data will be transmitted to the server more than once in some circumstances. \end{fulllineitems} \index{swift\_backend command line option!no-feature-detection}\index{no-feature-detection!swift\_backend command line option} \begin{fulllineitems} \phantomsection\label{backends:cmdoption-swift_backend-arg-no-feature-detection}\pysigline{\sphinxbfcode{no-feature-detection}\sphinxcode{}} If this option is specified, S3QL does not try to dynamically detect advanced features of the Swift backend. In this case S3QL can only use the least common denominator of supported Swift versions and configurations. 
\end{fulllineitems} \begin{notice}{note}{Note:} The Swift API unfortunately lacks a number of features that S3QL normally makes use of. S3QL works around these deficiencies as much as possible. However, this means that storing data using the Swift backend generally requires more network round-trips and transfer volume than the other backends. Also, S3QL requires Swift storage servers to provide immediate consistency for newly created objects. \end{notice} \section{Rackspace CloudFiles} \label{backends:rackspace-cloudfiles} \href{http://www.rackspace.com/}{Rackspace} CloudFiles uses \href{http://www.openstack.org/}{OpenStack} internally, so it is possible to just use the OpenStack/Swift backend (see above) with \sphinxcode{auth.api.rackspacecloud.com} as the host name. For convenince, there is also a special \sphinxcode{rackspace} backend that uses a storage URL of the form \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{rackspace://}\PYG{n+nv}{\PYGZlt{}region\PYGZgt{}}\PYG{l}{/}\PYG{n+nv}{\PYGZlt{}container\PYGZgt{}}\PYG{g+ge}{[/\PYGZlt{}prefix\PYGZgt{}]} \end{Verbatim} The storage container must already exist in the selected region. \emph{prefix} can be an arbitrary prefix that will be prepended to all object names used by S3QL and can be used to store several S3QL file systems in the same container. You can create a storage container for S3QL using the \href{https://mycloud.rackspace.com/}{Cloud Control Panel} (click on \emph{Files} in the topmost menu bar). The Rackspace backend accepts the same backend options as the {\hyperref[backends:openstack\string-backend]{\sphinxcrossref{\DUrole{std,std-ref}{OpenStack backend}}}}. \section{S3 compatible} \label{backends:s3-compatible}\label{backends:rackspace} The S3 compatible backend allows S3QL to access any storage service that uses the same protocol as Amazon S3. The storage URL has the form \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{s3c://}\PYG{n+nv}{\PYGZlt{}hostname\PYGZgt{}}\PYG{l}{:}\PYG{n+nv}{\PYGZlt{}port\PYGZgt{}}\PYG{l}{/}\PYG{n+nv}{\PYGZlt{}bucketname\PYGZgt{}}\PYG{l}{/}\PYG{n+nv}{\PYGZlt{}prefix\PYGZgt{}} \end{Verbatim} Here \emph{bucketname} is the name of an (existing) bucket, and \emph{prefix} can be an arbitrary prefix that will be prepended to all object names used by S3QL. This allows you to store several S3QL file systems in the same bucket. The S3 compatible backend accepts the following backend options: \index{s3c\_backend command line option!no-ssl}\index{no-ssl!s3c\_backend command line option} \begin{fulllineitems} \phantomsection\label{backends:cmdoption-s3c_backend-arg-no-ssl}\pysigline{\sphinxbfcode{no-ssl}\sphinxcode{}} Disable encrypted (https) connections and use plain HTTP instead. \end{fulllineitems} \index{s3c\_backend command line option!ssl-ca-path=\textless{}path\textgreater{}}\index{ssl-ca-path=\textless{}path\textgreater{}!s3c\_backend command line option} \begin{fulllineitems} \phantomsection\label{backends:cmdoption-s3c_backend-arg-ssl-ca-path}\pysigline{\sphinxbfcode{ssl-ca-path}\sphinxcode{=\textless{}path\textgreater{}}} Instead of using the system's default certificate store, validate the server certificate against the specified CA certificates. \sphinxcode{\textless{}path\textgreater{}} may be either a file containing multiple certificates, or a directory containing one certificate per file. 
\end{fulllineitems} \index{s3c\_backend command line option!tcp-timeout}\index{tcp-timeout!s3c\_backend command line option} \begin{fulllineitems} \phantomsection\label{backends:cmdoption-s3c_backend-arg-tcp-timeout}\pysigline{\sphinxbfcode{tcp-timeout}\sphinxcode{}} Specifies the timeout used for TCP connections. If no data can be exchanged with the remote server for longer than this period, the TCP connection is closed and re-established (default: 20 seconds). \end{fulllineitems} \index{s3c\_backend command line option!disable-expect100}\index{disable-expect100!s3c\_backend command line option} \begin{fulllineitems} \phantomsection\label{backends:cmdoption-s3c_backend-arg-disable-expect100}\pysigline{\sphinxbfcode{disable-expect100}\sphinxcode{}} If this option is specified, S3QL does not use the \sphinxcode{Expect: continue} header (cf. \href{http://tools.ietf.org/html/rfc2616\#section-8.2.3}{RFC2616, section 8.2.3}) when uploading data to the server. This can be used to work around broken storage servers that don't fully support HTTP 1.1, but may decrease performance as object data will be transmitted to the server more than once in some circumstances. \end{fulllineitems} \index{s3c\_backend command line option!dumb-copy}\index{dumb-copy!s3c\_backend command line option} \begin{fulllineitems} \phantomsection\label{backends:cmdoption-s3c_backend-arg-dumb-copy}\pysigline{\sphinxbfcode{dumb-copy}\sphinxcode{}} If this option is specified, S3QL assumes that a COPY request to the storage server has succeeded as soon as the server returns a \sphinxcode{200 OK} status. The \href{http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html}{S3 COPY API} specifies that the storage server may still return an error in the request body (see the \href{https://doc.s3.amazonaws.com/proposals/copy.html}{copy proposal} for the rationale), so this option should only be used if you are certain that your storage server only returns \sphinxcode{200 OK} when the copy operation has been completely and successfully carried out. Using this option may be neccessary if your storage server does not return a valid response body for a succesfull copy operation. \end{fulllineitems} \section{Local} \label{backends:id6}\label{backends:local} S3QL is also able to store its data on the local file system. This can be used to backup data on external media, or to access external services that S3QL can not talk to directly (e.g., it is possible to store data over SSH by first mounting the remote system using \href{http://fuse.sourceforge.net/sshfs.html}{sshfs} and then using the local backend to store the data in the sshfs mountpoint). The storage URL for local storage is \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{local://}\PYG{n+nv}{\PYGZlt{}path\PYGZgt{}} \end{Verbatim} Note that you have to write three consecutive slashes to specify an absolute path, e.g. \sphinxcode{local:///var/archive}. Also, relative paths will automatically be converted to absolute paths before the authentication file (see {\hyperref[authinfo:authinfo]{\sphinxcrossref{\DUrole{std,std-ref}{Storing Authentication Information}}}}) is read, i.e. if you are in the \sphinxcode{/home/john} directory and try to mount \sphinxcode{local://s3ql}, the corresponding section in the authentication file must match the storage url \sphinxcode{local:///home/john/s3ql}. The local backend does not accept any backend options. 
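As a sketch of the sshfs approach mentioned above (user name, host and paths are placeholders),
the remote directory is first mounted with sshfs and the S3QL file system is then created and
mounted through the local backend:
\begin{Verbatim}[commandchars=\\\{\}]
sshfs john@example.com:/srv/archive /mnt/sshfs
mkfs.s3ql local:///mnt/sshfs/s3ql
mount.s3ql local:///mnt/sshfs/s3ql /mnt/backup
\end{Verbatim}
When unmounting, the S3QL file system has to be unmounted before the underlying sshfs mount point.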
\chapter{Important Rules to Avoid Losing Data} \label{durability:durability}\label{durability::doc}\label{durability:sshfs}\label{durability:important-rules-to-avoid-losing-data} Most S3QL backends store data in distributed storage systems. These systems differ from a traditional, local hard disk in several important ways. In order to avoid losing data, this section should be read very carefully. \section{Rules in a Nutshell} \label{durability:rules-in-a-nutshell} To avoid losing your data, obey the following rules: \begin{enumerate} \item {} Know what durability you can expect from your chosen storage provider. The durability describes how likely it is that a stored object becomes damaged over time. Such data corruption can never be prevented completely, techniques like geographic replication and RAID storage just reduce the likelihood of it to happen (i.e., increase the durability). \item {} When choosing a backend and storage provider, keep in mind that when using S3QL, the effective durability of the file system data will be reduced because of S3QL's data de-duplication feature. \item {} Determine your storage service's consistency window. The consistency window that is important for S3QL is the smaller of the times for which: \begin{itemize} \item {} a newly created object may not yet be included in the list of stored objects \item {} an attempt to read a newly created object may fail with the storage service reporting that the object does not exist \end{itemize} If \emph{one} of the above times is zero, we say that as far as S3QL is concerned the storage service has \emph{immediate} consistency. If your storage provider claims that \emph{neither} of the above can ever happen, while at the same time promising high durability, you should choose a respectable provider instead. \item {} When mounting the same file system on different computers (or on the same computer but with different \sphinxcode{-{-}cachedir} directories), the time that passes between the first and second of invocation of \textbf{\texttt{mount.s3ql}} must be at least as long as your storage service's consistency window. If your storage service offers immediate consistency, you do not need to wait at all. \item {} Before running \textbf{\texttt{fsck.s3ql}} or \textbf{\texttt{s3qladm}}, the file system must have been left untouched for the length of the consistency window. If your storage service offers immediate consistency, you do not need to wait at all. \end{enumerate} The rest of this section explains the above rules and the reasons for them in more detail. It also contains a list of the consistency windows for a number of larger storage providers. \section{Consistency Window List} \label{durability:consistency-window-list} The following is a list of the consistency windows (as far as S3QL is concerned) for a number of storage providers. This list doesn't come with any guarantees and may be outdated. If your storage provider is not included, or if you need more reliable information, check with your storage provider. \noindent\begin{tabulary}{\linewidth}{|L|L|} \hline \textsf{\relax Storage Provider \unskip}\relax &\textsf{\relax Consistency Window \unskip}\relax \\ \hline Amazon S3 in the US standard region & No guarantees \\ \hline Amazon S3 in other regions & Immediate \\ \hline Google Storage & Immediate \\ \hline\end{tabulary} \section{Data Consistency} \label{durability:data-consistency} In contrast to the typical hard disk, most storage providers do not guarantee \emph{immediate consistency} of written data. 
This means that:
\begin{itemize}
\item {} after an object has been stored, requests to read this object may still fail or return the prior contents for a little while.
\item {} after an object has been deleted, attempts to read it may still return the (old) data for some time, and it may still remain in the list of stored objects for some time.
\item {} after a new object has been created, it may still not be included when retrieving the list of stored objects for some time.
\end{itemize}
Of course, none of this is acceptable for a file system, and S3QL generally handles any of the above situations internally so that it always provides a fully consistent file system to the user. However, there are some situations where an S3QL user nevertheless needs to be aware of the peculiarities of his chosen storage service.
Suppose that you mount the file system, store some new data, delete some old data and unmount it. If you then mount the file system again right away on another computer, there is no guarantee that S3QL will see any of the changes that the first S3QL process has made. At least in theory it is therefore possible that on the second mount, S3QL does not see any of the changes that you have made and presents you with an ``old version'' of the file system without them. Even worse, if you notice the problem and unmount the file system, S3QL will upload the old status (which S3QL necessarily has to consider as current) and thereby permanently overwrite the newer version (even though this change may not become immediately visible either).
S3QL uses several techniques to reduce the likelihood of this happening (see {\hyperref[impl_details:impl\string-details]{\sphinxcrossref{\DUrole{std,std-ref}{Implementation Details}}}} for more information on this), but without support from the storage service, the possibility cannot be eliminated completely.
The same problem of course also applies when checking the file system. If the storage service provides S3QL with only partially updated data, S3QL has no way to find out if this is a real consistency problem that needs to be fixed or if it is only a temporary problem that will resolve itself automatically (because there are still changes that have not become visible yet).
This is where the so-called \emph{consistency window} comes in. The consistency window is the maximum time (after writing or deleting the object) for which any of the above ``outdated responses'' may be received. If the consistency window is zero, i.e. all changes are immediately effective, the storage service is said to have \emph{immediate consistency}. If the window is infinite, i.e. there is no upper bound on the time it may take for changes to take effect, the storage service is said to be \emph{eventually consistent}. Note that often there are different consistency windows for the different operations. For example, Google Storage offers immediate consistency when reading data, but only eventual consistency for the list of stored objects.
To prevent the problem of S3QL working with an outdated copy of the file system data, it is therefore sufficient to simply wait for the consistency window to pass before mounting the file system again (or running a file system check). The length of the consistency window changes from storage service to storage service, and if your service is not included in the list above, you should check the web page or ask the technical support of your storage provider.
The window that is important for S3QL is the smaller of the times for which
\begin{itemize}
\item {} a newly created object may not yet be included in the list of stored objects
\item {} an attempt to read a newly created object may fail with the storage service reporting that the object does not exist
\end{itemize}
Unfortunately, many storage providers are hesitant to guarantee anything but eventual consistency, i.e. the length of the consistency window is potentially infinite. In that case you simply have to pick a length that you consider ``safe enough''. For example, even though Amazon is only guaranteeing eventual consistency, the ordinary consistency window for data stored in S3 is just a few seconds, and only in exceptional circumstances (e.g., core network outages) may it rise to hours (\href{http://forums.aws.amazon.com/message.jspa?messageID=38471\#38471}{source}).
\section{Data Durability}
\label{durability:backend-reliability}\label{durability:data-durability}
The durability of a storage service is a measure of the average probability of a storage object becoming corrupted over time. The lower the chance of data loss, the higher the durability. Storage services like Amazon S3 claim to achieve a durability of up to 99.999999999\% over a year, i.e. if you store 100000000 objects for 100 years, you can expect that at the end of that time one object will be corrupted or lost.
S3QL is designed to reduce redundancy and store data in the smallest possible form. Therefore, S3QL is generally not able to compensate for any such losses, and when choosing a storage service you should carefully review if the offered durability matches your requirements. When doing this, there are two factors that should be kept in mind.
Firstly, even though S3QL is not able to compensate for storage service failures, it is able to detect them: when trying to access data that has been lost or corrupted by the storage service, an IO error will be returned and the mount point will become inaccessible to ensure that the problem is noticed.
Secondly, the consequences of a data loss by the storage service can be significantly more severe than you may expect because of S3QL's data de-duplication feature: a data loss in the storage service at time \emph{x} may cause data that is written \emph{after} time \emph{x} to be lost as well. Consider the following scenario:
\begin{enumerate}
\item {} You store an important file in the S3QL file system.
\item {} The storage service loses the data blocks of this file. As long as you do not access the file or run \textbf{\texttt{fsck.s3ql}}, S3QL is not aware that the data has been lost by the storage service.
\item {} You save an additional copy of the important file in a different location on the same S3QL file system.
\item {} S3QL detects that the contents of the new file are identical to the data blocks that have been stored earlier. Since at this point S3QL is not aware that these blocks have been lost by the storage service, it does not save another copy of the file contents in the storage service but relies on the (presumably) existing blocks instead.
\item {} Therefore, even though you saved another copy, you still do not have a backup of the important file (since both copies refer to the same data blocks that have been lost by the storage service).
\end{enumerate}
For some storage services, \textbf{\texttt{fsck.s3ql}} can mitigate this effect. When \textbf{\texttt{fsck.s3ql}} runs, it asks the storage service for a list of all stored objects.
If objects are missing, it can then mark the damaged files and prevent the problem from spreading forwards in time. Figuratively speaking, this establishes a ``checkpoint'': data loss that occurred before running \textbf{\texttt{fsck.s3ql}} can not affect any file system operations that are performed after the check. Unfortunately, many storage services only ``discover'' that objects are missing or broken when the object actually needs to be retrieved. In this case, \textbf{\texttt{fsck.s3ql}} will not learn anything by just querying the list of objects. This effect can be mitigated to some degree by using the \textbf{\texttt{s3ql\_verify}} command in additon to \textbf{\texttt{fsck.s3ql}}. \textbf{\texttt{s3ql\_verify}} asks the storage service to look up every stored object and may therefore take much longer than running \textbf{\texttt{fsck.s3ql}}, but can also offer a much stronger assurance that no data has been lost by the storage service. To ``recover'' from damaged storage objects in the backend, the damaged objects found by \textbf{\texttt{s3ql\_verify}} have to be explicitly deleted (so that a successive \textbf{\texttt{fsck.s3ql}} is able detect them as missing, correct the file system metadata, and move any affected files to \sphinxcode{lost+found}). This procedure is currently not automated, so it is generally a good idea to choose a storage service where the expected data durability is high enough so that the possibility of a lost object (and thus the need to run any full checks) can be neglected over long periods of time. \chapter{File System Creation} \label{mkfs::doc}\label{mkfs:file-system-creation} A S3QL file system is created with the \textbf{\texttt{mkfs.s3ql}} command. It has the following syntax: \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{mkfs.s3ql }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}storage url\PYGZgt{}} \end{Verbatim} This command accepts the following options: \begin{quote} \begin{optionlist}{3cm} \item [-{-}cachedir \textless{}path\textgreater{}] Store cached data in this directory (default: \sphinxcode{\textasciitilde{}/.s3ql)} \item [-{-}authfile \textless{}path\textgreater{}] Read authentication credentials from this file (default: \sphinxcode{\textasciitilde{}/.s3ql/authinfo2)} \item [-{-}debug-modules \textless{}modules\textgreater{}] Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}debug] Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}quiet] be really quiet \item [-{-}backend-options \textless{}options\textgreater{}] Backend specific options (separate by commas). See backend documentation for available options. \item [-{-}version] just print program version and exit \item [-L \textless{}name\textgreater{}] Filesystem label \item [-{-}max-obj-size \textless{}size\textgreater{}] Maximum size of storage objects in KiB. Files bigger than this will be spread over multiple objects in the storage backend. Default: 10240 KiB. \item [-{-}plain] Create unencrypted file system. \item [-{-}force] Overwrite any existing data. \end{optionlist} \end{quote} Unless you have specified the \sphinxcode{-{-}plain} option, \textbf{\texttt{mkfs.s3ql}} will ask you to enter an encryption password. 
This password will \emph{not} be read from an authentication file specified with the \sphinxcode{-{-}authfile} option to prevent accidental creation of an encrypted file system. Note that: \begin{itemize} \item {} All data that is stored under the given storage url is assumed to managed exclusively by S3QL. Trying to manually save additional objects (or remove or manipulate existing objects) will lead to file system corruption, and \textbf{\texttt{fsck.s3ql}} may delete objects that do not belong to the file system. \item {} With most storage backends, slashes in the storage url prefix do not have special meaning. For example, the storage urls \sphinxcode{s3://mybucket/myprefix/} and \sphinxcode{s3://mybucket/myprefix} are distinct. In the first case, the prefix is \sphinxcode{myprefix/}, while in the second it is \sphinxcode{myprefix}. \item {} S3QL file systems can not be ``stacked'', i.e. you cannot have one file system stored at \sphinxcode{s3://bucketname/outerprefix} and a second one at \sphinxcode{s3://bucketname/outerprefix/innerprefix}. \end{itemize} \chapter{Managing File Systems} \label{adm:managing-file-systems}\label{adm::doc} The \sphinxcode{s3qladm} command performs various operations on \emph{unmounted} S3QL file systems. The file system \emph{must not be mounted} when using \sphinxcode{s3qladm} or things will go wrong badly. The syntax is \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{s3qladm }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}action\PYGZgt{}}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}storage\PYGZhy{}url\PYGZgt{}} \end{Verbatim} where \sphinxcode{action} may be either of \textbf{\texttt{passphrase}}, \textbf{\texttt{upgrade}}, \textbf{\texttt{clear}} or \textbf{\texttt{download-metadata}}. The \textbf{\texttt{s3qladm}} accepts the following general options, no matter what specific action is being invoked: \begin{quote} \begin{optionlist}{3cm} \item [-{-}debug-modules \textless{}modules\textgreater{}] Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}debug] Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}quiet] be really quiet \item [-{-}log \textless{}target\textgreater{}] Destination for log messages. Specify \sphinxcode{none} for standard output or \sphinxcode{syslog} for the system logging daemon. Anything else will be interpreted as a file name. Log files will be rotated when they reach 1 MiB, and at most 5 old log files will be kept. Default: \sphinxcode{None} \item [-{-}authfile \textless{}path\textgreater{}] Read authentication credentials from this file (default: \sphinxcode{\textasciitilde{}/.s3ql/authinfo2)} \item [-{-}backend-options \textless{}options\textgreater{}] Backend specific options (separate by commas). See backend documentation for available options. \item [-{-}cachedir \textless{}path\textgreater{}] Store cached data in this directory (default: \sphinxcode{\textasciitilde{}/.s3ql)} \item [-{-}version] just print program version and exit \end{optionlist} \end{quote} Hint: run \sphinxcode{s3qladm \textless{}action\textgreater{} -{-}help} to get help on the additional arguments that the different actions take. 
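For example (the storage URL below is only a placeholder), the general options are given before
the action:
\begin{Verbatim}[commandchars=\\\{\}]
s3qladm --log syslog --authfile ~/.s3ql/authinfo2 upgrade s3://eu-west-1/mybucket/prefix
\end{Verbatim}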
\section{Changing the Passphrase} \label{adm:changing-the-passphrase} To change the passphrase of a file system, use the \sphinxcode{passphrase} subcommand: \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{s3qladm passphrase }\PYG{n+nv}{\PYGZlt{}storage url\PYGZgt{}} \end{Verbatim} \section{Upgrading the file system} \label{adm:upgrading-the-file-system} If you have installed a new version of S3QL, it may sometimes be necessary to upgrade the file system metadata as well. Note that in this case the file system can no longer be accessed with older versions of S3QL after the upgrade. During the upgrade you have to make sure that the command is not interrupted, and that no one else tries to mount, check or upgrade the file system at the same time. To upgrade a file system from the previous to the current revision, execute \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{s3qladm upgrade }\PYG{n+nv}{\PYGZlt{}storage url\PYGZgt{}} \end{Verbatim} \section{Deleting a file system} \label{adm:deleting-a-file-system} A file system can be deleted with: \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{s3qladm clear }\PYG{n+nv}{\PYGZlt{}storage url\PYGZgt{}} \end{Verbatim} This physically deletes all the data and file system structures. \section{Restoring Metadata Backups} \label{adm:restoring-metadata-backups} If the most-recent copy of the file system metadata has been damaged irreparably, it is possible to restore one of the automatically created backup copies. The command \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{s3qladm download\PYGZhy{}metadata }\PYG{n+nv}{\PYGZlt{}storage url\PYGZgt{}} \end{Verbatim} will give you a list of the available metadata backups and allow you to download them. This will create two new files in the current directory, ending in \sphinxcode{.db} and \sphinxcode{.params}. To actually use the downloaded backup, you need to move these files into the \sphinxcode{\textasciitilde{}/.s3ql/} directory and run \sphinxcode{fsck.s3ql}. \begin{notice}{warning}{Warning:} You should probably not use this functionality without having asked for help on the mailing list first (see {\hyperref[resources:resources]{\sphinxcrossref{\DUrole{std,std-ref}{Further Resources / Getting Help}}}}). \end{notice} \chapter{Mounting} \label{mount:mounting}\label{mount::doc} A S3QL file system is mounted with the \textbf{\texttt{mount.s3ql}} command. It has the following syntax: \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{mount.s3ql }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}storage url\PYGZgt{}}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}mountpoint\PYGZgt{}} \end{Verbatim} \begin{notice}{note}{Note:} S3QL is not a network file system like \href{http://en.wikipedia.org/wiki/Network\_File\_System\_\%28protocol\%29}{NFS} or \href{http://en.wikipedia.org/wiki/CIFS}{CIFS}. It can only be mounted on one computer at a time. \end{notice} This command accepts the following options: \begin{quote} \begin{optionlist}{3cm} \item [-{-}log \textless{}target\textgreater{}] Destination for log messages. Specify \sphinxcode{none} for standard output or \sphinxcode{syslog} for the system logging daemon. Anything else will be interpreted as a file name. Log files will be rotated when they reach 1 MiB, and at most 5 old log files will be kept. 
Default: \sphinxcode{\textasciitilde{}/.s3ql/mount.log} \item [-{-}cachedir \textless{}path\textgreater{}] Store cached data in this directory (default: \sphinxcode{\textasciitilde{}/.s3ql)} \item [-{-}authfile \textless{}path\textgreater{}] Read authentication credentials from this file (default: \sphinxcode{\textasciitilde{}/.s3ql/authinfo2)} \item [-{-}debug-modules \textless{}modules\textgreater{}] Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}debug] Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}quiet] be really quiet \item [-{-}backend-options \textless{}options\textgreater{}] Backend specific options (separate by commas). See backend documentation for available options. \item [-{-}version] just print program version and exit \item [-{-}cachesize \textless{}size\textgreater{}] Cache size in KiB (default: autodetect). \item [-{-}max-cache-entries \textless{}num\textgreater{}] Maximum number of entries in cache (default: autodetect). Each cache entry requires one file descriptor, so if you increase this number you have to make sure that your process file descriptor limit (as set with \sphinxcode{ulimit -n}) is high enough (at least the number of cache entries + 100). \item [-{-}allow-other] Normally, only the user who called \sphinxcode{mount.s3ql} can access the mount point. This user then also has full access to it, independent of individual file permissions. If the \sphinxcode{-{-}allow-other} option is specified, other users can access the mount point as well and individual file permissions are taken into account for all users. \item [-{-}allow-root] Like \sphinxcode{-{-}allow-other}, but restrict access to the mounting user and the root user. \item [-{-}fg] Do not daemonize, stay in foreground \item [-{-}upstart] Stay in foreground and raise SIGSTOP once mountpoint is up. \item [-{-}compress \textless{}algorithm-lvl\textgreater{}] Compression algorithm and compression level to use when storing new data. \emph{algorithm} may be any of \sphinxcode{lzma}, \sphinxcode{bzip2}, \sphinxcode{zlib}, or none. \emph{lvl} may be any integer from 0 (fastest) to 9 (slowest). Default: \sphinxcode{lzma-6} \item [-{-}metadata-upload-interval \textless{}seconds\textgreater{}] Interval in seconds between complete metadata uploads. Set to 0 to disable. Default: 24h. \item [-{-}threads \textless{}no\textgreater{}] Number of parallel upload threads to use (default: auto). \item [-{-}nfs] Enable some optimizations for exporting the file system over NFS. (default: False) \end{optionlist} \end{quote} \section{Permission Checking} \label{mount:permission-checking} If the file system is mounted with neither the \sphinxcode{allow-root} nor \sphinxcode{allow-other} option, the mounting user has full permissions on the S3QL file system (he is effectively root). If one (or both) of the options is used, standard unix permission checks apply, i.e. only the real root user has full access and all other users (including the mounting user) are subject to permission checks. \section{Compression Algorithms} \label{mount:compression-algorithms} S3QL supports three compression algorithms, LZMA, Bzip2 and zlib (with LZMA being the default). 
The compression algorithm can be specified freely whenever the file system is mounted, since it affects only the compression of new data blocks. Roughly speaking, LZMA is slower but achieves better compression ratios than Bzip2, while Bzip2 in turn is slower but achieves better compression ratios than zlib. For maximum file system performance, the best algorithm therefore depends on your network connection speed: the compression algorithm should be fast enough to saturate your network connection. To find the optimal algorithm and number of parallel compression threads for your system, S3QL ships with a program called \sphinxcode{benchmark.py} in the \sphinxcode{contrib} directory. You should run this program on a file that has a size that is roughly equal to the block size of your file system and has similar contents. It will then determine the compression speeds for the different algorithms and the upload speeds for the specified backend and recommend the best algorithm that is fast enough to saturate your network connection. Obviously you should make sure that there is little other system load when you run \sphinxcode{benchmark.py} (i.e., don't compile software or encode videos at the same time). \section{Notes about Caching} \label{mount:notes-about-caching} S3QL maintains a local cache of the file system data to speed up access. The cache is block based, so it is possible that only parts of a file are in the cache. \subsection{Maximum Number of Cache Entries} \label{mount:maximum-number-of-cache-entries} The maximum size of the cache can be configured with the \sphinxcode{-{-}cachesize} option. In addition to that, the maximum number of objects in the cache is limited by the \sphinxcode{-{-}max-cache-entries} option, so it is possible that the cache does not grow up to the maximum cache size because the maximum number of cache elements has been reached. The reason for this limit is that each cache entry requires one open file descriptor, and Linux distributions usually limit the total number of file descriptors per process to about a thousand. If you specify a value for \sphinxcode{-{-}max-cache-entries}, you should therefore make sure to also configure your system to increase the maximum number of open file handles. This can be done temporarily with the \textbf{\texttt{ulimit -n}} command. The method to permanently change this limit system-wide depends on your distribution. \subsection{Cache Flushing and Expiration} \label{mount:cache-flushing-and-expiration} S3QL flushes changed blocks in the cache to the backend whenever a block has not been accessed for at least 10 seconds. Note that when a block is flushed, it still remains in the cache. Cache expiration (i.e., removal of blocks from the cache) is only done when the maximum cache size is reached. S3QL always expires the least recently used blocks first. \section{Failure Modes} \label{mount:failure-modes} Once an S3QL file system has been mounted, there is a multitude of problems that can occur when communicating with the remote server. Generally, \textbf{\texttt{mount.s3ql}} always tries to keep the file system as accessible as possible under the circumstances. That means that if network connectivity is lost, data can still be written as long as there is space in the local cache. Attempts to read data not already present in the cache, however, will block until connection is re-established. If any sort of data corruption is detected, the file system will switch to read-only mode. 
Attempting to read files that are affected by the corruption will return an input/output error (\emph{errno} set to \sphinxcode{EIO}). In case of other unexpected or fatal problems, \textbf{\texttt{mount.s3ql}} terminates, but does not unmount the file system. Any attempt to access the mountpoint will result in a ``Transport endpoint not connected'' error (\emph{errno} set to \sphinxcode{ESHUTDOWN}). This ensures that a mountpoint whose \textbf{\texttt{mount.s3ql}} process has terminated can not be confused with a mountpoint containing an empty file system (which would be fatal if e.g. the mountpoint is automatically mirrored). When this has happened, the mountpoint can be cleared by using the \textbf{\texttt{fusermount}} command (provided by FUSE) with the \sphinxcode{-u} parameter. \textbf{\texttt{mount.s3ql}} will automatically try to re-establish the connection to the server if network connectivity is lost, and retry sending a request when the connection is established but the remote server signals a temporary problem. These attempts will be made at increasing intervals for a period up to 24 hours, with retry intervals starting at 20 ms and increasing up to 5 minutes. After 24 hours, \textbf{\texttt{mount.s3ql}} will give up and terminate, leaving the mountpoint inaccessible as described above. Generally, \textbf{\texttt{mount.s3ql}} will also emit log messages for any unusual conditions that it encounters. The destination for these messages can be set with the \sphinxcode{-{-}log} parameter. It is highly recommended to periodically check these logs, for example with a tool like \href{http://sourceforge.net/projects/logcheck/}{logcheck}. Many potential issues that \textbf{\texttt{mount.s3ql}} may encounter do not justify restricting access to the file system, but should nevertheless be investigated if they occur. Checking the log messages is the only way to find out about them. \section{Automatic Mounting} \label{mount:logcheck}\label{mount:automatic-mounting} If you want to mount and unmount an S3QL file system automatically at system startup and shutdown, you should do so with a dedicated S3QL init job (instead of using \sphinxcode{/etc/fstab}). When using systemd, \textbf{\texttt{mount.s3ql}} can be run as a service of type \sphinxcode{notify}. \begin{notice}{note}{Note:} In principle, it is also possible to automatically mount an S3QL file system with an appropriate entry in \sphinxcode{/etc/fstab}. However, this is not recommended for several reasons: \begin{itemize} \item {} file systems mounted in \sphinxcode{/etc/fstab} will be unmounted with the \textbf{\texttt{umount}} command, so your system will not wait until all data has been uploaded but shut down (or restart) immediately (this is a FUSE limitation, see \href{https://bitbucket.org/nikratio/s3ql/issue/1/blocking-fusermount-and-umount}{issue \#1}). \item {} There is no way to tell the system that mounting S3QL requires a Python interpreter to be available, so it may attempt to run \textbf{\texttt{mount.s3ql}} before it has mounted the volume containing the Python interpreter. \item {} There is no standard way to tell the system that an internet connection has to be up before the S3QL file system can be mounted.
\end{itemize} \end{notice} \chapter{Advanced S3QL Features} \label{special::doc}\label{special:advanced-s3ql-features} \section{Snapshotting and Copy-on-Write} \label{special:snapshotting-and-copy-on-write}\label{special:s3qlcp} The command \sphinxcode{s3qlcp} can be used to duplicate a directory tree without physically copying the file contents. This is made possible by the data de-duplication feature of S3QL. The syntax of \sphinxcode{s3qlcp} is: \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{s3qlcp }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}src\PYGZgt{}}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}target\PYGZgt{}} \end{Verbatim} This will replicate the contents of the directory \sphinxcode{\textless{}src\textgreater{}} in the directory \sphinxcode{\textless{}target\textgreater{}}. \sphinxcode{\textless{}src\textgreater{}} has to be an existing directory and \sphinxcode{\textless{}target\textgreater{}} must not exist. Moreover, both directories have to be within the same S3QL file system. The replication will not take any additional space. Only if one of the directories is modified later on will the modified data take up additional storage space. \sphinxcode{s3qlcp} can only be called by the user that mounted the file system and (if the file system was mounted with \sphinxcode{-{-}allow-other} or \sphinxcode{-{-}allow-root}) the root user. Note that: \begin{itemize} \item {} After the replication, both source and target directory will still be completely ordinary directories. You can regard \sphinxcode{\textless{}src\textgreater{}} as a snapshot of \sphinxcode{\textless{}target\textgreater{}} or vice versa. However, the most common usage of \sphinxcode{s3qlcp} is to regularly duplicate the same source directory, say \sphinxcode{documents}, to different target directories. For e.g. a monthly replication, the target directories would typically be named something like \sphinxcode{documents\_January} for the replication in January, \sphinxcode{documents\_February} for the replication in February etc. In this case it is clear that the target directories should be regarded as snapshots of the source directory. \item {} Exactly the same effect could be achieved by an ordinary copy program like \sphinxcode{cp -a}. However, this procedure would be orders of magnitude slower, because \sphinxcode{cp} would have to read every file completely (so that S3QL would have to fetch all the data over the network from the backend) before writing it into the destination folder. \end{itemize} \subsection{Snapshotting vs Hardlinking} \label{special:snapshotting-vs-hardlinking} Snapshot support in S3QL is inspired by the hardlinking feature that is offered by programs like \href{http://www.samba.org/rsync}{rsync} or \href{http://savannah.nongnu.org/projects/storebackup}{storeBackup}. These programs can create a hardlink instead of copying a file if an identical file already exists in the backup. However, using hardlinks has two large disadvantages: \begin{itemize} \item {} backups and restores always have to be made with a special program that takes care of the hardlinking. The backup must not be touched by any other programs (they may make changes that inadvertently affect other hardlinked files) \item {} special care needs to be taken to handle files which are already hardlinked (the restore program needs to know that the hardlink was not just introduced by the backup program to save space) \end{itemize} S3QL snapshots do not have these problems, and they can be used with any backup program.
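As a concrete illustration of the snapshotting workflow described above: assuming the file system is mounted at \sphinxcode{/mnt/s3ql} (all paths here are only illustrative), a January snapshot of a \sphinxcode{documents} directory could be created with:

\begin{Verbatim}[commandchars=\\\{\}]
s3qlcp /mnt/s3ql/documents /mnt/s3ql/documents_January
\end{Verbatim}

The target directory must not exist yet, and both paths have to be on the same S3QL file system.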
\section{Getting Statistics} \label{special:s3qlstat}\label{special:getting-statistics} You can get more information about a mounted S3QL file system with the \sphinxcode{s3qlstat} command. It has the following syntax: \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{s3qlstat }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}mountpoint\PYGZgt{}} \end{Verbatim} This will print out something like this: \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{Directory entries: 1488068} \PYG{l}{Inodes: 1482991} \PYG{l}{Data blocks: 87948} \PYG{l}{Total data size: 400 GiB} \PYG{l}{After de\PYGZhy{}duplication: 51 GiB (12.98\PYGZpc{} of total)} \PYG{l}{After compression: 43 GiB (10.85\PYGZpc{} of total, 83.60\PYGZpc{} of de\PYGZhy{}duplicated)} \PYG{l}{Database size: 172 MiB (uncompressed)} \PYG{l}{(some values do not take into account not\PYGZhy{}yet\PYGZhy{}uploaded dirty blocks in cache)} \end{Verbatim} Probably the most interesting numbers are the total size of your data, the total size after de-duplication, and the final size after de-duplication and compression. \sphinxcode{s3qlstat} can only be called by the user that mounted the file system and (if the file system was mounted with \sphinxcode{-{-}allow-other} or \sphinxcode{-{-}allow-root}) the root user. For a full list of available options, run \sphinxcode{s3qlstat -{-}help}. \section{Immutable Trees} \label{special:immutable-trees}\label{special:s3qllock} The command \textbf{\texttt{s3qllock}} can be used to make a directory tree immutable. Immutable trees can no longer be changed in any way whatsoever. You can not add new files or directories and you can not change or delete existing files and directories. The only way to get rid of an immutable tree is to use the \textbf{\texttt{s3qlrm}} command (see below). For example, to make the directory tree beneath the directory \sphinxcode{2010-04-21} immutable, execute \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{s3qllock 2010\PYGZhy{}04\PYGZhy{}21} \end{Verbatim} Immutability is a feature designed for backups. Traditionally, backups have been made on external tape drives. Once a backup was made, the tape drive was removed and locked somewhere in a shelf. This has the great advantage that the contents of the backup are now permanently fixed. Nothing (short of physical destruction) can change or delete files in the backup. In contrast, when backing up into an online storage system like S3QL, all backups are available every time the file system is mounted. Nothing prevents a file in an old backup from being changed again later on. In the worst case, this may make your entire backup system worthless. Imagine that your system gets infected by a nasty virus that simply deletes all files it can find -- if the virus is active while the backup file system is mounted, the virus will destroy all your old backups as well! Even if the possibility of a malicious virus or trojan horse is excluded, being able to change a backup after it has been made is generally not a good idea. A common S3QL use case is to keep the file system mounted at all times and periodically create backups with \textbf{\texttt{rsync -a}}. This allows every user to recover her files from a backup without having to call the system administrator. However, this also allows every user to accidentally change or delete files \emph{in} one of the old backups. Making a backup immutable protects you against all these problems.
Unless you happen to run into a virus that was specifically programmed to attack S3QL file systems, backups can be neither deleted nor changed after they have been made immutable. \section{Fast Recursive Removal} \label{special:fast-recursive-removal}\label{special:s3qlrm} The \sphinxcode{s3qlrm} command can be used to recursively delete files and directories on an S3QL file system. Although \sphinxcode{s3qlrm} is faster than using e.g. \sphinxcode{rm -r}, the main reason for its existence is that it allows you to delete immutable trees as well. The syntax is rather simple: \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{s3qlrm }\PYG{n+nv}{\PYGZlt{}directory\PYGZgt{}} \end{Verbatim} Be warned that there is no additional confirmation. The directory will be removed entirely and immediately. \section{Runtime Configuration} \label{special:runtime-configuration}\label{special:s3qlctrl} The \sphinxcode{s3qlctrl} command can be used to control a mounted S3QL file system. Its syntax is \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{s3qlctrl }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}action\PYGZgt{}}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}mountpoint\PYGZgt{}}\PYG{l}{ ...} \end{Verbatim} \sphinxcode{\textless{}mountpoint\textgreater{}} must be the location of a mounted S3QL file system. For a list of valid options, run \sphinxcode{s3qlctrl -{-}help}. \sphinxcode{\textless{}action\textgreater{}} may be either of: \begin{quote} \begin{quote}\begin{description} \item[{flushcache}] \leavevmode Flush file system cache. The command blocks until the cache has been flushed. \item[{dropcache}] \leavevmode Flush, and then drop file system cache. The command blocks until the cache has been flushed and dropped. \item[{log}] \leavevmode Change log level. \item[{cachesize}] \leavevmode Change file system cache size. \item[{upload-meta}] \leavevmode Trigger a metadata upload. \end{description}\end{quote} \end{quote} \chapter{Unmounting} \label{umount::doc}\label{umount:unmounting} To unmount an S3QL file system, use the command: \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{umount.s3ql }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}mountpoint\PYGZgt{}} \end{Verbatim} This will block until all data has been written to the backend. Only the user who mounted the file system with \textbf{\texttt{mount.s3ql}} is able to unmount it again. If you are root and want to unmount an S3QL file system mounted by an ordinary user, you have to use the \textbf{\texttt{fusermount -u}} or \textbf{\texttt{umount}} command instead. Note that these commands do not block until all data has been uploaded, so if you use them instead of \sphinxcode{umount.s3ql} then you should manually wait for the \sphinxcode{mount.s3ql} process to terminate before shutting down the system. The \textbf{\texttt{umount.s3ql}} command accepts the following options: \begin{quote} \begin{optionlist}{3cm} \item [-{-}debug-modules \textless{}modules\textgreater{}] Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}debug] Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}quiet] be really quiet \item [-{-}version] just print program version and exit \item [-{-}lazy, -z] Lazy umount. Detaches the file system immediately, even if there are still open files.
The data will be uploaded in the background once all open files have been closed. \end{optionlist} \end{quote} If, for some reason, the \sphinxcode{umount.s3ql} command does not work, the file system can also be unmounted with \sphinxcode{fusermount -u -z}. Note that this command will return immediately and the file system may continue to upload data in the background for a while longer. \chapter{Checking for Errors} \label{fsck:checking-for-errors}\label{fsck::doc} It is recommended to periodically run the \textbf{\texttt{fsck.s3ql}} and \textbf{\texttt{s3ql\_verify}} commands (in this order) to ensure that the file system is consistent, and that there has been no data corruption or data loss in the storage backend. \textbf{\texttt{fsck.s3ql}} is intended to detect and correct problems with the internal file system structure, caused by e.g. a file system crash or a bug in S3QL. It assumes that the storage backend can be fully trusted, i.e. if the backend reports that a specific storage object exists, \textbf{\texttt{fsck.s3ql}} takes that as proof that the data is present and intact. In contrast to that, the \textbf{\texttt{s3ql\_verify}} command is intended to check the consistency of the storage backend. It assumes that the internal file system data is correct, and verifies that all data can actually be retrieved from the backend. Running \textbf{\texttt{s3ql\_verify}} may therefore take much longer than running \textbf{\texttt{fsck.s3ql}}. \section{Checking and repairing internal file system errors} \label{fsck:checking-and-repairing-internal-file-system-errors} \textbf{\texttt{fsck.s3ql}} checks that the internal file system structure is consistent and attempts to correct any problems it finds. If an S3QL file system has not been unmounted correctly for any reason, you need to run \textbf{\texttt{fsck.s3ql}} before you can mount the file system again. The \textbf{\texttt{fsck.s3ql}} command has the following syntax: \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{fsck.s3ql }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}storage url\PYGZgt{}} \end{Verbatim} This command accepts the following options: \begin{quote} \begin{optionlist}{3cm} \item [-{-}log \textless{}target\textgreater{}] Destination for log messages. Specify \sphinxcode{none} for standard output or \sphinxcode{syslog} for the system logging daemon. Anything else will be interpreted as a file name. Log files will be rotated when they reach 1 MiB, and at most 5 old log files will be kept. Default: \sphinxcode{\textasciitilde{}/.s3ql/fsck.log} \item [-{-}cachedir \textless{}path\textgreater{}] Store cached data in this directory (default: \sphinxcode{\textasciitilde{}/.s3ql)} \item [-{-}authfile \textless{}path\textgreater{}] Read authentication credentials from this file (default: \sphinxcode{\textasciitilde{}/.s3ql/authinfo2)} \item [-{-}debug-modules \textless{}modules\textgreater{}] Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}debug] Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}quiet] be really quiet \item [-{-}backend-options \textless{}options\textgreater{}] Backend specific options (separate by commas). See backend documentation for available options.
\item [-{-}version] just print program version and exit \item [-{-}batch] If user input is required, exit without prompting. \item [-{-}force] Force checking even if file system is marked clean. \item [-{-}force-remote] Force use of remote metadata even when this would likely result in data loss. \end{optionlist} \end{quote} \section{Detecting and handling backend data corruption} \label{fsck:s3ql-verify}\label{fsck:detecting-and-handling-backend-data-corruption} The \textbf{\texttt{s3ql\_verify}} command verifies all data in the file system. In contrast to \textbf{\texttt{fsck.s3ql}}, \textbf{\texttt{s3ql\_verify}} does not trust the object listing returned by the backend, but actually attempts to retrieve every object. By default, \textbf{\texttt{s3ql\_verify}} will attempt to retrieve just the metadata for every object (for e.g. the S3-compatible or Google Storage backends this corresponds to a \sphinxcode{HEAD} request for each object), which is generally sufficient to determine if the object still exists. When specifying the \sphinxcode{-{-}data} option, \textbf{\texttt{s3ql\_verify}} will instead read every object entirely. To determine how much data will be transmitted in total when using \sphinxcode{-{-}data}, look at the \emph{After compression} row in the {\hyperref[special:s3qlstat]{\sphinxcrossref{\DUrole{std,std-ref}{s3qlstat}}}} output. \textbf{\texttt{s3ql\_verify}} is not able to correct any data corruption that it finds. Instead, a list of the corrupted and/or missing objects is written to a file and the decision about the proper course of action is left to the user. If you have administrative access to the backend server, you may want to investigate the cause of the corruption or check if the missing/corrupted objects can be restored from backups. If you believe that the missing/corrupted objects are indeed lost irrevocably, you can use the {\hyperref[contrib:remove\string-objects]{\sphinxcrossref{\DUrole{std,std-ref}{remove\_objects.py}}}} script (from the \sphinxcode{contrib} directory of the S3QL distribution) to explicitly delete the objects from the storage backend. After that, you should run \textbf{\texttt{fsck.s3ql}}. Since the (now explicitly deleted) objects should now no longer be included in the object index reported by the backend, \textbf{\texttt{fsck.s3ql}} will identify the objects as missing, update the internal file system structures accordingly, and move the affected files into the \sphinxcode{lost+found} directory. The \textbf{\texttt{s3ql\_verify}} command has the following syntax: \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{s3ql\PYGZus{}verify }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}storage url\PYGZgt{}} \end{Verbatim} This command accepts the following options: \begin{quote} \begin{optionlist}{3cm} \item [-{-}debug-modules \textless{}modules\textgreater{}] Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}debug] Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. 
\item [-{-}quiet] be really quiet \item [-{-}version] just print program version and exit \item [-{-}cachedir \textless{}path\textgreater{}] Store cached data in this directory (default: \sphinxcode{\textasciitilde{}/.s3ql)} \item [-{-}authfile \textless{}path\textgreater{}] Read authentication credentials from this file (default: \sphinxcode{\textasciitilde{}/.s3ql/authinfo2)} \item [-{-}backend-options \textless{}options\textgreater{}] Backend specific options (separate by commas). See backend documentation for available options. \item [-{-}missing-file \textless{}name\textgreater{}] File to store keys of missing objects. \item [-{-}corrupted-file \textless{}name\textgreater{}] File to store keys of corrupted objects. \item [-{-}data] Read every object completely, instead of checking just the metadata. \item [-{-}parallel PARALLEL] Number of connections to use in parallel. \item [-{-}start-with \textless{}n\textgreater{}] Skip over the first \textless{}n\textgreater{} objects and start verifying with object \textless{}n\textgreater{}+1. \end{optionlist} \end{quote} \chapter{Storing Authentication Information} \label{authinfo::doc}\label{authinfo:authinfo}\label{authinfo:storing-authentication-information} Normally, S3QL reads username and password for the backend as well as an encryption passphrase for the file system from the terminal. Most commands also accept an \sphinxcode{-{-}authfile} parameter that can be used to read this information from a file instead. The authentication file consists of sections, led by a \sphinxcode{{[}section{]}} header and followed by \sphinxcode{name: value} entries. The section headers themselves are not used by S3QL but have to be unique within the file. In each section, the following entries can be defined: \begin{quote}\begin{description} \item[{storage-url}] \leavevmode Specifies the storage url to which this section applies. If a storage url starts with the value of this entry, the section is considered applicable. \item[{backend-login}] \leavevmode Specifies the username to use for authentication with the backend. \item[{backend-password}] \leavevmode Specifies the password to use for authentication with the backend. \item[{fs-passphrase}] \leavevmode Specifies the passphrase to use to decrypt the file system (if it is encrypted). \end{description}\end{quote} When reading the authentication file, S3QL considers every applicable section in order and uses the last value that it found for each entry. For example, consider the following authentication file: \begin{Verbatim}[commandchars=\\\{\}] \PYG{g+ge}{[s3]} \PYG{l}{storage\PYGZhy{}url: s3://} \PYG{l}{backend\PYGZhy{}login: joe} \PYG{l}{backend\PYGZhy{}password: notquitesecret} \PYG{g+ge}{[fs1]} \PYG{l}{storage\PYGZhy{}url: s3://joes\PYGZhy{}first\PYGZhy{}bucket} \PYG{l}{fs\PYGZhy{}passphrase: neitheristhis} \PYG{g+ge}{[fs2]} \PYG{l}{storage\PYGZhy{}url: s3://joes\PYGZhy{}second\PYGZhy{}bucket} \PYG{l}{fs\PYGZhy{}passphrase: swordfish} \PYG{g+ge}{[fs3]} \PYG{l}{storage\PYGZhy{}url: s3://joes\PYGZhy{}second\PYGZhy{}bucket/with\PYGZhy{}prefix} \PYG{l}{backend\PYGZhy{}login: bill} \PYG{l}{backend\PYGZhy{}password: bi23ll} \PYG{l}{fs\PYGZhy{}passphrase: ll23bi} \end{Verbatim} With this authentication file, S3QL would try to log in as ``joe'' whenever the s3 backend is used, except when accessing a storage url that begins with ``s3://joes-second-bucket/with-prefix''. In that case, the last section becomes active and S3QL would use the ``bill'' credentials.
Furthermore, file system encryption passphrases will be used for storage urls that start with ``s3://joes-first-bucket'' or ``s3://joes-second-bucket''. The authentication file is parsed by the \href{http://docs.python.org/library/configparser.html}{Python ConfigParser module}. \chapter{Contributed Programs} \label{contrib::doc}\label{contrib:contributed-programs} S3QL comes with a few contributed programs that are not part of the core distribution (and are therefore not installed automatically by default), but which may nevertheless be useful. These programs are in the \sphinxcode{contrib} directory of the source distribution or in \sphinxcode{/usr/share/doc/s3ql/contrib} if you installed S3QL from a package. \section{benchmark.py} \label{contrib:benchmark-py} This program measures S3QL write performance, uplink bandwidth and compression speed to determine the limiting factor. It also gives recommendations for the compression algorithm and number of upload threads to achieve maximum performance. \section{clone\_fs.py} \label{contrib:clone-fs-py} This program physically clones an S3QL file system from one backend into another, without recompressing or reencrypting. It can be used to migrate S3 buckets to a different storage region or storage class (standard or reduced redundancy). \section{pcp.py} \label{contrib:pcp}\label{contrib:pcp-py} \sphinxcode{pcp.py} is a wrapper program that starts several rsync processes to copy directory trees in parallel. This is important because transferring files in parallel significantly enhances performance when copying data from an S3QL file system (see {\hyperref[tips:copy\string-performance]{\sphinxcrossref{\DUrole{std,std-ref}{Improving copy performance}}}} for details). To recursively copy the directory \sphinxcode{/mnt/home-backup} into \sphinxcode{/home/joe} using 8 parallel processes and preserving permissions, you would execute \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{pcp.py \PYGZhy{}a \PYGZhy{}\PYGZhy{}processes=8 /mnt/home\PYGZhy{}backup/ /home/joe} \end{Verbatim} \section{s3ql\_backup.sh} \label{contrib:s3ql-backup-sh} This is an example script that demonstrates how to set up a simple but powerful backup solution using S3QL and \href{http://samba.org/rsync}{rsync}. The \sphinxcode{s3ql\_backup.sh} script automates the following steps: \begin{enumerate} \item {} Mount the file system \item {} Replicate the previous backup with {\hyperref[special:s3qlcp]{\sphinxcrossref{\DUrole{std,std-ref}{s3qlcp}}}} \item {} Update the new copy with the data from the backup source using rsync \item {} Make the new backup immutable with {\hyperref[special:s3qllock]{\sphinxcrossref{\DUrole{std,std-ref}{s3qllock}}}} \item {} Delete old backups that are no longer needed \item {} Unmount the file system \end{enumerate} The backups are stored in directories of the form \sphinxcode{YYYY-MM-DD\_HH:mm:SS} and the {\hyperref[contrib:expire\string-backups\string-py]{\sphinxcrossref{expire\_backups.py}}} command is used to delete old backups. \section{expire\_backups.py} \label{contrib:expire-backups-py} \textbf{\texttt{expire\_backups.py}} is a program to intelligently remove old backups that are no longer needed. To define what backups you want to keep for how long, you define a number of \emph{age ranges}. \textbf{\texttt{expire\_backups}} ensures that you will have at least one backup in each age range at all times. It will keep exactly as many backups as are required for that and delete any backups that become redundant.
Age ranges are specified by giving a list of range boundaries in terms of backup cycles. Every time you create a new backup, the existing backups age by one cycle. Example: when \textbf{\texttt{expire\_backups}} is called with the age range definition \sphinxcode{1 3 7 14 31}, it will guarantee that you always have the following backups available: \begin{enumerate} \item {} A backup that is 0 to 1 cycles old (i.e., the most recent backup) \item {} A backup that is 1 to 3 cycles old \item {} A backup that is 3 to 7 cycles old \item {} A backup that is 7 to 14 cycles old \item {} A backup that is 14 to 31 cycles old \end{enumerate} \begin{notice}{note}{Note:} If you do backups in fixed intervals, then one cycle will be equivalent to the backup interval. The advantage of specifying the age ranges in terms of backup cycles rather than days or weeks is that it allows you to gracefully handle irregular backup intervals. Imagine that for some reason you do not turn on your computer for one month. Now all your backups are at least a month old, and if you had specified the above backup strategy in terms of absolute ages, they would all be deleted! Specifying age ranges in terms of backup cycles avoids this sort of problem. \end{notice} \textbf{\texttt{expire\_backups}} usage is simple. It requires backups to be stored in directories of the form \sphinxcode{year-month-day\_hour:minute:seconds} (\sphinxcode{YYYY-MM-DD\_HH:mm:ss}) and works on all backups in the current directory. So for the above backup strategy, the correct invocation would be: \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{expire\PYGZus{}backups.py 1 3 7 14 31} \end{Verbatim} When storing your backups on an S3QL file system, you probably want to specify the \sphinxcode{-{-}use-s3qlrm} option as well. This tells \textbf{\texttt{expire\_backups}} to use the {\hyperref[special:s3qlrm]{\sphinxcrossref{\DUrole{std,std-ref}{s3qlrm}}}} command to delete directories. \textbf{\texttt{expire\_backups}} uses a ``state file'' to keep track of which backups are how many cycles old (since this cannot be inferred from the dates contained in the directory names). The standard name for this state file is \sphinxcode{.expire\_backups.dat}. If this file gets damaged or deleted, \textbf{\texttt{expire\_backups}} no longer knows the ages of the backups and refuses to work. In this case you can use the \sphinxcode{-{-}reconstruct-state} option to try to reconstruct the state from the backup dates. However, the accuracy of this reconstruction depends strongly on how rigorous you have been with making backups (it is only completely correct if the time between subsequent backups has always been exactly the same), so it's generally a good idea not to tamper with the state file. For a full list of available options, run \textbf{\texttt{expire\_backups.py -{-}help}}. \section{remove\_objects.py} \label{contrib:remove-objects}\label{contrib:remove-objects-py} \textbf{\texttt{remove\_objects.py}} is a program to remove a list of objects from a storage backend. Since it acts on the backend level, the backend need not contain an S3QL file system.
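For example, if \textbf{\texttt{s3ql\_verify}} has written the keys of lost objects to a file, an invocation might look like the following (the storage URL and file name are only placeholders):

\begin{Verbatim}[commandchars=\\\{\}]
remove_objects.py s3://mybucket/myfs/ missing-objects.txt
\end{Verbatim}

Afterwards, \textbf{\texttt{fsck.s3ql}} should be run so that the file system metadata is updated accordingly.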
\chapter{Tips \& Tricks} \label{tips:tips-tricks}\label{tips::doc} \section{SSH Backend} \label{tips:ssh-tipp}\label{tips:ssh-backend} By combining S3QL's local backend with \href{http://fuse.sourceforge.net/sshfs.html}{sshfs}, it is possible to store an S3QL file system on arbitrary SSH servers: first mount the remote target directory into the local filesystem, \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{sshfs user@my.server.com:/mnt/s3ql /mnt/sshfs} \end{Verbatim} and then give the mountpoint to S3QL as a local destination: \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{mount.s3ql local:///mnt/sshfs/myfsdata /mnt/s3ql} \end{Verbatim} \section{Permanently mounted backup file system} \label{tips:permanently-mounted-backup-file-system} If you use S3QL as a backup file system, it can be useful to mount the file system permanently (rather than just mounting it for a backup and unmounting it afterwards). Especially if your file system becomes large, this saves you long mount and unmount times if you only want to restore a single file. If you decide to do so, you should make sure to \begin{itemize} \item {} Use {\hyperref[special:s3qllock]{\sphinxcrossref{\DUrole{std,std-ref}{s3qllock}}}} to ensure that backups are immutable after they have been made. \item {} Call {\hyperref[special:s3qlctrl]{\sphinxcrossref{\DUrole{std,std-ref}{s3qlctrl upload-meta}}}} right after every backup to make sure that the newest metadata is stored safely (if you do backups often enough, this may also allow you to set the \sphinxcode{-{-}metadata-upload-interval} option of \textbf{\texttt{mount.s3ql}} to zero). \end{itemize} \section{Improving copy performance} \label{tips:improving-copy-performance}\label{tips:copy-performance} \begin{notice}{note}{Note:} The following applies only when copying data \textbf{from} an S3QL file system, \textbf{not} when copying data \textbf{to} an S3QL file system. \end{notice} If you want to copy a lot of smaller files \emph{from} an S3QL file system (e.g. for a system restore) you will probably notice that the performance is rather bad. The reason for this is intrinsic to the way S3QL works. Whenever you read a file, S3QL first has to retrieve this file over the network from the backend. This takes a minimum amount of time (the network latency), no matter how big or small the file is. So when you copy lots of small files, 99\% of the time is actually spent waiting for network data. Theoretically, this problem is easy to solve: you just have to copy several files at the same time. In practice, however, almost all unix utilities (\sphinxcode{cp}, \sphinxcode{rsync}, \sphinxcode{tar} and friends) insist on copying data one file at a time. This makes a lot of sense when copying data on the local hard disk, but in the case of S3QL this is really unfortunate. The best workaround that has been found so far is to copy files by starting several rsync processes at once and use exclusion rules to make sure that they work on different sets of files. For example, the following script will start 3 rsync instances. The first instance handles all filenames starting with a-f, the second the filenames from g-l and the third covers the rest. The \sphinxcode{+ */} rule ensures that every instance looks into all directories. \begin{Verbatim}[commandchars=\\\{\}] \PYG{c}{\PYGZsh{}!/bin/bash} \PYG{l}{RSYNC\PYGZus{}ARGS=\PYGZdq{}\PYGZhy{}aHv /mnt/s3ql/ /home/restore/\PYGZdq{}} \PYG{l}{rsync \PYGZhy{}f \PYGZdq{}+ */\PYGZdq{} \PYGZhy{}f \PYGZdq{}\PYGZhy{}!
}\PYG{g+ge}{[a\PYGZhy{}f]}\PYG{l}{*\PYGZdq{} \PYGZdl{}RSYNC\PYGZus{}ARGS \PYGZam{}} \PYG{l}{rsync \PYGZhy{}f \PYGZdq{}+ */\PYGZdq{} \PYGZhy{}f \PYGZdq{}\PYGZhy{}! }\PYG{g+ge}{[g\PYGZhy{}l]}\PYG{l}{*\PYGZdq{} \PYGZdl{}RSYNC\PYGZus{}ARGS \PYGZam{}} \PYG{l}{rsync \PYGZhy{}f \PYGZdq{}+ */\PYGZdq{} \PYGZhy{}f \PYGZdq{}\PYGZhy{} }\PYG{g+ge}{[a\PYGZhy{}l]}\PYG{l}{*\PYGZdq{} \PYGZdl{}RSYNC\PYGZus{}ARGS \PYGZam{}} \PYG{l}{wait} \end{Verbatim} The optimum number of parallel processes depends on your network connection and the size of the files that you want to transfer. However, starting about 10 processes seems to be a good compromise that increases performance dramatically in almost all situations. S3QL comes with a script named \sphinxcode{pcp.py} in the \sphinxcode{contrib} directory that can be used to transfer files in parallel without having to write an explicit script first. See the description of {\hyperref[contrib:pcp]{\sphinxcrossref{\DUrole{std,std-ref}{pcp.py}}}} for details. \chapter{Known Issues} \label{issues:known-issues}\label{issues::doc}\begin{itemize} \item {} S3QL de-duplicates data blocks based solely on SHA256 checksums, without doing a byte-by-byte comparison of the blocks. Since it is possible for two data blocks to have the same checksum despite having different contents, this can lead to problems. If two such blocks are stored in an S3QL file system, the data in one block will be lost and replaced by the data in the other block. However, the chances of this occurring for any two blocks are about 1 in 10\textasciicircum{}77 (2\textasciicircum{}256). For a file system that holds a total of 10\textasciicircum{}34 blocks, the chances of a collision increase to about 1 in 10\textasciicircum{}9. Storing more than 10\textasciicircum{}34 blocks (or about 10\textasciicircum{}25 TB with an (extremely small) block size of 4 kB) is therefore not recommended. Being exceptionally unlucky may also be a disadvantage. \item {} S3QL does not support Access Control Lists (ACLs). This is due to a bug in the FUSE library and will therefore hopefully be fixed at some point. See \href{https://bitbucket.org/nikratio/s3ql/issue/16/support-access-control-lists-acls}{issue \#16} for more details. \item {} As of Linux kernel 3.5, S3QL file systems do not implement the ``write protect'' bit on directories. In other words, even if a directory has the write protect bit set, the owner of the directory can delete any files and (empty) subdirectories inside it. This is a bug in the FUSE kernel module (cf. \url{https://github.com/libfuse/libfuse/issues/23}) and needs to be fixed in the kernel. Unfortunately it does not look as if this is going to be fixed anytime soon (as of 2016/2/28). \item {} S3QL is rather slow when an application tries to write data in unreasonably small chunks. If a 1 MiB file is copied in chunks of 1 KB, this will take more than 10 times as long as when it's copied with the (recommended) chunk size of 128 KiB. This is a limitation of the FUSE library (which does not yet support write caching) which will hopefully be addressed in some future FUSE version. Most applications, including e.g. GNU \sphinxcode{cp} and \sphinxcode{rsync}, use reasonably large buffers and are therefore not affected by this problem and perform very efficiently on S3QL file systems. However, if you encounter unexpectedly slow performance with a specific program, this might be due to the program using very small write buffers.
Although this is not really a bug in the program, it might be worth asking the program's authors for help. \item {} S3QL always updates file and directory access times as if the \sphinxcode{relatime} mount option has been specified: the access time (``atime'') is only updated if it is currently earlier than either the status change time (``ctime'') or modification time (``mtime''). \item {} S3QL directories always have an \sphinxcode{st\_nlink} value of 1. This may confuse programs that rely on directories having \sphinxcode{st\_nlink} values of \emph{(2 + number of sub directories)}. Note that this is not a bug in S3QL. Including sub directories in the \sphinxcode{st\_nlink} value is a Unix convention, but by no means a requirement. If an application blindly relies on this convention being followed, then this is a bug in the application. Prominent examples are early versions of GNU find, which required the \sphinxcode{-{-}noleaf} option to work correctly on S3QL file systems. This bug has already been fixed in recent find versions. \item {} The \sphinxcode{umount} and \sphinxcode{fusermount -u} commands will \emph{not} block until all data has been uploaded to the backend (this is a FUSE limitation that will hopefully be removed in the future, see \href{https://bitbucket.org/nikratio/s3ql/issue/1/blocking-fusermount-and-umount}{issue \#1}). If you use either command to unmount an S3QL file system, you have to take care to explicitly wait for the \sphinxcode{mount.s3ql} process to terminate before you shut down or restart the system. Therefore it is generally not a good idea to mount an S3QL file system in \sphinxcode{/etc/fstab} (you should use a dedicated init script instead). \item {} S3QL relies on the backends not to run out of space. This is a given for big storage providers like Amazon S3 or Google Storage, but you may stumble upon this if you use your own server or smaller providers. If there is no space left in the backend, attempts to write more data into the S3QL file system will fail and the file system will be in an inconsistent state and require a file system check (and you should make sure to make space available in the backend before running the check). Unfortunately, there is no way to handle insufficient space in the backend without leaving the file system inconsistent. Since S3QL first writes data into the cache, it can no longer return an error when it later turns out that the cache can not be committed to the backend. \item {} When using python-dugong versions 3.3 or earlier, S3QL supports only CONNECT-style proxying, which may cause issues with some proxy servers when using plain HTTP. Upgrading to python-dugong 3.4 or newer removes this limitation. \end{itemize} \chapter{Manpages} \label{man/index:manpages}\label{man/index::doc} The man pages are installed with S3QL on your system and can be viewed with the \textbf{\texttt{man}} command. For reference, they are also included here in the User's Guide. \section{The \textbf{\texttt{mkfs.s3ql}} command} \label{man/mkfs:the-command-command}\label{man/mkfs::doc} \subsection{Synopsis} \label{man/mkfs:synopsis} \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{mkfs.s3ql }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}storage url\PYGZgt{}} \end{Verbatim} \subsection{Description} \label{man/mkfs:description} The \textbf{\texttt{mkfs.s3ql}} command creates a new file system in the location specified by \emph{storage url}. The storage url depends on the backend that is used.
The S3QL User's Guide should be consulted for a description of the available backends. Unless you have specified the \sphinxcode{-{-}plain} option, \sphinxcode{mkfs.s3ql} will ask you to enter an encryption password. This password will \emph{not} be read from an authentication file specified with the \sphinxcode{-{-}authfile} option to prevent accidental creation of an encrypted file system. \subsection{Options} \label{man/mkfs:options} The \textbf{\texttt{mkfs.s3ql}} command accepts the following options. \begin{quote} \begin{optionlist}{3cm} \item [-{-}cachedir \textless{}path\textgreater{}] Store cached data in this directory (default: \sphinxcode{\textasciitilde{}/.s3ql)} \item [-{-}authfile \textless{}path\textgreater{}] Read authentication credentials from this file (default: \sphinxcode{\textasciitilde{}/.s3ql/authinfo2)} \item [-{-}debug-modules \textless{}modules\textgreater{}] Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}debug] Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}quiet] be really quiet \item [-{-}backend-options \textless{}options\textgreater{}] Backend specific options (separate by commas). See backend documentation for available options. \item [-{-}version] just print program version and exit \item [-L \textless{}name\textgreater{}] Filesystem label \item [-{-}max-obj-size \textless{}size\textgreater{}] Maximum size of storage objects in KiB. Files bigger than this will be spread over multiple objects in the storage backend. Default: 10240 KiB. \item [-{-}plain] Create unencrypted file system. \item [-{-}force] Overwrite any existing data. \end{optionlist} \end{quote} \subsection{Exit Codes} \label{man/mkfs:exit-codes} \textbf{\texttt{mkfs.s3ql}} may terminate with the following exit codes: \begin{quote}\begin{description} \item[{0}] \leavevmode Everything went well. \item[{1}] \leavevmode An unexpected error occurred. This may indicate a bug in the program. \item[{2}] \leavevmode Invalid command line argument. \item[{3}] \leavevmode Invalid backend option. \item[{11}] \leavevmode No such backend. \item[{12}] \leavevmode Authentication file has insecure permissions. \item[{13}] \leavevmode Unable to parse proxy settings. \item[{14}] \leavevmode Invalid credentials (Authentication failed). \item[{15}] \leavevmode No permission to access backend (Authorization denied). \item[{16}] \leavevmode Invalid storage URL, specified location does not exist in backend. \item[{19}] \leavevmode Unable to connect to backend, can't resolve hostname. \item[{45}] \leavevmode Unable to access cache directory. \end{description}\end{quote} \subsection{See Also} \label{man/mkfs:see-also} The S3QL homepage is at \url{https://bitbucket.org/nikratio/s3ql/}. The full S3QL documentation should also be installed somewhere on your system, common locations are \sphinxcode{/usr/share/doc/s3ql} or \sphinxcode{/usr/local/doc/s3ql}.
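As an illustration (the label and storage URL are only placeholders), a typical invocation might look like:

\begin{Verbatim}[commandchars=\\\{\}]
mkfs.s3ql -L mydata s3://mybucket/myfs/
\end{Verbatim}

\textbf{\texttt{mkfs.s3ql}} will then prompt for an encryption password unless \sphinxcode{-{-}plain} is given.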
\section{The \textbf{\texttt{s3qladm}} command} \label{man/adm:the-command-command}\label{man/adm::doc} \subsection{Synopsis} \label{man/adm:synopsis} \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{s3qladm }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}action\PYGZgt{}}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}storage url\PYGZgt{}} \end{Verbatim} where \sphinxcode{action} may be either of \textbf{\texttt{passphrase}}, \textbf{\texttt{upgrade}}, \textbf{\texttt{clear}} or \textbf{\texttt{download-metadata}}. \subsection{Description} \label{man/adm:description} The \textbf{\texttt{s3qladm}} command performs various operations on \emph{unmounted} S3QL file systems. The file system \emph{must not be mounted} when using \textbf{\texttt{s3qladm}} or things will go badly wrong. The storage url depends on the backend that is used. The S3QL User's Guide should be consulted for a description of the available backends. \subsection{Options} \label{man/adm:options} The \textbf{\texttt{s3qladm}} command accepts the following options. \begin{quote} \begin{optionlist}{3cm} \item [-{-}debug-modules \textless{}modules\textgreater{}] Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}debug] Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}quiet] be really quiet \item [-{-}log \textless{}target\textgreater{}] Destination for log messages. Specify \sphinxcode{none} for standard output or \sphinxcode{syslog} for the system logging daemon. Anything else will be interpreted as a file name. Log files will be rotated when they reach 1 MiB, and at most 5 old log files will be kept. Default: \sphinxcode{None} \item [-{-}authfile \textless{}path\textgreater{}] Read authentication credentials from this file (default: \sphinxcode{\textasciitilde{}/.s3ql/authinfo2)} \item [-{-}backend-options \textless{}options\textgreater{}] Backend specific options (separate by commas). See backend documentation for available options. \item [-{-}cachedir \textless{}path\textgreater{}] Store cached data in this directory (default: \sphinxcode{\textasciitilde{}/.s3ql)} \item [-{-}version] just print program version and exit \end{optionlist} \end{quote} Hint: run \sphinxcode{s3qladm \textless{}action\textgreater{} -{-}help} to get help on the additional arguments that the different actions take. \subsection{Actions} \label{man/adm:actions} The following actions may be specified: \begin{description} \item[{passphrase}] \leavevmode Changes the encryption passphrase of the file system. \item[{upgrade}] \leavevmode Upgrade the file system to the newest revision. \item[{clear}] \leavevmode Delete the file system with all the stored data. \item[{download-metadata}] \leavevmode Interactively download backups of the file system metadata. \end{description} \subsection{Exit Codes} \label{man/adm:exit-codes} \textbf{\texttt{s3qladm}} may terminate with the following exit codes: \begin{quote}\begin{description} \item[{0}] \leavevmode Everything went well. \item[{1}] \leavevmode An unexpected error occurred. This may indicate a bug in the program. \item[{2}] \leavevmode Invalid command line argument. \item[{3}] \leavevmode Invalid backend option. \item[{10}] \leavevmode Could not open log file for writing. \item[{11}] \leavevmode No such backend. \item[{12}] \leavevmode Authentication file has insecure permissions.
\item[{13}] \leavevmode Unable to parse proxy settings. \item[{14}] \leavevmode Invalid credentials (Authentication failed). \item[{15}] \leavevmode No permission to access backend (Authorization denied). \item[{16}] \leavevmode Invalid storage URL, specified location does not exist in backend. \item[{17}] \leavevmode Wrong file system passphrase. \item[{18}] \leavevmode No S3QL file system found at given storage URL. \item[{19}] \leavevmode Unable to connect to backend, can't resolve hostname. \item[{45}] \leavevmode Unable to access cache directory. \end{description}\end{quote} \subsection{See Also} \label{man/adm:see-also} The S3QL homepage is at \url{https://bitbucket.org/nikratio/s3ql/}. The full S3QL documentation should also be installed somewhere on your system, common locations are \sphinxcode{/usr/share/doc/s3ql} or \sphinxcode{/usr/local/doc/s3ql}. \section{The \textbf{\texttt{mount.s3ql}} command} \label{man/mount:the-command-command}\label{man/mount::doc} \subsection{Synopsis} \label{man/mount:synopsis} \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{mount.s3ql }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}storage url\PYGZgt{}}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}mount point\PYGZgt{}} \end{Verbatim} \subsection{Description} \label{man/mount:description} The \textbf{\texttt{mount.s3ql}} command mounts the S3QL file system stored in \emph{storage url} in the directory \emph{mount point}. The storage url depends on the backend that is used. The S3QL User's Guide should be consulted for a description of the available backends. \subsection{Options} \label{man/mount:options} The \textbf{\texttt{mount.s3ql}} command accepts the following options. \begin{quote} \begin{optionlist}{3cm} \item [-{-}log \textless{}target\textgreater{}] Destination for log messages. Specify \sphinxcode{none} for standard output or \sphinxcode{syslog} for the system logging daemon. Anything else will be interpreted as a file name. Log files will be rotated when they reach 1 MiB, and at most 5 old log files will be kept. Default: \sphinxcode{\textasciitilde{}/.s3ql/mount.log} \item [-{-}cachedir \textless{}path\textgreater{}] Store cached data in this directory (default: \sphinxcode{\textasciitilde{}/.s3ql)} \item [-{-}authfile \textless{}path\textgreater{}] Read authentication credentials from this file (default: \sphinxcode{\textasciitilde{}/.s3ql/authinfo2)} \item [-{-}debug-modules \textless{}modules\textgreater{}] Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}debug] Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}quiet] be really quiet \item [-{-}backend-options \textless{}options\textgreater{}] Backend specific options (separate by commas). See backend documentation for available options. \item [-{-}version] just print program version and exit \item [-{-}cachesize \textless{}size\textgreater{}] Cache size in KiB (default: autodetect). \item [-{-}max-cache-entries \textless{}num\textgreater{}] Maximum number of entries in cache (default: autodetect). Each cache entry requires one file descriptor, so if you increase this number you have to make sure that your process file descriptor limit (as set with \sphinxcode{ulimit -n}) is high enough (at least the number of cache entries + 100). 
\item [-{-}allow-other] Normally, only the user who called \sphinxcode{mount.s3ql} can access the mount point. This user then also has full access to it, independent of individual file permissions. If the \sphinxcode{-{-}allow-other} option is specified, other users can access the mount point as well and individual file permissions are taken into account for all users. \item [-{-}allow-root] Like \sphinxcode{-{-}allow-other}, but restrict access to the mounting user and the root user. \item [-{-}fg] Do not daemonize, stay in foreground \item [-{-}upstart] Stay in foreground and raise SIGSTOP once mountpoint is up. \item [-{-}compress \textless{}algorithm-lvl\textgreater{}] Compression algorithm and compression level to use when storing new data. \emph{algorithm} may be any of \sphinxcode{lzma}, \sphinxcode{bzip2}, \sphinxcode{zlib}, or none. \emph{lvl} may be any integer from 0 (fastest) to 9 (slowest). Default: \sphinxcode{lzma-6} \item [-{-}metadata-upload-interval \textless{}seconds\textgreater{}] Interval in seconds between complete metadata uploads. Set to 0 to disable. Default: 24h. \item [-{-}threads \textless{}no\textgreater{}] Number of parallel upload threads to use (default: auto). \item [-{-}nfs] Enable some optimizations for exporting the file system over NFS. (default: False) \end{optionlist} \end{quote} \subsection{Exit Codes} \label{man/mount:exit-codes} \textbf{\texttt{mount.s3ql}} may terminate with the following exit codes: \begin{quote}\begin{description} \item[{0}] \leavevmode Everything went well. \item[{1}] \leavevmode An unexpected error occured. This may indicate a bug in the program. \item[{2}] \leavevmode Invalid command line argument. \item[{3}] \leavevmode Invalid backend option. \item[{10}] \leavevmode Could not open log file for writing. \item[{11}] \leavevmode No such backend. \item[{12}] \leavevmode Authentication file has insecure permissions. \item[{13}] \leavevmode Unable to parse proxy settings. \item[{14}] \leavevmode Invalid credentials (Authentication failed). \item[{15}] \leavevmode No permission to access backend (Authorization denied). \item[{16}] \leavevmode Invalid storage URL, specified location does not exist in backend. \item[{17}] \leavevmode Wrong file system passphrase. \item[{18}] \leavevmode No S3QL file system found at given storage URL. \item[{19}] \leavevmode Unable to connect to backend, can't resolve hostname. \item[{30}] \leavevmode File system was not unmounted cleanly. \item[{31}] \leavevmode File system appears to be mounted elsewhere. \item[{32}] \leavevmode Unsupported file system revision (too old). \item[{33}] \leavevmode Unsupported file system revision (too new). \item[{34}] \leavevmode Insufficient free nodes, need to run \textbf{\texttt{fsck.s3ql}}. \item[{35}] \leavevmode Attempted to mount read-only, this is not supported. \item[{36}] \leavevmode Mountpoint does not exist. \item[{37}] \leavevmode Not enough available file descriptors. \item[{39}] \leavevmode Unable to bind file system to mountpoint. \item[{45}] \leavevmode Unable to access cache directory. \end{description}\end{quote} \subsection{See Also} \label{man/mount:see-also} The S3QL homepage is at \url{https://bitbucket.org/nikratio/s3ql/}. The full S3QL documentation should also be installed somewhere on your system, common locations are \sphinxcode{/usr/share/doc/s3ql} or \sphinxcode{/usr/local/doc/s3ql}. 
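For illustration, a hypothetical mount command that selects zlib compression and makes the mount point accessible to other users might look like this (storage URL and paths are placeholders):
\begin{Verbatim}[commandchars=\\\{\}]
mount.s3ql --compress zlib-6 --allow-other local:///srv/s3ql-data /mnt/backups
\end{Verbatim}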
\section{The \textbf{\texttt{s3qlstat}} command} \label{man/stat:the-command-command}\label{man/stat::doc} \subsection{Synopsis} \label{man/stat:synopsis} \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{s3qlstat }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}mountpoint\PYGZgt{}} \end{Verbatim} \subsection{Description} \label{man/stat:description} The \textbf{\texttt{s3qlstat}} command prints statistics about the S3QL file system mounted at \sphinxcode{mountpoint}. \textbf{\texttt{s3qlstat}} can only be called by the user that mounted the file system and (if the file system was mounted with \sphinxcode{-{-}allow-other} or \sphinxcode{-{-}allow-root}) the root user. \subsection{Options} \label{man/stat:options} The \textbf{\texttt{s3qlstat}} command accepts the following options: \begin{quote} \begin{optionlist}{3cm} \item [-{-}debug-modules \textless{}modules\textgreater{}] Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}debug] Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}quiet] be really quiet \item [-{-}version] just print program version and exit \item [-{-}raw] Do not pretty-print numbers \end{optionlist} \end{quote} \subsection{Exit Codes} \label{man/stat:exit-codes} \textbf{\texttt{s3qlstat}} may terminate with the following exit codes: \begin{quote}\begin{description} \item[{0}] \leavevmode Everything went well. \item[{1}] \leavevmode An unexpected error occured. This may indicate a bug in the program. \item[{2}] \leavevmode Invalid command line argument. \end{description}\end{quote} \subsection{See Also} \label{man/stat:see-also} The S3QL homepage is at \url{https://bitbucket.org/nikratio/s3ql/}. The full S3QL documentation should also be installed somewhere on your system, common locations are \sphinxcode{/usr/share/doc/s3ql} or \sphinxcode{/usr/local/doc/s3ql}. \section{The \textbf{\texttt{s3qlctrl}} command} \label{man/ctrl:the-command-command}\label{man/ctrl::doc} \subsection{Synopsis} \label{man/ctrl:synopsis} \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{s3qlctrl }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}action\PYGZgt{}}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}mountpoint\PYGZgt{}}\PYG{l}{ ...} \end{Verbatim} where \sphinxcode{action} may be either of \textbf{\texttt{flushcache}}, \textbf{\texttt{upload-meta}}, \textbf{\texttt{cachesize}} or \textbf{\texttt{log-metadata}}. \subsection{Description} \label{man/ctrl:description} The \textbf{\texttt{s3qlctrl}} command performs various actions on the S3QL file system mounted in \sphinxcode{mountpoint}. \textbf{\texttt{s3qlctrl}} can only be called by the user that mounted the file system and (if the file system was mounted with \sphinxcode{-{-}allow-other} or \sphinxcode{-{-}allow-root}) the root user. The following actions may be specified: \begin{description} \item[{flushcache}] \leavevmode Uploads all changed file data to the backend. \item[{upload-meta}] \leavevmode Upload metadata to the backend. All file system operations will block while a snapshot of the metadata is prepared for upload. \item[{cachesize}] \leavevmode Changes the cache size of the file system. 
This action requires an additional argument that specifies the new cache size in KiB, so the complete command line is: \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{s3qlctrl }\PYG{g+ge}{[options]}\PYG{l}{ cachesize }\PYG{n+nv}{\PYGZlt{}mountpoint\PYGZgt{}}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}new\PYGZhy{}cache\PYGZhy{}size\PYGZgt{}} \end{Verbatim} \item[{log}] \leavevmode Change the amount of information that is logged into \sphinxcode{\textasciitilde{}/.s3ql/mount.log} file. The complete syntax is: \begin{Verbatim}[commandchars=\\\{\}] s3qlctrl [options] log \PYGZlt{}mountpoint\PYGZgt{} \PYGZlt{}level\PYGZgt{} [\PYGZlt{}module\PYGZgt{} [\PYGZlt{}module\PYGZgt{} ...]] \end{Verbatim} here \sphinxcode{level} is the desired new log level and may be either of \emph{debug}, \emph{info} or \emph{warn}. One or more \sphinxcode{module} may only be specified with the \emph{debug} level and allow to restrict the debug output to just the listed modules. \end{description} \subsection{Options} \label{man/ctrl:options} The \textbf{\texttt{s3qlctrl}} command also accepts the following options, no matter what specific action is being invoked: \begin{quote} \begin{optionlist}{3cm} \item [-{-}debug-modules \textless{}modules\textgreater{}] Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}debug] Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}quiet] be really quiet \item [-{-}version] just print program version and exit \end{optionlist} \end{quote} Hint: run \sphinxcode{s3qlctrl \textless{}action\textgreater{} -{-}help} to get help on the additional arguments that the different actions take. \subsection{Exit Codes} \label{man/ctrl:exit-codes} \textbf{\texttt{s3qlctrl}} may terminate with the following exit codes: \begin{quote}\begin{description} \item[{0}] \leavevmode Everything went well. \item[{1}] \leavevmode An unexpected error occured. This may indicate a bug in the program. \item[{2}] \leavevmode Invalid command line argument. \end{description}\end{quote} \subsection{See Also} \label{man/ctrl:see-also} The S3QL homepage is at \url{https://bitbucket.org/nikratio/s3ql/}. The full S3QL documentation should also be installed somewhere on your system, common locations are \sphinxcode{/usr/share/doc/s3ql} or \sphinxcode{/usr/local/doc/s3ql}. \section{The \textbf{\texttt{s3qlcp}} command} \label{man/cp:the-command-command}\label{man/cp::doc} \subsection{Synopsis} \label{man/cp:synopsis} \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{s3qlcp }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}source\PYGZhy{}dir\PYGZgt{}}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}dest\PYGZhy{}dir\PYGZgt{}} \end{Verbatim} \subsection{Description} \label{man/cp:description} The \textbf{\texttt{s3qlcp}} command duplicates the directory tree \sphinxcode{source-dir} into \sphinxcode{dest-dir} without physically copying the file contents. Both source and destination must lie inside the same S3QL file system. The replication will not take any additional space. Only if one of directories is modified later on, the modified data will take additional storage space. \sphinxcode{s3qlcp} can only be called by the user that mounted the file system and (if the file system was mounted with \sphinxcode{-{-}allow-other} or \sphinxcode{-{-}allow-root}) the root user. 
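For example, a hypothetical invocation that snapshots a \sphinxcode{documents} directory into a target directory named after the current month (both directories lying on the same mounted S3QL file system) could look like this:
\begin{Verbatim}[commandchars=\\\{\}]
s3qlcp /mnt/s3ql/documents /mnt/s3ql/documents_January
\end{Verbatim}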
Note that:
\begin{itemize}
\item {}
After the replication, both source and target directory will still be completely ordinary directories. You can regard \sphinxcode{\textless{}src\textgreater{}} as a snapshot of \sphinxcode{\textless{}target\textgreater{}} or vice versa. However, the most common usage of \sphinxcode{s3qlcp} is to regularly duplicate the same source directory, say \sphinxcode{documents}, to different target directories. For example, for a monthly replication, the target directories would typically be named something like \sphinxcode{documents\_January} for the replication in January, \sphinxcode{documents\_February} for the replication in February etc. In this case it is clear that the target directories should be regarded as snapshots of the source directory.
\item {}
Exactly the same effect could be achieved by an ordinary copy program like \sphinxcode{cp -a}. However, this procedure would be orders of magnitude slower, because \sphinxcode{cp} would have to read every file completely (so that S3QL has to fetch all the data over the network from the backend) before writing it into the destination folder.
\end{itemize}
\subsubsection{Snapshotting vs Hardlinking}
\label{man/cp:snapshotting-vs-hardlinking}
Snapshot support in S3QL is inspired by the hardlinking feature that is offered by programs like \href{http://www.samba.org/rsync}{rsync} or \href{http://savannah.nongnu.org/projects/storebackup}{storeBackup}. These programs can create a hardlink instead of copying a file if an identical file already exists in the backup. However, using hardlinks has two large disadvantages:
\begin{itemize}
\item {}
backups and restores always have to be made with a special program that takes care of the hardlinking. The backup must not be touched by any other programs (they may make changes that inadvertently affect other hardlinked files)
\item {}
special care needs to be taken to handle files which are already hardlinked (the restore program needs to know that the hardlink was not just introduced by the backup program to save space)
\end{itemize}
S3QL snapshots do not have these problems, and they can be used with any backup program.
\subsection{Options}
\label{man/cp:options}
The \textbf{\texttt{s3qlcp}} command accepts the following options:
\begin{quote}
\begin{optionlist}{3cm}
\item [-{-}debug-modules \textless{}modules\textgreater{}] Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option.
\item [-{-}debug] Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option.
\item [-{-}quiet] be really quiet
\item [-{-}version] just print program version and exit
\end{optionlist}
\end{quote}
\subsection{Exit Codes}
\label{man/cp:exit-codes}
\textbf{\texttt{s3qlcp}} may terminate with the following exit codes:
\begin{quote}\begin{description}
\item[{0}] \leavevmode
Everything went well.
\item[{1}] \leavevmode
An unexpected error occurred. This may indicate a bug in the program.
\item[{2}] \leavevmode
Invalid command line argument.
\end{description}\end{quote}
\subsection{See Also}
\label{man/cp:see-also}
The S3QL homepage is at \url{https://bitbucket.org/nikratio/s3ql/}.
The full S3QL documentation should also be installed somewhere on your system, common locations are \sphinxcode{/usr/share/doc/s3ql} or \sphinxcode{/usr/local/doc/s3ql}.
\section{The \textbf{\texttt{s3qlrm}} command}
\label{man/rm:the-command-command}\label{man/rm::doc}
\subsection{Synopsis}
\label{man/rm:synopsis}
\begin{Verbatim}[commandchars=\\\{\}]
\PYG{l}{s3qlrm }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}directory\PYGZgt{}}
\end{Verbatim}
\subsection{Description}
\label{man/rm:description}
The \textbf{\texttt{s3qlrm}} command recursively deletes files and directories on an S3QL file system. Although \textbf{\texttt{s3qlrm}} is faster than using e.g. \textbf{\texttt{rm -r}}, the main reason for its existence is that it allows you to delete immutable trees (which can be created with \textbf{\texttt{s3qllock}}) as well.
Be warned that there is no additional confirmation. The directory will be removed entirely and immediately.
\textbf{\texttt{s3qlrm}} can only be called by the user that mounted the file system and (if the file system was mounted with \sphinxcode{-{-}allow-other} or \sphinxcode{-{-}allow-root}) the root user.
\subsection{Options}
\label{man/rm:options}
The \textbf{\texttt{s3qlrm}} command accepts the following options:
\begin{quote}
\begin{optionlist}{3cm}
\item [-{-}debug-modules \textless{}modules\textgreater{}] Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option.
\item [-{-}debug] Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option.
\item [-{-}quiet] be really quiet
\item [-{-}version] just print program version and exit
\end{optionlist}
\end{quote}
\subsection{Exit Codes}
\label{man/rm:exit-codes}
\textbf{\texttt{s3qlrm}} may terminate with the following exit codes:
\begin{quote}\begin{description}
\item[{0}] \leavevmode
Everything went well.
\item[{1}] \leavevmode
An unexpected error occurred. This may indicate a bug in the program.
\item[{2}] \leavevmode
Invalid command line argument.
\end{description}\end{quote}
\subsection{See Also}
\label{man/rm:see-also}
The S3QL homepage is at \url{https://bitbucket.org/nikratio/s3ql/}.
The full S3QL documentation should also be installed somewhere on your system, common locations are \sphinxcode{/usr/share/doc/s3ql} or \sphinxcode{/usr/local/doc/s3ql}.
\section{The \textbf{\texttt{s3qllock}} command}
\label{man/lock:the-command-command}\label{man/lock::doc}
\subsection{Synopsis}
\label{man/lock:synopsis}
\begin{Verbatim}[commandchars=\\\{\}]
\PYG{l}{s3qllock }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}directory\PYGZgt{}}
\end{Verbatim}
\subsection{Description}
\label{man/lock:description}
The \textbf{\texttt{s3qllock}} command makes a directory tree in an S3QL file system immutable. Immutable trees can no longer be changed in any way whatsoever. You cannot add new files or directories and you cannot change or delete existing files and directories. The only way to get rid of an immutable tree is to use the \textbf{\texttt{s3qlrm}} command.
\textbf{\texttt{s3qllock}} can only be called by the user that mounted the file system and (if the file system was mounted with \sphinxcode{-{-}allow-other} or \sphinxcode{-{-}allow-root}) the root user.
\subsection{Rationale}
\label{man/lock:rationale}
Immutability is a feature designed for backups. Traditionally, backups have been made on external tape drives. Once a backup was made, the tape drive was removed and locked away somewhere on a shelf.
This has the great advantage that the contents of the backup are now permanently fixed. Nothing (short of physical destruction) can change or delete files in the backup. In contrast, when backing up into an online storage system like S3QL, all backups are available every time the file system is mounted. Nothing prevents a file in an old backup from being changed again later on. In the worst case, this may make your entire backup system worthless. Imagine that your system gets infected by a nasty virus that simply deletes all files it can find -- if the virus is active while the backup file system is mounted, the virus will destroy all your old backups as well! Even if the possibility of a malicious virus or trojan horse is excluded, being able to change a backup after it has been made is generally not a good idea. A common S3QL use case is to keep the file system mounted at all times and periodically create backups with \textbf{\texttt{rsync -a}}. This allows every user to recover her files from a backup without having to call the system administrator. However, this also allows every user to accidentally change or delete files \emph{in} one of the old backups. Making a backup immutable protects you against all these problems. Unless you happen to run into a virus that was specifically programmed to attack S3QL file systems, backups can be neither deleted nor changed after they have been made immutable. \subsection{Options} \label{man/lock:options} The \textbf{\texttt{s3qllock}} command accepts the following options: \begin{quote} \begin{optionlist}{3cm} \item [-{-}debug-modules \textless{}modules\textgreater{}] Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}debug] Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}quiet] be really quiet \item [-{-}version] just print program version and exit \end{optionlist} \end{quote} \subsection{Exit Codes} \label{man/lock:exit-codes} \textbf{\texttt{s3qllock}} may terminate with the following exit codes: \begin{quote}\begin{description} \item[{0}] \leavevmode Everything went well. \item[{1}] \leavevmode An unexpected error occured. This may indicate a bug in the program. \item[{2}] \leavevmode Invalid command line argument. \end{description}\end{quote} \subsection{See Also} \label{man/lock:see-also} The S3QL homepage is at \url{https://bitbucket.org/nikratio/s3ql/}. The full S3QL documentation should also be installed somewhere on your system, common locations are \sphinxcode{/usr/share/doc/s3ql} or \sphinxcode{/usr/local/doc/s3ql}. \section{The \textbf{\texttt{umount.s3ql}} command} \label{man/umount:the-command-command}\label{man/umount::doc} \subsection{Synopsis} \label{man/umount:synopsis} \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{umount.s3ql }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}mount point\PYGZgt{}} \end{Verbatim} \subsection{Description} \label{man/umount:description} The \textbf{\texttt{umount.s3ql}} command unmounts the S3QL file system mounted in the directory \emph{mount point} and blocks until all data has been uploaded to the storage backend. Only the user who mounted the file system with \textbf{\texttt{mount.s3ql}} is able to unmount it with \textbf{\texttt{umount.s3ql}}. 
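For example, assuming the file system is mounted at the hypothetical mount point \sphinxcode{/mnt/s3ql}, it would be unmounted with:
\begin{Verbatim}[commandchars=\\\{\}]
umount.s3ql /mnt/s3ql
\end{Verbatim}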
If you are root and want to unmount an S3QL file system mounted by an ordinary user, you have to use the \textbf{\texttt{fusermount -u}} or \textbf{\texttt{umount}} command instead. Note that these commands do not block until all data has been uploaded, so if you use them instead of \textbf{\texttt{umount.s3ql}} then you should manually wait for the \textbf{\texttt{mount.s3ql}} process to terminate before shutting down the system. \subsection{Options} \label{man/umount:options} The \textbf{\texttt{umount.s3ql}} command accepts the following options. \begin{quote} \begin{optionlist}{3cm} \item [-{-}debug-modules \textless{}modules\textgreater{}] Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}debug] Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}quiet] be really quiet \item [-{-}version] just print program version and exit \item [-{-}lazy, -z] Lazy umount. Detaches the file system immediately, even if there are still open files. The data will be uploaded in the background once all open files have been closed. \end{optionlist} \end{quote} \subsection{Exit Codes} \label{man/umount:exit-codes} \textbf{\texttt{umount.s3ql}} may terminate with the following exit codes: \begin{quote}\begin{description} \item[{0}] \leavevmode Everything went well. \item[{1}] \leavevmode An unexpected error occured. This may indicate a bug in the program. \item[{2}] \leavevmode Invalid command line argument. \end{description}\end{quote} \subsection{See Also} \label{man/umount:see-also} The S3QL homepage is at \url{https://bitbucket.org/nikratio/s3ql/}. The full S3QL documentation should also be installed somewhere on your system, common locations are \sphinxcode{/usr/share/doc/s3ql} or \sphinxcode{/usr/local/doc/s3ql}. \section{The \textbf{\texttt{fsck.s3ql}} command} \label{man/fsck:the-command-command}\label{man/fsck::doc} \subsection{Synopsis} \label{man/fsck:synopsis} \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{fsck.s3ql }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}storage url\PYGZgt{}} \end{Verbatim} \subsection{Description} \label{man/fsck:description} The \textbf{\texttt{fsck.s3ql}} command checks the file system in the location specified by \emph{storage url} for errors and attempts to repair any problems. The storage url depends on the backend that is used. The S3QL User's Guide should be consulted for a description of the available backends. \subsection{Options} \label{man/fsck:options} The \textbf{\texttt{fsck.s3ql}} command accepts the following options. \begin{quote} \begin{optionlist}{3cm} \item [-{-}log \textless{}target\textgreater{}] Destination for log messages. Specify \sphinxcode{none} for standard output or \sphinxcode{syslog} for the system logging daemon. Anything else will be interpreted as a file name. Log files will be rotated when they reach 1 MiB, and at most 5 old log files will be kept. 
Default: \sphinxcode{\textasciitilde{}/.s3ql/fsck.log} \item [-{-}cachedir \textless{}path\textgreater{}] Store cached data in this directory (default: \sphinxcode{\textasciitilde{}/.s3ql)} \item [-{-}authfile \textless{}path\textgreater{}] Read authentication credentials from this file (default: \sphinxcode{\textasciitilde{}/.s3ql/authinfo2)} \item [-{-}debug-modules \textless{}modules\textgreater{}] Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}debug] Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}quiet] be really quiet \item [-{-}backend-options \textless{}options\textgreater{}] Backend specific options (separate by commas). See backend documentation for available options. \item [-{-}version] just print program version and exit \item [-{-}batch] If user input is required, exit without prompting. \item [-{-}force] Force checking even if file system is marked clean. \item [-{-}force-remote] Force use of remote metadata even when this would likely result in data loss. \end{optionlist} \end{quote} \subsection{Exit Codes} \label{man/fsck:exit-codes} If \textbf{\texttt{fsck.s3ql}} found any file system errors (no matter if they were corrected or not), the exit code will be 128 plus one of the codes listed below. If no errors were found, the following exit codes are used as-is: \begin{quote}\begin{description} \item[{0}] \leavevmode Everything went well. \item[{1}] \leavevmode An unexpected error occured. This may indicate a bug in the program. \item[{2}] \leavevmode Invalid command line argument. \item[{3}] \leavevmode Invalid backend option. \item[{10}] \leavevmode Could not open log file for writing. \item[{11}] \leavevmode No such backend. \item[{12}] \leavevmode Authentication file has insecure permissions. \item[{13}] \leavevmode Unable to parse proxy settings. \item[{14}] \leavevmode Invalid credentials (Authentication failed). \item[{15}] \leavevmode No permission to access backend (Authorization denied). \item[{16}] \leavevmode Invalid storage URL, specified location does not exist in backend. \item[{17}] \leavevmode Wrong file system passphrase. \item[{18}] \leavevmode No S3QL file system found at given storage URL. \item[{19}] \leavevmode Unable to connect to backend, can't resolve hostname. \item[{32}] \leavevmode Unsupported file system revision (too old). \item[{33}] \leavevmode Unsupported file system revision (too new). \item[{40}] \leavevmode Cannot check mounted file system. \item[{41}] \leavevmode User input required, but running in batch mode. \item[{42}] \leavevmode File system check aborted by user. \item[{43}] \leavevmode Local metadata is corrupted. \item[{44}] \leavevmode Uncorrectable errors found. \item[{45}] \leavevmode Unable to access cache directory. \item[{128}] \leavevmode This error code will be \emph{added} to one of the codes above if any file system errors have been found (no matter if they were corrected or not). \end{description}\end{quote} \subsection{See Also} \label{man/fsck:see-also} The S3QL homepage is at \url{https://bitbucket.org/nikratio/s3ql/}. The full S3QL documentation should also be installed somewhere on your system, common locations are \sphinxcode{/usr/share/doc/s3ql} or \sphinxcode{/usr/local/doc/s3ql}. 
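For illustration, a hypothetical check of a file system stored on a local backend, forcing the check even though the file system is marked clean, might be invoked as follows (the storage URL is a placeholder):
\begin{Verbatim}[commandchars=\\\{\}]
fsck.s3ql --force local:///srv/s3ql-data
\end{Verbatim}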
\section{The \textbf{\texttt{s3ql\_oauth\_client}} command} \label{man/oauth_client:the-command-command}\label{man/oauth_client::doc}\label{man/oauth_client:oauth-client} \subsection{Synopsis} \label{man/oauth_client:synopsis} \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{s3ql\PYGZus{}oauth\PYGZus{}client }\PYG{g+ge}{[options]} \end{Verbatim} \subsection{Description} \label{man/oauth_client:description} The \textbf{\texttt{s3ql\_oauth\_client}} command may be used to obtain OAuth2 authentication tokens for use with Google Storage. It requests ``user code'' from Google which has to be pasted into the browser to complete the authentication process interactively. Once authentication in the browser has been completed, \textbf{\texttt{s3ql\_oauth\_client}} displays the OAuth2 refresh token. When combined with the special username \sphinxcode{oauth2}, the refresh token can be used as a backend passphrase when using the Google Storage S3QL backend. \subsection{Options} \label{man/oauth_client:options} The \textbf{\texttt{s3ql\_oauth\_client}} command accepts the following options: \begin{quote} \begin{optionlist}{3cm} \item [-{-}debug-modules \textless{}modules\textgreater{}] Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}debug] Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}quiet] be really quiet \item [-{-}version] just print program version and exit \end{optionlist} \end{quote} \subsection{Exit Codes} \label{man/oauth_client:exit-codes} \textbf{\texttt{s3ql\_oauth\_client}} may terminate with the following exit codes: \begin{quote}\begin{description} \item[{0}] \leavevmode Everything went well. \item[{1}] \leavevmode An unexpected error occured. This may indicate a bug in the program. \item[{2}] \leavevmode Invalid command line argument. \end{description}\end{quote} \subsection{See Also} \label{man/oauth_client:see-also} The S3QL homepage is at \url{https://bitbucket.org/nikratio/s3ql/}. The full S3QL documentation should also be installed somewhere on your system, common locations are \sphinxcode{/usr/share/doc/s3ql} or \sphinxcode{/usr/local/doc/s3ql}. \section{The \textbf{\texttt{s3ql\_verify}} command} \label{man/verify:the-command-command}\label{man/verify::doc} \subsection{Synopsis} \label{man/verify:synopsis} \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{s3ql\PYGZus{}verify }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}storage url\PYGZgt{}} \end{Verbatim} \subsection{Description} \label{man/verify:description} The \textbf{\texttt{s3ql\_verify}} command verifies all data in the file system. In contrast to \textbf{\texttt{fsck.s3ql}}, \textbf{\texttt{s3ql\_verify}} does not trust the object listing returned by the backend, but actually attempts to retrieve every object. It therefore takes a lot longer. The format of \sphinxcode{\textless{}storage url\textgreater{}} depends on the backend that is used. The S3QL User's Guide should be consulted for a description of the available backends. \subsection{Options} \label{man/verify:options} The \textbf{\texttt{s3ql\_verify}} command accepts the following options. \begin{quote} \begin{optionlist}{3cm} \item [-{-}debug-modules \textless{}modules\textgreater{}] Activate debugging output from specified modules (use commas to separate multiple modules). 
Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option.
\item [-{-}debug] Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option.
\item [-{-}quiet] be really quiet
\item [-{-}version] just print program version and exit
\item [-{-}cachedir \textless{}path\textgreater{}] Store cached data in this directory (default: \sphinxcode{\textasciitilde{}/.s3ql})
\item [-{-}authfile \textless{}path\textgreater{}] Read authentication credentials from this file (default: \sphinxcode{\textasciitilde{}/.s3ql/authinfo2})
\item [-{-}backend-options \textless{}options\textgreater{}] Backend specific options (separate by commas). See backend documentation for available options.
\item [-{-}missing-file \textless{}name\textgreater{}] File to store keys of missing objects.
\item [-{-}corrupted-file \textless{}name\textgreater{}] File to store keys of corrupted objects.
\item [-{-}data] Read every object completely, instead of checking just the metadata.
\item [-{-}parallel PARALLEL] Number of connections to use in parallel.
\item [-{-}start-with \textless{}n\textgreater{}] Skip over the first \textless{}n\textgreater{} objects and start verifying with object \textless{}n\textgreater{}+1.
\end{optionlist}
\end{quote}
\subsection{Exit Codes}
\label{man/verify:exit-codes}
\textbf{\texttt{s3ql\_verify}} may terminate with the following exit codes:
\begin{quote}\begin{description}
\item[{0}] \leavevmode
Everything went well.
\item[{1}] \leavevmode
An unexpected error occurred. This may indicate a bug in the program.
\item[{2}] \leavevmode
Invalid command line argument.
\item[{3}] \leavevmode
Invalid backend option.
\item[{10}] \leavevmode
Could not open log file for writing.
\item[{11}] \leavevmode
No such backend.
\item[{12}] \leavevmode
Authentication file has insecure permissions.
\item[{13}] \leavevmode
Unable to parse proxy settings.
\item[{14}] \leavevmode
Invalid credentials (Authentication failed).
\item[{15}] \leavevmode
No permission to access backend (Authorization denied).
\item[{16}] \leavevmode
Invalid storage URL, specified location does not exist in backend.
\item[{17}] \leavevmode
Wrong file system passphrase.
\item[{18}] \leavevmode
No S3QL file system found at given storage URL.
\item[{19}] \leavevmode
Unable to connect to backend, can't resolve hostname.
\item[{32}] \leavevmode
Unsupported file system revision (too old).
\item[{33}] \leavevmode
Unsupported file system revision (too new).
\item[{45}] \leavevmode
Unable to access cache directory.
\item[{46}] \leavevmode
The file system data was verified, and some objects were found to be missing or corrupted.
\end{description}\end{quote}
\subsection{See Also}
\label{man/verify:see-also}
The S3QL homepage is at \url{https://bitbucket.org/nikratio/s3ql/}.
The full S3QL documentation should also be installed somewhere on your system, common locations are \sphinxcode{/usr/share/doc/s3ql} or \sphinxcode{/usr/local/doc/s3ql}.
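For illustration, a hypothetical run that retrieves every object completely and records the keys of missing objects in a file might look like this (file name and storage URL are placeholders):
\begin{Verbatim}[commandchars=\\\{\}]
s3ql_verify --data --missing-file missing.txt local:///srv/s3ql-data
\end{Verbatim}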
\section{The \textbf{\texttt{pcp}} command}
\label{man/pcp:the-command-command}\label{man/pcp::doc}
\subsection{Synopsis}
\label{man/pcp:synopsis}
\begin{Verbatim}[commandchars=\\\{\}]
\PYG{l}{pcp }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}source\PYGZgt{}}\PYG{l}{ }\PYG{g+ge}{[\PYGZlt{}source\PYGZgt{} ...]}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}destination\PYGZgt{}}
\end{Verbatim}
\subsection{Description}
\label{man/pcp:description}
The \textbf{\texttt{pcp}} command is a wrapper that starts several \textbf{\texttt{rsync}} processes to copy directory trees in parallel. This allows much better copying performance on file systems like S3QL that have relatively high latency when retrieving individual files.
\textbf{Note}: Using this program only improves performance when copying \emph{from} an S3QL file system. When copying \emph{to} an S3QL file system, using \textbf{\texttt{pcp}} is more likely to \emph{decrease} performance.
\subsection{Options}
\label{man/pcp:options}
The \textbf{\texttt{pcp}} command accepts the following options:
\begin{quote}
\begin{optionlist}{3cm}
\item [-{-}quiet] be really quiet
\item [-{-}debug-modules \textless{}modules\textgreater{}] Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option.
\item [-{-}debug] Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option.
\item [-{-}version] just print program version and exit
\item [-a] Pass -aHAX option to rsync.
\item [-{-}processes \textless{}no\textgreater{}] Number of rsync processes to use (default: 10).
\end{optionlist}
\end{quote}
\subsection{Exit Codes}
\label{man/pcp:exit-codes}
\textbf{\texttt{pcp}} may terminate with the following exit codes:
\begin{quote}\begin{description}
\item[{0}] \leavevmode
Everything went well.
\item[{1}] \leavevmode
An unexpected error occurred. This may indicate a bug in the program.
\item[{2}] \leavevmode
Invalid command line argument.
\end{description}\end{quote}
\subsection{See Also}
\label{man/pcp:see-also}
\textbf{\texttt{pcp}} is shipped as part of S3QL, \url{https://bitbucket.org/nikratio/s3ql/}.
\section{The \textbf{\texttt{expire\_backups}} command}
\label{man/expire_backups:the-command-command}\label{man/expire_backups::doc}
\subsection{Synopsis}
\label{man/expire_backups:synopsis}
\begin{Verbatim}[commandchars=\\\{\}]
\PYG{l}{expire\PYGZus{}backups }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\PYGZlt{}age\PYGZgt{}}\PYG{l}{ }\PYG{g+ge}{[\PYGZlt{}age\PYGZgt{} ...]}
\end{Verbatim}
\subsection{Description}
\label{man/expire_backups:description}
The \textbf{\texttt{expire\_backups}} command intelligently removes old backups that are no longer needed.
To define what backups you want to keep for how long, you define a number of \emph{age ranges}. \textbf{\texttt{expire\_backups}} ensures that you will have at least one backup in each age range at all times. It will keep exactly as many backups as are required for that and delete any backups that become redundant.
Age ranges are specified by giving a list of range boundaries in terms of backup cycles. Every time you create a new backup, the existing backups age by one cycle.
Example: when \textbf{\texttt{expire\_backups}} is called with the age range definition \sphinxcode{1 3 7 14 31}, it will guarantee that you always have the following backups available: \begin{enumerate} \item {} A backup that is 0 to 1 cycles old (i.e, the most recent backup) \item {} A backup that is 1 to 3 cycles old \item {} A backup that is 3 to 7 cycles old \item {} A backup that is 7 to 14 cycles old \item {} A backup that is 14 to 31 cycles old \end{enumerate} \begin{notice}{note}{Note:} If you do backups in fixed intervals, then one cycle will be equivalent to the backup interval. The advantage of specifying the age ranges in terms of backup cycles rather than days or weeks is that it allows you to gracefully handle irregular backup intervals. Imagine that for some reason you do not turn on your computer for one month. Now all your backups are at least a month old, and if you had specified the above backup strategy in terms of absolute ages, they would all be deleted! Specifying age ranges in terms of backup cycles avoids these sort of problems. \end{notice} \textbf{\texttt{expire\_backups}} usage is simple. It requires backups to be stored in directories of the form \sphinxcode{year-month-day\_hour:minute:seconds} (\sphinxcode{YYYY-MM-DD\_HH:mm:ss}) and works on all backups in the current directory. So for the above backup strategy, the correct invocation would be: \begin{Verbatim}[commandchars=\\\{\}] \PYG{l}{expire\PYGZus{}backups.py 1 3 7 14 31} \end{Verbatim} When storing your backups on an S3QL file system, you probably want to specify the \sphinxcode{-{-}use-s3qlrm} option as well. This tells \textbf{\texttt{expire\_backups}} to use the {\hyperref[special:s3qlrm]{\sphinxcrossref{\DUrole{std,std-ref}{s3qlrm}}}} command to delete directories. \textbf{\texttt{expire\_backups}} uses a ``state file'' to keep track which backups are how many cycles old (since this cannot be inferred from the dates contained in the directory names). The standard name for this state file is \sphinxcode{.expire\_backups.dat}. If this file gets damaged or deleted, \textbf{\texttt{expire\_backups}} no longer knows the ages of the backups and refuses to work. In this case you can use the \sphinxcode{-{-}reconstruct-state} option to try to reconstruct the state from the backup dates. However, the accuracy of this reconstruction depends strongly on how rigorous you have been with making backups (it is only completely correct if the time between subsequent backups has always been exactly the same), so it's generally a good idea not to tamper with the state file. \subsection{Options} \label{man/expire_backups:options} The \textbf{\texttt{expire\_backups}} command accepts the following options: \begin{quote} \begin{optionlist}{3cm} \item [-{-}quiet] be really quiet \item [-{-}debug-modules \textless{}modules\textgreater{}] Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}debug] Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \sphinxcode{-{-}log} option. \item [-{-}version] just print program version and exit \item [-{-}state \textless{}file\textgreater{}] File to save state information in (default: ''.expire\_backups.dat'') \item [-n] Dry run. Just show which backups would be deleted. \item [-{-}reconstruct-state] Try to reconstruct a missing state file from backup dates. 
\item [-{-}use-s3qlrm] Use the \sphinxcode{s3qlrm} command to delete backups.
\end{optionlist}
\end{quote}
\subsection{Exit Codes}
\label{man/expire_backups:exit-codes}
\textbf{\texttt{expire\_backups}} may terminate with the following exit codes:
\begin{quote}\begin{description}
\item[{0}] \leavevmode
Everything went well.
\item[{1}] \leavevmode
An unexpected error occurred. This may indicate a bug in the program.
\item[{2}] \leavevmode
Invalid command line argument.
\end{description}\end{quote}
\subsection{See Also}
\label{man/expire_backups:see-also}
\textbf{\texttt{expire\_backups}} is shipped as part of S3QL, \url{https://bitbucket.org/nikratio/s3ql/}.
\chapter{Further Resources / Getting Help}
\label{resources::doc}\label{resources:resources}\label{resources:further-resources-getting-help}
If you have questions or problems with S3QL that you weren't able to resolve with this manual, you might want to consider the following other resources:
\begin{itemize}
\item {}
The \href{https://bitbucket.org/nikratio/s3ql/wiki}{S3QL Wiki}
\item {}
The \href{https://bitbucket.org/nikratio/s3ql/wiki/FAQ}{S3QL FAQ}
\item {}
The \href{http://groups.google.com/group/s3ql}{S3QL Mailing List}. You can subscribe by sending a mail to \href{mailto:s3ql+subscribe@googlegroups.com}{s3ql+subscribe@googlegroups.com}.
\end{itemize}
Please report any bugs you may encounter in the \href{https://bitbucket.org/nikratio/s3ql/issues}{Issue Tracker}.
\chapter{Implementation Details}
\label{impl_details:impl-details}\label{impl_details::doc}\label{impl_details:implementation-details}
This section provides some background information on how S3QL works internally. Reading this section is not necessary to use S3QL.
\section{Metadata Storage}
\label{impl_details:metadata-storage}
Like most Unix file systems, S3QL has a concept of inodes. The contents of directory inodes (i.e., the names and inodes of the files and subdirectories contained in a directory) are stored directly in an \href{http://www.sqlite.org/}{SQLite} database. This database is stored in a special storage object that is downloaded when the file system is mounted and uploaded periodically in the background and when the file system is unmounted. This has two implications:
\begin{enumerate}
\item {}
The entire file system tree can be read from the database. Fetching/storing storage objects from/in the storage backend is only required to access the contents of files (or, more precisely, inodes). This makes most file system operations very fast because no data has to be sent over the network.
\item {}
An S3QL file system can only be mounted on one computer at a time, using a single \textbf{\texttt{mount.s3ql}} process. Otherwise changes made at one mount point will invariably be overwritten when the second mount point is unmounted.
\end{enumerate}
Sockets, FIFOs and character devices do not need any additional storage; all information about them is contained in the database.
\section{Data Storage}
\label{impl_details:data-storage}
The contents of file inodes are split into individual blocks. The maximum size of a block is specified when the file system is created and cannot be changed afterwards. Every block is stored as an individual object in the backend, and the mapping from inodes to blocks and from blocks to objects is stored in the database.
While the file system is mounted, blocks are cached locally.
Blocks can also be compressed and encrypted before they are stored in the storage backend. This happens during upload, i.e.
the cached data is unencrypted and uncompressed.
If some files have blocks with identical contents, the blocks will be stored in the same backend object (i.e., the data is only stored once).
\section{Data De-Duplication}
\label{impl_details:data-de-duplication}
Instead of uploading every block, S3QL first computes a checksum (a SHA256 hash) to check if an identical block has already been stored in a backend object. If that is the case, the new block will be linked to the existing object instead of being uploaded. This procedure is invisible to the user and the contents of the block can still be changed. If several blocks share a backend object and one of the blocks is changed, the changed block is automatically stored in a new object (so that the contents of the other block remain unchanged).
\section{Caching}
\label{impl_details:caching}
When an application tries to read from or write to a file, S3QL determines the block that contains the required part of the file and retrieves it from the backend or creates it if it does not yet exist. The block is then held in the cache directory. It is committed to the backend when it has not been accessed for more than a few seconds. Blocks are removed from the cache only when the maximum cache size is reached.
When the file system is unmounted, all modified blocks are written to the backend and the cache is cleaned.
\section{Eventual Consistency Handling}
\label{impl_details:eventual-consistency-handling}
S3QL has to take into account that with some storage providers, changes in objects do not propagate immediately. For example, when an Amazon S3 object is uploaded and immediately downloaded again, the downloaded data might not yet reflect the changes done in the upload (see also \url{http://developer.amazonwebservices.com/connect/message.jspa?messageID=38538}).
For the data blocks this is not a problem because a data block always gets a new object ID when it is updated.
For the metadata, however, S3QL has to make sure that it always downloads the most recent copy of the database when mounting the file system. To that end, metadata versions are numbered, and the most recent version number is stored as part of the object id of a very small ``marker'' object. When S3QL has downloaded the metadata, it checks the version number against the marker object and, if the two do not agree, waits for the most recent metadata to become available. Once the current metadata is available, the version number is increased and the marker object updated.
\section{Encryption}
\label{impl_details:encryption}
When the file system is created, \textbf{\texttt{mkfs.s3ql}} generates a 256-bit master key by reading from \sphinxcode{/dev/random}. The master key is encrypted with the passphrase that is entered by the user, and then stored with the rest of the file system data. Since the passphrase is only used to access the master key (which is used to encrypt the actual file system data), the passphrase can easily be changed.
Data is encrypted with a new session key for each object and each upload. The session key is generated by appending a nonce to the master key and then calculating the SHA256 hash. The nonce is generated by concatenating the object id and the current UTC time as a 32-bit float. The precision of the time is given by the Python \href{http://docs.python.org/library/time.html\#time.time}{time()} function and is usually at least 1 millisecond. The SHA256 implementation is included in the Python standard library.
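In symbols (this notation merely summarizes the preceding paragraph and is not taken from the S3QL sources), the key derivation can be written as:
\begin{quote}
$k_{\mathrm{session}} = \mathrm{SHA256}(k_{\mathrm{master}} \parallel \mathrm{nonce})$, \quad where \quad $\mathrm{nonce} = \mathrm{object\ id} \parallel t_{\mathrm{UTC}}$.
\end{quote}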
Once the session key has been calculated, a SHA256 HMAC is calculated over the data that is to be uploaded. Afterwards, the data is compressed (unless \sphinxcode{-{-}compress none} was passed to \textbf{\texttt{mount.s3ql}}) and the HMAC inserted at the beginning. Both HMAC and compressed data are then encrypted using 256-bit AES in CTR mode using \href{http://www.pycrypto.org/}{PyCrypto}. Finally, the nonce is inserted in front of the encrypted data and HMAC, and the packet is sent to the backend as a new storage object.
\renewcommand{\indexname}{Index}
\printindex
\end{document}
\indexentry{ssl-ca-path=\textless{}path\textgreater{}!s3\_backend command line option|hyperpage}{8} \indexentry{s3\_backend command line option!tcp-timeout|hyperpage}{8} \indexentry{tcp-timeout!s3\_backend command line option|hyperpage}{8} \indexentry{s3\_backend command line option!sse|hyperpage}{8} \indexentry{sse!s3\_backend command line option|hyperpage}{8} \indexentry{s3\_backend command line option!ia|hyperpage}{8} \indexentry{ia!s3\_backend command line option|hyperpage}{8} \indexentry{s3\_backend command line option!rrs|hyperpage}{8} \indexentry{rrs!s3\_backend command line option|hyperpage}{8} \indexentry{swift\_backend command line option!no-ssl|hyperpage}{9} \indexentry{no-ssl!swift\_backend command line option|hyperpage}{9} \indexentry{swift\_backend command line option!ssl-ca-path=\textless{}path\textgreater{}|hyperpage}{9} \indexentry{ssl-ca-path=\textless{}path\textgreater{}!swift\_backend command line option|hyperpage}{9} \indexentry{swift\_backend command line option!tcp-timeout|hyperpage}{9} \indexentry{tcp-timeout!swift\_backend command line option|hyperpage}{9} \indexentry{swift\_backend command line option!disable-expect100|hyperpage}{9} \indexentry{disable-expect100!swift\_backend command line option|hyperpage}{9} \indexentry{swift\_backend command line option!no-feature-detection|hyperpage}{9} \indexentry{no-feature-detection!swift\_backend command line option|hyperpage}{9} \indexentry{s3c\_backend command line option!no-ssl|hyperpage}{10} \indexentry{no-ssl!s3c\_backend command line option|hyperpage}{10} \indexentry{s3c\_backend command line option!ssl-ca-path=\textless{}path\textgreater{}|hyperpage}{10} \indexentry{ssl-ca-path=\textless{}path\textgreater{}!s3c\_backend command line option|hyperpage}{10} \indexentry{s3c\_backend command line option!tcp-timeout|hyperpage}{10} \indexentry{tcp-timeout!s3c\_backend command line option|hyperpage}{10} \indexentry{s3c\_backend command line option!disable-expect100|hyperpage}{10} \indexentry{disable-expect100!s3c\_backend command line option|hyperpage}{10} \indexentry{s3c\_backend command line option!dumb-copy|hyperpage}{10} \indexentry{dumb-copy!s3c\_backend command line option|hyperpage}{10} s3ql-2.26/doc/latex/sphinxmanual.cls0000664000175000017500000000743713015321157021116 0ustar nikrationikratio00000000000000% % sphinxmanual.cls for Sphinx (http://sphinx-doc.org/) % \NeedsTeXFormat{LaTeX2e}[1995/12/01] \ProvidesClass{sphinxmanual}[2009/06/02 Document class (Sphinx manual)] \ifx\directlua\undefined\else % if compiling with lualatex 0.85 or later load compatibility patch issued by % the LaTeX team for older packages relying on \pdf named primitives. \IfFileExists{luatex85.sty}{\RequirePackage{luatex85}}{} \fi % chapters starting at odd pages (overridden by 'openany' document option) \PassOptionsToClass{openright}{\sphinxdocclass} % 'oneside' option overriding the 'twoside' default \newif\if@oneside \DeclareOption{oneside}{\@onesidetrue} % Pass remaining document options to the parent class. \DeclareOption*{\PassOptionsToClass{\CurrentOption}{\sphinxdocclass}} \ProcessOptions\relax % Defaults two-side document \if@oneside % nothing to do (oneside is the default) \else \PassOptionsToClass{twoside}{\sphinxdocclass} \fi \LoadClass{\sphinxdocclass} % Set some sane defaults for section numbering depth and TOC depth. You can % reset these counters in your preamble. % \setcounter{secnumdepth}{2} \setcounter{tocdepth}{1} % Change the title page to look a bit better, and fit in with the fncychap % ``Bjarne'' style a bit better. 
% \renewcommand{\maketitle}{% \begin{titlepage}% \let\footnotesize\small \let\footnoterule\relax \noindent\rule{\textwidth}{1pt}\ifsphinxpdfoutput\newline\null\fi\par \ifsphinxpdfoutput \begingroup % These \defs are required to deal with multi-line authors; it % changes \\ to ', ' (comma-space), making it pass muster for % generating document info in the PDF file. \def\\{, }% \def\and{and }% \pdfinfo{ /Author (\@author) /Title (\@title) }% \endgroup \fi \begin{flushright}% \sphinxlogo \py@HeaderFamily {\Huge \@title \par} {\itshape\LARGE \py@release\releaseinfo \par} \vfill {\LARGE \begin{tabular}[t]{c} \@author \end{tabular} \par} \vfill\vfill {\large \@date \par \vfill \py@authoraddress \par }% \end{flushright}%\par \@thanks \end{titlepage}% \setcounter{footnote}{0}% \let\thanks\relax\let\maketitle\relax %\gdef\@thanks{}\gdef\@author{}\gdef\@title{} } \let\py@OldTableofcontents=\tableofcontents \renewcommand{\tableofcontents}{% % before resetting page counter, let's do the right thing. \if@openright\cleardoublepage\else\clearpage\fi \pagenumbering{roman}% \pagestyle{plain}% \begingroup \parskip \z@skip \py@OldTableofcontents \endgroup % before resetting page counter, let's do the right thing. \if@openright\cleardoublepage\else\clearpage\fi \pagenumbering{arabic}% \ifdefined\fancyhf\pagestyle{normal}\fi } \pagenumbering{alph}% avoid hyperref "duplicate destination" warnings % This is needed to get the width of the section # area wide enough in the % library reference. Doing it here keeps it the same for all the manuals. % \renewcommand*\l@section{\@dottedtocline{1}{1.5em}{2.6em}} \renewcommand*\l@subsection{\@dottedtocline{2}{4.1em}{3.5em}} % Fix the bibliography environment to add an entry to the Table of % Contents. % For a report document class this environment is a chapter. \let\py@OldThebibliography=\thebibliography \renewcommand{\thebibliography}[1]{ \if@openright\cleardoublepage\else\clearpage\fi \phantomsection \py@OldThebibliography{1} \addcontentsline{toc}{chapter}{\bibname} } % Same for the indices. % The memoir class already does this, so we don't duplicate it in that case. % \@ifclassloaded{memoir}{}{ \let\py@OldTheindex=\theindex \renewcommand{\theindex}{ \if@openright\cleardoublepage\else\clearpage\fi \phantomsection \py@OldTheindex \addcontentsline{toc}{chapter}{\indexname} } } s3ql-2.26/doc/latex/tabulary.sty0000664000175000017500000003302413015321157020257 0ustar nikrationikratio00000000000000%% %% This is file `tabulary.sty', %% generated with the docstrip utility. %% %% The original source files were: %% %% tabulary.dtx (with options: `package') %% DRAFT VERSION %% %% File `tabulary.dtx'. %% Copyright (C) 1995 1996 2003 2008 David Carlisle %% This file may be distributed under the terms of the LPPL. %% See 00readme.txt for details. 
%% \NeedsTeXFormat{LaTeX2e} \ProvidesPackage{tabulary} [2008/12/01 v0.9 tabulary package (DPC)] \RequirePackage{array} \catcode`\Z=14 \DeclareOption{debugshow}{\catcode`\Z=9\relax} \ProcessOptions \def\arraybackslash{\let\\=\@arraycr} \def\@finalstrut#1{% \unskip\ifhmode\nobreak\fi\vrule\@width\z@\@height\z@\@depth\dp#1} \newcount\TY@count \def\tabulary{% \let\TY@final\tabular \let\endTY@final\endtabular \TY@tabular} \def\TY@tabular#1{% \edef\TY@{\@currenvir}% {\ifnum0=`}\fi \@ovxx\TY@linewidth \@ovyy\TY@tablewidth \count@\z@ \@tempswatrue \@whilesw\if@tempswa\fi{% \advance\count@\@ne \expandafter\ifx\csname TY@F\the\count@\endcsname\relax \@tempswafalse \else \expandafter\let\csname TY@SF\the\count@\expandafter\endcsname \csname TY@F\the\count@\endcsname \global\expandafter\let\csname TY@F\the\count@\endcsname\relax \expandafter\let\csname TY@S\the\count@\expandafter\endcsname \csname TY@\the\count@\endcsname \fi}% \global\TY@count\@ne \TY@width\xdef{0pt}% \global\TY@tablewidth\z@ \global\TY@linewidth#1\relax Z\message{^^J^^JTable^^J% Z Target Width: \the\TY@linewidth^^J% Z \string\tabcolsep: \the\tabcolsep\space Z \string\arrayrulewidth: \the\arrayrulewidth\space Z \string\doublerulesep: \the\doublerulesep^^J% Z \string\tymin: \the\tymin\space Z \string\tymax: \the\tymax^^J}% \let\@classz\TY@classz \let\verb\TX@verb \toks@{}\TY@get@body} \let\TY@@mkpream\@mkpream \def\TY@mkpream{% \def\@addamp{% \if@firstamp \@firstampfalse \else \global\advance\TY@count\@ne \edef\@preamble{\@preamble &}\fi \TY@width\xdef{0pt}}% \def\@acol{% \TY@subwidth\col@sep \@addtopreamble{\hskip\col@sep}}% \let\@arrayrule\TY@arrayrule \let\@classvi\TY@classvi \def\@classv{\save@decl \expandafter\NC@ecs\@nextchar\extracolsep{}\extracolsep\@@@ \sbox\z@{\d@llarbegin\@nextchar\d@llarend}% \TY@subwidth{\wd\z@}% \@addtopreamble{\d@llarbegin\the@toks\the\count@\relax\d@llarend}% \prepnext@tok}% \global\let\@mkpream\TY@@mkpream \TY@@mkpream} \def\TY@arrayrule{% \TY@subwidth\arrayrulewidth \@addtopreamble \vline} \def\TY@classvi{\ifcase \@lastchclass \@acol \or \TY@subwidth\doublerulesep \@addtopreamble{\hskip \doublerulesep}\or \@acol \or \@classvii \fi} \def\TY@tab{% \setbox\z@\hbox\bgroup \let\[$\let\]$% \let\equation$\let\endequation$% \col@sep\tabcolsep \let\d@llarbegin\begingroup\let\d@llarend\endgroup \let\@mkpream\TY@mkpream \def\multicolumn##1##2##3{\multispan##1\relax}% \CT@start\TY@tabarray} \def\TY@tabarray{\@ifnextchar[{\TY@array}{\@array[t]}} \def\TY@array[#1]{\@array[t]} \def\TY@width#1{% \expandafter#1\csname TY@\the\TY@count\endcsname} \def\TY@subwidth#1{% \TY@width\dimen@ \advance\dimen@-#1\relax \TY@width\xdef{\the\dimen@}% \global\advance\TY@linewidth-#1\relax} \def\endtabulary{% \gdef\@halignto{}% \let\TY@footnote\footnote% \def\footnote{}% prevent footnotes from doing anything \expandafter\TY@tab\the\toks@ \crcr\omit {\xdef\TY@save@row{}% \loop \advance\TY@count\m@ne \ifnum\TY@count>\z@ \xdef\TY@save@row{\TY@save@row&\omit}% \repeat}\TY@save@row \endarray\global\setbox1=\lastbox\setbox0=\vbox{\unvbox1 \unskip\global\setbox1=\lastbox}\egroup \dimen@\TY@linewidth \divide\dimen@\TY@count \ifdim\dimen@<\tymin \TY@warn{tymin too large (\the\tymin), resetting to \the\dimen@}% \tymin\dimen@ \fi \setbox\tw@=\hbox{\unhbox\@ne \loop \@tempdima=\lastskip \ifdim\@tempdima>\z@ Z \message{ecs=\the\@tempdima^^J}% \global\advance\TY@linewidth-\@tempdima \fi \unskip \setbox\tw@=\lastbox \ifhbox\tw@ Z \message{Col \the\TY@count: Initial=\the\wd\tw@\space}% \ifdim\wd\tw@>\tymax \wd\tw@\tymax Z \message{> max\space}% Z 
\else Z \message{ \@spaces\space}% \fi \TY@width\dimen@ Z \message{\the\dimen@\space}% \advance\dimen@\wd\tw@ Z \message{Final=\the\dimen@\space}% \TY@width\xdef{\the\dimen@}% \ifdim\dimen@<\tymin Z \message{< tymin}% \global\advance\TY@linewidth-\dimen@ \expandafter\xdef\csname TY@F\the\TY@count\endcsname {\the\dimen@}% \else \expandafter\ifx\csname TY@F\the\TY@count\endcsname\z@ Z \message{***}% \global\advance\TY@linewidth-\dimen@ \expandafter\xdef\csname TY@F\the\TY@count\endcsname {\the\dimen@}% \else Z \message{> tymin}% \global\advance\TY@tablewidth\dimen@ \global\expandafter\let\csname TY@F\the\TY@count\endcsname \maxdimen \fi\fi \advance\TY@count\m@ne \repeat}% \TY@checkmin \TY@checkmin \TY@checkmin \TY@checkmin \TY@count\z@ \let\TY@box\TY@box@v \let\footnote\TY@footnote % restore footnotes {\expandafter\TY@final\the\toks@\endTY@final}% \count@\z@ \@tempswatrue \@whilesw\if@tempswa\fi{% \advance\count@\@ne \expandafter\ifx\csname TY@SF\the\count@\endcsname\relax \@tempswafalse \else \global\expandafter\let\csname TY@F\the\count@\expandafter\endcsname \csname TY@SF\the\count@\endcsname \global\expandafter\let\csname TY@\the\count@\expandafter\endcsname \csname TY@S\the\count@\endcsname \fi}% \TY@linewidth\@ovxx \TY@tablewidth\@ovyy \ifnum0=`{\fi}} \def\TY@checkmin{% \let\TY@checkmin\relax \ifdim\TY@tablewidth>\z@ \Gscale@div\TY@ratio\TY@linewidth\TY@tablewidth \ifdim\TY@tablewidth <\TY@linewidth \def\TY@ratio{1}% \fi \else \TY@warn{No suitable columns!}% \def\TY@ratio{1}% \fi \count@\z@ Z \message{^^JLine Width: \the\TY@linewidth, Z Natural Width: \the\TY@tablewidth, Z Ratio: \TY@ratio^^J}% \@tempdima\z@ \loop \ifnum\count@<\TY@count \advance\count@\@ne \ifdim\csname TY@F\the\count@\endcsname>\tymin \dimen@\csname TY@\the\count@\endcsname \dimen@\TY@ratio\dimen@ \ifdim\dimen@<\tymin Z \message{Column \the\count@\space ->}% \global\expandafter\let\csname TY@F\the\count@\endcsname\tymin \global\advance\TY@linewidth-\tymin \global\advance\TY@tablewidth-\csname TY@\the\count@\endcsname \let\TY@checkmin\TY@@checkmin \else \expandafter\xdef\csname TY@F\the\count@\endcsname{\the\dimen@}% \advance\@tempdima\csname TY@F\the\count@\endcsname \fi \fi Z \dimen@\csname TY@F\the\count@\endcsname\message{\the\dimen@, }% \repeat Z \message{^^JTotal:\the\@tempdima^^J}% } \let\TY@@checkmin\TY@checkmin \newdimen\TY@linewidth \def\tyformat{\everypar{{\nobreak\hskip\z@skip}}} \newdimen\tymin \tymin=10pt \newdimen\tymax \tymax=2\textwidth \def\@testpach{\@chclass \ifnum \@lastchclass=6 \@ne \@chnum \@ne \else \ifnum \@lastchclass=7 5 \else \ifnum \@lastchclass=8 \tw@ \else \ifnum \@lastchclass=9 \thr@@ \else \z@ \ifnum \@lastchclass = 10 \else \edef\@nextchar{\expandafter\string\@nextchar}% \@chnum \if \@nextchar c\z@ \else \if \@nextchar l\@ne \else \if \@nextchar r\tw@ \else \if \@nextchar C7 \else \if \@nextchar L8 \else \if \@nextchar R9 \else \if \@nextchar J10 \else \z@ \@chclass \if\@nextchar |\@ne \else \if \@nextchar !6 \else \if \@nextchar @7 \else \if \@nextchar <8 \else \if \@nextchar >9 \else 10 \@chnum \if \@nextchar m\thr@@\else \if \@nextchar p4 \else \if \@nextchar b5 \else \z@ \@chclass \z@ \@preamerr \z@ \fi \fi \fi \fi\fi \fi \fi\fi \fi \fi \fi \fi \fi \fi \fi \fi \fi \fi \fi \fi} \def\TY@classz{% \@classx \@tempcnta\count@ \ifx\TY@box\TY@box@v \global\advance\TY@count\@ne \fi \let\centering c% \let\raggedright\noindent \let\raggedleft\indent \let\arraybackslash\relax \prepnext@tok \ifnum\@chnum<4 \global\expandafter\let\csname TY@F\the\TY@count\endcsname\z@ \fi \ifnum\@chnum=6 
\global\expandafter\let\csname TY@F\the\TY@count\endcsname\z@ \fi \@addtopreamble{% \ifcase\@chnum \hfil \d@llarbegin\insert@column\d@llarend \hfil \or \kern\z@ \d@llarbegin \insert@column \d@llarend \hfil \or \hfil\kern\z@ \d@llarbegin \insert@column \d@llarend \or $\vcenter\@startpbox{\@nextchar}\insert@column \@endpbox $\or \vtop \@startpbox{\@nextchar}\insert@column \@endpbox \or \vbox \@startpbox{\@nextchar}\insert@column \@endpbox \or \d@llarbegin \insert@column \d@llarend \or% dubious "s" case \TY@box\centering\or \TY@box\raggedright\or \TY@box\raggedleft\or \TY@box\relax \fi}\prepnext@tok} \def\TY@box#1{% \ifx\centering#1% \hfil \d@llarbegin\insert@column\d@llarend \hfil \else \ifx\raggedright#1% \kern\z@%<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< \d@llarbegin \insert@column \d@llarend \hfil \else \ifx\raggedleft#1% \hfil\kern\z@ \d@llarbegin \insert@column \d@llarend \else \ifx\relax#1% \d@llarbegin \insert@column \d@llarend \fi \fi \fi \fi} \def\TY@box@v#1{% \vtop \@startpbox{\csname TY@F\the\TY@count\endcsname}% #1\arraybackslash\tyformat \insert@column\@endpbox} \newdimen\TY@tablewidth \def\Gscale@div#1#2#3{% \setlength\dimen@{#3}% \ifdim\dimen@=\z@ \PackageError{graphics}{Division by 0}\@eha \dimen@#2% \fi \edef\@tempd{\the\dimen@}% \setlength\dimen@{#2}% \count@65536\relax \ifdim\dimen@<\z@ \dimen@-\dimen@ \count@-\count@ \fi \loop \ifdim\dimen@<8192\p@ \dimen@\tw@\dimen@ \divide\count@\tw@ \repeat \dimen@ii=\@tempd\relax \divide\dimen@ii\count@ \divide\dimen@\dimen@ii \edef#1{\strip@pt\dimen@}} \long\def\TY@get@body#1\end {\toks@\expandafter{\the\toks@#1}\TY@find@end} \def\TY@find@end#1{% \def\@tempa{#1}% \ifx\@tempa\TY@\def\@tempa{\end{#1}}\expandafter\@tempa \else\toks@\expandafter {\the\toks@\end{#1}}\expandafter\TY@get@body\fi} \def\TY@warn{% \PackageWarning{tabulary}} \catcode`\Z=11 \AtBeginDocument{ \@ifpackageloaded{colortbl}{% \expandafter\def\expandafter\@mkpream\expandafter#\expandafter1% \expandafter{% \expandafter\let\expandafter\CT@setup\expandafter\relax \expandafter\let\expandafter\CT@color\expandafter\relax \expandafter\let\expandafter\CT@do@color\expandafter\relax \expandafter\let\expandafter\color\expandafter\relax \expandafter\let\expandafter\CT@column@color\expandafter\relax \expandafter\let\expandafter\CT@row@color\expandafter\relax \@mkpream{#1}} \let\TY@@mkpream\@mkpream \def\TY@classz{% \@classx \@tempcnta\count@ \ifx\TY@box\TY@box@v \global\advance\TY@count\@ne \fi \let\centering c% \let\raggedright\noindent \let\raggedleft\indent \let\arraybackslash\relax \prepnext@tok \expandafter\CT@extract\the\toks\@tempcnta\columncolor!\@nil \ifnum\@chnum<4 \global\expandafter\let\csname TY@F\the\TY@count\endcsname\z@ \fi \ifnum\@chnum=6 \global\expandafter\let\csname TY@F\the\TY@count\endcsname\z@ \fi \@addtopreamble{% \setbox\z@\hbox\bgroup\bgroup \ifcase\@chnum \hskip\stretch{.5}\kern\z@ \d@llarbegin\insert@column\d@llarend\hskip\stretch{.5}\or \kern\z@%<<<<<<<<<<<<<<<<<<<<<<<<<<< \d@llarbegin \insert@column \d@llarend \hfill \or \hfill\kern\z@ \d@llarbegin \insert@column \d@llarend \or $\vcenter\@startpbox{\@nextchar}\insert@column \@endpbox $\or \vtop \@startpbox{\@nextchar}\insert@column \@endpbox \or \vbox \@startpbox{\@nextchar}\insert@column \@endpbox \or \d@llarbegin \insert@column \d@llarend \or% dubious s case \TY@box\centering\or \TY@box\raggedright\or \TY@box\raggedleft\or \TY@box\relax \fi \egroup\egroup \begingroup \CT@setup \CT@column@color \CT@row@color \CT@do@color \endgroup \@tempdima\ht\z@ \advance\@tempdima\minrowclearance 
\vrule\@height\@tempdima\@width\z@ \unhbox\z@ }\prepnext@tok}% \def\TY@arrayrule{% \TY@subwidth\arrayrulewidth \@addtopreamble{{\CT@arc@\vline}}}% \def\TY@classvi{\ifcase \@lastchclass \@acol \or \TY@subwidth\doublerulesep \ifx\CT@drsc@\relax \@addtopreamble{\hskip\doublerulesep}% \else \@addtopreamble{{\CT@drsc@\vrule\@width\doublerulesep}}% \fi\or \@acol \or \@classvii \fi}% }{% \let\CT@start\relax } } {\uccode`\*=`\ % \uppercase{\gdef\TX@verb{% \leavevmode\null\TX@vwarn {\ifnum0=`}\fi\ttfamily\let\\\ignorespaces \@ifstar{\let~*\TX@vb}{\TX@vb}}}} \def\TX@vb#1{\def\@tempa##1#1{\toks@{##1}\edef\@tempa{\the\toks@}% \expandafter\TX@v\meaning\@tempa\\ \\\ifnum0=`{\fi}}\@tempa!} \def\TX@v#1!{\afterassignment\TX@vfirst\let\@tempa= } \begingroup \catcode`\*=\catcode`\# \catcode`\#=12 \gdef\TX@vfirst{% \if\@tempa#% \def\@tempb{\TX@v@#}% \else \let\@tempb\TX@v@ \if\@tempa\space~\else\@tempa\fi \fi \@tempb} \gdef\TX@v@*1 *2{% \TX@v@hash*1##\relax\if*2\\\else~\expandafter\TX@v@\fi*2} \gdef\TX@v@hash*1##*2{*1\ifx*2\relax\else#\expandafter\TX@v@hash\fi*2} \endgroup \def\TX@vwarn{% \@warning{\noexpand\verb may be unreliable inside tabularx/y}% \global\let\TX@vwarn\@empty} \endinput %% %% End of file `tabulary.sty'. s3ql-2.26/doc/latex/manual.toc0000664000175000017500000003113713246754372017701 0ustar nikrationikratio00000000000000\select@language {english} \contentsline {chapter}{\numberline {1}S3QL}{1}{chapter.1} \contentsline {section}{\numberline {1.1}Features}{1}{section.1.1} \contentsline {section}{\numberline {1.2}Development Status}{2}{section.1.2} \contentsline {section}{\numberline {1.3}Supported Platforms}{2}{section.1.3} \contentsline {section}{\numberline {1.4}Contributing}{2}{section.1.4} \contentsline {chapter}{\numberline {2}Installation}{3}{chapter.2} \contentsline {section}{\numberline {2.1}Dependencies}{3}{section.2.1} \contentsline {section}{\numberline {2.2}Installing S3QL}{4}{section.2.2} \contentsline {section}{\numberline {2.3}Development Version}{4}{section.2.3} \contentsline {section}{\numberline {2.4}Running tests requiring remote servers}{4}{section.2.4} \contentsline {chapter}{\numberline {3}Storage Backends}{7}{chapter.3} \contentsline {section}{\numberline {3.1}Google Storage}{7}{section.3.1} \contentsline {section}{\numberline {3.2}Amazon S3}{8}{section.3.2} \contentsline {section}{\numberline {3.3}OpenStack/Swift}{9}{section.3.3} \contentsline {section}{\numberline {3.4}Rackspace CloudFiles}{10}{section.3.4} \contentsline {section}{\numberline {3.5}S3 compatible}{10}{section.3.5} \contentsline {section}{\numberline {3.6}Local}{11}{section.3.6} \contentsline {chapter}{\numberline {4}Important Rules to Avoid Losing Data}{13}{chapter.4} \contentsline {section}{\numberline {4.1}Rules in a Nutshell}{13}{section.4.1} \contentsline {section}{\numberline {4.2}Consistency Window List}{14}{section.4.2} \contentsline {section}{\numberline {4.3}Data Consistency}{14}{section.4.3} \contentsline {section}{\numberline {4.4}Data Durability}{15}{section.4.4} \contentsline {chapter}{\numberline {5}File System Creation}{17}{chapter.5} \contentsline {chapter}{\numberline {6}Managing File Systems}{19}{chapter.6} \contentsline {section}{\numberline {6.1}Changing the Passphrase}{19}{section.6.1} \contentsline {section}{\numberline {6.2}Upgrading the file system}{20}{section.6.2} \contentsline {section}{\numberline {6.3}Deleting a file system}{20}{section.6.3} \contentsline {section}{\numberline {6.4}Restoring Metadata Backups}{20}{section.6.4} \contentsline {chapter}{\numberline 
{7}Mounting}{21}{chapter.7} \contentsline {section}{\numberline {7.1}Permission Checking}{22}{section.7.1} \contentsline {section}{\numberline {7.2}Compression Algorithms}{22}{section.7.2} \contentsline {section}{\numberline {7.3}Notes about Caching}{23}{section.7.3} \contentsline {subsection}{\numberline {7.3.1}Maximum Number of Cache Entries}{23}{subsection.7.3.1} \contentsline {subsection}{\numberline {7.3.2}Cache Flushing and Expiration}{23}{subsection.7.3.2} \contentsline {section}{\numberline {7.4}Failure Modes}{23}{section.7.4} \contentsline {section}{\numberline {7.5}Automatic Mounting}{24}{section.7.5} \contentsline {chapter}{\numberline {8}Advanced S3QL Features}{25}{chapter.8} \contentsline {section}{\numberline {8.1}Snapshotting and Copy-on-Write}{25}{section.8.1} \contentsline {subsection}{\numberline {8.1.1}Snapshotting vs Hardlinking}{25}{subsection.8.1.1} \contentsline {section}{\numberline {8.2}Getting Statistics}{26}{section.8.2} \contentsline {section}{\numberline {8.3}Immutable Trees}{26}{section.8.3} \contentsline {section}{\numberline {8.4}Fast Recursive Removal}{27}{section.8.4} \contentsline {section}{\numberline {8.5}Runtime Configuration}{27}{section.8.5} \contentsline {chapter}{\numberline {9}Unmounting}{29}{chapter.9} \contentsline {chapter}{\numberline {10}Checking for Errors}{31}{chapter.10} \contentsline {section}{\numberline {10.1}Checking and repairing internal file system errors}{31}{section.10.1} \contentsline {section}{\numberline {10.2}Detecting and handling backend data corruption}{32}{section.10.2} \contentsline {chapter}{\numberline {11}Storing Authentication Information}{35}{chapter.11} \contentsline {chapter}{\numberline {12}Contributed Programs}{37}{chapter.12} \contentsline {section}{\numberline {12.1}benchmark.py}{37}{section.12.1} \contentsline {section}{\numberline {12.2}clone\_fs.py}{37}{section.12.2} \contentsline {section}{\numberline {12.3}pcp.py}{37}{section.12.3} \contentsline {section}{\numberline {12.4}s3ql\_backup.sh}{37}{section.12.4} \contentsline {section}{\numberline {12.5}expire\_backups.py}{38}{section.12.5} \contentsline {section}{\numberline {12.6}remove\_objects.py}{39}{section.12.6} \contentsline {chapter}{\numberline {13}Tips \& Tricks}{41}{chapter.13} \contentsline {section}{\numberline {13.1}SSH Backend}{41}{section.13.1} \contentsline {section}{\numberline {13.2}Permanently mounted backup file system}{41}{section.13.2} \contentsline {section}{\numberline {13.3}Improving copy performance}{41}{section.13.3} \contentsline {chapter}{\numberline {14}Known Issues}{43}{chapter.14} \contentsline {chapter}{\numberline {15}Manpages}{45}{chapter.15} \contentsline {section}{\numberline {15.1}The \textbf {\texttt {mkfs.s3ql}} command}{45}{section.15.1} \contentsline {subsection}{\numberline {15.1.1}Synopsis}{45}{subsection.15.1.1} \contentsline {subsection}{\numberline {15.1.2}Description}{45}{subsection.15.1.2} \contentsline {subsection}{\numberline {15.1.3}Options}{45}{subsection.15.1.3} \contentsline {subsection}{\numberline {15.1.4}Exit Codes}{46}{subsection.15.1.4} \contentsline {subsection}{\numberline {15.1.5}See Also}{46}{subsection.15.1.5} \contentsline {section}{\numberline {15.2}The \textbf {\texttt {s3qladm}} command}{46}{section.15.2} \contentsline {subsection}{\numberline {15.2.1}Synopsis}{46}{subsection.15.2.1} \contentsline {subsection}{\numberline {15.2.2}Description}{47}{subsection.15.2.2} \contentsline {subsection}{\numberline {15.2.3}Options}{47}{subsection.15.2.3} \contentsline {subsection}{\numberline 
{15.2.4}Actions}{47}{subsection.15.2.4} \contentsline {subsection}{\numberline {15.2.5}Exit Codes}{47}{subsection.15.2.5} \contentsline {subsection}{\numberline {15.2.6}See Also}{48}{subsection.15.2.6} \contentsline {section}{\numberline {15.3}The \textbf {\texttt {mount.s3ql}} command}{48}{section.15.3} \contentsline {subsection}{\numberline {15.3.1}Synopsis}{48}{subsection.15.3.1} \contentsline {subsection}{\numberline {15.3.2}Description}{48}{subsection.15.3.2} \contentsline {subsection}{\numberline {15.3.3}Options}{48}{subsection.15.3.3} \contentsline {subsection}{\numberline {15.3.4}Exit Codes}{49}{subsection.15.3.4} \contentsline {subsection}{\numberline {15.3.5}See Also}{50}{subsection.15.3.5} \contentsline {section}{\numberline {15.4}The \textbf {\texttt {s3qlstat}} command}{50}{section.15.4} \contentsline {subsection}{\numberline {15.4.1}Synopsis}{50}{subsection.15.4.1} \contentsline {subsection}{\numberline {15.4.2}Description}{50}{subsection.15.4.2} \contentsline {subsection}{\numberline {15.4.3}Options}{51}{subsection.15.4.3} \contentsline {subsection}{\numberline {15.4.4}Exit Codes}{51}{subsection.15.4.4} \contentsline {subsection}{\numberline {15.4.5}See Also}{51}{subsection.15.4.5} \contentsline {section}{\numberline {15.5}The \textbf {\texttt {s3qlctrl}} command}{51}{section.15.5} \contentsline {subsection}{\numberline {15.5.1}Synopsis}{51}{subsection.15.5.1} \contentsline {subsection}{\numberline {15.5.2}Description}{51}{subsection.15.5.2} \contentsline {subsection}{\numberline {15.5.3}Options}{52}{subsection.15.5.3} \contentsline {subsection}{\numberline {15.5.4}Exit Codes}{52}{subsection.15.5.4} \contentsline {subsection}{\numberline {15.5.5}See Also}{52}{subsection.15.5.5} \contentsline {section}{\numberline {15.6}The \textbf {\texttt {s3qlcp}} command}{52}{section.15.6} \contentsline {subsection}{\numberline {15.6.1}Synopsis}{52}{subsection.15.6.1} \contentsline {subsection}{\numberline {15.6.2}Description}{53}{subsection.15.6.2} \contentsline {subsubsection}{Snapshotting vs Hardlinking}{53}{subsubsection*.22} \contentsline {subsection}{\numberline {15.6.3}Options}{53}{subsection.15.6.3} \contentsline {subsection}{\numberline {15.6.4}Exit Codes}{53}{subsection.15.6.4} \contentsline {subsection}{\numberline {15.6.5}See Also}{54}{subsection.15.6.5} \contentsline {section}{\numberline {15.7}The \textbf {\texttt {s3qlrm}} command}{54}{section.15.7} \contentsline {subsection}{\numberline {15.7.1}Synopsis}{54}{subsection.15.7.1} \contentsline {subsection}{\numberline {15.7.2}Description}{54}{subsection.15.7.2} \contentsline {subsection}{\numberline {15.7.3}Options}{54}{subsection.15.7.3} \contentsline {subsection}{\numberline {15.7.4}Exit Codes}{54}{subsection.15.7.4} \contentsline {subsection}{\numberline {15.7.5}See Also}{55}{subsection.15.7.5} \contentsline {section}{\numberline {15.8}The \textbf {\texttt {s3qllock}} command}{55}{section.15.8} \contentsline {subsection}{\numberline {15.8.1}Synopsis}{55}{subsection.15.8.1} \contentsline {subsection}{\numberline {15.8.2}Description}{55}{subsection.15.8.2} \contentsline {subsection}{\numberline {15.8.3}Rationale}{55}{subsection.15.8.3} \contentsline {subsection}{\numberline {15.8.4}Options}{55}{subsection.15.8.4} \contentsline {subsection}{\numberline {15.8.5}Exit Codes}{56}{subsection.15.8.5} \contentsline {subsection}{\numberline {15.8.6}See Also}{56}{subsection.15.8.6} \contentsline {section}{\numberline {15.9}The \textbf {\texttt {umount.s3ql}} command}{56}{section.15.9} \contentsline {subsection}{\numberline 
{15.9.1}Synopsis}{56}{subsection.15.9.1} \contentsline {subsection}{\numberline {15.9.2}Description}{56}{subsection.15.9.2} \contentsline {subsection}{\numberline {15.9.3}Options}{56}{subsection.15.9.3} \contentsline {subsection}{\numberline {15.9.4}Exit Codes}{57}{subsection.15.9.4} \contentsline {subsection}{\numberline {15.9.5}See Also}{57}{subsection.15.9.5} \contentsline {section}{\numberline {15.10}The \textbf {\texttt {fsck.s3ql}} command}{57}{section.15.10} \contentsline {subsection}{\numberline {15.10.1}Synopsis}{57}{subsection.15.10.1} \contentsline {subsection}{\numberline {15.10.2}Description}{57}{subsection.15.10.2} \contentsline {subsection}{\numberline {15.10.3}Options}{57}{subsection.15.10.3} \contentsline {subsection}{\numberline {15.10.4}Exit Codes}{58}{subsection.15.10.4} \contentsline {subsection}{\numberline {15.10.5}See Also}{59}{subsection.15.10.5} \contentsline {section}{\numberline {15.11}The \textbf {\texttt {s3ql\_oauth\_client}} command}{59}{section.15.11} \contentsline {subsection}{\numberline {15.11.1}Synopsis}{59}{subsection.15.11.1} \contentsline {subsection}{\numberline {15.11.2}Description}{59}{subsection.15.11.2} \contentsline {subsection}{\numberline {15.11.3}Options}{59}{subsection.15.11.3} \contentsline {subsection}{\numberline {15.11.4}Exit Codes}{59}{subsection.15.11.4} \contentsline {subsection}{\numberline {15.11.5}See Also}{60}{subsection.15.11.5} \contentsline {section}{\numberline {15.12}The \textbf {\texttt {s3ql\_verify}} command}{60}{section.15.12} \contentsline {subsection}{\numberline {15.12.1}Synopsis}{60}{subsection.15.12.1} \contentsline {subsection}{\numberline {15.12.2}Description}{60}{subsection.15.12.2} \contentsline {subsection}{\numberline {15.12.3}Options}{60}{subsection.15.12.3} \contentsline {subsection}{\numberline {15.12.4}Exit Codes}{61}{subsection.15.12.4} \contentsline {subsection}{\numberline {15.12.5}See Also}{61}{subsection.15.12.5} \contentsline {section}{\numberline {15.13}The \textbf {\texttt {pcp}} command}{61}{section.15.13} \contentsline {subsection}{\numberline {15.13.1}Synopsis}{61}{subsection.15.13.1} \contentsline {subsection}{\numberline {15.13.2}Description}{61}{subsection.15.13.2} \contentsline {subsection}{\numberline {15.13.3}Options}{62}{subsection.15.13.3} \contentsline {subsection}{\numberline {15.13.4}Exit Codes}{62}{subsection.15.13.4} \contentsline {subsection}{\numberline {15.13.5}See Also}{62}{subsection.15.13.5} \contentsline {section}{\numberline {15.14}The \textbf {\texttt {expire\_backups}} command}{62}{section.15.14} \contentsline {subsection}{\numberline {15.14.1}Synopsis}{62}{subsection.15.14.1} \contentsline {subsection}{\numberline {15.14.2}Description}{62}{subsection.15.14.2} \contentsline {subsection}{\numberline {15.14.3}Options}{63}{subsection.15.14.3} \contentsline {subsection}{\numberline {15.14.4}Exit Codes}{64}{subsection.15.14.4} \contentsline {subsection}{\numberline {15.14.5}See Also}{64}{subsection.15.14.5} \contentsline {chapter}{\numberline {16}Further Resources / Getting Help}{65}{chapter.16} \contentsline {chapter}{\numberline {17}Implementation Details}{67}{chapter.17} \contentsline {section}{\numberline {17.1}Metadata Storage}{67}{section.17.1} \contentsline {section}{\numberline {17.2}Data Storage}{67}{section.17.2} \contentsline {section}{\numberline {17.3}Data De-Duplication}{68}{section.17.3} \contentsline {section}{\numberline {17.4}Caching}{68}{section.17.4} \contentsline {section}{\numberline {17.5}Eventual Consistency Handling}{68}{section.17.5} \contentsline 
{section}{\numberline {17.6}Encryption}{68}{section.17.6} s3ql-2.26/doc/latex/sphinxhowto.cls0000664000175000017500000000563513015321157020777 0ustar nikrationikratio00000000000000% % sphinxhowto.cls for Sphinx (http://sphinx-doc.org/) % \NeedsTeXFormat{LaTeX2e}[1995/12/01] \ProvidesClass{sphinxhowto}[2009/06/02 Document class (Sphinx HOWTO)] \ifx\directlua\undefined\else % if compiling with lualatex 0.85 or later load compatibility patch issued by % the LaTeX team for older packages relying on \pdf named primitives. \IfFileExists{luatex85.sty}{\RequirePackage{luatex85}}{} \fi % 'oneside' option overriding the 'twoside' default \newif\if@oneside \DeclareOption{oneside}{\@onesidetrue} % Pass remaining document options to the parent class. \DeclareOption*{\PassOptionsToClass{\CurrentOption}{\sphinxdocclass}} \ProcessOptions\relax % Default to two-side document \if@oneside % nothing to do (oneside is the default) \else \PassOptionsToClass{twoside}{\sphinxdocclass} \fi \LoadClass{\sphinxdocclass} % Set some sane defaults for section numbering depth and TOC depth. You can % reset these counters in your preamble. % \setcounter{secnumdepth}{2} % Change the title page to look a bit better, and fit in with the fncychap % ``Bjarne'' style a bit better. % \renewcommand{\maketitle}{% \noindent\rule{\textwidth}{1pt}\ifsphinxpdfoutput\newline\null\fi\par \ifsphinxpdfoutput \begingroup % These \defs are required to deal with multi-line authors; it % changes \\ to ', ' (comma-space), making it pass muster for % generating document info in the PDF file. \def\\{, }% \def\and{and }% \pdfinfo{ /Author (\@author) /Title (\@title) }% \endgroup \fi \begin{flushright} \sphinxlogo \py@HeaderFamily {\Huge \@title }\par {\itshape\large \py@release \releaseinfo}\par \vspace{25pt} {\Large \begin{tabular}[t]{c} \@author \end{tabular}}\par \vspace{25pt} \@date \par \py@authoraddress \par \end{flushright} \@thanks \setcounter{footnote}{0} \let\thanks\relax\let\maketitle\relax %\gdef\@thanks{}\gdef\@author{}\gdef\@title{} } \let\py@OldTableofcontents=\tableofcontents \renewcommand{\tableofcontents}{ \begingroup \parskip = 0mm \py@OldTableofcontents \endgroup \rule{\textwidth}{1pt} \vspace{12pt} } \@ifundefined{fancyhf}{ \pagestyle{plain}}{ \pagestyle{normal}} % start this way; change for \pagenumbering{arabic} % ToC & chapters \thispagestyle{empty} % Fix the bibliography environment to add an entry to the Table of % Contents. % For an article document class this environment is a section, % so no page break before it. \let\py@OldThebibliography=\thebibliography \renewcommand{\thebibliography}[1]{ \phantomsection \py@OldThebibliography{1} \addcontentsline{toc}{section}{\bibname} } % Same for the indices. % The memoir class already does this, so we don't duplicate it in that case. % \@ifclassloaded{memoir}{}{ \let\py@OldTheindex=\theindex \renewcommand{\theindex}{ \phantomsection \py@OldTheindex \addcontentsline{toc}{section}{\indexname} } } s3ql-2.26/doc/man/0000775000175000017500000000000013246754372015346 5ustar nikrationikratio00000000000000s3ql-2.26/doc/man/s3ql_verify.10000664000175000017500000001034213246754371017675 0ustar nikrationikratio00000000000000.\" Man page generated from reStructuredText. . .TH "S3QL_VERIFY" "1" "Mar 04, 2018" "2.26" "S3QL" .SH NAME s3ql_verify \- Verify data in an S3QL file system . .nr rst2man-indent-level 0 . 
.de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C s3ql_verify [options] .ft P .fi .UNINDENT .UNINDENT .SH DESCRIPTION .sp S3QL is a file system for online data storage. Before using S3QL, make sure to consult the full documentation (rather than just the man pages which only briefly document the available userspace commands). .sp The \fBs3ql_verify\fP command verifies all data in the file system. In contrast to \fBfsck.s3ql\fP, \fBs3ql_verify\fP does not trust the object listing returned by the backend, but actually attempts to retrieve every object. It therefore takes a lot longer. .sp The format of \fB\fP depends on the backend that is used. The S3QL User\(aqs Guide should be consulted for a description of the available backends. .SH OPTIONS .sp The \fBs3ql_verify\fP command accepts the following options. .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .BI \-\-debug\-modules \ Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-debug Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-quiet be really quiet .TP .B \-\-version just print program version and exit .TP .BI \-\-cachedir \ Store cached data in this directory (default: \fB~/.s3ql)\fP .TP .BI \-\-authfile \ Read authentication credentials from this file (default: \fB~/.s3ql/authinfo2)\fP .TP .BI \-\-backend\-options \ Backend specific options (separate by commas). See backend documentation for available options. .TP .BI \-\-missing\-file \ File to store keys of missing objects. .TP .BI \-\-corrupted\-file \ File to store keys of corrupted objects. .TP .B \-\-data Read every object completely, instead of checking just the metadata. .TP .BI \-\-parallel \ PARALLEL Number of connections to use in parallel. .TP .BI \-\-start\-with \ Skip over first objects and with verifying object +1. .UNINDENT .UNINDENT .UNINDENT .SH EXIT CODES .sp \fBs3ql_verify\fP may terminate with the following exit codes: .INDENT 0.0 .TP .B 0 Everything went well. .TP .B 1 An unexpected error occured. This may indicate a bug in the program. .TP .B 2 Invalid command line argument. .TP .B 3 Invalid backend option. .TP .B 10 Could not open log file for writing. .TP .B 11 No such backend. .TP .B 12 Authentication file has insecure permissions. .TP .B 13 Unable to parse proxy settings. .TP .B 14 Invalid credentials (Authentication failed). .TP .B 15 No permission to access backend (Authorization denied). .TP .B 16 Invalid storage URL, specified location does not exist in backend. .TP .B 17 Wrong file system passphrase. .TP .B 18 No S3QL file system found at given storage URL. .TP .B 19 Unable to connect to backend, can\(aqt resolve hostname. .TP .B 32 Unsupported file system revision (too old). .TP .B 33 Unsupported file system revision (too new). 
.TP .B 45 Unable to access cache directory. .TP .B 46 The file system data was verified, and some objects were found to be missing or corrupted. .UNINDENT .SH SEE ALSO .sp The S3QL homepage is at \fI\%https://bitbucket.org/nikratio/s3ql/\fP\&. .sp The full S3QL documentation should also be installed somewhere on your system, common locations are \fB/usr/share/doc/s3ql\fP or \fB/usr/local/doc/s3ql\fP\&. .SH COPYRIGHT © 2008 Nikolaus Rath .\" Generated by docutils manpage writer. . s3ql-2.26/doc/man/mount.s3ql.10000664000175000017500000001350013246754370017450 0ustar nikrationikratio00000000000000.\" Man page generated from reStructuredText. . .TH "MOUNT.S3QL" "1" "Mar 04, 2018" "2.26" "S3QL" .SH NAME mount.s3ql \- Mount an S3QL file system . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C mount.s3ql [options] .ft P .fi .UNINDENT .UNINDENT .SH DESCRIPTION .sp S3QL is a file system for online data storage. Before using S3QL, make sure to consult the full documentation (rather than just the man pages which only briefly document the available userspace commands). .sp The \fBmount.s3ql\fP command mounts the S3QL file system stored in \fIstorage url\fP in the directory \fImount point\fP\&. The storage url depends on the backend that is used. The S3QL User\(aqs Guide should be consulted for a description of the available backends. .SH OPTIONS .sp The \fBmount.s3ql\fP command accepts the following options. .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .BI \-\-log \ Destination for log messages. Specify \fBnone\fP for standard output or \fBsyslog\fP for the system logging daemon. Anything else will be interpreted as a file name. Log files will be rotated when they reach 1 MiB, and at most 5 old log files will be kept. Default: \fB~/.s3ql/mount.log\fP .TP .BI \-\-cachedir \ Store cached data in this directory (default: \fB~/.s3ql)\fP .TP .BI \-\-authfile \ Read authentication credentials from this file (default: \fB~/.s3ql/authinfo2)\fP .TP .BI \-\-debug\-modules \ Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-debug Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-quiet be really quiet .TP .BI \-\-backend\-options \ Backend specific options (separate by commas). See backend documentation for available options. .TP .B \-\-version just print program version and exit .TP .BI \-\-cachesize \ Cache size in KiB (default: autodetect). .TP .BI \-\-max\-cache\-entries \ Maximum number of entries in cache (default: autodetect). Each cache entry requires one file descriptor, so if you increase this number you have to make sure that your process file descriptor limit (as set with \fBulimit \-n\fP) is high enough (at least the number of cache entries + 100). 
.TP .B \-\-allow\-other Normally, only the user who called \fBmount.s3ql\fP can access the mount point. This user then also has full access to it, independent of individual file permissions. If the \fB\-\-allow\-other\fP option is specified, other users can access the mount point as well and individual file permissions are taken into account for all users. .TP .B \-\-allow\-root Like \fB\-\-allow\-other\fP, but restrict access to the mounting user and the root user. .TP .B \-\-fg Do not daemonize, stay in foreground .TP .B \-\-upstart Stay in foreground and raise SIGSTOP once mountpoint is up. .TP .BI \-\-compress \ Compression algorithm and compression level to use when storing new data. \fIalgorithm\fP may be any of \fBlzma\fP, \fBbzip2\fP, \fBzlib\fP, or none. \fIlvl\fP may be any integer from 0 (fastest) to 9 (slowest). Default: \fBlzma\-6\fP .TP .BI \-\-metadata\-upload\-interval \ Interval in seconds between complete metadata uploads. Set to 0 to disable. Default: 24h. .TP .BI \-\-threads \ Number of parallel upload threads to use (default: auto). .TP .B \-\-nfs Enable some optimizations for exporting the file system over NFS. (default: False) .UNINDENT .UNINDENT .UNINDENT .SH EXIT CODES .sp \fBmount.s3ql\fP may terminate with the following exit codes: .INDENT 0.0 .TP .B 0 Everything went well. .TP .B 1 An unexpected error occured. This may indicate a bug in the program. .TP .B 2 Invalid command line argument. .TP .B 3 Invalid backend option. .TP .B 10 Could not open log file for writing. .TP .B 11 No such backend. .TP .B 12 Authentication file has insecure permissions. .TP .B 13 Unable to parse proxy settings. .TP .B 14 Invalid credentials (Authentication failed). .TP .B 15 No permission to access backend (Authorization denied). .TP .B 16 Invalid storage URL, specified location does not exist in backend. .TP .B 17 Wrong file system passphrase. .TP .B 18 No S3QL file system found at given storage URL. .TP .B 19 Unable to connect to backend, can\(aqt resolve hostname. .TP .B 30 File system was not unmounted cleanly. .TP .B 31 File system appears to be mounted elsewhere. .TP .B 32 Unsupported file system revision (too old). .TP .B 33 Unsupported file system revision (too new). .TP .B 34 Insufficient free nodes, need to run \fBfsck.s3ql\fP\&. .TP .B 35 Attempted to mount read\-only, this is not supported. .TP .B 36 Mountpoint does not exist. .TP .B 37 Not enough available file descriptors. .TP .B 39 Unable to bind file system to mountpoint. .TP .B 45 Unable to access cache directory. .UNINDENT .SH SEE ALSO .sp The S3QL homepage is at \fI\%https://bitbucket.org/nikratio/s3ql/\fP\&. .sp The full S3QL documentation should also be installed somewhere on your system, common locations are \fB/usr/share/doc/s3ql\fP or \fB/usr/local/doc/s3ql\fP\&. .SH COPYRIGHT © 2008 Nikolaus Rath .\" Generated by docutils manpage writer. . s3ql-2.26/doc/man/fsck.s3ql.10000664000175000017500000001124513246754370017240 0ustar nikrationikratio00000000000000.\" Man page generated from reStructuredText. . .TH "FSCK.S3QL" "1" "Mar 04, 2018" "2.26" "S3QL" .SH NAME fsck.s3ql \- Check an S3QL file system for errors . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. 
.de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C fsck.s3ql [options] .ft P .fi .UNINDENT .UNINDENT .SH DESCRIPTION .sp S3QL is a file system for online data storage. Before using S3QL, make sure to consult the full documentation (rather than just the man pages which only briefly document the available userspace commands). .sp The \fBfsck.s3ql\fP command checks the file system in the location specified by \fIstorage url\fP for errors and attempts to repair any problems. The storage url depends on the backend that is used. The S3QL User\(aqs Guide should be consulted for a description of the available backends. .SH OPTIONS .sp The \fBfsck.s3ql\fP command accepts the following options. .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .BI \-\-log \ Destination for log messages. Specify \fBnone\fP for standard output or \fBsyslog\fP for the system logging daemon. Anything else will be interpreted as a file name. Log files will be rotated when they reach 1 MiB, and at most 5 old log files will be kept. Default: \fB~/.s3ql/fsck.log\fP .TP .BI \-\-cachedir \ Store cached data in this directory (default: \fB~/.s3ql)\fP .TP .BI \-\-authfile \ Read authentication credentials from this file (default: \fB~/.s3ql/authinfo2)\fP .TP .BI \-\-debug\-modules \ Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-debug Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-quiet be really quiet .TP .BI \-\-backend\-options \ Backend specific options (separate by commas). See backend documentation for available options. .TP .B \-\-version just print program version and exit .TP .B \-\-batch If user input is required, exit without prompting. .TP .B \-\-force Force checking even if file system is marked clean. .TP .B \-\-force\-remote Force use of remote metadata even when this would likely result in data loss. .UNINDENT .UNINDENT .UNINDENT .SH EXIT CODES .sp If \fBfsck.s3ql\fP found any file system errors (no matter if they were corrected or not), the exit code will be 128 plus one of the codes listed below. If no errors were found, the following exit codes are used as\-is: .INDENT 0.0 .TP .B 0 Everything went well. .TP .B 1 An unexpected error occured. This may indicate a bug in the program. .TP .B 2 Invalid command line argument. .TP .B 3 Invalid backend option. .TP .B 10 Could not open log file for writing. .TP .B 11 No such backend. .TP .B 12 Authentication file has insecure permissions. .TP .B 13 Unable to parse proxy settings. .TP .B 14 Invalid credentials (Authentication failed). .TP .B 15 No permission to access backend (Authorization denied). .TP .B 16 Invalid storage URL, specified location does not exist in backend. .TP .B 17 Wrong file system passphrase. .TP .B 18 No S3QL file system found at given storage URL. .TP .B 19 Unable to connect to backend, can\(aqt resolve hostname. .TP .B 32 Unsupported file system revision (too old). .TP .B 33 Unsupported file system revision (too new). .TP .B 40 Cannot check mounted file system. .TP .B 41 User input required, but running in batch mode. .TP .B 42 File system check aborted by user. 
.TP .B 43 Local metadata is corrupted. .TP .B 44 Uncorrectable errors found. .TP .B 45 Unable to access cache directory. .TP .B 128 This error code will be \fIadded\fP to one of the codes above if any file system errors have been found (no matter if they were corrected or not). .UNINDENT .SH SEE ALSO .sp The S3QL homepage is at \fI\%https://bitbucket.org/nikratio/s3ql/\fP\&. .sp The full S3QL documentation should also be installed somewhere on your system, common locations are \fB/usr/share/doc/s3ql\fP or \fB/usr/local/doc/s3ql\fP\&. .SH COPYRIGHT © 2008 Nikolaus Rath .\" Generated by docutils manpage writer. . s3ql-2.26/doc/man/s3qlstat.10000664000175000017500000000513213246754370017205 0ustar nikrationikratio00000000000000.\" Man page generated from reStructuredText. . .TH "S3QLSTAT" "1" "Mar 04, 2018" "2.26" "S3QL" .SH NAME s3qlstat \- Gather S3QL file system statistics . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C s3qlstat [options] .ft P .fi .UNINDENT .UNINDENT .SH DESCRIPTION .sp S3QL is a file system for online data storage. Before using S3QL, make sure to consult the full documentation (rather than just the man pages which only briefly document the available userspace commands). .sp The \fBs3qlstat\fP command prints statistics about the S3QL file system mounted at \fBmountpoint\fP\&. .sp \fBs3qlstat\fP can only be called by the user that mounted the file system and (if the file system was mounted with \fB\-\-allow\-other\fP or \fB\-\-allow\-root\fP) the root user. .SH OPTIONS .sp The \fBs3qlstat\fP command accepts the following options: .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .BI \-\-debug\-modules \ Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-debug Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-quiet be really quiet .TP .B \-\-version just print program version and exit .TP .B \-\-raw Do not pretty\-print numbers .UNINDENT .UNINDENT .UNINDENT .SH EXIT CODES .sp \fBs3qlstat\fP may terminate with the following exit codes: .INDENT 0.0 .TP .B 0 Everything went well. .TP .B 1 An unexpected error occured. This may indicate a bug in the program. .TP .B 2 Invalid command line argument. .UNINDENT .SH SEE ALSO .sp The S3QL homepage is at \fI\%https://bitbucket.org/nikratio/s3ql/\fP\&. .sp The full S3QL documentation should also be installed somewhere on your system, common locations are \fB/usr/share/doc/s3ql\fP or \fB/usr/local/doc/s3ql\fP\&. .SH COPYRIGHT © 2008 Nikolaus Rath .\" Generated by docutils manpage writer. . s3ql-2.26/doc/man/s3qlcp.10000664000175000017500000001133613246754370016637 0ustar nikrationikratio00000000000000.\" Man page generated from reStructuredText. . 
.TH "S3QLCP" "1" "Mar 04, 2018" "2.26" "S3QL" .SH NAME s3qlcp \- Copy-on-write replication on S3QL file systems . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C s3qlcp [options] .ft P .fi .UNINDENT .UNINDENT .SH DESCRIPTION .sp S3QL is a file system for online data storage. Before using S3QL, make sure to consult the full documentation (rather than just the man pages which only briefly document the available userspace commands). .sp The \fBs3qlcp\fP command duplicates the directory tree \fBsource\-dir\fP into \fBdest\-dir\fP without physically copying the file contents. Both source and destination must lie inside the same S3QL file system. .sp The replication will not take any additional space. Only if one of directories is modified later on, the modified data will take additional storage space. .sp \fBs3qlcp\fP can only be called by the user that mounted the file system and (if the file system was mounted with \fB\-\-allow\-other\fP or \fB\-\-allow\-root\fP) the root user. .sp Note that: .INDENT 0.0 .IP \(bu 2 After the replication, both source and target directory will still be completely ordinary directories. You can regard \fB\fP as a snapshot of \fB\fP or vice versa. However, the most common usage of \fBs3qlcp\fP is to regularly duplicate the same source directory, say \fBdocuments\fP, to different target directories. For a e.g. monthly replication, the target directories would typically be named something like \fBdocuments_January\fP for the replication in January, \fBdocuments_February\fP for the replication in February etc. In this case it is clear that the target directories should be regarded as snapshots of the source directory. .IP \(bu 2 Exactly the same effect could be achieved by an ordinary copy program like \fBcp \-a\fP\&. However, this procedure would be orders of magnitude slower, because \fBcp\fP would have to read every file completely (so that S3QL had to fetch all the data over the network from the backend) before writing them into the destination folder. .UNINDENT .SS Snapshotting vs Hardlinking .sp Snapshot support in S3QL is inspired by the hardlinking feature that is offered by programs like \fI\%rsync\fP or \fI\%storeBackup\fP\&. These programs can create a hardlink instead of copying a file if an identical file already exists in the backup. However, using hardlinks has two large disadvantages: .INDENT 0.0 .IP \(bu 2 backups and restores always have to be made with a special program that takes care of the hardlinking. The backup must not be touched by any other programs (they may make changes that inadvertently affect other hardlinked files) .IP \(bu 2 special care needs to be taken to handle files which are already hardlinked (the restore program needs to know that the hardlink was not just introduced by the backup program to safe space) .UNINDENT .sp S3QL snapshots do not have these problems, and they can be used with any backup program. 
.SH OPTIONS .sp The \fBs3qlcp\fP command accepts the following options: .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .BI \-\-debug\-modules \ Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-debug Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-quiet be really quiet .TP .B \-\-version just print program version and exit .UNINDENT .UNINDENT .UNINDENT .SH EXIT CODES .sp \fBs3qlcp\fP may terminate with the following exit codes: .INDENT 0.0 .TP .B 0 Everything went well. .TP .B 1 An unexpected error occured. This may indicate a bug in the program. .TP .B 2 Invalid command line argument. .UNINDENT .SH SEE ALSO .sp The S3QL homepage is at \fI\%https://bitbucket.org/nikratio/s3ql/\fP\&. .sp The full S3QL documentation should also be installed somewhere on your system, common locations are \fB/usr/share/doc/s3ql\fP or \fB/usr/local/doc/s3ql\fP\&. .SH COPYRIGHT © 2008 Nikolaus Rath .\" Generated by docutils manpage writer. . s3ql-2.26/doc/man/mkfs.s3ql.10000664000175000017500000000760613246754370017260 0ustar nikrationikratio00000000000000.\" Man page generated from reStructuredText. . .TH "MKFS.S3QL" "1" "Mar 04, 2018" "2.26" "S3QL" .SH NAME mkfs.s3ql \- Create an S3QL file system . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C mkfs.s3ql [options] .ft P .fi .UNINDENT .UNINDENT .SH DESCRIPTION .sp S3QL is a file system for online data storage. Before using S3QL, make sure to consult the full documentation (rather than just the man pages which only briefly document the available userspace commands). .sp The \fBmkfs.s3ql\fP command creates a new file system in the location specified by \fIstorage url\fP\&. The storage url depends on the backend that is used. The S3QL User\(aqs Guide should be consulted for a description of the available backends. .sp Unless you have specified the \fB\-\-plain\fP option, \fBmkfs.s3ql\fP will ask you to enter an encryption password. This password will \fInot\fP be read from an authentication file specified with the \fB\-\-authfile\fP option to prevent accidental creation of an encrypted file system. .SH OPTIONS .sp The \fBmkfs.s3ql\fP command accepts the following options. .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .BI \-\-cachedir \ Store cached data in this directory (default: \fB~/.s3ql)\fP .TP .BI \-\-authfile \ Read authentication credentials from this file (default: \fB~/.s3ql/authinfo2)\fP .TP .BI \-\-debug\-modules \ Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-debug Activate debugging output from all S3QL modules. 
Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-quiet be really quiet .TP .BI \-\-backend\-options \ Backend specific options (separate by commas). See backend documentation for available options. .TP .B \-\-version just print program version and exit .TP .BI \-L \ Filesystem label .TP .BI \-\-max\-obj\-size \ Maximum size of storage objects in KiB. Files bigger than this will be spread over multiple objects in the storage backend. Default: 10240 KiB. .TP .B \-\-plain Create unencrypted file system. .TP .B \-\-force Overwrite any existing data. .UNINDENT .UNINDENT .UNINDENT .SH EXIT CODES .sp \fBmkfs.s3ql\fP may terminate with the following exit codes: .INDENT 0.0 .TP .B 0 Everything went well. .TP .B 1 An unexpected error occured. This may indicate a bug in the program. .TP .B 2 Invalid command line argument. .TP .B 3 Invalid backend option. .TP .B 11 No such backend. .TP .B 12 Authentication file has insecure permissions. .TP .B 13 Unable to parse proxy settings. .TP .B 14 Invalid credentials (Authentication failed). .TP .B 15 No permission to access backend (Authorization denied). .TP .B 16 Invalid storage URL, specified location does not exist in backend. .TP .B 19 Unable to connect to backend, can\(aqt resolve hostname. .TP .B 45 Unable to access cache directory. .UNINDENT .SH SEE ALSO .sp The S3QL homepage is at \fI\%https://bitbucket.org/nikratio/s3ql/\fP\&. .sp The full S3QL documentation should also be installed somewhere on your system, common locations are \fB/usr/share/doc/s3ql\fP or \fB/usr/local/doc/s3ql\fP\&. .SH COPYRIGHT © 2008 Nikolaus Rath .\" Generated by docutils manpage writer. . s3ql-2.26/doc/man/s3qllock.10000664000175000017500000001061113246754370017160 0ustar nikrationikratio00000000000000.\" Man page generated from reStructuredText. . .TH "S3QLLOCK" "1" "Mar 04, 2018" "2.26" "S3QL" .SH NAME s3qllock \- Make trees on an S3QL file system immutable . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C s3qllock [options] .ft P .fi .UNINDENT .UNINDENT .SH DESCRIPTION .sp S3QL is a file system for online data storage. Before using S3QL, make sure to consult the full documentation (rather than just the man pages which only briefly document the available userspace commands). .sp The \fBs3qllock\fP command makes a directory tree in an S3QL file system immutable. Immutable trees can no longer be changed in any way whatsoever. You can not add new files or directories and you can not change or delete existing files and directories. The only way to get rid of an immutable tree is to use the \fBs3qlrm\fP command. .sp \fBs3qllock\fP can only be called by the user that mounted the file system and (if the file system was mounted with \fB\-\-allow\-other\fP or \fB\-\-allow\-root\fP) the root user. .SH RATIONALE .sp Immutability is a feature designed for backups. 
Traditionally, backups have been made on external tape drives. Once a backup was made, the tape drive was removed and locked somewhere in a shelf. This has the great advantage that the contents of the backup are now permanently fixed. Nothing (short of physical destruction) can change or delete files in the backup. .sp In contrast, when backing up into an online storage system like S3QL, all backups are available every time the file system is mounted. Nothing prevents a file in an old backup from being changed again later on. In the worst case, this may make your entire backup system worthless. Imagine that your system gets infected by a nasty virus that simply deletes all files it can find \-\- if the virus is active while the backup file system is mounted, the virus will destroy all your old backups as well! .sp Even if the possibility of a malicious virus or trojan horse is excluded, being able to change a backup after it has been made is generally not a good idea. A common S3QL use case is to keep the file system mounted at all times and periodically create backups with \fBrsync \-a\fP\&. This allows every user to recover her files from a backup without having to call the system administrator. However, this also allows every user to accidentally change or delete files \fIin\fP one of the old backups. .sp Making a backup immutable protects you against all these problems. Unless you happen to run into a virus that was specifically programmed to attack S3QL file systems, backups can be neither deleted nor changed after they have been made immutable. .SH OPTIONS .sp The \fBs3qllock\fP command accepts the following options: .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .BI \-\-debug\-modules \ Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-debug Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-quiet be really quiet .TP .B \-\-version just print program version and exit .UNINDENT .UNINDENT .UNINDENT .SH EXIT CODES .sp \fBs3qllock\fP may terminate with the following exit codes: .INDENT 0.0 .TP .B 0 Everything went well. .TP .B 1 An unexpected error occured. This may indicate a bug in the program. .TP .B 2 Invalid command line argument. .UNINDENT .SH SEE ALSO .sp The S3QL homepage is at \fI\%https://bitbucket.org/nikratio/s3ql/\fP\&. .sp The full S3QL documentation should also be installed somewhere on your system, common locations are \fB/usr/share/doc/s3ql\fP or \fB/usr/local/doc/s3ql\fP\&. .SH COPYRIGHT © 2008 Nikolaus Rath .\" Generated by docutils manpage writer. . s3ql-2.26/doc/man/s3qlctrl.10000664000175000017500000000765313246754370017210 0ustar nikrationikratio00000000000000.\" Man page generated from reStructuredText. . .TH "S3QLCTRL" "1" "Mar 04, 2018" "2.26" "S3QL" .SH NAME s3qlctrl \- Control a mounted S3QL file system . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . 
RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C s3qlctrl [options] ... .ft P .fi .UNINDENT .UNINDENT .sp where \fBaction\fP may be either of \fBflushcache\fP, \fBupload\-meta\fP, \fBcachesize\fP or \fBlog\-metadata\fP\&. .SH DESCRIPTION .sp S3QL is a file system for online data storage. Before using S3QL, make sure to consult the full documentation (rather than just the man pages which only briefly document the available userspace commands). .sp The \fBs3qlctrl\fP command performs various actions on the S3QL file system mounted in \fBmountpoint\fP\&. .sp \fBs3qlctrl\fP can only be called by the user that mounted the file system and (if the file system was mounted with \fB\-\-allow\-other\fP or \fB\-\-allow\-root\fP) the root user. .sp The following actions may be specified: .INDENT 0.0 .TP .B flushcache Uploads all changed file data to the backend. .TP .B upload\-meta Upload metadata to the backend. All file system operations will block while a snapshot of the metadata is prepared for upload. .TP .B cachesize Changes the cache size of the file system. This action requires an additional argument that specifies the new cache size in KiB, so the complete command line is: .INDENT 7.0 .INDENT 3.5 .sp .nf .ft C s3qlctrl [options] cachesize .ft P .fi .UNINDENT .UNINDENT .TP .B log Change the amount of information that is logged into \fB~/.s3ql/mount.log\fP file. The complete syntax is: .INDENT 7.0 .INDENT 3.5 .sp .nf .ft C s3qlctrl [options] log [ [ ...]] .ft P .fi .UNINDENT .UNINDENT .sp here \fBlevel\fP is the desired new log level and may be either of \fIdebug\fP, \fIinfo\fP or \fIwarn\fP\&. One or more \fBmodule\fP may only be specified with the \fIdebug\fP level and allow to restrict the debug output to just the listed modules. .UNINDENT .SH OPTIONS .sp The \fBs3qlctrl\fP command also accepts the following options, no matter what specific action is being invoked: .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .BI \-\-debug\-modules \ Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-debug Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-quiet be really quiet .TP .B \-\-version just print program version and exit .UNINDENT .UNINDENT .UNINDENT .sp Hint: run \fBs3qlctrl \-\-help\fP to get help on the additional arguments that the different actions take. .SH EXIT CODES .sp \fBs3qlctrl\fP may terminate with the following exit codes: .INDENT 0.0 .TP .B 0 Everything went well. .TP .B 1 An unexpected error occured. This may indicate a bug in the program. .TP .B 2 Invalid command line argument. .UNINDENT .SH SEE ALSO .sp The S3QL homepage is at \fI\%https://bitbucket.org/nikratio/s3ql/\fP\&. .sp The full S3QL documentation should also be installed somewhere on your system, common locations are \fB/usr/share/doc/s3ql\fP or \fB/usr/local/doc/s3ql\fP\&. .SH COPYRIGHT © 2008 Nikolaus Rath .\" Generated by docutils manpage writer. . s3ql-2.26/doc/man/s3ql_oauth_client.10000664000175000017500000000546413246754370021057 0ustar nikrationikratio00000000000000.\" Man page generated from reStructuredText. . 
.TH "S3QL_OAUTH_CLIENT" "1" "Mar 04, 2018" "2.26" "S3QL" .SH NAME s3ql_oauth_client \- Obtain Google Storage OAuth2 tokens . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C s3ql_oauth_client [options] .ft P .fi .UNINDENT .UNINDENT .SH DESCRIPTION .sp S3QL is a file system for online data storage. Before using S3QL, make sure to consult the full documentation (rather than just the man pages which only briefly document the available userspace commands). .sp The \fBs3ql_oauth_client\fP command may be used to obtain OAuth2 authentication tokens for use with Google Storage. It requests "user code" from Google which has to be pasted into the browser to complete the authentication process interactively. Once authentication in the browser has been completed, \fBs3ql_oauth_client\fP displays the OAuth2 refresh token. .sp When combined with the special username \fBoauth2\fP, the refresh token can be used as a backend passphrase when using the Google Storage S3QL backend. .SH OPTIONS .sp The \fBs3ql_oauth_client\fP command accepts the following options: .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .BI \-\-debug\-modules \ Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-debug Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-quiet be really quiet .TP .B \-\-version just print program version and exit .UNINDENT .UNINDENT .UNINDENT .SH EXIT CODES .sp \fBs3ql_oauth_client\fP may terminate with the following exit codes: .INDENT 0.0 .TP .B 0 Everything went well. .TP .B 1 An unexpected error occured. This may indicate a bug in the program. .TP .B 2 Invalid command line argument. .UNINDENT .SH SEE ALSO .sp The S3QL homepage is at \fI\%https://bitbucket.org/nikratio/s3ql/\fP\&. .sp The full S3QL documentation should also be installed somewhere on your system, common locations are \fB/usr/share/doc/s3ql\fP or \fB/usr/local/doc/s3ql\fP\&. .SH COPYRIGHT © 2008 Nikolaus Rath .\" Generated by docutils manpage writer. . s3ql-2.26/doc/man/s3qladm.10000664000175000017500000001063113246754370016773 0ustar nikrationikratio00000000000000.\" Man page generated from reStructuredText. . .TH "S3QLADM" "1" "Mar 04, 2018" "2.26" "S3QL" .SH NAME s3qladm \- Manage S3QL file systems . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . 
RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C s3qladm [options] .ft P .fi .UNINDENT .UNINDENT .sp where \fBaction\fP may be either of \fBpassphrase\fP, \fBupgrade\fP, \fBdelete\fP or \fBdownload\-metadata\fP\&. .SH DESCRIPTION .sp S3QL is a file system for online data storage. Before using S3QL, make sure to consult the full documentation (rather than just the man pages which only briefly document the available userspace commands). .sp The \fBs3qladm\fP command performs various operations on \fIunmounted\fP S3QL file systems. The file system \fImust not be mounted\fP when using \fBs3qladm\fP or things will go wrong badly. .sp The storage url depends on the backend that is used. The S3QL User\(aqs Guide should be consulted for a description of the available backends. .SH OPTIONS .sp The \fBs3qladm\fP command accepts the following options. .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .BI \-\-debug\-modules \ Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-debug Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-quiet be really quiet .TP .BI \-\-log \ Destination for log messages. Specify \fBnone\fP for standard output or \fBsyslog\fP for the system logging daemon. Anything else will be interpreted as a file name. Log files will be rotated when they reach 1 MiB, and at most 5 old log files will be kept. Default: \fBNone\fP .TP .BI \-\-authfile \ Read authentication credentials from this file (default: \fB~/.s3ql/authinfo2)\fP .TP .BI \-\-backend\-options \ Backend specific options (separate by commas). See backend documentation for available options. .TP .BI \-\-cachedir \ Store cached data in this directory (default: \fB~/.s3ql)\fP .TP .B \-\-version just print program version and exit .UNINDENT .UNINDENT .UNINDENT .sp Hint: run \fBs3qladm \-\-help\fP to get help on the additional arguments that the different actions take. .SH ACTIONS .sp The following actions may be specified: .INDENT 0.0 .TP .B passphrase Changes the encryption passphrase of the file system. .TP .B upgrade Upgrade the file system to the newest revision. .TP .B delete Delete the file system with all the stored data. .TP .B download\-metadata Interactively download backups of the file system metadata. .UNINDENT .SH EXIT CODES .sp \fBs3qladm\fP may terminate with the following exit codes: .INDENT 0.0 .TP .B 0 Everything went well. .TP .B 1 An unexpected error occured. This may indicate a bug in the program. .TP .B 2 Invalid command line argument. .TP .B 3 Invalid backend option. .TP .B 10 Could not open log file for writing. .TP .B 11 No such backend. .TP .B 12 Authentication file has insecure permissions. .TP .B 13 Unable to parse proxy settings. .TP .B 14 Invalid credentials (Authentication failed). .TP .B 15 No permission to access backend (Authorization denied). .TP .B 16 Invalid storage URL, specified location does not exist in backend. .TP .B 17 Wrong file system passphrase. .TP .B 18 No S3QL file system found at given storage URL. .TP .B 19 Unable to connect to backend, can\(aqt resolve hostname. .TP .B 45 Unable to access cache directory. 
.UNINDENT .SH SEE ALSO .sp The S3QL homepage is at \fI\%https://bitbucket.org/nikratio/s3ql/\fP\&. .sp The full S3QL documentation should also be installed somewhere on your system, common locations are \fB/usr/share/doc/s3ql\fP or \fB/usr/local/doc/s3ql\fP\&. .SH COPYRIGHT © 2008 Nikolaus Rath .\" Generated by docutils manpage writer. . s3ql-2.26/doc/man/umount.s3ql.10000664000175000017500000000620013246754370017634 0ustar nikrationikratio00000000000000.\" Man page generated from reStructuredText. . .TH "UMOUNT.S3QL" "1" "Mar 04, 2018" "2.26" "S3QL" .SH NAME umount.s3ql \- Unmount an S3QL file system . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C umount.s3ql [options] .ft P .fi .UNINDENT .UNINDENT .SH DESCRIPTION .sp S3QL is a file system for online data storage. Before using S3QL, make sure to consult the full documentation (rather than just the man pages which only briefly document the available userspace commands). .sp The \fBumount.s3ql\fP command unmounts the S3QL file system mounted in the directory \fImount point\fP and blocks until all data has been uploaded to the storage backend. .sp Only the user who mounted the file system with \fBmount.s3ql\fP is able to unmount it with \fBumount.s3ql\fP\&. If you are root and want to unmount an S3QL file system mounted by an ordinary user, you have to use the \fBfusermount \-u\fP or \fBumount\fP command instead. Note that these commands do not block until all data has been uploaded, so if you use them instead of \fBumount.s3ql\fP then you should manually wait for the \fBmount.s3ql\fP process to terminate before shutting down the system. .SH OPTIONS .sp The \fBumount.s3ql\fP command accepts the following options. .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .BI \-\-debug\-modules \ Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-debug Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-quiet be really quiet .TP .B \-\-version just print program version and exit .TP .B \-\-lazy\fP,\fB \-z Lazy umount. Detaches the file system immediately, even if there are still open files. The data will be uploaded in the background once all open files have been closed. .UNINDENT .UNINDENT .UNINDENT .SH EXIT CODES .sp \fBumount.s3ql\fP may terminate with the following exit codes: .INDENT 0.0 .TP .B 0 Everything went well. .TP .B 1 An unexpected error occured. This may indicate a bug in the program. .TP .B 2 Invalid command line argument. .UNINDENT .SH SEE ALSO .sp The S3QL homepage is at \fI\%https://bitbucket.org/nikratio/s3ql/\fP\&. .sp The full S3QL documentation should also be installed somewhere on your system, common locations are \fB/usr/share/doc/s3ql\fP or \fB/usr/local/doc/s3ql\fP\&. 
.SH COPYRIGHT © 2008 Nikolaus Rath .\" Generated by docutils manpage writer. . s3ql-2.26/doc/man/s3qlrm.10000664000175000017500000000552113246754370016652 0ustar nikrationikratio00000000000000.\" Man page generated from reStructuredText. . .TH "S3QLRM" "1" "Mar 04, 2018" "2.26" "S3QL" .SH NAME s3qlrm \- Fast tree removal on S3QL file systems . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C s3qlrm [options] .ft P .fi .UNINDENT .UNINDENT .SH DESCRIPTION .sp S3QL is a file system for online data storage. Before using S3QL, make sure to consult the full documentation (rather than just the man pages which only briefly document the available userspace commands). .sp The \fBs3qlrm\fP command recursively deletes files and directories on an S3QL file system. Although \fBs3qlrm\fP is faster than using e.g. \fBrm \-r\(ga\fP, the main reason for its existence is that it allows you to delete immutable trees (which can be created with \fBs3qllock\fP) as well. .sp Be warned that there is no additional confirmation. The directory will be removed entirely and immediately. .sp \fBs3qlrm\fP can only be called by the user that mounted the file system and (if the file system was mounted with \fB\-\-allow\-other\fP or \fB\-\-allow\-root\fP) the root user. .SH OPTIONS .sp The \fBs3qlrm\fP command accepts the following options: .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .BI \-\-debug\-modules \ Activate debugging output from specified modules (use commas to separate multiple modules). Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-debug Activate debugging output from all S3QL modules. Debug messages will be written to the target specified by the \fB\-\-log\fP option. .TP .B \-\-quiet be really quiet .TP .B \-\-version just print program version and exit .UNINDENT .UNINDENT .UNINDENT .SH EXIT CODES .sp \fBs3qlrm\fP may terminate with the following exit codes: .INDENT 0.0 .TP .B 0 Everything went well. .TP .B 1 An unexpected error occured. This may indicate a bug in the program. .TP .B 2 Invalid command line argument. .UNINDENT .SH SEE ALSO .sp The S3QL homepage is at \fI\%https://bitbucket.org/nikratio/s3ql/\fP\&. .sp The full S3QL documentation should also be installed somewhere on your system, common locations are \fB/usr/share/doc/s3ql\fP or \fB/usr/local/doc/s3ql\fP\&. .SH COPYRIGHT © 2008 Nikolaus Rath .\" Generated by docutils manpage writer. . 
s3ql-2.26/doc/manual.pdf (binary PDF content omitted)
)+"8#oJgx*򘒤 !ȾogHgQYu.BZ:SC}xjLq-4\=H@"e=2x(H9g"N ]K`/^{ey $8C+^ j8`I~Vg/ITCQA|W?կ_\=F~zqnluuCר- VP~Z͞O(w2ҴIy$9b7,LCn)O\qƊ|ďNN L30f=*Yt#KwuTl Ǧr%KmEYb^ VDyx[§@IF*>WB@,J"DSN-#F oxg[%2>jИtGccsz'QLUMsx;uDvDh9\'@{Aqx}+0-НZhdlw{l u{t~ R N,m& $o 8}g1<Lw]#B5уc8GvBw]/ޟ:D;v/heXHA"2jCBTH@v 8&_TՑIR$<B24 M"!Lod *zEKy%[ws_Jf4S5=Ai̢Cus?5L] {m\+ ]8ֺ{ܝN$R=NԌPK '$LDƹ$B3d*!e3vV1#u=AzR*X_U=@NA FpeF"}Lu0p|/z2 endstream endobj 748 0 obj << /Length 2942 /Filter /FlateDecode >> stream xڥYY6~_a`Vjԙ`&McAKt[hxEY b"Y*i M`RJu*_~ V%п]~'jVQ^=tsxsVgw(,WI0Woo{:Jot< ~^{|=Nyx.f~*bHe֫$~ě~uxyި4=<|x@;KݾNn,W J2*Z{?}+g+uUB2Vmx ն>rkB)ܩ8v _@?jwʮjn ~e٠ 6a8`j]C}q3C%o=Tma;0ߡj[euxw""{:={!`!9KȂv8T=ƀIe[޶#)+^tCd$H$ HW[S2ʡ@ {YMbYMhVd#0r_@nqm3iC Z/U]uboKVKFdRo)HDd<2U^Lx_Pue@a{t~*ibo Yj֮(^Kt= )g+f _0̖6CN ~OSnā#4BP)-(efq{cp6gFSs5 6#&`'#Rg;f]]WCpz{ i i\H!0oHAK{s%h;2Fؓ<^ZDZ A%2ktř ȑQC?-%FyԦ/ݑ^mll[L;'gϵK&:áG+ +¾yj>ȧ M w02~xxä?\"9RAiL<v7NpC0g$Q`-#9,[`O?9IG\:ɌXsv4U= o9c]NPh"đ8B5#d/N=LDg,/=`Bzm15b¶X`BeG bMZJ¸ޗU-r 8`_ϑ{3pCBj("5wHbl v9ZqJpeŦ\G mvW* ntT0_Ca.숵-*)؝S$t,S9iezDoredq< _>O[D``7 vұne3/'gk-b$B#Jfw' Cg2+I2h@82Fua %`?"BdX ccQ|4NqTz*T7y"x~tD uTs?)}ErC#Jae_ '* Ȕ%`(LXAcIH\T8WeC* b7X_t%i<&"oE=\gEQ$˧0I0аj"{2^|*{_ M(g@L5~ aZjb?xu>_ܱcKM{Q_|U.@l]\)<v*m.TX ԏ+G' <u>Qt b7:_65[۟}ptVZ$O/G2~yXL{$wH^%pp|bQ\S,nj Y;%^<3lڮfw=qsBĕ;JI̽1A(<3gos|:t{^n( ֶ:ps t> stream xڍWK6 WhyfֱM4i'mhS=xă}/W?mW/*H` $(*4ض!}zU,4zNuJΊI׫էq Haq48h:ꃬ/Ņ8^"$GJ;&p{P}fRDIi]D٥б'*eɡJQQnD6,7V෨H3=!itױBHK -gE%I:IFN˥hB:2cP>fޫAMiٶ0/iǖ[< 2p\vٽnysmih=܈}*{e\Vm+֚9KMB:n FIゟxR.'Q@Q߿* [VxTI3ma./+иθ#|[yYJ:BTN=k2$eN<{^K7VOqF /qP@vhh }i1K$#Z{Jg8ljTmw#c0,.U%d֘";3-r"6$uJUr4ͰӤ 3ɣ^3ïpA_V'M)v"ye;y]_j1_nC8D ,#rZ"&iѓ[6cavt,bƥ]:tTAȤǧdEnPӃW"xyQ~y}C ,(bB!|w30_sü]lMo˜Ŭx Po<1@tK9D' ,67NX bʄG@ӌpJLfp!|u  cTqcOzR}:'L#o] Ki%)z"GБ - $ށט6~5YwL?rY핳ZG y6]fxt0RXW ᴆ x,1~Nl΄vgp9ig7R#qJL1Q>g> stream xڵXI۶ϯ-TUc'rI~Il )R&)ϛK~{pgR4F_7H}@7??^0xҀ"F:` JDv!:g\]~k I4fȄX@'06x~Wm.߮o~˙$Bj׌-X$|_\njHJ8:R "y?4|R4|sy$5"pϏ͍YMCZ€Mhim-/6- uۼq+ݒtT;Js״9[GRڟ)7+[ [iFZ) cGUP5Yue[Z8_ +/cmv6efF.`"֧G9`j&vP4eVnL"/Z)6no A2% T'Bgķo΂IL6ewkatDŽv ib)CbFfLK>  0_ Wweulf `5 7XLA',BXG HIj&:S?-h@@h-@<L$\NFk Qw{Wۼ*NuA@";ݧSv/cVk f=ŷ.87Ԁ1pPvt:PRǟ<hy$"9û//C.aPE"GiĿL`m@MtlK:{ hN6h5G5Q&mq$9ڟ¸]ICEw9tBQe-=,ǽ.;iѸP)H@I/֚jy?dA-[)˦pJN5GSn܁ Y4wռl]+Kޤ/qKz{67թl=-9mWPwXmu(,NWЇi^ŹfDE0 >a78ĵBСSFAXR"!DҭshCuT<)#Bop͇vmT0ekjtOh;ofӦEU5Z)GW11vb! 
e.( Ut \YF]1ewil ďC(`QP<`fAZIԧuڢ gF ͶOPsNBY~eO_?6uD|*8DHb B(sl쪢l˽,`P/e{]deivcǴ!k$ظN+@K-#io&Y&mw](5;%xgš6S}(Kq>WKY[" U6OqxC` ~^ yj̱5XuNڞ 4 J*ks4X[MjmJO{팃 H^ | 0p1,I9l~,f:F/| PrWptm>m颽pB{㻙iφݰc!pkɇzPTh(Hp?(PgGR|;s0vwysxα/;,ºjxReV kp4~#FV逽댮b~?Y摋ԋ;5>:u^j_7qc4|b%?zOp1R͏ۯ endstream endobj 766 0 obj << /Length 1687 /Filter /FlateDecode >> stream xڭXn8}Wmmyѵ( ݶnSDVQ$dv' $҈93{k{'/&w,4.V^Dȏ0"._珳9u-E.gFEKa^(Ňۋ GBE8rr{?x$ d@  0 ҹ鋊4t2,yBى/RӇ9P¨]}^yw._"Qw#P1-`?5jXL땕tu¼J"WFg'mkq{ʥB7'%Ah\E&lTv Y=#?GU_mM!wM(hjH_{+z<Uc`X)5>lƽړ\ l@Df`L p=9(3OSq:m Ƽ|.FbD@5e*#C@彩&TB|h{sPZ䏗xR BDYh˼#}wacE(jޟÝyKm 1C PO[KHf *{*{ب''/sB{wi!ڶncV솴PUG{U߹Qмv#"Ѵ働'G5186/ŷ,ye_rux:$M-2+iR / aHp6+=dzSo[Q@0t_tfoiAxfBn`/n[ YVq8ZTܭ <аiȧ1>F YOƖ`j -bf ~[QuG_a&J} X FY͕ٗm] @5%:6$v^T{GBByf޶m1tC!hC]a ssSH`y$\I؝w(ouD?D(2VJ}X iczΘ)PX\,&(rav|p#7R6V]{Hz#ʠsmS>EMQ& щ{mw{HSz'fUU<6؆ <9_ >SBݶ{,zކ7!4IP!O)~UU?UW"-ځo endstream endobj 773 0 obj << /Length 2011 /Filter /FlateDecode >> stream xڵYI6ϯ-X+EQP$)`r"4ۉ70K~{6oLC&d齧~O^\]f$suX#'c'̹ɜƣQ쾬ӡUkE!x'"n>ݼzus'@G ;q0bIܩ]1·wWH8"b䩐8-|8X|?݉ FE>$@ 8c6o>_dWxԇf/n6"utsUabuY*ۈv[e'gۼ:vfuc^d$$1~|B^x, 'gXlUR(nEG#ݻ\ݡ˫@0?0v_jvz|jS oyVoEV&(|D`6&n-ߙhDuzoo Pd3C'2i+B'Vư0w p}~ʳ"3|Z 6jV\doҐF| (6NVo: y3@PY`2~ktt^  4MY_LEQK-5k-3:tK/2q+8켲ΆBO9 @|S{D<#Cfm[z5"e)ɯC,HxJr3ܾۉZX-3M(P;p)ڐԙBL[]M\}2a ݝ|qW3 wE PSh+ݱ< 1>\ڂ}z)Y?NiS/Xðdr84 QqbL&8Qvm⯲M" cJBQU]#JDvaGTCmgC 㐗Gbt1bаdb+5.6ц3.Jy!{Њ!10m^X+ϤF~YE۴BWLUE#Bw*82VK@HC)$xȱ%񽩀c)4X֝f荓 Dg=_cW(Jv[<9blJg4jd0 `*|fe4EI$=^Qi+2ȋά\ CܮZ$ 5@ZʐWۚr c =]^@_35S6%T}w' WT:m10jawOgKp= L}En)1 Y~G}Ƞ +Q^1)qZ㑧q"|QVn=lhڼ奞|ɐ]&{AK#QM'DgvMPͶ^SJ?3ۋ9RAH DT ϲ\&d[SY~ Ӓ#\c5sPPÞy-=ݩnk/ .>9x fN2OO?yPTi{L)hFY{vu`^49 *;+QV́D9ҡ Kܭɻqc!}& D焇Oxwy7cxFSC-bZ]U|G; q endstream endobj 782 0 obj << /Length 1906 /Filter /FlateDecode >> stream xڭY[۶~_@L. ipE C] %Un;Ë,={+jDrMB^^~yRBs\^'O2Yr|J?_-<~gyiMmJk '\/>_xˋ?.l@J,Y.>}?&"OYDu )H' M 1J8@Y5=摕ԫv,wU}`i_`&h8%Q;um *5}amG: v8%EdXxV{ۯS_4wj  22ˋ8E@|Q-z.s Nsp|z2>^eQ]$*QӁ#,ThН>Sh@Z bi; :lP,ePu;H EB"tқzSV'bMXehψt1 ²][5Ü9uknOmqWIe|y`$xD@<3 zLM)NI<)^`u<$ze"{c}\!g>j1Mr3 uG:Ӝg3H;ʯ8逧'XX"U1l)'qO]g;b3eFS587Rtc S!gj]+'QRJ =?EN7ؘ[_\OC8Wè k@*B_-޿W;0H z0S`PFOce5U}'RٗI5,^2ş ԜxhZOb/6a;|u*60#pGkupI}\mwhE; Ug Qy,ooPi9fmq?vP7v?tG,+8yj+bX2IQKU@0o0)neSެ.s%7 a;~6 Tu+N2ϱcDcNe7.7␫;2}And#G} mQd NG(&?zS}geS+?r^.vHص6pu@(#B3M7F_y徾 ?++w_x{],6A]9l;̜\+,)r~>bKRL{p D~gMpೠ*nw۲?9O*.K=/^0 endstream endobj 682 0 obj << /Type /ObjStm /N 100 /First 884 /Length 1889 /Filter /FlateDecode >> stream xڽYKoFW9xٙ(p[$9؊b QDWuG]Rp;d)ޔdXלL=dbµ8CNg2\H=2)~%|2<qw|Q2be>%(%x'3 Y!R9$QEgSm>b{bB,e"P.$0@(C xr.J`DP"z|>HJM zN*Mt V!:!C cfՙHpFčI7QUUYQ9@ ub@g{I  5/RV sg|uU1)!(38I.H$P2)C^e/ 0'J ¼s(:M@* r#z dbb}KNyp< GRh3iXTW3 VQB}ZoWmXQC(e%"p>89?4/Ll.׳C^9$IlֵtՔ~_~k^*R« XKc}]ey5Ͽ6U~_nWqx9?7EԀI*|0Wlm^B/Lb|b^Wz8kK_y\`Ne]*/P3_ۧ;h5Xd1JAy2%zZa|yz~^7Oyl7$K'Jw겦Ȫ{n65Ca@:\)VwqA3!΄;m򇫜23B50欽j֞JG3pA2E#͘ھ u#H< },P⬶o/SM֧l;bHhŒO=9S2<`PO3~8 V^\B:->E%mqk0@X_9ِ:w_v;~[3ګ! xuPn( ,p3}f}e+wrqo>HI@ݯbpM:E"bS=mQ ԛM7wZAC5ٺ麛7q\ 3UwOWռGlj{ P5m{H{IL2s ^m^`>]kxI~?; A.YOzT:J}P r㏂w}&?E'5# jB I&8UejLu\߮.lϢwfxnmZ]Nal Sbޭ;\tOFQPJ(a2,\[ ]7+;m1VOתOׄ8^ܒ,X  aq-_^֚A2nNquJq/XD2#tYS\gC0&Rsho3u]Y"6t _V4̋v;y HI(ԋ#̤Tr?{ ?4GQ$Lz| 6FmZ y%NKwT}HI}ŅDs?P60{c a=R(v00{q-0c #i姱_lI- Hn endstream endobj 791 0 obj << /Length 2345 /Filter /FlateDecode >> stream xڵYK6ϯ5i3ڪ8~Y'͛EBwIB})Jfrl?uͫoEd~ع8"' S?̄sW:[ǯ+'Zc#!*վy/}sC? hn>8%RW5N_;7ӈqxH8 @$ĩW}>Y;"~j e^<2")*fdyݛMdt h=|+>p`Eߗ~/>/($ةKLRWJCP5ĉ=]5 %glȻKݭs۳0[E{^la}LP{J>CNq}X٩qd|Q8dR 8<'q.BJkը5Nd٩q>pza: :6\$pu^slK$I2j&D^`?t`gVe&E[iW pt( ܗE6F^GN]V Û %~Bw=u=;]NM:@6ds+ęPN7[+Z3n7A5 ^QyB7UA7WvTڱyOd̿V͈Z {ײL:Z`=%Md!ND2I4IN=՚āREW0pm& mWhbd'E ݙhAO)l)֍3;v6ac(gQXݯ?T:qE 2 OR2y$=wcNUVf@㈇MSW! 
A,,˴UA '̔@(SVw}P] 5p/HWXlDIq𭣡NP^B<V4jl x/#I:٣[BQθ|+:SdW]G2NusGO-Zf6س^dՖrGCa!sˏ{5US~C3!4a Z'JM, :֞U*P>ѩP t L벧& lR ,2kVQ0tZ84Nv1)#z9%$Z[,b6,EZ94U $sO=E*1F!TMazA.cd;hhh+2mxO ]Enpwymڃ\6ԫ!_U$-k=`xX.znб~݀׶0SN6"*aZ^{T뭂׮/#K'舴do,vz˅+CԴF'LZ%0p4NKa-]xA+B؉17O а>sze!c1Fї4 6TNJ3[_8T169JDeKd2?Mӫ|C)3b/)`y6??]8 {- (Q@cayV@l}Qm6!u>]b^MDƗumH)rtP-Kx}k{jG1//EGJt *:!FRZe{72;MLFq1`-/ԬGx{vr,'w?KQqF!ȵAKi/>|lY[s^7-=vU-`S} Nj zfث ?.'sxijM{=awG#g߀}@O"`l;AGq= ' il"*'|"o1F Y(ҞkAm夶g!m B|ݨd/zoFW ?\/=0akr>u6 GiA BG_[ή>'x,,~Uc'K]! J2$;"=%OG@n2y{pj޷dRyUZ6몵,ӳ~%Kz8<=.8:xmHgʋ9 "8OEł4f4YY37Nw9 endstream endobj 797 0 obj << /Length 1667 /Filter /FlateDecode >> stream xڽYێ6}W6")QRQH&hHAyʴ-DRf-ZǗEa`ͥF䙙3Ҿ|䏛%0fEċ n#}v6'Q<})f:sQ>3sxS?6|f͂˶Ͼ7h{ X ߅q~[p~dH: X/>idb۽o8@IVh%i)j3/ͨk3Ix%˼57qJ,@|:L5ƌ S^.:ӷ~r~>4&Ufg\)xE]S9,5SJpHS NW9  i[Ve/B9 eWڧi^%H HOVL+u@B?:AlJ+ZtmgvY)˗.&_ g8~U1?R#9G=oŕ6PIbD+8rp\;蛶KC]@䐉Yݟֲ&K_f8ZAɕ(&6 U3*ӫ|m)ڱ/8f:CƩo^6b˫]BvUDDccl8iu~],NӶMDtzQ_5:c-z]ҨoQ #lѲ{6bjcNq q3:- 3<ʭ5Khk 9EtLۭq2r),a`a4Z4J.dHƘ!\gY <\.jƠ'g֫+@^XyY5vPEi%X ڑ;EPz(.ݔ[ѣ=Ig(nl #zqlMyjnKsk}y֓B?Sw@=sw-{!&;wS0 ۋ U8_lR8UJB:黴R5xZL5k) endstream endobj 805 0 obj << /Length 1981 /Filter /FlateDecode >> stream xڵYY6~_Gh"%AI4ۧ4Zpt/Yel (jqofy,*"XyQ0ag~]47u֗.ﲐi+ET>r]GE8Gm`#Q*P[xapEbWx 8t&BP9UR0yG!b}8ov"l#'#b/fTa-"Jb k}.Zв]8fuYϱf(]P֛v5 ĝu 꿴N;ymҌڃ?1rc ?1}5 1EHj1VC$0[E d\VASCj>.eۦ;iE0O?6yڮٰKj1jr$iw (b`$hnOPSЋNq D$%Oq27rWcc| NvՔ*Yqh"/qLMj'0N<sv84tl(b1lZ~;*0wMZ˝XCzOXڤ1>Q>uq՗qJ7gyJ HB2bF-|VMʢFdsr毭0NF6M6y6=t5^1/>p?WDPgQS䕃lWbR -: -<^juַTT9S$}]Bξ%iii]wh_yg C?5_ _O@4jL?yF Xk챧#b>oz yJ dt19gǪ>ݯRS` g&HJ\-O"8X^#-4.KHÂC> [20i> 3p~$" 5GhLv E*/L{5si_ujW3 2qQ :38P o0a^rBE|v90D&3hi>'F*P|j:1.ΊIװH"+ W."Dmѷ,ԭ=^݂p|}:a U.R2Ӌ%2 h-5M,"(byˈXC ~^#SIkqAM5T8 5R1~3[1N-ӊPUkգ蓕NQJLJ=j5ۋAj|Te IMozyrpԚO'ɓ5jk{2z̳woT+āψwtZlL8L.CS ,wqj"dFRTͭ0irf ;ML)ÓVLL sh j8H5kn:{ od'6"XgCޜ'`Sw=Q[w%a5UoE<?b"B(@Gʇb5fې 4{d~Q endstream endobj 816 0 obj << /Length 2190 /Filter /FlateDecode >> stream xڵYK6F4I4LM2ӷ2mk#KIso"j4Ц(X"IG7?ެ^Bɥw"EaL$nSKG*;U٦m^o*T(|۟o|a0 ID#/}( ;Q{/1ާoU|$*N5Lテ#PޘO~ܥV5K?|n(8YmFJya_6Md -nwfJ8V_yJK^zLZ਴^x5&vTYQH`Tz>KH $2K+Ւ "[nt+/7]UCik|(iZzal`$a'#Fi0L`$I Zyȹ)dFhŀj$ (Pa_  P`]u.k>ۑC" Qҍ HX ?;_Rn_PeOQo4 c9sƛzoꙈ%C< !€8~ PCh5b09%x'2@ ȸ$` AXTKd"0ꄌ&$`[S)r N[6:VOYHB@2T_H@z@z<@ o O(`D|֞@eˣpտA[r?Ty*ISaJ@AbgppBd'Dtg|hj>I1^bተ{:9aZ/f.yaEQ>/Szjv J>-BRNkAz1Lt)CT:u?L(lYK`WBlRevCݑR:::=5l>KO^zqq"__< FV̄x;]^Q9˾r69WYiUhjsL!Qi'*fyZ- CjIv^>[fFkӼH_Ub׽VH^6-Ee4~C PD9lPk+@/ ),$ǥI:6(o0$T>= DDao ZD+tس'=~*C"b9J6HN|/瀇5P/T.(ctkˆ endstream endobj 826 0 obj << /Length 2706 /Filter /FlateDecode >> stream xڵnF]_1 ðyFxmo6H551+5";MuMyͷoTK$n"*?E~;ObU /ZU:4M+oo^|pHBߍhU7v9yJݽv~o{wˍ=T(v-l\-:$DzṞ+ _~!<<}\Ap4 n(ؕS`s\*v(`YSUi^w4F/ɋVg}>zp}jLt8E7[#1r[แ>p:`,<I3SyDA{0xg fiLR\iFeE9Iވ-x9C5DmMllr^<ƹnK xLՒCh)N3 {k+4j𔠴{b ╄@;Dʘ[ ѯ5#Ŗ853DT?eZWRmZ=3 ~t@JYJPEm4tcu?cMCm33H/pU[ d:FV."!Uɠhs2m^L ѦyԼxвU sy`c3?Ӭ6zd+GXjYOyᑁ\žeX ԔE 4ͩM+f'NVl2 Y! 
IǨ(pT3HW2P,*c}i f\o8c˴" Rmus<)x^^u]FEʜSNm|t, ɲZպ'fm1Ƞ| ܷPڛ&UtN4;ggt'lJ (1pā]~bB4/n>7GRX7\.M RFVٝ&qlɽ.!YJN]68`5c^`@G8eem^'vuxbǧ_- Z$6Q^4 f3pARȽ' &xj0 aME 5(hbӟEw%Uhh*h!=)ؓa&W4ur7+ !7LCQ].jlxmFqԣb?Vi#|;Q@} 1$?̈_,my 1ڒ2z:*oy[] C/G&D<͐M 葸8r?Y4n`6@&*} jz'ͧqXi/5z ޔ߹:8fr2y7Ϥј豉K>͔0)˨qL.E׼qK56\ڙRd~g2woz1>w|6Y5оh8E>Z͟լ 9WfE":=r,u}zyڼڦzR3R7k:_~6%5uO}IPƗfUhBPtq=攆Hn[r^.p e8*r/xSRPY_Pn<އ^mdCC M,Lo6_N^z0D`TN/iKj5}q)팰AN%ye)ÕQ "z!^K~w^-靷lN[%]= وV$d/5x'ʗ^V>.Y㘻6CK)lZ'_V4ƨ {sOQY6_w |`@p —sQWHc7VV]DC:Pkbc endstream endobj 836 0 obj << /Length 1931 /Filter /FlateDecode >> stream xYYo6~У#( jѢEoiZzWD Ii%q}) XEoBWy$X]z1brz~,NWMW֙.;*U)6z}}G-AA0O^HxxwfU(g齹*t=4LK6a:H cǖа2 tG|qC{}(="CFeIX@crfIGB.>\Sm۴vyߪ|AQ<ʾeqa@y?O}pmo9`z#BI?+ 'UTUV Mq9K!@;5Zp6֗)%!{.HtSI""'|I@F9^v%^l.`th*un~o-U( x<$el9i3Դ>v϶ۛB['oq&.޷ێ(3ށAczjxõLۗ%v`?v/ws 27HQw:+Kt ?:AʮtxiUA8Ұlrspge@~%s#XlvX즙(Hvs. ;' y'Na r"c1l2uy&#X>.?ã XYHD,(q[us$e2qv Y撓}c:j̬PF) )D~ %΋4kw %b EUx$$t3BH^9{Rb >1+9d h5w3EIT\m I(!J0DB N(/lD{"MN h(#9ll,i{$ABm9{y-#}f$&gZΙт4(L9Pط]UB~ Kp?} V"\}c[РB+qA gJW8^ngp)V^ xEO ^*'+DbuI55$`^b :*+ܩ-t BwA kUnwz`$ nic֦nSU:)ݫnrexw(}ͳj0,L7 Ɉ fψ.ԜޯhO$7nэ +x_8Sd֍;d+04r@5n[h5nKmKhckij Ji"jWK$5\чCbւA)'#C(uߌ xGU6-I#] Q3\wv c,Vg.mA!q4h5%v\1kF)9=wܱ4l.Bz@Xty1OS~+o9@꨻jo!=[W>'!YN[jv-pKZ Hх[d,"L^Dkm*;*/0nLD!Qes#@{i@E06EAAZN`K]RGW' x+uа;6FPɜ隅 P.FfzڱSٯy#G._0`:ū>l|@oK`|[3Pvf L9\#g #|fRuK"YK| oS;AVI-%`~_}ɢֳI.h 9np0Q?Y)VՒ OSK^m+NRSԋUoWE=^ `ɺ1@`jDt~\*~ˎag&Q 2 :ɗ숭Eh>ctI[2$ endstream endobj 845 0 obj << /Length 2793 /Filter /FlateDecode >> stream xڭ]ܶ~:%mpM6}}(\?]Ւܿ g=ԇ,r8 88q𷛿ܾM`2w ɓ(I O(%>O~ey1ե՝-mmW{]&27?~#8A.`qT͇qA%"xpXmf|o71s8~# @=]zXx|7$DSJPx8ImDj~#x՟z!] F@EdƵډpiIaqp2fO%*AH lȿ*|8^&᣹Lq׮kA +<4M'x4Sl`8jew[k' B.`䅫> A.8AeaSNcgYB{x9yBd@3p@椤!4 aRHxYвvy<:^2lЖ[.Z#.ͥVU]y>ɸŹ ;,G`n׼{I48E0d4ui<@t&>¸Dh5+4E3&{|PDx$[^ ˫,|9ڻ*&@gP-3m)?_i0:&xĬ DuOFra2rpB}x>pZWj,`qS AҺgXԤ<&88<:@NgVGj,q+iW6ʐuu{?]mrgZ'/9! RfvڏMLxNܚ?zMp[#f刘#3l32[=rb^'W—}MǼ03Ťzpp6P$pًߒ>mUm0; z6-+m7ө~x6ro[Էirzܦ2ˆ9g^DG/4sO~0TpƓ)N@s2d+v_Wsœa.8?;SowIww~CSL^4W\PXE97Uhg_Yêa챳Wu33n喷i>e<6QkTȂ^f 9>az4"1FN.Bx@u9uiis*g VC1+$)$'$'Dz|9+ :eۺ wxi-hc%P2*34zr{tmwb39`):N?2yrGnxǓjicnfg|D4?[OϖO-1U>ʩƇ r3buLRȹ/T,v5ص|ϳ!B^URG~ }> stream x]}@MC$ p]mrMȒO!%vmZ9hrݬyRBq$+DrK>]qop4M_U aڔn?x haF4x#Mv1DyWL7?@)H%R$QBFJWrpGfW\w7E:"{CzFyZ5`tlwCm,Yҷ#IX[VBJ$$M7]I߆!qnOf[J)7[fZɋȈժnqٌ]U \w*& ^1x *jDtx.bvvGx﷌@{ja{ߕGZz@?՜VJCHS~Œ^<=$b \gJR&IN>*ވmwS. @kyۃZo #6XFu4ݱj֜WB-t7娟m_-_9$\)FwA֏a$Dx0uM ymt]nPCe>2jvv{ I}/Cs~j{1-:ބKl(Z`th#+ȷ nphDhs$yl}_כG%#v~'-yTu.گgd bg멖Kf~M3|iPu䡛jl߄tW">C0>!?QZ{EKK-+crz`|Xpt)B! 
-[Jt>QTW n?l2<@#x:w1߀19VeD*^f8Xs ?-aUAX3{%lړ6s;VE,VYD=sigZB J:!~鹈lY P fs ABN~ B1kbyA Z%S@Ɖ-9Uě:VmWyu&~s$^hZVg !J~V\0HNELB5\V(F$\귙ŧ[r{ 'p G1{p=,Lw}@9ՓdDC6e,IweCicKT.O U 7MҌf@jOt8z88qt=acA ױg}V8aBǥ>&‚b_>Bʙ]h)ΙJPGz0ar`C׶ .(>8QMK| Af5a3B0nWԋ~FbPL l,ڵ❟ {հU4S]؆Zj@(ǐڧZ,uJ_E*w1aٮgo8D`zUmUG[uyjjO ~<sMRA悫x-)EX!rkq{ n 7flb7X& "g Xyh{BjȊ9aVe鍖Ze.};Nx Eί<[24Ё֜z-j# 9vvūo^ *+oHgJ^63PCCڽƆ͚wʁ W*hµi/B[Lf\9RPթ3xx6im~9hc[i;De/hxD1*F|%3pWZABj,}GҗzIPz uHx8OY6҅D˶% endstream endobj 865 0 obj << /Length 2013 /Filter /FlateDecode >> stream xڭYY~_Pb$ ckd X{"[#fxhyd}oOU)ifgփbwWu.7?l~IT1sE"!"m| _?,Nm>ֺl7YW:뵝0/7no~pJ fP8- DI`VՁP V_o"0#l2IAL#"]Q즳)lbl_F+iuYm'Vz5hr8?mu[ӘyUq%erZ|ğpE"vlT{O^UfuQf+eqP1nܺc^wk ֔T* ʪ\pELr =M*ܺojBvV:Fߺdw &w_24%-.>[1~a^.Z"BB"sh ֵ,f NX,5$Q $)#^|Tu-(vhm( .YQ3;=&:;Z Na>iVUP6wgb58^_%rwK.BEOz<Γ~?"O{apdtv56V Ǒk;;l|t:)l29E;p9Ͳg f%Бz`Y)TVe!w+w&{q^:3]_Kr?V}ZosQ :mٝ\!"SĐ$93܆пl` 7Yp );t;$S]A-SF2e?Ҋy`?v 9C1)~݂7af\i؎h2$%Ɛ]g,/a7cm=#RE"7gҠKPoIz/'2"|˼*F%3%JlC_gw?XM\,bҀ+AM4F"2AkZc[)`j`<5K %|OCn)s%Վ+2h0_ߙm7o\E(wVhS/bS::1&ٲ/)X D*Hp.scH(4u 9 J׽.W@81.BfPh谊NZAakF@|;*ݞ4[󣄯?{eXn=puXay}R2m ;]mYe'*d\ VW*NҢ\[ e!>QȮɲ]|7`^!l$"a&hƸ;^+$!+{M+c?Y<tSd]ag88M%\-A{D6C7WE2ǷŢt!/p/4[F.'śo,öQ4$J3а>)/Sdya?B:))&XA\NH~Qv][!uj0TMP gf1ͮe8g?_7P:oI{$|V\rN]s< \nq endstream endobj 875 0 obj << /Length 1746 /Filter /FlateDecode >> stream xڽYI6WV)EHH2AIDj$Q8_M3cy^I-jzz䕟XF8V8H d߽];8N,+Zw+X}ӒNvqrԁʪէ/a ~X(zBZ94U2%=8C.ctwN ~]*gN#ّ),Þf]ơ]!XD=NLE6뻦efg0նA!:\Xi**"6MlN*M`ˮhJNs=w _M>Pmh.]Gk=!_v%vyf܌oO^ESyAF6vԶ>vx5t1SN88J\;xo>S|1ec>g]1cF h9JzUnӟN*h/@ӳ1>h)Xrl} ɾ=d:w&]EzQ`{6`{&{"p3ئ_DZ<5;@3|͓H)4K-kՀ)R%ٔڛZ7'~F\:zٴE= ٮ%zcCsܘGʏbxVIDW*Ő"Z_i"ν@&68g{}UK*3pvr-57芴 zf9V VM<9Wю#j|X?Cn9a'\-2\Xn):֊TK%G!\lB5<[_M?!] |7t8z P0X _*C[6 x=GUp Am˳.'JK2!F= M0Oۖ QԸ"P2[5,DO<ЖQږfre5DZz+$/͵VnFpM٫RDxNV=*AqPik,EU`JRYWGRM24M $; N=Mӹxн@ C^Rq1k=PST,HLplOukƸ\#e=~̣7EzYC(w6MLP&Ջ0\Ml<JY53at4ڽ$XSGE|Ml ihSY@Һڷ#Ns*CUÍI;_s}VC%@}9g/IɍCtʳ[)t4WŚI5B5"2N+źVF2X o۫;%> stream xڵYY6~_Gi^d`AdF<:ߧEɒtt`T4=n% "ŔHL'{Ƚ?bm;SuiW7DŔ&m =H&ŧ=} {‹$0,⑗>~^p+xaܛÊoR*|@4iѝ(;>llK iK4M>mZQ ZmҊ=/Mgfsg ¤5BIWj6K/5Ufhy d,9Rsc,5V7W`W$ej;/}hօ^K4~ 7liL>g˝EʢZS܂vo"-QTk wMMjY8;.'i 0@*eymFm)K+שM+7Xmfh!Lܳ}[T[ltDk7IC0:y1\Vb qrkϜ˒v |{O/l/,rW}ih rL1X.iЭ_:\aؤ;zR"2^KFF:jkﰾgn n_^,ob6.-Z_l!p Ecc휜t1&*,/6N>۸.E>whE5L^5U:2|?1ZxgbJDkU| l;pb$ jS endstream endobj 788 0 obj << /Type /ObjStm /N 100 /First 881 /Length 1567 /Filter /FlateDecode >> stream xYo7 ~_E'Q"E A@P-ȃmlgm}8v=$}GR':M.(Scɮص8 v55T(qQ8۽831^ubHbBt1Ftd𡄑r19o BPMPG  "qr`X+l45 #LԬ# Ӓ̟̏d(A" /10 TA;D H Um0 P] Ӂ %SJ'r)z$&do @@fM3i6r4DzHF hfMY! 1ɄAHh.P7ЌqBfDFL(@3(6!B U`a6%G*\"l#ВCSk09sT$"i E)U`ڒ-E0x`4Y)[Elh@X`HPP{ ug N/?~<=yr?4qA}ČZTXM\83w8. I!=H{鴕u|L ^ݔ ,u7;rݫ{;t7Nr1ɨ{&79^OCZ:yw6~6bfsߍE rCyU)ٝȬp00R?$LtUG`쟏gɼsܠSPy|R(Ze U=#po.Or6=˳ٴ{aXu'g˓| dH|~w3'3,'MIt5qT|@lᢘHf.ݮ-erYSaufYfb5j>ef4(- Lrm@m)'ֹpX8$ w(,y[ƴRؒ KG+WF&ZMiMU ,X8Vo]nSv+mf#}YJ <9"ŏ8cJӧO~1dܓr2=Ň:g=I=}[O~:^^g֢[,gbmC.k|j̩h #Kjp}}ښi7Ly;E\Wa{i jRe.mX)zCAKI> stream xڵYY6~_Ucl:À#c,4[mlo"RK˞`U$u~UTSoQ6oE$x]o{q u}?ޭ_18z $W?Q%"M;(g}:'a 슘IH|ЇQBElhRtT;cyUlQ0ڷ3\ gF@F$ADrlnJ}ԤV,w'7J|63A@bFjTam_UfĝVL'վ3c KqLF鬪[@$D>?}85^~js{u6od Tgӫnv[;ঙQ;g '5U$aB+lCAD[e"*N9 a1*eLbXlܘ57L{\F@L&v rش:fNg^F)yaLCbA4 #әVSbΛh(}l6>9p!suP8ٽR,E`׾^lzI˥LHĂ:ΉCP1NDcGh=K@7IʻS ht*E M05oЌadb-Ahͣb~KSWE3 >-Ƭ$RZDŭFwn_{A,15*%Ty}wYǿE+-y HAt^9֨P_w&N!sGJf}טzongQ4Icr݃8`݃VGT8+m. 
sQF4ѣ$ܶGg(;#;!*k跪ntN9w-840<-Bbu[tro *sMKtY*fz۵5A.2ӐBb` 8t 4il(PBס$@!C5!w C!tZ*XޕэDw] ԇ:Kth5f&Id݅O ,8 @B}ASӟ ֹ9M7f`l@r@# pOsKVus$Ԅyt羔lIWa|lcq zU v(p+qތ(gY,ˢt!O6"#W4߇h_,hiV!bDM3AGj =Kڬ d|Ve%YbJ9 / 32G^gl-cuҋ]tvqoh,?̺6.N?FS1 92Ҭ֮с[Eʇ B`Wv[xR)@,UJD {[;UB,?yvD{]xL> stream xڽYێ6}W6"kPHs)lЇ4(2mEqÛ.wa{+ g g0Xaכ ͂ IU@Shi(2 % BD,3R J2IvW`ک*G5a8&zQzzv=8Qdp^?QyJٵK&GJox`0>- IgZW IH7Fv[a8*&US(f*q/6nhH b3O-Nx*5W',X֛Ա"<\_3W5:5*MϞEc5 -+[:elfsͦv3|,{a/3^kQl_ًEvQWKhC]5l6{ ؕyXr?H:|W9k!ql>M7]92S;`r/v@FiB )d:0u>WSSz,ʙ- Kɔ5x3ڈE-eWڧEU%H+Ɨw؊EY2)J.a=+GZY*.sa㕦5|ə<} J5ŜLz9s^*]NW=hw3ܺ[zsGWJ4u)' :A$J\R5Ŗ]sJ.Yܵ-]r!jSWrPq>^_P(6uqf(},W 93߲P3ۅCJgNSoH_R v]TWf{>%b*v {9QvS i/jroYplD[H|F+"Xch1dv8hmJbq[)'l i-t+a0qN]];=brc`2̎@yfNIGHKZ=x;445;r0mm`ᜰI]xc0 mv;V ~^ՀRq%$FH=P^&ΛBLX[Vh @QJQ$,XMޟPuy9$rgR8MQJ(?ۮY~q( u]x8f4iLQg^bd} 8}2Od %JkxNcfwzw#^EM!?FH0 @ 7{3Pj}:xSL8&Oǯ,Eex}Ot7REh~:!f#މڎB9IHI)4}S˜BUF+.^DqZVW{WVH0+MB蝾F;lj;tu,h.YekΪ-Sʘ=SN0Ԋ 0m쀮T3 endstream endobj 910 0 obj << /Length 2054 /Filter /FlateDecode >> stream xڵYmo_21⫤v{ڽMEȴV7!%:ljD q Nj?\\AF2Tp $"%": ?"۶׺lK+ajw7/( I$(ۏqALxvVo|xϕH9'ܯDFg( vқUec(Q4]9m6^ /'~u ;h#꣒>LIM2l3JbEqq*'}xF]IXuIJWtIY p Of~;-(d~œкcTJb`Sgqv !yg`Xא\fac_zpP8O@U$H~6鋰Ɍg|'G?~p-AyGZ~7Be(Iv\?<4y>Qgn p)5뾾ӽl3f蜷w?h_lx7`HcLBtPS K;sP3`Kڵ~t`<0sns r}P>FЂT,&~?ȫET^{^WW2[<=[}6,6 n[^ nNf]skg2segof%bwTڣ;+۪*c:W*N|uW)+DP"ˌfKT0j<3 )☈0Kw o]HZD)?*~5bf]WO=Jp~IMte}݀ZRRdm*>>%)W݇r8f>flPpp(S0vH8ɠ 2 zl.ޟ`TƷ(3?XJA1QYau.dTl|1b!Z$T~uqI$;8?R!8ec4NIJOubowlzXH;($oj(3  L̳apoP%gKW%&T$';'p¹g IwT^儸b"&6~fF뵩^lƐd,*nV)Ȁ {) 8adC~hHy3"yt<R72H5^~B0u!6aHp>qHҩ;7·V\ۋRS1oÕ?Qg '(pbUN:3%M0?ٴݰC=hLys+nZ8{cYۮW<ɽ\ֺS={:.4D$eoֹ+p2^ip[Ŗ ct-vpbVOUN0mk[G_z)Zb/or$,v!W'ɥ^kB2< <0N׳ѿf)&}?F2:b F(ugE%|jB˻C-g?c_s"r\@ endstream endobj 920 0 obj << /Length 2877 /Filter /FlateDecode >> stream xڵZmܶ~BT-pˊ^ANMWwW=%m;j%|wAlFCr8ye9;s~~P4#vX N$"Hs;~oͱuES_W] ?Zv۫Wv>,؋Mu7ɁJpUN%,W\yc C9גTL}%YH{]0)Mb/՟1x$i -{ѭʏMV:q>^ jFn?z_.'k[n$W\E2`\4 E*;.D[$CQ$PVŜc)(̬rjpfJz:ﹿdj`۔eCطjevWboڑ,kTI1AߍWR̨hx$Z'7{w 9KH s7= QL v΂hto%ePvQz|rn{`ww`Zl )nIzH^tN?"dm= Q*K(;ކ67.U0- jx+A 8k!$5M&SXHq4ٱ}{",k2 w'm ƈ4WB0^cznHSkDlck(j,F04X!AygmFLsdi]6JfVX&3*A ,igTaZS|%a`H`97SM>%h.\>cO (q4V! ~E,y2\d$ PcҾ꜀ĉᄆdN:& ;qt|h1x5>kW2Ā}uҙ"4ɰ?f`P:xVO'm‡*΃3^:|$1GIP65?ˆ/H`DƗc>YbMs/'Ehr~7DϻOak^86πoeŤo((}|(J=P4m5Sن0BG&Pt7ej5Kι/U?V_4&sh`a)~ޙnW*t쥍IS]7Ӌ.Ĭˏ7> stream xڵTKs0+t @PK0dwB0ף!Uۺ(j0VJ8=aq I\˜}d /{uMK_,88Me*Fƴ(U+sDbd'lwYe2=y7Y, Ѩzxy99b }ٯ}y!eɫRjٻa.lԇQkÌŸ[n5eLGeث~'^4O{BK!ͻi=Yv{ҽ;Nt6*WZRx3tE|ŋ'aM"Q<%6^C.ڎbU{*zzBo endstream endobj 943 0 obj << /Length 658 /Filter /FlateDecode >> stream xTMS0WV{Z }XS)c J!1v(1w) =tzJ}o'T"NbC!DQPH \qoZ@"8%z\bfΥ1uybK&QzHP$c)(]k"MMidS|qL&$9x'IwJ%PEHu`#nz "}F4}%33=0$I&g#hw>䪼F`rHIzLp 0P7S}uc5,h/ bXfNu]ѾhA 4(W?|VzXV1m*ѣZaC`}Jq,S(&/Vյ4)]^W.XQm3C1U3*#ˣu{~fovohc/ COKUΪ~q1\ݣ՞> stream xڍ=o1 9ė:P.@6p:\@ oJzC^گ5AR<1_*U7pzevr[_:/qx6r=r.P!A h0*45_Ve6Ձ%X sܤmo?YT`GnI>KŐ*f(oe> stream xڭXK6ϯhCIأJaM2Iy2CVj7`Mc?z#"e8JUǛ/AE<9cdxw7/_MPDE&`$6BF"N]| WVw8vD*$&ʓ4XKBvOlwK4rUgJURDPQB*oz}ZWEŸ7O[ݓ 1Nڰ*7`_Ե+Jj/]{n*zC?^Ρ]4|"zGöm5WYFulFWy$ᚙM;˵Qr>S3'á7@";88_~.WH!I#io̠+=hJvq_qpHE9~Y#cAOq,k?9/{ZhzmS iiȣB)4T]xp0̀Ҋ je;lrdD[4_4m ^Leh!ҦS`>u<ҟ Զ1O.ٟ:q |C {rBOϴDU7H A fCظ$6;;,1v@ f{wIAZ7N&zN\яvz *TaE7[]y*J0PX\h=v*5^Os%'ٶ+'t<$gtn(jIx}*~|_LŊLQeD!*PWē@!Ҟ T\9QWƪLEq\xVx LL~,KiXt&exUI^qjaT'[״ ޻ǞE͓a0 j,.HuC]Q׍\W:sDE/qepP1>p?N.Hx_lI]Yb_qOGc ,,׺`K kuMl ٷa}G"Zr1HXWg*)qƔό5_|Y|2O_㘆t7&ᢇN8Ue 3} @ezMY{P:W`Bp5愓14RRRԵ[X_~0QWܻ ]Dy]+7{~"_F e4/=ŸNm-O'qt.ZG3o#28]~~80d<&tƵ:>V [ ? 
]-\NuL.Rqa{> 70@ UCwcm3n|sb'B `7 Tg7^A 61> stream xڵ˒6>_Gu[#k챽T6g+;VGGR{Pd-2"A@m|"(؋7Yugo4<j#͏737|]x]ia ~ǗQ`bWʘcW&S6w9UX3z|*̨fFmOmvAazrNsR>XoCG&7~`' ڮ'N^w4Su;W䅞w/0"ꎰJ0=J#[` {Iˆ!I ;Ŏ*[k]V-Wp9ffzw?VT=,D 3aN姢,tM8eQ9SofDpYpO]o5u]>|.>vsw`|8 }&4`P/EWK^zlZ08wAu5u䐿ٲf4h -(q{MZ|\wTV 9³jAJ` b+`B j a**< oS義qBK֐q a>,35'3 ?vee{baJnжj =ݝïP L nh w*û>g|}l騍# T=10z:-y$@YcTL>rKm,fu۪ULch'܍E\%ZF's.|>J;NӗEZЬئ2PR?٘F87nt<^A!lY}o4 Sc ,ZH ,Yi G*tYZЩV33t!P/|eb[ n7Zڄ*ӠAՕZw=X]OP?]ewWɝ9C`3!m ܬ&Vgl1fA ۚ+dm/ Φj;p,]y!;* Q2pzEʤYQ\ 27#M}&AcPM ƔlJ'M! I,=.JLKPI-Tu.c$S |_VwE`D \R_[9mdV@ ۈQ39"VǞfh°}`+a)Wwy䦎(m)GcSh^P%3G"EM2%w| 4W?=_ .[5p[ArNkW.P$3g0ڞXfe/Xxm(ɏD$lsg020A"Op2K5H tc8=!ơ}RτZ(TcHYg:D4 W>a Bt]8kN6<FȡpX#ݪ>к6sd(ShVzB- s׭ GV S:4.)FdΐWkTu,r @T%ha& ehh @TM+ׁ2m,Nfƶ`^Hƨr'\z2bp5K$AIMZ@HUб5C3oh n<$`(Lb%*;  tb|(K!eT838";-R^9xE“gU?RؑtnEdċQ2nwN_ #L q&Sx8! kعd)a䆩]ƄH0H8MB إO9>>diPO|m4,(tp`=ߴh;)#A[>C3lby…eJMӱLJB4$5(`/l(ˡgcT(@MX=#MxӘfЫM 齠0җWZl jD`kA/ K S{>8?ĒiTW6c km5sq=z5+lӽqt_dʽD23&]Jz݇X +su*e`O8cbPgTk)l Yޭ3=/ nK:5jبj;RH//$pev.-ْ :>p2s`cZ_яTAT?wQTO#S=^̏%iJ2/:+'¡z~GqK ~t"m&q&]ܝ)N3%}. ,m90yUx\?$ɚ6nU˨eo}r\[EV [ F9-F;8͊n擷Ɩh \qI =2[yϸYХDGt]vkHlw9jO}q " ud*򤴩~^+AcIMY1ۃf`*Hya|`J8* 69M{ɽ"j(Nr-O9CQ̛jLF)/]~?k! jd|wiP5apCV{ϼ_B(ZXO b٨>J\M ՉV`/ǂnG endstream endobj 966 0 obj << /Length 353 /Filter /FlateDecode >> stream xڍRn0+H;9P{8ĴiC+ғɌ .n< gI7\(P""㐗D >L):Ǝjj[ W,WD+F3 g)-E4ȵ„^)gx 0, K%ch^-O*3<_- endstream endobj 979 0 obj << /Length1 1606 /Length2 10592 /Length3 0 /Length 11403 /Filter /FlateDecode >> stream xڭyeTݒ5$ 6n'k4H7݃ww>Ν;~kf~s@N CԅYYRj]%~Fطv^lrhgR:_c?K(śӖ v]{O;Cg6 m Ӷ^zFɴ67a%eG$N/*l/k+ HVZʜCؽk_g[L(4@|8;gg{{Mƶ {ӅQ(b Am2J䣌EZ8?׼ۘSȒZPiN_& َ(qqR9Gwqx#jPַڧ;VyBB#'x-D 擃e#H/+ J3},gn5imEۧ@b)T;1ȅa&tOb?/qKA~]KMy*ũEH5Y }om8SR3 hG{nUdNgyf%ܾh9O"-yauɅ/~{$f`yng9,]$J^ω.Joȣi%TI>xh>@ +>1VQ"e|m{a`s%ryW%mC++.-Y=)P,Vg/y(JxZ=+/K)DMgXn\|"HCXa%x >8[2=`a5'|; V"f)QLE냦{Z/-j"ZM9aq ozo>4c>}|x6@?"qX57i /:8f[6( |O.gK7jT 1.Dg0^' i\L'a?KmCv(%NaxN Vx&͒~k7>GƉt?&Ty6 yGb(&KY-;~mv&z"by SCR؍c;?^Ϊ҇?k/N Bg5}}>Ȥƿk!1u?~ OdQ Vxm|))3 lO,}arH(?`a28%rO֐SVbK\|j/@C>Bv'^H)l|VIKa)5,eu$G'/?*~-Ai_"5۝lք(k60y+9H?\QmD6}  MyYG?m,~JQt'( |/]yxMut84Zv7x`RL|.#u:R18M3fG@.h x}p-Ӫ2.Kili-&}>$|ߙQG5kfǛFҍKԢӝEƔwJK'(6Hݔ蝱}^z};?w}_*Hu c93痪Hf:VZa7&,Y*=@3U==X,*f)۹Ԡ-7JAOʮfeӶȡC/ t6jO8c4iEpJ9+?NjB?[1zfTҢ L̦ vGaШG.v!džWd ̿Ln{)"A/,?YDKIj'wKYkP9g~K^vRH)lqV䆢 ޓi$/\Rԫȴ%0PY|X^/coTg3l"'[W1(pH!wA2؎D 3Uc}04C@5kӑ1i26iwsdκX#~9JpqDfxDz՞իوѧxg u)K.:|h1cVmh\0iwXuɅ>|z8&qn^Jb̬PU B=:}NgMgY)74*3_p ˾t@'5b;Auy-NkIG АwR}LXY ,4aa8<.8giw%;UEMtAď wK9\H]DMtrF휺6>OIrSbV/Q*j ߊ u!yxkY^¥fyḴG ][; yJt&i7ᦓ6ő%|ʥ?_RJtSHx0q%bWM发i ΒiDq0 Sy{9M5އEscDOR$"trN۵g\+8O-1?1˶ԜKJbc͸#s*MI+S%)X/G9%SF×PA^ybjQ}ɒBpE}GJ4t,1 ( OU.W=.'WO 'APh\hwD*ϛ]._~!>vPQoy u"GX鬥v*.8CIaiz5\s5{08ஈIV^`#kbqRq!͔˜5-Vq]|Tț%[:T۪g D xݡy:k5 H<]PsJ%ӭQ-&]4=;Uߊ=K, ęOϜX:>TN$g3fmr$b^ cF`*95JZa'1UBd@ouod)|7O+׆j9HO2TPI_gcvxKF.hp-<e 9EVC}CjTxNWZDHޠ VUB,;=a:OH(F1k8X5S ~JC7P@A!TG׀?ah'2,.yaRtU*`3WV}܍I*"R+&O+=zOK 擫ޕy6ZqK}dɹ_0v1kUt_y`U8Tea~˸(FII^%3խrѭNI:Ij2aeWOW0R)OѺK):LB@ʝmy!HM&s|.N)_'[dcwg6OP lT!xG=΂9U3"ܵv T) sK\g4uM-X,wx qͺq9Aς/?(\f SBG)T5 F<Ƴ aP[f]F!?2V~a*c[W_G"npe K!pj]u U7N#ێ~ <2/ha i'R"fXX0~ЄNkǦ\TּP RJ,4DY!lRYI!15qh/:}/h A)H|瀖&:m?ӽ@t5Hl{ķ H~/#wK@׆+do'JC.X` q|x˦fC6_l9iJܛ.kwODBv{ڛLKk댆6 4*GVx(Y:k4}تx>X<ʵaڌeoS/57190m^< / ƵJMv\@{58~U 9cLiާ%aL>'G-"3NW\hƯjyНnAYJNW͓Cʹbd>e8lPZ0ŵ8|pȵu1/W?<:>A ި$>f7|HRO}vQϖm{JFV)쒈}ﶾRGkyQC>)%) S"Ѐ^b:YuƲUL[5z,ƹ9k嘩p"< o'.۾xl'Tؑ@~@cuw_7珡_`귩EqAM'hx]" b^+3SE R*٫#066 eزL9d#/آiZIи9WrޢfF«`~n4-zy]E;5"5lC+dhGc&z**9Yr٪_x(Pd?eAT9./7GuIȡSio^"I9z֣ g1VRʜrEge\ѹ+gb؜!u<奡D'5܂PlxE6Z9ghx͈g {p[P1lfTbж_K#M/,fZM|9}z0Ӄwfv:v[8Ђ)pw8EJ7&,["x5_B Ȫ#ֈ km @r0]MӪ9MN ݎ] f PS 1RAA.ZV/hhȽzW-X=VqNQaw mp'hl>[! 
pQli[y%Ѫ9y\z˴GceU^׽_3D̩:)kdS30S91`DbXK=C$֟c Qy]T1_\ G$;g!7 UR [v6rLU&z#Oi%)0ժ!BvUz{nܳϣY>QɫqY]ɢbGcՎ MCt/{ {ê8n/JF \Npv7)oE5OxlrDy$&ߛrH}#2dwr?d!IkGU4\a=&* GO1'#.isnMt[Tp,k[m$Eh5lf蓦04twtYss?{5$YG)Tzz9!?.^EyX!04ie}[mE7ҲXƎV 61ZV?k 1̹Y#{T~wx(dCIϜWO$m Sj_tWl3F8B8YT(ޅIGIvI>xzQQ Vx&=$DiƟC,r(UpAL]郡vuDUz-ս4qF0P'Ky0Kջ}XiGӦ4YD_=9ό.fˡA6r(Ži[ygWCN>헩T,3qlƅV/inI,U- {0 2*ƗƧf;-BI!\-#evlXH ~ l1*$$/&i~ʹuaNBK|xGR&$`9꒗8~nHB785aae rh5ؒ%C\CQp*b=RwRUh5OXkO T:'Yݣc3lQpb:_=T|XD$JMKT#T߻=bpTVc~C񐁋׏S^YE#/%PK|-2 >5^{.ǧ[Nb*b8S HLgL,+@G>^fz|,Tɮ8;ԸP1.g,$B#y>htE'щy\jS ZDN5㽢18 сȫ?WJ17;Ӈ!@Rp&'YZ\Ő˘8Ľh=(蜓 U;<[.pAz }y0%&e"/{e7EnW YJEA/*j.xŤm֭}~5嬷,LٝhՃ -~ fr&kj ^լ :<֢ %XMO< YJy2f2XDOI+HH׸6bIfSEO#K7>c&c}lNa*lSY &2=((uv#AQc^8SܻV]p1` r͹ٲ0%e-mEgfeAdܰj;&' :P5v6Eg(⾥'zip]s|آ":xq[ +lק/͛NKuJHlwޘN\Ӄ+yizkGtړF8C$ƕ}Lc93:bpzm'$je5Tsi9N6w9FYCh ;lR̚E˜-*lPnFzi\{4}q4jX9B/QԮU-L4ϝVSJG%;@ h}8B@*B̾ endstream endobj 981 0 obj << /Length1 1612 /Length2 18305 /Length3 0 /Length 19146 /Filter /FlateDecode >> stream xڬctf].t۶m۶mfNǼ}?k5j>T=5X"Pڑ cfeZVhkd%%;X ;@#0`%غٛ:(T)iOzt03}qZZB_oT@% $+.!#Q9'K3C!H 0X`hcmdOit[m@WC?.-;`bo63kCK'Ko__09GC{3[G߬r¢Tf㿑F6N/_O. Ro`fpr06O4{%/_umm-mo`4edon3kXEv#'s f/ }#kK7^oJt}"7H"oyТN2V?. 玱Է[Y6@Up k01Ah$ghh 0ַ۩F@{K3k_Em&L -i=F_N$/"L_jf(嗍Z!(h {ilr02z _fͿ%30[xs_`D mEG}k [\;@+vmgyjFc-ff/#pmIRaoMOj.g{M]4gҙoI^ KdU1e_>Y;?N |ڹjޤN;$t=/s/ɣ-aJ} z'R#Jm9Y#P-Do\ҟIg? ?!^]~ Uѝ'Fz:^ 4y 7cd:)`0.6QN72Nt`aHY)w[Ѵw h2[Lx6y]KFX7 .޶r]8GYf@)c#m (f|,kɎ\}ۤ~ zzk}7h n &Y&uv5ĢIQyϓeOda RGcS_i)ɡ-wdoF}:L{k5H /Ŧ8.z6֮&VqEuG 0^ wr{bN"ά7 +Lqs̩AXRZ+u{4w_J&MKR45T"EHdzO%W hz׮ +b7( ] .̯\RWW%t>`+<27ɉCD o;AYyfdƛw.KϲrSe?: ?l"zbsRr<gfx WH%B׃akGpprMsr7Bؖ J]oniqI[jL\(=;19.B`OH)[6oڕfYʸͻ<^{phޢRo׳4{0b&eǢ1Q#z#aޖǁPlq.3 @E}> d:ӋVS\kF?~OXCaNBm^P[֏?RZBsk a͙"Sߺŀs/ ]b.p=]&ܚE}{ fd6fOmhBD|8@8ͥW}gީrQؾ\Ue^2lw[w H)2Lcd,ۢGgTXX( *W^`&(xuUxxh%AM+>YH)eTX9*94m}dIo-8jd/XShxtk@$Ƥ|(iP'ΧN n8_8񂄱l^4tIu" Q_C }+g.Zo~ˑvv.E_4(q G(PwMۼ3|®w5)]rc"ŸXvpI_y*<$p4Js9 |VO&-u%zxcčk`fC4s=FC GBCeYTz*Gd,;r\~KĽ)`-.S>c:&K$dĆ][ɯ-5PX&]},Q+X!h:dϡ5]WSݒΗTWŢ*iޑ| hpFn-X4m $ )z>U_@WØï?*j^%! 39 *z*o I$߉xyWb2H0ŷ2#We 1< _DAorbP 6>tzq "$4vSDnP6c*j.t)jZ[dE/GkjpҨzߛ;ʤPJh9\kknf=䧷n bv.n"|i;L|/v)+ x𖭽 2$iA Z \*Q^]3-Xq%%ܾ?t&%W-t>Z\[X@ {";AU(5#̅ \,p mBQ~j䋧hb ( h膷.QeW<z}vSKB OңP&yځq'vJ+`L(*êe" V8z% 4Z2Rwv>& MM@L k-z%0~74=Zq:Uߌo|"\}R:ur&6H[_'_2ofw0Ρ[R ?Cg堫z쩔ԝ,6X2CXj;R)OeFyАW &IB<^j"si*MhֺKI(3=^-znc*NN|8+n)6Y'䳹ΐ˾%0~p4um$y L; N0U-aۘkO6#oPw=~I?b gDŽ'5[kF!{ZO[H5DÖ6q+ _/%P'½MS1?XՈ0׀$5)md֘]e ʠ5LSLtz}p̳L߾_S[sbH{2pDN%/skHM]`7 [ÊZ5b~n*oG7PwMlT&}IS[K|zXE hYɃ*mKT% fo-$x#(QQx7$v׹ łl.,mBrB@ԇ<| CenR.Cㄪ5Hp_\S x+ފi,/Uܑ:fpSpꂈ[~4+nnV\s$^au8 JaIЭ_C\dgZuy/NEH#,RXgu>F!*vd('=V^B0KR-R3Fy`woEnP$$h B U۽0aA}=4Uk)F/CmvZ<%) `E {؋@A[a2ex7]f))->G,ZI))tS}Vp梄!Ze6>pi1"4R`(LJFSÎm ]#cQCy"Y\^>sw2'E #XQ c967.(a7YtU^zĒixz[褎ˤʰ$ȄEؐR$sDLlaX =.&E!@ 1'_,B%{L, vѵڎl rG;6;ru6C1hfSCRE.UgܪTȉ<3" `2hU [lبTy"\?՞eu'O/@B^qg␎>[^_@ 0#ib>!9'ΣЅ*L{+B++hf'O:vRՃ.ɤwlySK !=/er$l4YGDIXaZoɱkie_ma➃azwG#t;ƈu1EߦVTf3̔1z^V׷ m赊D|X% >ö>AoQ`@uggokeɉtTK@Q6 & 7/3YUUCζ!:\cOP9h7ycEd5T+k۝>B;M:́o\?@%dqhp+u˂cйe2$[&Ɉ&#* Jj175M.̢ו0QUZ%RB(QDvdP *nBhKدuA|KdmXEUyjh Z+5w 7t K8H,jj1r\$4yvcc)< Zai2|kjW̝JKy\諾y²‡{ƞ㬋˝wBOh6sj䐣zRk XH/"42hofu* HPSs--"Jk2*ۤ2'-x!HmT |[]1a5pi\)~ZHu7 QYsժ}wB,Hy߯{)ö !t'*e G\%",Qcl=(o;Y%^ .Q+8_່Sv4~ɤ`zj%f$`'N =1ěYQ4sLbVJyُ ?fl}72"h֏f;͵q H4ºry\L^tS;\ѥC/m@T%ݥ4(e90̮6P$7Dd' ~z7R1x(2SWqkL%'nݩaE]߰odJR:hF- r(@&N?uҊ$klB3%&/sOIq9FMwit#F? 
u#\^Q^ ^?`՗(vwVY:B";i)N\ۋ~7Q=:gLLe΂|!cPj?b"ɱz.@& " Pyr톋~[]N5˹/.*wϞ0CTlr{ȏ*<ű1Rq6!-9Z0N{kB,a2CZk[9N'-聥N]|*7;d(]56:@hXTQ~I3iR=۫nD(3,k'jQ;liʬIQ -xzB#ko=^%>`3E}C7ʿm_쿁a(?lO~*wJ)d}!K)I'%kה#\ͫ3x}UT6`6HG|3EcX{V`xŻs[t!KY#`uhRZej_ܺ{O穪|7H`m]$bﻅDtO5@kSvLgVllwfo ۶L҉ QcąhIj "K-Ahzl:tfJCD\!O-\'6qz቎=͡&?g >îw6.ބ #j2j h(N)A&+swws`KtWY0Dgqa+XK%du=E<D.nr#6э]IE-+Q+9D*T;}*| n# pļ tCL_pJ̗f.u4R?˫'7QOfJ,mCWogBWM!mL%I2С ,Tq@jʍ1I?` ,8 ь {F$3n'T:6_)1]أU諒f"v0:s*E3 U)H6uddV80N˙C 1 qb:o4K@F=5^R\e}dO0#eeoU݅me:2K5®JЙ!_;;z)5BxZ޹ ^_#mNR1%X\c(-`B%|68gY8pIS1)gA@F:|؆M? \ޱ@[GJJ+OJV 5>nJ?VOqF''.pzQ_r  3(HGRE/$w`|*}4R],b+Bt550Zjmt|ĕRF]-)d+)VᴋoUjyKv>Ht?@e"Yx6Ge8rs T=^ UJ)KNɯ^c;7TVhp$[mHZBק'oRKQdζv?k[ǣa {-]6(aT,ZzXg"Q;ΐŇ 0x1 }|2/lә$2Tvw>U'֌CItbG%TZWͥ?.M\Ĺ@Fz.`~Lx"Tհ^@RY{:ڀfa;ū[9OVmZǪ06пH&*Y mM 6ڈ/͏ ;zuH(.=#췩?]}^ȇd^ˊ,Ť22Z.Z  *N, 2YW9{`Q$DFi纯}-M@+gYx|D_}FoQFpH [IvB M~^ia:er C _.?x-p-|<&'6 wp~cRz`b8R#O@r \>{6pz~U̹l%|t18 "k0ņ BM@ט43-{&dBu>dkMf'5w0eqQB.S4NL= jqxQ-Ai72LY_(582DftťG) ef_/lk`MQ~y>J5,RMeT:F^Т &qQͲuUˡ \X"Sdp,gXaɧ))Z vqcwZSL?~\ǽpL }mdPj;!1H,]4_q0O m" d8[}-g^&j9Io>I0X1fJzUU:t*h5  H27ǃd JqEĘboAP11qz~m;_ixݣuo<|sN믟\XY& >0[&QMCFrD%l [٭@A{wͶ[NgߢVg+1T_ tZ yfכB{/20% q eH ϗx&Uڍ[;]wDD6cފ>cEĒ i QG2oG) . ;bU oun4Zb4)Zf&ǸJdk>L, 3Y2R]vގeئ(+*NʋX!a (%?>,twiA3" )u<ۧ5lCVDZ(Z&Qلf{6x:;!|`vdUvͽ? 7ar 9et1%?uITGg7&0?Pc*_̮T h̓Wl5>[@R3~G]?CJYw}^Pܩ.i:]?Ǘ,68.P%R=3?aRq(Ic},<&([ ǍVLs8$qQRWb_2DM(߯dV#J&@Ǚu]5^oZ;b[.[saOH'լ1)" QIk yA}C,%j=NTD,iRQ7-^|/% U-ϙ(V&f^J!O'>ajs)jZ]ͷ~mlb-f iԖaeR F㒪6"_˂@z悠r uRc+(ރ9,@ v ǰnL>, bbahsn 冫iNX2h,ΑrY ~68uiq)!F0AWڭ.@(T,U%oQcmbͨlJhTZϖc զp;cXNOlSy/cxZߢe++U Z=^ g$*wЪjHZ`oq7\2:UjC9 y|#2p<5"^ ~2G#G5$Rd$|MK..|yu!_|M7!ZR)ã1Ş9G٬A-`*%~8;'NڥsQJ5s\X]L6b)7;ExCR}!@łA fô:x̬&٘q/jph9 T)99 aMP_ƛW9ET$0< AI  1>n`)P]Τ_Tyg15YP2+>)Ć)a>;0MFꕮx>RHY.bRƖ#PP'MsDu~o_qc?0pW+㫫X[lǗ}8G^)vIUxl7 ֳ3JåDG5UcN*8ShԷ@h}8,BC'vFy:6S+2RsxvR‘(:i/ee ^$U[c+XpKx+iO`OgsXzv)%lF7k.?e+M}\'/_oDrciQodqa]@ L$?((Ш^XVr}mLC+-xRW &1v+Q! $yW;]:!Ompmp޼C iKjuص v,d(3AG+7WmB˜?s@׊!1y2br ^BbμF4pʄ~ɣwY8nm"6=mʾ#ƤhB:q%mUN&('X($?+izs9A4Ųݦ)1Mثg[U R 4AZGCsW׉ل&=1*7w*Sn)l75X!AR{$tݤYGO<-ԘOk_aFQ GtZȠJl޵Hz|ݐXo/IR5,1PAŮ-kZh}R&Lj댋ME"cnw1Sm?ҷ}Yl0j/qBoں{{J}z{c.!Ɩf,=Hy,g97A[#x7@ f"؛_ǂ)皼4Xk}N[n ]UqOb==U'Ň]W#wRGG&c-TڂPXE$wGP{ R |vL(MrU꒻A HLC;]cSG>`|\5t5tr/3ێr4J]L?qUN ˖7l,`C%>uqZPߑ8ܝpPXjMrVGr>Am PJ޾+Vz3V,J:Q KOb38'owkDقnXKu7P;i}J-_> k:K?>9RJ$8W>A,O/jjf=fs31R&dE1;%dMϫqvB/`tNJ]ImV!p:, U6+QeXΜ }΅$4xb(3JfH-*LJfjwPGrw`Pjj >^b-)5B 4ɓ#I:BNClFxIhPk3㗭%Ա.O=9t;O"%WpƹuvqbxuA5~1{E+tYp)R9c7ωnEaxQӿ=ہ!ko^_ǽ,?ֳnK?}y|uYx^ӳ{ZGz-r6P igH #X;lWEh=I68f-9S,S,WHV"~Z-Pc_vC6Sow}̀GU ~5T^VEs3&hhcV$?RhrŤ&Do5{' :p9n,î-KH)t1AŁ`)9Y7%@߭.9RftRU]5z#ßQ:#ImGx@7;f)a>0 }ud6Z#ivWXUșLW=.􌳐`ٲs֦`1&nZe~a 1?ͺY7c6c$sЂ'X"Je~&yhkּ`_if,|Hfpv0as]+ĉyMss88֟ Kv쟆<6=wp*W [U#LspnP<Ā*-_e5|+5YTVK,v> 6zM }mU⥶$''̽Et:pRYݰ%A`]#ȵz}A ŁpIЭѥ%ۣe_u6AS fE sJ_i\aU+4jl B(z]Ql#{:4@y]4l暎D.R lL3v^%NS]i];ZP"aS'32\Գm>[qȍiX,W`p/جDdP D޿uTSߧ@ƌHuK^=J>X>^{{$+īk@S'><)u,5Jeq曫tܼ}iةͰc}'g +Rpw(}սDiÖ1"M>KRYPds]۷iN{DP#X% ɞ;gJGqkj%77I44>K9^ze->Q#En0^MSp{.Ťo)Rwq= I.-90>u>/ɓ%I9din < M9zKZ{ME oy6w:#rqz`T$J7ſ8<[uGIj-~ E/߇CKPm~Ivf䋶 >lDy;`PًUΤ]ۅ؇i2Y8'CNGA)P\hcl7 VIo՜Z Y,&R$ B1^mپAi>{hm;7~_ųa;x "Ug6QסLOdA`j4qkE6]E>mH%U=(BP&,Joy"NАv #uqGg ڍ+JA/G;_%(V=6yNa䛫ѝ-fHi!J;&zLc"mhI)6wSŻˬΌksGcxyG0nmSEZ>KAM}詖FCax9/gԠaPc=t.ht]sې  VwL|JCd1uDU !{IOaP0ݽ?ԭ r;MKrgj#L?@؝PI0rao#lZK%3KLmɗ? 
8W[֍fyL{/MG?9Jd$XU!aC/YĖ#]6 vt e{+6T௛P&}JO "DߺӅ;"HEA]7Al-2ПO8/"y 朓E.Y" Vn.{'h:A# *牛l@d Z]̮blfʭԼPY&s'NN"[JCʤbàDG}po"WH&B Бsx"b%魜33 ZVaN;\\~@d-:!u^pP\rH{6_ɔ4SR?gW|;(K.{m9%iv4?~3Ð [_!؁6ym }XuN|~Mõ.ΙEOao̊)-zOALmGA$?yo`YuXy:ջ2˛TϷSaHvCP騉nm4sDB=W5%_'1%80 (`#; endstream endobj 983 0 obj << /Length1 1630 /Length2 9312 /Length3 0 /Length 10142 /Filter /FlateDecode >> stream xڭweT5ܵHq  HB[wwwZܥxbE>=q#撹5 m) $8x8j`G+7WU(DC dn< 2. K  @d  a0dN^.`[;YWK_?WV^@=];)׎ f؀@u CE5.@X:4ܞKAW pB?r>sI,N kb8\+{ C܀x@J|{&Ӏ\]N0sT YYv?M OIa4( q@?@ 93 4\epZ@4:zK''g`+959-gX!6Pv?0w_ b33,IX/d=0Tω?"D{S˻98Y:>Ke=`k@#p/aM> *50k;sB 0_m}v7Llm#+y.eEECiuSy`:^N Wy#- xs8x_ yx|j s{9ydo4rk(h,!al_s85 'caj-mRj24Dָ?ةF'7ۿ>ӺP}E0gcö`'SG$ʗ+Em'ˬ;P?tFe H[ogcDS,X+k:l"+'?HVZ8ʜCWLC}g]۔l "~4q0/ G;wA  RDø0AaT7T@d ɓ{aE$CyYsu,cݵg7 ?R1H" )]'$1?IGҴ 3lF?B)EbJ|p%\/jIO#'E6j_֡^Xt7ݏ~#hRXv X2LtR1Gп$i<ÛeT9VMt/iFgO) 8ST3MV_ 5tFI  _Ba$_l1hb2$fdЩOꤲ]ԹQݹ无"Y lʼn_Hun \/ou15Wk&T{W=|@Vccy ^ Tp)_{}*e  MxyF؍}n)-}p,QN`Fy{/w1Gs{ȆGkriT5v(d|paLjEyQu꧐haB )kZg8y5XevT9?Gux '~D qסDv=0|e/wu鵵O{2evvmKL5|R6* 9#G)i x},}NPnrϹ3%&qN*/ѿX8+ ӕ?lkV+T|5ޖ,  |s&E%e/Pu#$U'WV\U9Mƭu<IG|AC(X,bc1|7R{X`Ϗ+\cd nJ闍5|D~zꅺ_Je[LQs #ݔh/`,^/5JM`κvom_!wH80%"y6W;RMIRvjPO8;mF.{*gde%FsfyR&B7j}~ fJoRPODGbR J7W NfL;@ t,?bѷh5nWƒ.``I[kM镜^10Bư {tI5aƧ6Uj*d rޜ=s'?ozmIeFʦpՐֵ@eG RVXo;ߨR*91}pcKۮW[jl62^"<Dk+s ͬtl]_d}o'3]CZg-#.Zf(*V )iHK>l_q}q?뷚Q\+ڠʵ?bR [%r'lv`,* >1:o\Ho*)]c;yo7SDFnxʗb-==~|yHR޲lh)5PD &mkJ|EyoW3l@o^tH2sM LomTUY*0'Mr==ƾD9[/ElgA.us*{;tج-n$#WIh}6Ǎ]}~Oq)zX?h6%)+yK9VD6֖5Aw#iwcylI=.7n^$\:c??maqj˾–R~wømW cZma'%,:u1{.:mGHp˞X֚:֒_#"Yxn[07 =]#|nJ{tw¯uzߨ'QRy<sW.Jhyk C?ja,ɀbӏF^3mTqa.8RiP.tS~ G䞋_--Eu"͹wc`g ,&o]w! 'qż[}E m~s 7?z qM9O/~J+6<iT]u< \FG,Gκ}+յEֱ_6"hL,8D =ymz^‘?WXO g7WTz{93U{@:;|^mPO=Kmwzbs/9"P |Kl WA }8`'+& ~e!CiZUEV!e͓LioJͥ,{ثahlkȝXjM 4k852e$oERw":×[Ƞ!3f"I[Ӳw*} >N;]\I.<WEQݭ {h U JXVeihSd&.5N.* vp>3qIh'n1aMMpi]4,YKwn de,i~7knFnZ'K0[ǝ ;Pn`F%I3h(]'k)b]njdۄm @i3DAV|]v bHas*@vo;qcӳy)߆#l%>ܑDӸo'[iZ-'Wuϸ(mӞ b8?H~U a=K3|ʽ:-OݝO&I/s݀(6|!vX)x~d.;zؔ[ThII5M#D#ӠVS ؒ% z3C\{%P}LF:4vny޿A^2"8%)wŅVhqMiD?I] #o+h.:{4c0s[&%qA\:nL EcEbPW⩯k/6ŝ $g-ޤ PfN,)~Åw {s/K],JSTqT̞N Y揄R\#}lf3-:+4"ԭ:~H,9w CO09qZ98L;EV6yδ(*>WmwkS+xюKNܔpNb+4?*e2 ”# ٩@xbyi,M{C^ @j6cS{nיL 7"9);aB%_BvYTD54?C-QG߸ xI<٬B*+' % ˑa˷C^zM~J#)GpA;Ⱥ{oGc7ЍrN&QmF$#Lňz GFn&Bۀ4oUF|,uIn2,5YH5Fҗ c$e@ҙ܌%ҹ~i( !YNZf&Zi~k]I.,fq#7KϷJ_Gޤ[nf+itM7MJbU1rV7V !eGx}; osJdӮٔZ_x1gD=sα-Jziuط,|sz3DHA g)uL ;*«pt3X|N,"/&M8[7kAF x LMWN ԥCC"R9Ec2Z1& RFؖ(e(Ң!}""[C\Y"y}7j@7#|^jhș$:HoQٳe!o?i;?Z߬Ь/U,q%G/gcŜW ߯):utۡ,hz>^wϪ@ݙP@ȇR'Q~7ۻ9wWjf)X$ 5m).` &%gv)SeVFw,1/;ggT, -8ԻE#Bk?[[aC#Ҹ4(zTq!uB'x| r \v{ߏM:̊Fׅr,=sj}RGLIud **sj˵)-v4iy߁4@L\-Í{{R⛪f)1Iq'ySTFwkda}ݬzD6-oy*.|XA YChx3N0M)~7'3QU#?K咄A'NIp)y6(+Nc$p] 9`qR;RTLgY&dje0n=%N7bbk(ZZ׏rO^u%201*]-WwӋ _`v3`(h=Z.@ |#@'Hp68GJ)8KXC5 (>g?%QxZPئ$iRvPt`9.+D_J Zr$gF2ٙ|EA@=h3 aUt{m*sF[+7&= /m\ Nn*w_)/wCW:Z1Mv o"oE2*oFPcD!# 3fQӛWjҺ[SAd7eCeC#kK_R8 {3-ږDE&'-+LhT QGC;Adq7. 8,!GRN2|诽t'k032Uz|ʢ>0,dpHm}d-j*UQX PL#Z0qV02RD2@.w_79LѩR?[RxRܓ7WG|]~pƘ齗.*C0]/ӱ29q 9Q#)izu*/:/M#Κj eS$1HTɦFc{ibe~v\ *FAؔ[D53$+F2f=ˎ`;>Κ~8&$9nM;j3prĵuHQNR b kW$uwGe,bW& :Rl M{Tt f2XP+6ԖΈ7~DI+sdlIЧA3]M)Fg O|4ffkrsCTXV6GhKiSAd7@[?53&`4ak_9UrAC`Ykios8ێ:',\뵇JXg4u~j*NR6j,dM\F K1w2a >tJ)r]JN?ҽA@{W.8 y&p7E[Bb)IR-QNu'< jB_hɷ9bn[$;84L[ v|87ĩRԨAZQ<9P5vT XV}Guku_\-.fLrS3bmiZOU`6H Tg[kI3"yϧ9zuQ F ,/04l60zȈ_0X dZ"A,8F]dV$Ir",gVU/& }FIX~:(clwYP74]E;,8V!?n7'E/v;.x3@^q(v-b-yP Ŧ˄*\x 0NT\ph(^}vO6/~[ kو(/˯w3+9?(E-ֈR L endstream endobj 985 0 obj << /Length1 1608 /Length2 11837 /Length3 0 /Length 12669 /Filter /FlateDecode >> stream xڭweX\ݒ5NpHpm%C@<ܝ`A3s?yήjUڻϡ$UVc5@.L,|Ek{SW<nDwX$L\|-9@h`c"RA֖V. 
U-ZzzZ z}'@9\!Հ@`am+)|VH+j@g;@ ,@,f sJ3c&#} tߟ`{\@k3;W-@rtGؿA` =?xXl,#Afbb=\e [LjOj_&4MVp(wrI KѥMTDj7$3b\g gw$sg-YAg\氀Ril1c)(iܸq)>}bKO(I DK#~ u_]N~~kylɽ"/b2H9`B@w/>?tV웖&oƤZ!3 3BݭXYq32f6|ɨYivu.C__1m.6qrcqYt)"C)'*NicZ_؀SSmԉBJY-jV{o4!wUGjLC[=o Ąp k/:'LyP^Թ!sZ&YGBINN/,E:mHVP+96*Ow bY5$b5+ݠmءs3Ŕl){$pqhtV>iywd؇ф?:IWj!Ehh Fx6H}>_^<ܻ-ծpF"K\rZ@6./'z }@m sT$rV2 .\Ɲ/؊1*zxw;3Xo=9rŝp4+[v P,8;|.nޏ./T'`}9Tn]VÇeB$vyfD ӘWBJuKݻCFZp/JP[Ř-NTDE_;WR >Y,g>2Q8~Rk? цh[p"WIR)_>2]WM O<B]X!W4Rtj=܌TM'm*[iNMhviٗ"xd_̗>x綽FF[^Q{loIQ K+]Z,?ao!$mkuujB#ry/Ȍ_|*}xP!{!ڈ?Q$>JP`>fGQF>[q8#Ŧ15[ݞϝn+*~g^5Y;u; 7:^)ܵh{l<,a]9yQ >_qx2/ Bl?\9 i %eS skni<΅2R7y#엫]~+zW,˞pл<8,_>EFvw@anM`! lb[~XƳYLOei^#)pb .]s 8s ;*OI"E6f=!)@N`ZkbaQx~zEeCu峎ByCdTUp*Kxx/4>{=vpyT.a&!&>\OΟz£K=!ꅓl3ctdZ^aiاES*۹Cm4 }wސQߋt? AZqV hhi!gF*N{IR+"t_ErKΪ\¢YaL;+,F8횏"O*ç BPxܿkw 37v] >f'?//g:ZFpiXPZt3Cs.Fh\ 7~K4R4yXq@B0忸Rsoے,2lr?&lĎQF|9%6"7b>QD-"ګk ft'wD#â1m1Droh ]|U?@RBBB98QCkTr\gMVP&͊TܥߏF|q OiitѝNyJ+;{~/%ǣ;`1L(~ND'&DU;ݽb1K͌ 3{?*>^ABtOǤӊ(]LʏdʭO~2]LqƷS)\4q~Q++ JIDwYiQՋy %ig,rfC^BgՏJg$A4v7"t^} JFY*vRX)$[m::H&9RWj - n;Sq= .$7S)P4jpՎ-aV#gI.MGW W}!?>[dDGSerÝѦ #FztQϨ\^)R좏Gt.YVRH& >lnEA-="H,8P331W01Q.D mY߆4Kg+"6o@Uw;O@1YA|=S2R7b6)&+Fp't nNuIC Yrs4%2|ewyf]ybU+1WbA(yĥ3xХ׀^vu  7UUzk#|jb[RȞct$Aa?)|BQK0면kӈ4,$XuF=KMNI)*/bF6<TYt0HXd@yh(OxKu*w1#P#DX@ L{9$\mᤩi H&*CM,A%XQK`,yJ4Or 4]X1'Q~BQ7 ƨ^?髢M/w[egBE \7GysS4[2qKmw@ptf+ࡡ{w3X-_L,ubGJT$DWF#q幫4bj9JC=:rw; #;RRqd)L7px7V!;2dx.Y)::|JuczTҔ}Wz7>=7Zo)[k4{-M>d3Q1vtV6!TvN5{Zʖ,(~ڔF[1Z+)7CZO£:6f[@.4X&դ`t k+v^|@9SO4! 1C(0I#UZkPEδd_1cy euIF#lĶ)ŮOŖ -moM*iؚHQ=|(jh9?*Ƌ{bMpx83d'l_WbQWx.Wh|LCORS۲lg_*Z]ޫ G|-}v=>|%s r!Yѳ]ҡw^MP9[ZoVONZ~9/d_g'oGKEVzzJl4=VGz.Ƈ^ڨQStjw5: \cdΪ3Ktٰ^_^7ujE@h+)s8cP$ϙ YDI޼?RY Vxz2_Xq{Jl4CMƇYGT*7g @Y$ig"XfNFɣ|1ZLMQapm͐40 vç*4{j&&0(S ճTҐԾC0a YGeeErgJ: G{f2Wi9H5ܧ4s>[ُ]`j'Agfݔi6,FsGdJ$L:d^("'.BoԷCS)Y1aQ#<pdV,7GL9G?A jJ42YJx֕6|n$W]v\L 5E/x#J9-ԫ.X^ipHip[t {#`eM=6AڎOO5 x&^mXmIZ"</n*ެ-cݰ$RhU +:BcyfXY,G;ek]}HqEB}2 6!K]O.RF 6g>! D)ةmʼnQǩsB! >~tM8&R]VA/y*ccId ةxlV6#3@I|1:AD}fN@ыtgߗDd=_'Y0>!|.gF imavZ@p!3\fZ#{^Qk< PB}&r٦c'x}5Ea) [8:'+U%_(qGx!Bݰ.4هb "϶Y^u:Iy7`gS i*,c7Y}Un TC`x %l,Ht%7F_<ik|UIz.n1%pݞSLUc1sᢞ,[Q;l ˏ~iלeR6WjZěXԹε|ّp̬j8WN8 yxH=2t&=Pق !F $8dw BghJK+a!-,AViރmPo+wW!mͳ~i:gG#N43LNJZxIPAp|Nz.tIccM.$($(E%HhS^2z[+aXH2~g OdLFg]6j#\ϋ؂qlDuto@r¿ 2X]DQЧaD tp|8+,lFYԂX{[՛2NI>fL~6|8 L:= v#^W 2NK7% WeɬLP栈CT Pl7lP|ycZɍTy~-t4 8}k@][닌퓹j{SR)a[eU3tf .,JO.AY=cn=%qWE^{" \&?%] TH p߰\_9L/OgKgcGu%Ɣl f !_HǍ<_C8w]͖oO|&P]R}& b6gU5_E˭7U!so?(BP' q>0Ʀ}&1e7G `B"sC]js؋s)uIb 3;<'t/ ֭~&~w( v{V6רeRȴ 4}rF}6E*\[ξy[!ѿ#'kל '>@Ys%d:uԾYF&D{s=eQsv *FݶjJr8GOUNu3"9ٝɜ(QX1.Jo;R!Q8EWԄonN:6 *F}1!uc(q6a—VXdS2۟.=dokdU֏zV!*K]Ir% +izer۲?@l)>-,7ERԀu\{AW:yKU ]-"of!5 ߃0SørU3 $iogH=).0–RR08a1< 7=ɭQJ)u֎GF9qe8&vUġ!V>6(4Ðmݕ$+0e"24 Yg3M%ӭ1{ܳ 0Ų#$}*]QMcB( I?!EcS魴ΘSj븈ƙaD*謴u#ؽq֝?!a7]:,byh{A>~ H|+tw\9M+X;=xd |Z :h5ԐEVKզ%5.FX |5oc\{eUV x*MV"*Ĝcwtyaɾ8"& n ׌U\ԅ#;WtJ8Sk? n–[wʳԓ2΋+ʔ_/(Wak?c_]sT(uQ`lLݺo"eR_ +.j暈@pKB%̛~9v9SY5Yz%O0+&Q}Rfm2+QVOݬj(cԖq!{q6?BWe0U]ڟPHee{!!=9s ˽( !+zv, i]DQ4NUfU(}m#ɍZ-l}jx*JGV,/n| t@_cX軛Juᱨ(X x,q߉L:.\Qp04hYbȹ rA-[,٬7YsrІR*_CT*Xw/$B<"`Ey'\`rt36iRq]sINbGrRA%tfm57b':Z#, S^ϸe,$)T 'ܫCTXGڏI( N |'jm 0e4AƏeh^Omz\KYSVxZ<e҆~\\광v㲊nCصrwn!Ta QX"޽O Hz}:,HfB1h#DUI2ξVzYUe7|D9eA#ѬZY9_ ޥ?8[I4d֍M+ [JQ&J2=Ȍ$4G6L{b% x 8U0n^#"K8ԓ6W}х\УLr腝d',]GH 5d)[Ͼ@.楏(ZSW֨s$U(cN'my5^g ݘ6 ayDm{ l#|XR"29Sxx0clnLHTB#/-=DI\MKʜVxS. 
2'hh6q\`rȶ=-뇚M4$z{hxuSrP :P>"ZvZiI{(Kc>- "^QKv~&Riso(=%Q,52M[| _X"5 "~ kԒA?ݎR?o$EaIߤJa(hx̴Σvy\ n}3< r^ةo/V] [ewӚcEOӷy e3>SqCEuT ͢ӏ͝Ԁ''7)\&ٜtŢMX a@WkLǾ7K-z|̗^$ϏΐU!'&َs J"WTR[,]fws tI=ڇTwCM0D: ruPv|i>Qx<%WP1Kn3ڻ @JFPն_FtIQnZpys)jΚq&ZߏہcJEM,& '("otM^D^)pZֵXS ^`R5#n-n֔+6D:Y}1"[}6{+m6r4?av+Gt)tvaWŦ!iK]D'>f(rju`?MSJW}iUr&AC/#*0P宖,OB/Bm.dxKhy Qځio˰jwk`=! NV޴r(e7Zn*YEc" 7U?IE}~]KB~Y])YhF#^zB[ei-x8sQܾ3 u\KmfH{!RQ); ]6ϣ4 wBYXBֲӪۈ$m*G|, Rhè۫I]]nf; 4)8B AɐN"<1X͂Ӹ.̝"l "r<1P"U&:Q^%_XHNswC9 XJ܆ iGC3$>i"2;yQu5ivИ""v Z_bN/МDUHȽ6wԁOAw SgQ2h ٠WR-eT,o?;|YɷvYvIC\U>ׅ>eKO: i0R[+*^OAuJ3qJ/DRvfYu~(lU`<[ Nzb@ףpMvs4#/h|C]7]>k.=tohNu C4*oV<2*ZXf⼡ׯhSCع@p _Nvһ *+'#9* eWP* pt*Ǜy>ѻ9W*%){jcIQfF3KmDzY+O]YI:%,K>gndaJw9iTaM-+dRةv6۳}JRB,JZʊ?T\9)@y(.5N0VBԹ* ylsdTK`"k5*x#׍:y-tY]K՗(Jqj†y_ӘW[/ FM\Z֦6y3938f2U۵!%{۱S8ϹZv6+rk#-A7N^' $hO8кJ;'CDk7ž3,kZ 6^&F˃ܨWber|~cTD.y!B5(>߄Z5 'ƼyVKMaH7]pvhG 2_*_+\˯`ŹÜUʐovɪ O0r֞5,ۇ|gnEoQZNJ` J9D`IF[8Q}cC*uCy]sN2On&n (a_WE?ͺTcɢ^hxRJV0߱~K]]a>/_vdM%7bV)> stream xڭWgXSkHE`{ "*(BB $!  ]"HCS#E"]:RxΜyΝ_sֻֻ귞DBJ viQx)"`rYQRh$TF*#(M0& 7nP h?wV6bIUW K 0$C  a 4LLuQc+@a I"p01 E&p0 9$ `P<@ Hyw@,Edh"0xTS8`oh7& 9O7F!x0;s_0a`o$X(h+O_c0HZnT 9O@Qɜ dC}0|a=1B`(07*c4Ϻ,kWk{/{ND[{8_41{!6?=;' NHNZO1"w $ a$@,Oyn`(c'w2vn4%74#4suu P t uE^n\o|&u6㱈;, u7- = <%?0%' ThRGZf:Ygûigy^h9$-ˍW.'Q5g #kE}VHs l3rXkôkK2NE46[Æd Ks}fN'`)B|B`h!U ,ok+օV~tuv4'o]N(f N^{wUCN )/-qroNue6R;M?tl]\'߮}j7w ( =yQJr'LSROBFjLcXuʤV _֫gb.(u]-Cz-ꅲuBU[_*\SU5D)] xd>a#E)6-[WtNǍ38;=r;̤L u_1P[*-M`w\j\Mkp)w׽Ȑ_=k^ 2,eDBܾckynuMKuX Z7TK,BT:q]GW[e~FshE=hHYH>U\տz&!|.|bM>2rxud du+tF') h00aufoJ&WDm%d%ᭃ{;VfўE$r\1iNz {X+}t&Y9 Zqww(:O\'xKӣ)#./^0u62{(g^V Wubq [s[@'ڳI(Ɠu;'B"MguۡLhEI c7-m6g*˭~Qg- *P4LY1JֹQl1 M ^N Pi}(\}ϐ~q(Q,B+* ~p; [wvj?39{]u\uXƨ]Y7\ n(*9m|R[?Yo."K9>jTN\ GKF c'PN\q ZC]퍬3mWt[kg.F]~kS-BACf—ӛGs"wȈ X5W"z)oj[wffp?!R |ȟK}W!>qڛJ &9~,3EGCLJE藅RWBc8B0^d _Mg~>ag*\cLDNMr;B94Q&#m;OnܳCI6Uo=b<=F;<=Bz^q~SUu!6CSLJuw#'Ac* /x~_Lc5rŹB :ȸ8NEޙO:ëZ$ x/7zxQԜ,%9oCs,/ܸ;W{AȮ`rWCĮCJ1l}e sv%bVyjCFyJ=vj<AY+U= sk ;uFzY>yPYIWCc&h.jQV[ŌOlfp2X*.31Ef2I2?~Maw=(QڷwGIy1D.! )cwa2I X=Ёb'{7@F!*8Wi^U\qi4y_YTI$]] BMHy rvr]]#ʁ" GQ פecx@nrxo$JɾdUbjdH3 Xb-JÙ\ RKqBm-G$_8VlORBm9Enrn."^D csKk+ 6d.TDkrUaqԢm#-.qx(mpCاÂy5usE\D]]+ˎӠl~YNʪ6k⑹e:Gs@'?;7k N*i=A.[rUřt6dP|ML^hݮjRW4rw $Rbm+~Ć35:5 0*jNHsNƒP4{SR3_aLl}W'Mߊ*X9;`C8Ӟk2-z A{oK@LmyĴy͗BAZ[',#2Ä дS\r{2 f%ڨ-(V54+ť3 MkMDfƗsm n.?AWx v+9X`ObnM4,{bTJEp>/^Qvʾ!oŖ;a( UFr Mk_a u04p+W{[\Zd^Mj=yjݹ/l`20 >%Fh$k\ endstream endobj 989 0 obj << /Length1 1144 /Length2 4081 /Length3 0 /Length 4834 /Filter /FlateDecode >> stream xuVy<&wSd}NH 30f03vɾdI)$k"k([ol!y9{?}_}=#rJZuE`1xi PEypV0%IAE5 F&z~dڃ0P"bT~(ol0GQ2u#x#0x+["pX p'W=O X]`k_ @C!1QD0D`~?ca(vx; Adp2^Q~7rSA vGaZ|dm0(_H?(` @yLiE~0 <,pq0;b1@8 pE :F7PG > bACa%KJr*d+P[Fw,gU?Gv!@a/S ţLr $W_-{?;L?{GFˁ1(7 P8T n»yi?q 0P?Y4Hgr p8~Q 1nX8 X~n??x~]_{w@"uSnگ ^Cf 9*S(Qoܑʔ26d/n -+[ )HL:TiN|`oZwFޒ^P7d|ٮ^?U,#\`aa׌OQ9"{}BfExn@c[y3(SbH!zӕϾTYUGv eTQb¶l㏙&iSLg"M>ev60|V\H.&ΐ&zǚ?bݷNIo؛;l\SNz-_f( RXc+Fk`C_?zdJX}օ v:\[Ev;CxgʕXM'1(X0ަ+-dzK0ڗK-Xl4ETƴ3`v/:JD׮=Uw9,K^矦xBT5?i\Dϔ4O f3Q>:ۻ%LCsI.,&EW7_;m\zŅuNb'wձ9Vj$gT;zOAc-\bIKwFHSu-nVN {h|KA.rP蕩@w6fK*v٠J6xv-8PQVWx\ex]+Z=ZҥEUfq85v&mpdzY#T5CwkBv=25߂kpAs%ƮG,#Ol= ^2tpɯQ9a@hr9%!@؜F`G2{!-|"i2%=PB#g22I)7՚yVSPpLRxBzkyBM"B(*ߟr^qq~y41~9*Yu롾! 
4aba>=MR^6FL)~p.^GcXqiT%y9ի^gYm;o+\+~F^hHr*%6W-㠮|*UIm2<9&^'4&]ʾX;f[_w?p7[v@k~T%f^ 𦬇 NO'$T(SNIkύ6TQ e 0YQR͖ϟxSb޾+e#|χsmO)̴fɢ{CU:^bb ",8+{ $LΔl(S/whr@oWfw Np4-1scVL⻐<ʜw>Nܯc1~x3v:B{(B*ϴ]x 9 r%dWRs:YUeR eB?&m`S|kMq?'Emܣ;|_>k y^Ia|{.Na;Z/3?/ئWY%ArᙹzEQnWs6c]񘇉uYO6;IjNn3u,Vhe#&^cOB#ta>\1vYkcafFj7yŋ;\l r  s>m"; im=<$K8e+eMsR( w֌6L 鑑t3S|Z(q$mE Т2ToMɗN_i?*E)6h p+oIBBe?i:qmuhQN]JFLWKE9aRd3nBU|j6n#ʟߝ-=F-?8П磠YH_4s|`4ψu`A, ,֢@U'IT@E5sTha ZR8a ?J"nk GMD'SSɄSER r+ ] po*M۞$"}SZ:{%W_80dt x~a5(x1!L!xߠn%,e_-Q!U4ccž5VGr:!ˇ\fo[A Ւv|aJ1>vI)^ՂTTǤ׏"&z4HWj]¶L04l3|d/bwKd;V}1gi&aB?~SL1L)a-=|]I'Q#;\s2Q qEZ$BaEB(-ɷm*1nȲ]t 21N}_'J 81Ai~c Q2jQ>a N$ۯdJv(vT:ż*@ ŏO][}/iz7lD6Nmªv@|0vQ82]Y4u4PXmVPE 4v e.kx}bj+uWk]I0^sEUBa+In_wlutgv/L8v^Y4&W^yb2gL=ʥfou;$zxGbVzeZ zZ(pEy3vɺ:~u ~ jRKD2 퐤}nL!$7ln6$> stream xڬcx%%b;;mf6vl۶m;6+vU|=~c?^c9\korb%UzaSc3 { =3@EN[^ GN.lfr34Lbf&37779@RWѤ/?!cbea:Q 4[ٚD$T I3{3g#[ @Ō` ``ojOk. ]FG3if&ffvV..\F3:Ml]M!n/B#)9]LU$hi_7o?-hdeyel0rq5[/տhX[:_Lo9:z+_Q֜oMVp슴vSWs3s׀$Lm=fp %Tw*3 -?5. Fvo #cd<4v#;+[47[[ID,L L6ZHXy*YM,F/_]5R=3,Ll.3{WgԔӖU?< T@5OǿG+ @%(/ :<fbW_#no`ڨMn26quv+.߮_;ofaf`b g u,oR+) uψ2| eh\>w89ǴK3.%(B٦=b/GȼЌYۅ`8ޟRV/{"buM@VNgэ Z_|~A|rxbltr6//8id >#.ʐhҙ%o^M8D[Ű\W`'=FLLٵlA mxx)!U=wl "|nR*Ң-=Y /~ْc?vwhǁ_<"aWY^kHPZ|0wD]Ӕyӫe=6=)m_ЫIfRヷBae܄NέY05達6j@mHv=_.P{/}SDzÇL_@cokѿP+5tyH΀tZXb X6u$@B\XL&LoQl\dUh'~rD!DWv6R!-,GxHy41d.UQf#dbښs~=,jbb#{%LʗK}Srݷ)W7lNkad^ýO=B!h~xH6$˝Jb s=Z usLGz(ck,oXggTߡN[:k擉^Q؛C L^C(+U?\U~F2&sG%qNM|Tg omZƁr!p)~%&I8M l2J!$"ew9 ͨ*(n3\7 Bۄdc5E?9;`/$Yю•U~ U#WF*BQ '!$zeI_" 93u[<}:V tI9PJ/U$+ 8^``n&|5e 6z醝եZ?vnr8 bgכ,xE%caUrm zp5ŒPG^‘+i/S;r0x\q`1N4 ED#qŰ &[!/edǝMcؼGƦ.OgoIךMN+ ٌ6adlCBۇ?TN7܌ ؝Oh]w=< ?g=- ?P/8" ;*sudz7 ]d*<2RR ]Z1(-<}/uLD%ydDY9ei'8~.nE&6zTG+w3FÂq[193/;1;Jw J->,`!nRwȞz(.*Bΰ@֞3'nHm~2Q5'C\Bt4b~c".Z~M b$bùC@|3RƯnSZ0n\$DGrqFFu!ճ8=o,E1CpSA>J/"*^AaM>;yJ&.C^`Cn(Ka )~4Z7c!ԖVG9LJr vpu@d44尫ҫL[a#RelP;&z?uǚE*U&ʅkۧ]R(.֋t ithX討l,|iaM֯W RCщef., w88Й/}>G_[fBrd JJbfI,՟G|"aAxgy N=".EɁBs|߼QYLĜKs Ո6X?`LzC!K Ncede^T f-bEBTn$P $7 v!kw(̐6t!Z?p d}U~4 ĩ}F9ij\(CzaW羌 .pkR:iu v dyiOc gUo{+1rU5o'ID(,JP"!F\rIKP/k&Ғ:7t@EG%bҮBX*ԦF+]"Qp70@)RJ~~}'PdԞ WtTf a%y%aɩ:ܹ7x=8Ȓp 0]?9TxvDO?y';ݫK.FN"Gxvhhr?Y.U . (r '"d?`](}A! C{W!bR,B6V-\9[HX fKzϫjvCqf@%KJ왼 $M$r׺#e*Ն0n\6JRv!E%d8Q|SHm>::."|c j^B!:Gu\s#-&-}rQ26z9#̫ؖp`!Nmڏˀ4%UdVwǒ&H7ӡIpf[9׹q?ܣ'"rG:gE6zm0t Za8j eaIk貔S"&tS'+9XL?tpbU%_D>u.WEѮyM=o&+JA$[.xu )D&鈚A[ڋ"*w= */~߼:E`Rq㾁M&?3EqNtu*g+7 ~!ݳV1ү[cD3R}c栰(kNVƉ0Њ\$Y{<uJ.,jvB n۶ FTe;^QsJb2n].i;wCaw#? 
Q|"-joaRS \/hU]ju/ Yx+D1[4D}~D~+k Ј4;pOӑE@| t^WhYN2ydY qԱ)~0bqÓ..1Lf^Y x*ض팚+BeyZ!kd\–da8OT U;1zb#Rqp{{že+$*3yd*Oֶ+ɜ }pc!CƗ-ciopB;==F({Z.겮DU/AŽDShq .@>=TջAc;M*T5݁bZEks7!ȎƤfߩ^L#]M Z—U9nX`QYDsQ-kuv`m/.[HԞITT\Nk"}?vLG˓k[ٳAibZ@fZӵ uP`˳ViYxxE6j-ӕl֧\'4N"lf/= >:su@5(#"С>\4)d[;ѱ9?ՔE{>-J >hTIbPָLeA\3Dc^Oj_GpIycgo9T>J\>k{3U(Fٕ_>Cw{p.KJu'kvSio` Y߫HKkAQd#Sw<tϴ7͊*:'p8' TF.u:mCQr3gxfp)%Zxj,E@عhq&.Ĕ˻[ SX(:T*l$wK &䧪~/\ !ꄜJ0MLbnr@O$M`~y/g47p5%CuS'b)7֒/[?>[]Hlf֑JO7̜:n2yK%ozO=cMNWf:-~J#a̩g^P|l1/s r`~bxb,rROYܳjv9AETRg Bӑ5ѣ36V!truQ, m t"1.Ul=Hh+؍x$OD;FM*EPצ:_Ӫ'F nHw>XOH&r@ -)MQىf: mO3A#)(7V鏂7QqZLf1C2` p/aYZbxݹ]!p)o< ͌D(0J\} QY-G1R fƇ+m"7*z->pojgSN5V/ PSntX!&#*r elbmPXmx<^ZMI(F&ȆchٓZ?%hQ7̻{!۞iN+>/%EpB`dҤGsB׶}\jS, ZpVi";^ōȊz3 E,l=ű [JV5z/S(Izd5d) D͹g!ugAgF8*EJOE`ʺMovzӺmut)cc# θ_P׮Ts\݀Gs\xĬ7mgd\,WuA.Ck\*T*̺b~o4t T *iu,0h%DlL0vDF>*755%߃=@0E~RێU^% gQQÖTP-}V5散 ˾9?t[,L5:&yJpvRXˏ ĶIѰ:@KUO˾[*;'^_VwJ5~XRo9MtЧdpFŕ)-?kN z1%$V7U؂}1QߞfX<~J򦘉 -"+`1Gx +z2lcQ$%X?AP z!1x|_O sf?7$)抔Eu?sBK$4vLwaT-ƪhLl[MZL`c&<;$2^fZ{zKޒ(ϵ°+룦bLjL-<6Q{b?`Jm3gTSIBeS/T1eȣ)JtwDnHzy Y!󭞋kղ.b[o>a2zه+_W,QMX^)'YKnN8WECl=cpg35Rvq-ؽ(nx8/DwAIfGM";\4nLei+IrxT`v3Cf)#}0wj< F'[_?+Jڗta"POǴ&zlR0:"`+' q r{jzQHeL7N VK(CʶֶͥH<"l^=в:@;dw )qyAS}N. m#ܢv9q)GBWE}Dk-hlGj9@sqPKp>928C^-0l Ρw2S.JP3( 7Jԉ۸aW e[jnԌ^ghE{jqXx*.\b/{c OysJDl ]BH٪7a+cP?bJ$N`8ǡǞHL52`!{> |`e6KAbd]B'==5Skt?ph`f=ĕx1&w,YwC[jB44 >[xSI9F,! uOґrzś$SUmʶr{ȣ%v f8yR*ٲ631B).yd'~Yvw?r.+&:6S^ӝ+Qd j qp6Y-[Yưq )C -K&NɁ+g(%~YH?eG,zA󢈌 ,qDe 1IwJ?[NBa 󭉽yY*Lk P}C Y}CS8p]T7k9 'iJFlP](vU7{w?~>/4]:o\J +U|6=UZ<R-CuTKcc8wGj)9E ~}UWaR%K!Qj$Qӿ 1:M_XBG[z~SP?xoh*'45;>=a!6 ;ӡ%c[&%8X01Cq]3˂yv2A}gP/eLD!4p/eoN}~ɈHYjoYdC6?OZnǬgE\o &dgcHD3˵ȸG"۶2$1,62NK_8q Vhq/2U#Zl{[V;?Ci8E.`\ZH& !5)*Pm񢅡m]nK+EH%Mg]Y xCI) iЎ{MePgP΋ ',@`=7[K 65AEFuY3KxHigYuH @K ۸-.ƾ~iʳ݄,F؄`Ap48!SwRRL=8mQ21E2.]}F&f7}uO-5i? y@cs֊f0K?[5N7Etll-9{H_1d1 nȼլ/'5Qa WAq l58L5]w_GbǞps7HrvD3=b= (Bd^ɔNE2P iV=$rzɴ8ZYpT:nc`ggwi:T$$U fef`F8mܧ0Ԝ78z>D;VM#PڔƮ1Muųti8cF=*GĢPES ;πnY{E"&DzOu#{b fP[&7NdNngGi#ٞK}O8cYÖWu0)澽?BRaPwY(46dz/JC"jI9"c4b7 7kY(kt =x Jz) aOA}X1-OU~_\]?]*.mمR5BvyeIew1$4[zqc+V=iզB=H:\ հm&; FŨcA_F>`MvXh0+sч9qo睷@%0G EⷝoPV~6#|B%,r!*Yhgo c%9E4aP \o~Q7ᱥ?vzC)bLgځh2mz+ub_{DJ'\SG1Z+j6뼡s&>6Yzչ\zQ, Q\{"K)`nAƼiz6mUA!ɍT$j(9n+b;[~t*˨f98\dg¶m!XrnB溭_"('Kοwb:Σ#[mId3HN5[;]jXh{a*ek&QB@5xxuBI62)yPr?u2J&KzlN A}]H4LF:D 'Vi64[Y{{_'7έ M6<rSPv{ߤǻ~jg4)F$w@<.ʀ++2W8 j y2u= F;lrүtn !3+OmF{1ݍp J챌kvXo%)ipg2!t@zvdztH%k;q*Bj[ۂk3wG582Di9ͳw:fw+8!ļ9^w圐zhd4f5»+FqamݲQķ4b|BE: 6}aU~z3Ynу5q+ b|،>:HbGN🹝__S|qTNl/ bIE/KC2ƷPXVL;4 ka^95vuΫ1w{YQpb=_՟ʁ[TI \ WelB,BƧ,O@U̮ Z1\:{[" Oe;SߖI0vD`9lj,EjaC3,F9Wĺed@7V8^-C{91}*,B2o)Azת)KLnbeNT|au˯ ?%A6l0X"Fava&zX&J! S2T0Bj3W*vHdP5~p>RXc+؃06Z IYJy)- z'S>[ 2b{SbyLXwFL:F L.h(-.Jmk;2ц~doʠU|q'k,D+Ak v 5q^#m3URy2\8J=2in %Da2;띚qBcbʻB(=b|N Y#DKlZMR^_Op xRa|?t+*2CUs $e"oVp>/'%¬æ<)Po,xBG135 >j endstream endobj 993 0 obj << /Length1 1630 /Length2 19784 /Length3 0 /Length 20630 /Filter /FlateDecode >> stream xڬc%m۶*m۶m۶m۶mTo>}:}~wjLX$r4F"6N4 2Ύ 2R4 Ʀ,$$BN6NƜFƆ $BvfN TT?,#MmI~[Y8T46w23712S'Q51vз—s627ė274q47uohkcdOk cƷ3v6wtooowN6VFWoblzXutr4t0sUNXu:;䯧?-I\FvVss0WΎ6Y5_L?_׷rWg NV& s:mjnIϮۘ3[ol6c [; $ߔw,@ -_9_{"VV27ѷ{g94V}ks+SV5w0q'c1K =-"nFrNf&Vg/_n5V|zbS237mkWt"BJ*T/Gnэua`eǧadgI/ 5M?%Qtҷ1mS/:_{olflbkdT;2%9k$خQƶ7-lR6i}HxՊ7:ۛa8N&B5fIjX^xJ^Agȥ0! 4lh|td*'K' ]݅ͱj9U!Qc)f! G V:4*(oBhVIBkEbu㲁32s#OShQ7 9RFH =;|J$,s=&o^H:}hN'o\pj 9_4 \#-B(/ʙc1RP+-FAҚcj]%I1BĶ8sIuZ{5{~,͞ 4I샻7\IB0'k\|.75QkFjUbeudF$qd?0)LEX. 
!J>ҙ[IԨA4sujDk4G5XRJtk$%;rq\˭F!:] rkLDS/ֲkLۚuL3+23QMDRo*wuqSyʒ5"Q5"Tk(f) vwTvq Cfx*coxT{*R}&D`24R@ 92ʓnzNɫ~>farԵq3Geޏp4པ/҇_m[ 8.s&v,'?!5&@xS%Nm>rnD`BNj'2ZneۛZ^šWگhd:Y9lk s$.vr Fxb,cI?<ѭ!Dxt|Oyi,RSb.BsTϘ_pVngrm6Oz!ii,sGCN7Y6L&O:߳`<(ދ㪅~erf?+`tˮJI@Aet#%Q>0V_(SW,.BAN0gEOsrZppy9uE=g艝"[t N"I\ U K2򳒉)hp^'h.Mo$!:u3ࡎc6\ЌJ߭ yw^m3̳uHN~v|A gJB6Pj^H%.~RR.|T &4%q9)aCc" ft"rW[.`C9o(&aKCӖګa<c+ bKGz)mcQfjBC(D<-͝ϓWR#A. kZÑe>oo#צlMB6ҎGH̳zw% !Opde=c.Q3@(j)#6{1nL}ٚŃ;zNנlC\N$4>QnKEsGhHإc0I֣{aTւ0訧!vyNIn=g C'Dd5؍0 ;%_D /L`iY6bMK]U1N9_a"{~ ;""FY䰙Ť`1[o98HEe.f{ųLA'@sد #zr?Cܤa,i`k8ۺ6)va\qFI`VE|F<2WTt=xq-HAQE7rdD(!?.]\VJȆҭzrqsHd 4.<t@f J4&cCc>!'6T!#繸]D/Y7 &Ԯ)@͜)|OwfVC݅`X 7m{DZM>q֢ (OIQEbime'Z!N09d׋Rhn7z]Kj{] Ěz#l:I?++\;#ިϺ&в"dvpe!U*TC@J.=H]>mVj=P'}+Gv+z_)nzLS[Y/\j7Av\2 a4>aO /! (T=zSN^`'OO*'vx.%3GTꟳ-.!84bx2Y`s] I8wJ3uTE.+ 6ȻGy/4y$gdG< &*e0(Ӡ##ţK,$A5vmG4 nE ^^歚aofG \.QzhEZ*ԣF: i[Pb}FzMo9n"Fr)QZ39D:Dpf|q؎?fr/^_xQȗ~J@>$/%}ˏw~)Ĥ.߮pj5Jk~X >cv EY)< ndNT\lG(-5jhO<܎ 0E6$! }}-!#(^ #J~8x"b Pg~h.66${:J#b \\F9ixUƚM;0"t<$~ZG\t"6+2)T&~9s\u*e-unGE`H5ndY⾍%ᮊ;'ZDYlR koƻ)Nƫ,]da0 s-FGa`!M4" %JzQE 4UumF <ʍƌJ[٣mLe <u[X -n^jSAfB@isҥD&feq99FG "dSj:ujNۖ{CzO5MPkw֖vb"M8Ak@WOł9WD Z('Sb}j`grCeE+ _*uЄ|ü85 v.2>T#zιBR!. >b~\/Ӡ5އ-9 >aEW Of2o4 e|l JYc#piXns IEiWᦸf6tcls=#>9b Ti+ȡ+8*Tㆯ$E%ZJ\q_64͛, OxK.:.r= .(nXDW= Iҹ+h@O[ZVNb))ny3_\Jj{~U*GM@:L 9<@:*a^LR $% 9ŐǨ)!Paӯ Jw֏ 3RX(?"{pL#3 ~S >y[B$߱|oRa\U]/ՋKf,P4Rm9Xr{H|OkZz{vvSATVܝ5@2R&JgDś!sgM[3qK2U~rJbGa2 *)ɏ\ȁ-5#LA3ڴ`hY*>vj^NGbӎF ưKCf@E5,KU8Bƅzr95$~94%NҔc[XgW{*x.V̚fa\R>03 O+#1DӺV+`B@+^ yAMޛ6:p^T]# ?Bsvw.|Z6Yf(֐赊v"xța.FL}I \UJ֠jwj1]~+iPjMG43j ~Q \/D,^ꛨ (/QpŽNv*{2[qWjhbޫ\ W'`sHo칾i=+Ua}]8Qē@KM1%SҒ%rJ_h6} Dl(YT>[Hu;ƛl?PhyyRo 0Y.UCb`BMe?ZD{JNv8:&ȱ.A_p3i*[xj3G ˶`Xbv!>l5!jT'0MdZ}kRTt›(ՆmǺI ی9rWd'.~"c1 ;z >[Ds.I{g ugP]PZ3`AM L7vCflTԃ{>K IyS5!Z3G !ڳU˗pPa:wpI^Hv=.A`㞎hȧצ3;iIԅc#GlQ2; +qvޢ[勽C5NvA'qTt))ժWIR Ǭ֡+&, T6ٖx*/ mvq|ػ=ɴ&}#vc,Ӱ~wG ri@3mM'9on#`ORJO VJB{Iܳ*(z{f's"xNbo<ܼ~-qZ\Q0XyꖵQZ~Ett5zl7NW鏶{>ww'C8DEddԡ?Sm\!ޅ^pP/ɬ|J@?}َ|x8;j'?hqrЪxrla \L`Aifs!!ɄX33!&FG߃Yhgu4 ? Qč= /X^8#Tf#OZ8L).o=JaKO%2|Cf̌BNd@蒽ݓ1J`]С˃z{)}k9ߑ ?#2 &Oy!ՈT[Y 38}Yo[0  'o`* *8۝v-+?\-a{HXY Ħ&BD!f;)v!EїyZ&8 k#Bq8^Զy/ h߂1Wr].-%]_j,b¼VxV#W1.ZnjkaYX4"@-[q"rt.%R: Džp~7r9Õ h;.F':CTLne;0G MYU}# w+df[2& dptM,2l0~ph9xh ȝӛJٺ"B:eTֽh;W:1h4gE N>^k8M>gq } $'pYE\5;S  &-xk f p Y5&(~4  t;_LM}sN0Y󕦏 7DYά{Q7 5gJf,"&4U 5wij'sUa} GPȪz㛮-yK?LMxH}pYHsGBM*4 ǖpNo~V\\djWbܯ@^<<vxSK@+J*<)z.w=+F0J~9^32泭Cݾr4/~).M^ CLvH^SZΊ{h ]mu_AV[yՉhU[- 3%ڐf{ySsdb_"DqiLQJ9Bds,D[ztka xŸIA6Ә込Md";ҪMpgma7j!y{E(2p9hveUuwX,gK}=.ٛRILݿPᗣT pMZ /HjQv]gCLhSKn|FѡTgo8PgA>ZrV}[`ga͉߱= d\I]-$ƫ~_SAmʿ#+V(Dw(1btύY ؾ 7 r y<(99!ļ^ D`jF@2'̭nhAWp m%h!+$FoF =>Σ>vjJ|8ISJP{lO%RqWRhFo\>x/} &Xu>9$اSUh[/_L @PyK,7dQ s*o)6񀛩#^`mbĂ@jӇP. 
[binary PDF data omitted: pre-built "S3QL Documentation" manual (Nikolaus Rath), pdfTeX/LaTeX output]
s3ql-2.26/tests/0000775000175000017500000000000013246754372015170 5ustar nikrationikratio00000000000000s3ql-2.26/tests/t5_cp.py0000755000175000017500000000335512615000156016542 0ustar nikrationikratio00000000000000#!/usr/bin/env python3
'''
t5_cp.py - this file is part of S3QL.

Copyright © 2008 Nikolaus Rath

This work can be distributed under the terms of the GNU GPLv3.
''' if __name__ == '__main__': import pytest import sys sys.exit(pytest.main([__file__] + sys.argv[1:])) from common import populate_dir, skip_without_rsync import os.path import shutil import subprocess from subprocess import check_output, CalledProcessError import t4_fuse import tempfile import pytest class TestCp(t4_fuse.TestFuse): def test(self): skip_without_rsync() self.mkfs() self.mount() self.tst_cp() self.umount() self.fsck() def tst_cp(self): tempdir = tempfile.mkdtemp(prefix='s3ql-cp-') try: populate_dir(tempdir) # Rsync subprocess.check_call(['rsync', '-aHAX', tempdir + '/', os.path.join(self.mnt_dir, 'orig') + '/']) # copy subprocess.check_call(self.s3ql_cmd_argv('s3qlcp') + [ '--quiet', os.path.join(self.mnt_dir, 'orig'), os.path.join(self.mnt_dir, 'copy')]) # compare try: out = check_output(['rsync', '-anciHAX', '--delete', tempdir + '/', os.path.join(self.mnt_dir, 'copy') + '/'], universal_newlines=True, stderr=subprocess.STDOUT) except CalledProcessError as exc: pytest.fail('rsync failed with ' + exc.output) if out: pytest.fail('Copy not equal to original, rsync says:\n' + out) finally: shutil.rmtree(tempdir) s3ql-2.26/tests/t1_dump.py0000755000175000017500000002065112615000156017077 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' t1_dump.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' if __name__ == '__main__': import pytest import sys sys.exit(pytest.main([__file__] + sys.argv[1:])) import unittest from s3ql import deltadump import tempfile from s3ql.database import Connection import random import time class DumpTests(unittest.TestCase): def setUp(self): self.tmpfh1 = tempfile.NamedTemporaryFile() self.tmpfh2 = tempfile.NamedTemporaryFile() self.src = Connection(self.tmpfh1.name) self.dst = Connection(self.tmpfh2.name) self.fh = tempfile.TemporaryFile() # Disable exclusive locking for all tests self.src.execute('PRAGMA locking_mode = NORMAL') self.dst.execute('PRAGMA locking_mode = NORMAL') self.create_table(self.src) self.create_table(self.dst) def tearDown(self): self.src.close() self.dst.close() self.tmpfh1.close() self.tmpfh2.close() self.fh.close() def test_transactions(self): self.fill_vals(self.src) dumpspec = (('id', deltadump.INTEGER, 0),) deltadump.dump_table(table='test', order='id', columns=dumpspec, db=self.src, fh=self.fh) self.fh.seek(0) self.dst.execute('PRAGMA journal_mode = WAL') deltadump.load_table(table='test', columns=dumpspec, db=self.dst, fh=self.fh, trx_rows=10) self.compare_tables(self.src, self.dst) def test_1_vals_1(self): self.fill_vals(self.src) dumpspec = (('id', deltadump.INTEGER, 0),) deltadump.dump_table(table='test', order='id', columns=dumpspec, db=self.src, fh=self.fh) self.fh.seek(0) deltadump.load_table(table='test', columns=dumpspec, db=self.dst, fh=self.fh) self.compare_tables(self.src, self.dst) def test_1_vals_2(self): self.fill_vals(self.src) dumpspec = (('id', deltadump.INTEGER, 1),) deltadump.dump_table(table='test', order='id', columns=dumpspec, db=self.src, fh=self.fh) self.fh.seek(0) deltadump.load_table(table='test', columns=dumpspec, db=self.dst, fh=self.fh) self.compare_tables(self.src, self.dst) def test_1_vals_3(self): self.fill_vals(self.src) dumpspec = (('id', deltadump.INTEGER, -1),) deltadump.dump_table(table='test', order='id', columns=dumpspec, db=self.src, fh=self.fh) self.fh.seek(0) deltadump.load_table(table='test', columns=dumpspec, db=self.dst, fh=self.fh) self.compare_tables(self.src, self.dst) def 
test_2_buf_auto(self): self.fill_vals(self.src) self.fill_buf(self.src) dumpspec = (('id', deltadump.INTEGER), ('buf', deltadump.BLOB)) deltadump.dump_table(table='test', order='id', columns=dumpspec, db=self.src, fh=self.fh) self.fh.seek(0) deltadump.load_table(table='test', columns=dumpspec, db=self.dst, fh=self.fh) self.compare_tables(self.src, self.dst) def test_2_buf_fixed(self): BUFLEN = 32 self.fill_vals(self.src) self.fill_buf(self.src, BUFLEN) dumpspec = (('id', deltadump.INTEGER), ('buf', deltadump.BLOB, BUFLEN)) deltadump.dump_table(table='test', order='id', columns=dumpspec, db=self.src, fh=self.fh) self.fh.seek(0) deltadump.load_table(table='test', columns=dumpspec, db=self.dst, fh=self.fh) self.compare_tables(self.src, self.dst) def test_3_deltas_1(self): self.fill_deltas(self.src) dumpspec = (('id', deltadump.INTEGER, 0),) deltadump.dump_table(table='test', order='id', columns=dumpspec, db=self.src, fh=self.fh) self.fh.seek(0) deltadump.load_table(table='test', columns=dumpspec, db=self.dst, fh=self.fh) self.compare_tables(self.src, self.dst) def test_3_deltas_2(self): self.fill_deltas(self.src) dumpspec = (('id', deltadump.INTEGER, 1),) deltadump.dump_table(table='test', order='id', columns=dumpspec, db=self.src, fh=self.fh) self.fh.seek(0) deltadump.load_table(table='test', columns=dumpspec, db=self.dst, fh=self.fh) self.compare_tables(self.src, self.dst) def test_3_deltas_3(self): self.fill_deltas(self.src) dumpspec = (('id', deltadump.INTEGER, -1),) deltadump.dump_table(table='test', order='id', columns=dumpspec, db=self.src, fh=self.fh) self.fh.seek(0) deltadump.load_table(table='test', columns=dumpspec, db=self.dst, fh=self.fh) self.compare_tables(self.src, self.dst) def test_4_time(self): self.fill_vals(self.src) t1 = 0.5 * time.time() t2 = 2 * time.time() for (id_,) in self.src.query('SELECT id FROM test'): val = random.uniform(t1, t2) self.src.execute('UPDATE test SET buf=? WHERE id=?', (val, id_)) dumpspec = (('id', deltadump.INTEGER), ('buf', deltadump.TIME)) deltadump.dump_table(table='test', order='id', columns=dumpspec, db=self.src, fh=self.fh) self.fh.seek(0) deltadump.load_table(table='test', columns=dumpspec, db=self.dst, fh=self.fh) self.compare_tables(self.src, self.dst) def test_5_multi(self): self.fill_vals(self.src) dumpspec = (('id', deltadump.INTEGER, 0),) deltadump.dump_table(table='test', order='id', columns=dumpspec, db=self.src, fh=self.fh) deltadump.dump_table(table='test', order='id', columns=dumpspec, db=self.src, fh=self.fh) self.fh.seek(0) deltadump.load_table(table='test', columns=dumpspec, db=self.dst, fh=self.fh) self.dst.execute('DELETE FROM test') deltadump.load_table(table='test', columns=dumpspec, db=self.dst, fh=self.fh) self.compare_tables(self.src, self.dst) def compare_tables(self, db1, db2): i1 = db1.query('SELECT id, buf FROM test ORDER BY id') i2 = db2.query('SELECT id, buf FROM test ORDER BY id') for (id1, buf1) in i1: (id2, buf2) = next(i2) self.assertEqual(id1, id2) if isinstance(buf1, float): self.assertAlmostEqual(buf1, buf2, places=9) else: self.assertEqual(buf1, buf2) self.assertRaises(StopIteration, i2.__next__) def fill_buf(self, db, len_=None): with open('/dev/urandom', 'rb') as rfh: first = True for (id_,) in db.query('SELECT id FROM test'): if len_ is None and first: val = b'' # We always want to check this case first = False elif len_ is None: val = rfh.read(random.randint(0, 140)) else: val = rfh.read(len_) db.execute('UPDATE test SET buf=? 
WHERE id=?', (val, id_)) def fill_vals(self, db): vals = [] for exp in [7, 8, 9, 15, 16, 17, 31, 32, 33, 62]: vals += list(range(2 ** exp - 5, 2 ** exp + 6)) vals += list(range(2 ** 63 - 5, 2 ** 63)) vals += [ -v for v in vals ] vals.append(-(2 ** 63)) for val in vals: db.execute('INSERT INTO test (id) VALUES(?)', (val,)) def fill_deltas(self, db): deltas = [] for exp in [7, 8, 9, 15, 16, 17, 31, 32, 33]: deltas += list(range(2 ** exp - 5, 2 ** exp + 6)) deltas += [ -v for v in deltas ] last = 0 for delta in deltas: val = last + delta last = val db.execute('INSERT INTO test (id) VALUES(?)', (val,)) def create_table(self, db): db.execute('''CREATE TABLE test ( id INTEGER PRIMARY KEY AUTOINCREMENT, buf BLOB)''') s3ql-2.26/tests/t5_fsck.py0000755000175000017500000000515312615000156017064 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' t5_fsck.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' if __name__ == '__main__': import pytest import sys sys.exit(pytest.main([__file__] + sys.argv[1:])) from s3ql.common import get_backend_cachedir from s3ql.database import Connection from common import populate_dir, skip_without_rsync import shutil import subprocess from subprocess import check_output, CalledProcessError import t4_fuse import tempfile class TestFsck(t4_fuse.TestFuse): def test(self): skip_without_rsync() ref_dir = tempfile.mkdtemp(prefix='s3ql-ref-') try: populate_dir(ref_dir) # Make file system and fake high inode number self.mkfs() db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db') db.execute('UPDATE sqlite_sequence SET seq=? WHERE name=?', (2 ** 31 + 10, 'inodes')) db.close() # Copy source data self.mount() subprocess.check_call(['rsync', '-aHAX', ref_dir + '/', self.mnt_dir + '/']) self.umount() # Check that inode watermark is high db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db') assert db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?', ('inodes',)) > 2 ** 31 + 10 assert db.get_val('SELECT MAX(id) FROM inodes') > 2 ** 31 + 10 db.close() # Renumber inodes self.fsck() # Check if renumbering was done db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db') assert db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?', ('inodes',)) < 2 ** 31 assert db.get_val('SELECT MAX(id) FROM inodes') < 2 ** 31 db.close() # Compare self.mount() try: out = check_output(['rsync', '-anciHAX', '--delete', '--exclude', '/lost+found', ref_dir + '/', self.mnt_dir + '/'], universal_newlines=True, stderr=subprocess.STDOUT) except CalledProcessError as exc: pytest.fail('rsync failed with ' + exc.output) if out: pytest.fail('Copy not equal to original, rsync says:\n' + out) self.umount() finally: shutil.rmtree(ref_dir) s3ql-2.26/tests/mock_server.py0000644000175000017500000004751013160156175020056 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' mock_server.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. 
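A rough usage sketch (illustrative only, not part of the test suite; the
handler class, port choice and threading import are assumptions -- any entry
from `handler_list` at the bottom of this module works the same way):

    server = StorageServer(S3CRequestHandler, ('localhost', 0))
    threading.Thread(target=server.serve_forever, daemon=True).start()
    storage_url = 's3c://%s:%d/s3ql_test' % (server.hostname, server.port)
    # ...point an S3QL backend at *storage_url*, then call server.shutdown()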
''' from http.server import BaseHTTPRequestHandler import re import socketserver import logging import hashlib import urllib.parse from xml.sax.saxutils import escape as xml_escape import json log = logging.getLogger(__name__) ERROR_RESPONSE_TEMPLATE = '''\ %(code)s %(message)s %(resource)s %(request_id)s ''' COPY_RESPONSE_TEMPLATE = '''\ 2008-02-20T22:13:01 "%(etag)s" ''' class StorageServer(socketserver.TCPServer): def __init__(self, request_handler, server_address): super().__init__(server_address, request_handler) self.data = dict() self.metadata = dict() self.hostname = self.server_address[0] self.port = self.server_address[1] class ParsedURL: __slots__ = [ 'bucket', 'key', 'params', 'fragment' ] class S3CRequestHandler(BaseHTTPRequestHandler): '''A request handler implementing a subset of the AWS S3 Interface Bucket names are ignored, all keys share the same global namespace. ''' server_version = "MockHTTP" protocol_version = 'HTTP/1.1' meta_header_re = re.compile(r'X-AMZ-Meta-([a-z0-9_.-]+)$', re.IGNORECASE) hdr_prefix = 'X-AMZ-' xml_ns = 'http://s3.amazonaws.com/doc/2006-03-01/' def parse_url(self, path): p = ParsedURL() q = urllib.parse.urlsplit(path) path = urllib.parse.unquote(q.path) assert path[0] == '/' (p.bucket, p.key) = path[1:].split('/', maxsplit=1) p.params = urllib.parse.parse_qs(q.query) p.fragment = q.fragment return p def log_message(self, format, *args): log.debug(format, *args) def handle(self): # Ignore exceptions resulting from the client closing # the connection. try: return super().handle() except ValueError as exc: if exc.args == ('I/O operation on closed file.',): pass else: raise except (BrokenPipeError, ConnectionResetError): pass def do_DELETE(self): q = self.parse_url(self.path) try: del self.server.data[q.key] del self.server.metadata[q.key] except KeyError: self.send_error(404, code='NoSuchKey', resource=q.key) return else: self.send_response(204) self.end_headers() def _check_encoding(self): encoding = self.headers['Content-Encoding'] if 'Content-Length' not in self.headers: self.send_error(400, message='Missing Content-Length', code='MissingContentLength') return elif encoding and encoding != 'identity': self.send_error(501, message='Unsupport encoding', code='NotImplemented') return return int(self.headers['Content-Length']) def _get_meta(self): meta = dict() for (name, value) in self.headers.items(): hit = self.meta_header_re.search(name) if hit: meta[hit.group(1)] = value return meta def do_PUT(self): len_ = self._check_encoding() q = self.parse_url(self.path) meta = self._get_meta() src = self.headers.get(self.hdr_prefix + 'copy-source') if src and len_: self.send_error(400, message='Upload and copy are mutually exclusive', code='UnexpectedContent') return elif src: src = urllib.parse.unquote(src) hit = re.match('^/([a-z0-9._-]+)/(.+)$', src) if not hit: self.send_error(400, message='Cannot parse copy-source', code='InvalidArgument') return metadata_directive = self.headers.get(self.hdr_prefix + 'metadata-directive', 'COPY') if metadata_directive not in ('COPY', 'REPLACE'): self.send_error(400, message='Invalid metadata directive', code='InvalidArgument') return src = hit.group(2) try: data = self.server.data[src] self.server.data[q.key] = data if metadata_directive == 'COPY': self.server.metadata[q.key] = self.server.metadata[src] else: self.server.metadata[q.key] = meta except KeyError: self.send_error(404, code='NoSuchKey', resource=src) return else: data = self.rfile.read(len_) self.server.metadata[q.key] = meta self.server.data[q.key] = data 
md5 = hashlib.md5() md5.update(data) if src: content = (COPY_RESPONSE_TEMPLATE % {'etag': md5.hexdigest(), 'ns': self.xml_ns }).encode('utf-8') self.send_response(200) self.send_header('ETag', '"%s"' % md5.hexdigest()) self.send_header('Content-Length', str(len(content))) self.send_header("Content-Type", 'text/xml') self.end_headers() self.wfile.write(content) else: self.send_response(201) self.send_header('ETag', '"%s"' % md5.hexdigest()) self.send_header('Content-Length', '0') self.end_headers() def handle_expect_100(self): if self.command == 'PUT': self._check_encoding() self.send_response_only(100) self.end_headers() return True def do_GET(self): q = self.parse_url(self.path) if not q.key: return self.do_list(q) try: data = self.server.data[q.key] meta = self.server.metadata[q.key] except KeyError: self.send_error(404, code='NoSuchKey', resource=q.key) return self.send_response(200) self.send_header("Content-Type", 'application/octet-stream') self.send_header("Content-Length", str(len(data))) for (name, value) in meta.items(): self.send_header(self.hdr_prefix + 'Meta-%s' % name, value) md5 = hashlib.md5() md5.update(data) self.send_header('ETag', '"%s"' % md5.hexdigest()) self.end_headers() self.send_data(data) def send_data(self, data): self.wfile.write(data) def do_list(self, q): marker = q.params['marker'][0] if 'marker' in q.params else None max_keys = int(q.params['max_keys'][0]) if 'max_keys' in q.params else 1000 prefix = q.params['prefix'][0] if 'prefix' in q.params else '' resp = ['', '' % self.xml_ns, '%d' % max_keys, 'false' ] count = 0 for key in sorted(self.server.data): if not key.startswith(prefix): continue if marker and key <= marker: continue resp.append('%s' % xml_escape(key)) count += 1 if count == max_keys: resp[3] = 'true' break resp.append('') body = '\n'.join(resp).encode() self.send_response(200) self.send_header("Content-Type", 'text/xml') self.send_header("Content-Length", str(len(body))) self.end_headers() self.wfile.write(body) def do_HEAD(self): q = self.parse_url(self.path) try: meta = self.server.metadata[q.key] data = self.server.data[q.key] except KeyError: self.send_error(404, code='NoSuchKey', resource=q.key) return self.send_response(200) self.send_header("Content-Type", 'application/octet-stream') self.send_header("Content-Length", str(len(data))) for (name, value) in meta.items(): self.send_header(self.hdr_prefix + 'Meta-%s' % name, value) self.end_headers() def send_error(self, status, message=None, code='', resource='', extra_headers=None): if not message: try: (_, message) = self.responses[status] except KeyError: message = 'Unknown' self.log_error("code %d, message %s", status, message) content = (ERROR_RESPONSE_TEMPLATE % {'code': code, 'message': xml_escape(message), 'request_id': 42, 'resource': xml_escape(resource)}).encode('utf-8', 'replace') self.send_response(status, message) self.send_header("Content-Type", 'text/xml; charset="utf-8"') self.send_header("Content-Length", str(len(content))) if extra_headers: for (name, value) in extra_headers.items(): self.send_header(name, value) self.end_headers() if self.command != 'HEAD' and status >= 200 and status not in (204, 304): self.wfile.write(content) class GSRequestHandler(S3CRequestHandler): '''A request handler implementing a subset of the Google Storage API. Bucket names are ignored, all keys share the same global namespace. 
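The authentication handshake is faked (see do_GET below): a GET to /v1.0 or
/auth/v1.0 returns an X-Storage-Url of the form http://<host>:<port>/v1/AUTH_xyz
together with the static token 'static'. A hypothetical client exchange, shown
only to illustrate the request flow, would look roughly like:

    GET /auth/v1.0                -> 200, X-Storage-Url + X-Auth-Token: static
    PUT /v1/AUTH_xyz/bucket/key   -> 201, ETag of the stored object
    GET /v1/AUTH_xyz/bucket/key   -> 200, object data + X-Object-Meta-* headers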
''' meta_header_re = re.compile(r'x-goog-meta-([a-z0-9_.-]+)$', re.IGNORECASE) hdr_prefix = 'x-goog-' xml_ns = 'http://doc.s3.amazonaws.com/2006-03-01' class BasicSwiftRequestHandler(S3CRequestHandler): '''A request handler implementing a subset of the OpenStack Swift Interface Container and AUTH_* prefix are ignored, all keys share the same global namespace. To keep it simple, this handler is both storage server and authentication server in one. ''' meta_header_re = re.compile(r'X-Object-Meta-([a-z0-9_.-]+)$', re.IGNORECASE) hdr_prefix = 'X-Object-' SWIFT_INFO = { "swift": { "max_meta_count": 90, "max_meta_value_length": 256, "container_listing_limit": 10000, "extra_header_count": 0, "max_meta_overall_size": 4096, "version": "2.0.0", # < 2.8 "max_meta_name_length": 128, "max_header_size": 16384 } } def parse_url(self, path): p = ParsedURL() q = urllib.parse.urlsplit(path) path = urllib.parse.unquote(q.path) assert path[0:4] == '/v1/' (_, p.bucket, p.key) = path[4:].split('/', maxsplit=2) p.params = urllib.parse.parse_qs(q.query, True) p.fragment = q.fragment return p def do_PUT(self): len_ = self._check_encoding() q = self.parse_url(self.path) meta = self._get_meta() src = self.headers.get('x-copy-from') if src and len_: self.send_error(400, message='Upload and copy are mutually exclusive', code='UnexpectedContent') return elif src: src = urllib.parse.unquote(src) hit = re.match('^/([a-z0-9._-]+)/(.+)$', src) if not hit: self.send_error(400, message='Cannot parse x-copy-from', code='InvalidArgument') return src = hit.group(2) try: data = self.server.data[src] self.server.data[q.key] = data if 'x-fresh-metadata' in self.headers: self.server.metadata[q.key] = meta else: self.server.metadata[q.key] = self.server.metadata[src].copy() self.server.metadata[q.key].update(meta) except KeyError: self.send_error(404, code='NoSuchKey', resource=src) return else: data = self.rfile.read(len_) self.server.metadata[q.key] = meta self.server.data[q.key] = data md5 = hashlib.md5() md5.update(data) if src: self.send_response(202) self.send_header('X-Copied-From', self.headers['x-copy-from']) self.send_header('Content-Length', '0') self.end_headers() else: self.send_response(201) self.send_header('ETag', '"%s"' % md5.hexdigest()) self.send_header('Content-Length', '0') self.end_headers() def do_POST(self): q = self.parse_url(self.path) meta = self._get_meta() if q.key not in self.server.metadata: self.send_error(404, code='NoSuchKey', resource=q.key) return self.server.metadata[q.key] = meta self.send_response(204) self.send_header('Content-Length', '0') self.end_headers() def do_GET(self): if self.path in ('/v1.0', '/auth/v1.0'): self.send_response(200) self.send_header('X-Storage-Url', 'http://%s:%d/v1/AUTH_xyz' % (self.server.hostname, self.server.port)) self.send_header('X-Auth-Token', 'static') self.send_header('Content-Length', '0') self.end_headers() elif self.path == '/info': content = json.dumps(self.SWIFT_INFO).encode('utf-8') self.send_response(200) self.send_header('Content-Length', str(len(content))) self.send_header("Content-Type", 'application/json; charset="utf-8"') self.end_headers() self.wfile.write(content) else: return super().do_GET() def do_list(self, q): marker = q.params['marker'][0] if 'marker' in q.params else None max_keys = int(q.params['limit'][0]) if 'limit' in q.params else 10000 prefix = q.params['prefix'][0] if 'prefix' in q.params else '' resp = [] count = 0 for key in sorted(self.server.data): if not key.startswith(prefix): continue if marker and key <= marker: continue 
resp.append({'name': key}) count += 1 if count == max_keys: break body = json.dumps(resp).encode('utf-8') self.send_response(200) self.send_header("Content-Type", 'application/json; charset="utf-8"') self.send_header("Content-Length", str(len(body))) self.end_headers() self.wfile.write(body) class CopySwiftRequestHandler(BasicSwiftRequestHandler): '''OpenStack Swift handler that emulates Copy middleware.''' SWIFT_INFO = { "swift": { "max_meta_count": 90, "max_meta_value_length": 256, "container_listing_limit": 10000, "extra_header_count": 0, "max_meta_overall_size": 4096, "version": "2.9.0", # >= 2.8 "max_meta_name_length": 128, "max_header_size": 16384 } } def do_COPY(self): src = self.parse_url(self.path) meta = self._get_meta() try: dst = self.headers['destination'] assert dst[0] == '/' (_, dst) = dst[1:].split('/', maxsplit=1) except KeyError: self.send_error(400, message='No Destination provided', code='InvalidArgument') return if src.key not in self.server.metadata: self.send_error(404, code='NoSuchKey', resource=src) return if 'x-fresh-metadata' in self.headers: self.server.metadata[dst] = meta else: self.server.metadata[dst] = self.server.metadata[src.key].copy() self.server.metadata[dst].update(meta) self.server.data[dst] = self.server.data[src.key] self.send_response(202) self.send_header('X-Copied-From', '%s/%s' % (src.bucket, src.key)) self.send_header('Content-Length', '0') self.end_headers() class BulkDeleteSwiftRequestHandler(BasicSwiftRequestHandler): '''OpenStack Swift handler that emulates bulk middleware (the delete part).''' MAX_DELETES = 8 # test deletes 16 objects, so needs two requests SWIFT_INFO = { "bulk_delete": { "max_failed_deletes": MAX_DELETES, "max_deletes_per_request": MAX_DELETES }, "swift": { "max_meta_count": 90, "max_meta_value_length": 256, "container_listing_limit": 10000, "extra_header_count": 0, "max_meta_overall_size": 4096, "version": "2.0.0", # < 2.8 "max_meta_name_length": 128, "max_header_size": 16384 } } def do_POST(self): q = self.parse_url(self.path) if not 'bulk-delete' in q.params: return super().do_POST() response = { 'Response Status': '200 OK', 'Response Body': '', 'Number Deleted': 0, 'Number Not Found': 0, 'Errors': [] } def send_response(status_int): content = json.dumps(response).encode('utf-8') self.send_response(status_int) self.send_header('Content-Length', str(len(content))) self.send_header("Content-Type", 'application/json; charset="utf-8"') self.end_headers() self.wfile.write(content) def error(reason): response['Response Status'] = '502 Internal Server Error' response['Response Body'] = reason send_response(502) def inline_error(http_status, body): '''bail out when processing begun. 
Always HTTP 200 Ok.''' response['Response Status'] = http_status response['Response Body'] = body send_response(200) len_ = self._check_encoding() lines = self.rfile.read(len_).decode('utf-8').split("\n") for index, to_delete in enumerate(lines): if index >= self.MAX_DELETES: return inline_error('413 Request entity too large', 'Maximum Bulk Deletes: %d per request' % self.MAX_DELETES) to_delete = urllib.parse.unquote(to_delete.strip()) assert to_delete[0] == '/' to_delete = to_delete[1:].split('/', maxsplit=1) if len(to_delete) < 2: return error("deleting containers is not supported") to_delete = to_delete[1] try: del self.server.data[to_delete] del self.server.metadata[to_delete] except KeyError: response['Number Not Found'] += 1 else: response['Number Deleted'] += 1 if not (response['Number Deleted'] or response['Number Not Found']): return inline_error('400 Bad Request', 'Invalid bulk delete.') send_response(200) #: A list of the available mock request handlers with #: corresponding storage urls handler_list = [ (S3CRequestHandler, 's3c://%(host)s:%(port)d/s3ql_test'), # Special syntax only for testing against mock server (GSRequestHandler, 'gs://!unittest!%(host)s:%(port)d/s3ql_test'), (BasicSwiftRequestHandler, 'swift://%(host)s:%(port)d/s3ql_test'), (CopySwiftRequestHandler, 'swift://%(host)s:%(port)d/s3ql_test'), (BulkDeleteSwiftRequestHandler, 'swift://%(host)s:%(port)d/s3ql_test') ] s3ql-2.26/tests/t4_adm.py0000775000175000017500000000634513160156175016715 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' t4_adm.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' if __name__ == '__main__': import pytest import sys sys.exit(pytest.main([__file__] + sys.argv[1:])) from s3ql.backends import local from s3ql.backends.comprenc import ComprencBackend import shutil import tempfile import unittest import subprocess import pytest @pytest.mark.usefixtures('s3ql_cmd_argv', 'pass_reg_output') class AdmTests(unittest.TestCase): def setUp(self): self.cache_dir = tempfile.mkdtemp(prefix='s3ql-cache-') self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-') self.storage_url = 'local://' + self.backend_dir self.passphrase = 'oeut3d' def tearDown(self): shutil.rmtree(self.cache_dir) shutil.rmtree(self.backend_dir) def mkfs(self): proc = subprocess.Popen(self.s3ql_cmd_argv('mkfs.s3ql') + ['-L', 'test fs', '--max-obj-size', '500', '--authfile', '/dev/null', '--cachedir', self.cache_dir, '--quiet', self.storage_url ], stdin=subprocess.PIPE, universal_newlines=True) print(self.passphrase, file=proc.stdin) print(self.passphrase, file=proc.stdin) proc.stdin.close() self.assertEqual(proc.wait(), 0) self.reg_output(r'^WARNING: Maximum object sizes less than ' '1 MiB will degrade performance\.$', count=1) def test_passphrase(self): self.mkfs() passphrase_new = 'sd982jhd' proc = subprocess.Popen(self.s3ql_cmd_argv('s3qladm') + [ '--quiet', '--log', 'none', '--authfile', '/dev/null', 'passphrase', self.storage_url ], stdin=subprocess.PIPE, universal_newlines=True) print(self.passphrase, file=proc.stdin) print(passphrase_new, file=proc.stdin) print(passphrase_new, file=proc.stdin) proc.stdin.close() self.assertEqual(proc.wait(), 0) plain_backend = local.Backend(self.storage_url, None, None) backend = ComprencBackend(passphrase_new.encode(), ('zlib', 6), plain_backend) backend.fetch('s3ql_passphrase') # will fail with wrong pw def test_authinfo(self): self.mkfs() with tempfile.NamedTemporaryFile('wt') as fh: 
print('[entry1]', 'storage-url: local://', 'fs-passphrase: clearly wrong', '', '[entry2]', 'storage-url: %s' % self.storage_url, 'fs-passphrase: %s' % self.passphrase, file=fh, sep='\n') fh.flush() proc = subprocess.Popen(self.s3ql_cmd_argv('fsck.s3ql') + [ '--quiet', '--authfile', fh.name, '--cachedir', self.cache_dir, '--log', 'none', self.storage_url ], stdin=subprocess.PIPE, universal_newlines=True) proc.stdin.close() self.assertEqual(proc.wait(), 0) s3ql-2.26/tests/t5_ctrl.py0000755000175000017500000000310613223730045017101 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' t5_ctrl.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' if __name__ == '__main__': import pytest import sys sys.exit(pytest.main([__file__] + sys.argv[1:])) import s3ql.ctrl import sys import t4_fuse class TestCtrl(t4_fuse.TestFuse): def test(self): self.mkfs() self.mount() self.tst_ctrl_flush() self.tst_ctrl_drop() self.tst_ctrl_log() self.tst_ctrl_cachesize() self.umount() self.fsck() def tst_ctrl_flush(self): try: s3ql.ctrl.main(['flushcache', self.mnt_dir]) except: sys.excepthook(*sys.exc_info()) pytest.fail("s3qlctrl raised exception") def tst_ctrl_drop(self): try: s3ql.ctrl.main(['dropcache', self.mnt_dir]) except: sys.excepthook(*sys.exc_info()) pytest.fail("s3qlctrl raised exception") def tst_ctrl_log(self): try: s3ql.ctrl.main(['log', self.mnt_dir, 'warn']) s3ql.ctrl.main(['log', self.mnt_dir, 'debug', 's3ql', 'dugong']) s3ql.ctrl.main(['log', self.mnt_dir, 'info']) except: sys.excepthook(*sys.exc_info()) pytest.fail("s3qlctrl raised exception") def tst_ctrl_cachesize(self): try: s3ql.ctrl.main(['cachesize', self.mnt_dir, '10240']) except: sys.excepthook(*sys.exc_info()) pytest.fail("s3qlctrl raised exception") s3ql-2.26/tests/t5_failsafe.py0000775000175000017500000001153513160156175017724 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' t5_failsafe.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' if __name__ == '__main__': import pytest import sys sys.exit(pytest.main([__file__] + sys.argv[1:])) import os.path import t4_fuse import s3ql.ctrl import pytest import errno import time from common import get_remote_test_info, NoTestSection from s3ql.backends import gs from s3ql.backends.local import Backend as LocalBackend from s3ql.common import get_seq_no from s3ql import BUFSIZE @pytest.mark.usefixtures('pass_reg_output') class TestFailsafe(t4_fuse.TestFuse): ''' Test behavior with corrupted backend. In contrast to the tests in t3_fs_api, here we also make sure that remote connections are properly reset. We use Google Storage, so that we don't have to worry about propagation delays. 
''' def setup_method(self, method): super().setup_method(method) try: (backend_login, backend_pw, self.storage_url) = get_remote_test_info('gs-test') except NoTestSection as exc: pytest.skip(exc.reason) self.backend_login = backend_login self.backend_passphrase = backend_pw self.backend = gs.Backend(self.storage_url, backend_login, backend_pw, {}) def test(self): self.mkfs(max_obj_size=10*1024**2) self.mount() fname1 = os.path.join(self.mnt_dir, 'file1') fname2 = os.path.join(self.mnt_dir, 'file2') # We need lots of data to keep the connection alive # and reproduce issue 424 with open(fname1, 'wb') as fh: with open('/dev/urandom', 'rb') as src: for _ in range(5): fh.write(src.read(BUFSIZE)) s3ql.ctrl.main(['flushcache', self.mnt_dir]) with open(fname2, 'w') as fh: fh.write('Hello, second world') s3ql.ctrl.main(['flushcache', self.mnt_dir]) # Unmount required to avoid reading from kernel cache self.umount() # Modify (val, meta) = self.backend.fetch('s3ql_data_1') self.backend.store('s3ql_data_1', val[:500] + b'oops' + val[500:], meta) # Try to read self.mount() with pytest.raises(IOError) as exc_info: with open(fname1, 'rb') as fh: fh.read() assert exc_info.value.errno == errno.EIO self.reg_output(r'^ERROR: Backend returned malformed data for ' 'block 0 of inode \d+ .+$', count=1) # This should still work with open(fname2, 'rb') as fh: fh.read() # But this should not with pytest.raises(PermissionError): open(fname2, 'wb') self.reg_output(r'^ERROR: Backend returned malformed data for ' 'block 0 of inode \d+ .+$', count=1) # Printed during umount self.reg_output(r'^WARNING: File system errors encountered, ' 'marking for fsck.$', count=1) @pytest.mark.usefixtures('pass_reg_output') class TestNewerMetadata(t4_fuse.TestFuse): ''' Make sure that we turn on failsafe mode and don't overwrite remote metadata if it suddenly becomes newer than local. ''' def test(self): self.mkfs() # Get backend instance plain_backend = LocalBackend(self.storage_url, None, None) # Save metadata meta = plain_backend['s3ql_metadata'] # Mount file system self.mount() # Increase sequence number seq_no = get_seq_no(plain_backend) plain_backend['s3ql_seq_no_%d' % (seq_no+1)] = b'Empty' # Create a file, so that there's metadata to flush fname = os.path.join(self.mnt_dir, 'file1') with open(fname, 'w') as fh: fh.write('hello, world') # Try to upload metadata s3ql.ctrl.main(['upload-meta', self.mnt_dir]) # Try to write. We repeat a few times, since the metadata upload # happens asynchronously. with pytest.raises(PermissionError): for _ in range(10): with open(fname + 'barz', 'w') as fh: fh.write('foobar') time.sleep(1) self.reg_output(r'^ERROR: Remote metadata is newer than local ' '\(\d+ vs \d+\), refusing to overwrite(?: and switching ' 'to failsafe mode)?!$', count=2) self.reg_output(r'^WARNING: File system errors encountered, marking for ' 'fsck\.$', count=1) self.reg_output(r'^ERROR: The locally cached metadata will be ' '\*lost\* the next .+$', count=1) self.umount() # Assert that remote metadata has not been overwritten assert meta == plain_backend['s3ql_metadata'] plain_backend.close() s3ql-2.26/tests/t6_upgrade.py0000775000175000017500000002016213160156175017576 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' t6_upgrade.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. 
''' if __name__ == '__main__': import pytest import sys sys.exit(pytest.main([__file__] + sys.argv[1:])) from common import populate_dir, skip_without_rsync, retry from t1_backends import get_remote_test_info, NoTestSection from s3ql import backends import shutil import subprocess from subprocess import check_output, CalledProcessError import t4_fuse import tempfile import os import pytest @pytest.mark.usefixtures('pass_reg_output') class TestUpgrade(t4_fuse.TestFuse): def setup_method(self, method): skip_without_rsync() basedir_old = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 's3ql.old')) if not os.path.exists(os.path.join(basedir_old, 'bin', 'mkfs.s3ql')): pytest.skip('no previous S3QL version found') super().setup_method(method) self.ref_dir = tempfile.mkdtemp(prefix='s3ql-ref-') self.bak_dir = tempfile.mkdtemp(prefix='s3ql-bak-') self.basedir_old = basedir_old def teardown_method(self, method): super().teardown_method(method) shutil.rmtree(self.ref_dir) shutil.rmtree(self.bak_dir) def mkfs_old(self, force=False, max_obj_size=500): argv = [ os.path.join(self.basedir_old, 'bin', 'mkfs.s3ql'), '-L', 'test fs', '--max-obj-size', str(max_obj_size), '--cachedir', self.cache_dir, '--quiet', '--authfile', '/dev/null', self.storage_url ] if force: argv.append('--force') if self.passphrase is None: argv.append('--plain') proc = subprocess.Popen(argv, stdin=subprocess.PIPE, universal_newlines=True) if self.backend_login is not None: print(self.backend_login, file=proc.stdin) print(self.backend_passphrase, file=proc.stdin) if self.passphrase is not None: print(self.passphrase, file=proc.stdin) print(self.passphrase, file=proc.stdin) proc.stdin.close() assert proc.wait() == 0 self.reg_output(r'^Warning: maximum object sizes less than 1 MiB ' 'will seriously degrade performance\.$', count=1) def mount_old(self): self.mount_process = subprocess.Popen([os.path.join(self.basedir_old, 'bin', 'mount.s3ql'), "--fg", '--cachedir', self.cache_dir, '--log', 'none', '--quiet', '--authfile', '/dev/null', '--compress', 'zlib', self.storage_url, self.mnt_dir], stdin=subprocess.PIPE, universal_newlines=True) if self.backend_login is not None: print(self.backend_login, file=self.mount_process.stdin) print(self.backend_passphrase, file=self.mount_process.stdin) if self.passphrase is not None: print(self.passphrase, file=self.mount_process.stdin) self.mount_process.stdin.close() def poll(): if os.path.ismount(self.mnt_dir): return True assert self.mount_process.poll() is None retry(30, poll) def umount_old(self): with open('/dev/null', 'wb') as devnull: retry(5, lambda: subprocess.call(['fuser', '-m', self.mnt_dir], stdout=devnull, stderr=devnull) == 1) proc = subprocess.Popen([os.path.join(self.basedir_old, 'bin', 'umount.s3ql'), '--quiet', self.mnt_dir]) retry(90, lambda : proc.poll() is not None) assert proc.wait() == 0 assert self.mount_process.poll() == 0 assert not os.path.ismount(self.mnt_dir) def upgrade(self): proc = subprocess.Popen(self.s3ql_cmd_argv('s3qladm') + [ '--cachedir', self.cache_dir, '--authfile', '/dev/null', '--quiet', 'upgrade', self.storage_url ], stdin=subprocess.PIPE, universal_newlines=True) if self.backend_login is not None: print(self.backend_login, file=proc.stdin) print(self.backend_passphrase, file=proc.stdin) if self.passphrase is not None: print(self.passphrase, file=proc.stdin) print('yes', file=proc.stdin) proc.stdin.close() assert proc.wait() == 0 def compare(self): try: out = check_output(['rsync', '-anciHAX', '--delete', '--exclude', '/lost+found', 
self.ref_dir + '/', self.mnt_dir + '/'], universal_newlines=True, stderr=subprocess.STDOUT) except CalledProcessError as exc: pytest.fail('rsync failed with ' + exc.output) if out: pytest.fail('Copy not equal to original, rsync says:\n' + out) def populate(self): populate_dir(self.ref_dir) @pytest.mark.parametrize("with_cache", (True, False)) def test(self, with_cache): self.populate() # Create and mount using previous S3QL version self.mkfs_old() self.mount_old() subprocess.check_call(['rsync', '-aHAX', self.ref_dir + '/', self.mnt_dir + '/']) self.umount_old() # Try to access with new version (should fail) if not with_cache: shutil.rmtree(self.cache_dir) self.cache_dir = tempfile.mkdtemp(prefix='s3ql-cache-') self.mount(expect_fail=32) self.reg_output(r'^ERROR: File system revision too old, please ' 'run `s3qladm upgrade` first\.$', count=1) # Upgrade if not with_cache: shutil.rmtree(self.cache_dir) self.cache_dir = tempfile.mkdtemp(prefix='s3ql-cache-') self.upgrade() # ...and test if not with_cache: shutil.rmtree(self.cache_dir) self.cache_dir = tempfile.mkdtemp(prefix='s3ql-cache-') self.fsck() self.mount() self.compare() # Try if we can still write (we messed this up in the upgrade # from 2.16 to 2.17). with open('%s/some_new_file' % (self.mnt_dir,), 'w') as fh: fh.write('hello, world') self.umount() class TestPlainUpgrade(TestUpgrade): def setup_method(self, method): super().setup_method(method) self.passphrase = None class RemoteUpgradeTest: def setup_method(self, method, name): super().setup_method(method) try: (backend_login, backend_pw, self.storage_url) = get_remote_test_info(name) except NoTestSection as exc: pytest.skip(exc.reason) self.backend_login = backend_login self.backend_passphrase = backend_pw def populate(self): populate_dir(self.ref_dir, entries=50, size=5*1024*1024) def teardown_method(self, method): super().teardown_method(method) proc = subprocess.Popen(self.s3ql_cmd_argv('s3qladm') + [ '--quiet', '--authfile', '/dev/null', 'clear', self.storage_url ], stdin=subprocess.PIPE, universal_newlines=True) if self.backend_login is not None: print(self.backend_login, file=proc.stdin) print(self.backend_passphrase, file=proc.stdin) print('yes', file=proc.stdin) proc.stdin.close() assert proc.wait() == 0 # Dynamically generate tests for other backends for backend_name in backends.prefix_map: if backend_name == 'local': continue def setup_method(self, method, backend_name=backend_name): RemoteUpgradeTest.setup_method(self, method, backend_name + '-test') test_class_name = 'Test' + backend_name + 'Upgrade' globals()[test_class_name] = type(test_class_name, (RemoteUpgradeTest, TestUpgrade), { 'setup_method': setup_method }) s3ql-2.26/tests/t5_full.py0000755000175000017500000000703412742247106017111 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' t5_full.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. 
''' if __name__ == '__main__': import pytest import sys sys.exit(pytest.main([__file__] + sys.argv[1:])) from common import populate_dir, skip_without_rsync, get_remote_test_info, NoTestSection from s3ql import backends import shutil import subprocess from subprocess import check_output, CalledProcessError import t4_fuse import tempfile import pytest class TestFull(t4_fuse.TestFuse): def populate_dir(self, path): populate_dir(path) def test(self): skip_without_rsync() ref_dir = tempfile.mkdtemp(prefix='s3ql-ref-') try: self.populate_dir(ref_dir) # Copy source data self.mkfs() self.mount() subprocess.check_call(['rsync', '-aHAX', ref_dir + '/', self.mnt_dir + '/']) self.umount() self.fsck() # Delete cache, run fsck and compare shutil.rmtree(self.cache_dir) self.cache_dir = tempfile.mkdtemp('s3ql-cache-') self.fsck() self.mount() try: out = check_output(['rsync', '-anciHAX', '--delete', '--exclude', '/lost+found', ref_dir + '/', self.mnt_dir + '/'], universal_newlines=True, stderr=subprocess.STDOUT) except CalledProcessError as exc: pytest.fail('rsync failed with ' + exc.output) if out: pytest.fail('Copy not equal to original, rsync says:\n' + out) self.umount() # Delete cache and mount shutil.rmtree(self.cache_dir) self.cache_dir = tempfile.mkdtemp(prefix='s3ql-cache-') self.mount() self.umount() finally: shutil.rmtree(ref_dir) class RemoteTest: def setup_method(self, method, name): super().setup_method(method) try: (backend_login, backend_pw, self.storage_url) = get_remote_test_info(name) except NoTestSection as exc: pytest.skip(exc.reason) self.backend_login = backend_login self.backend_passphrase = backend_pw def populate_dir(self, path): populate_dir(path, entries=50, size=5*1024*1024) def teardown_method(self, method): super().teardown_method(method) proc = subprocess.Popen(self.s3ql_cmd_argv('s3qladm') + [ '--quiet', '--authfile', '/dev/null', 'clear', self.storage_url ], stdin=subprocess.PIPE, universal_newlines=True) if self.backend_login is not None: print(self.backend_login, file=proc.stdin) print(self.backend_passphrase, file=proc.stdin) print('yes', file=proc.stdin) proc.stdin.close() assert proc.wait() == 0 # Dynamically generate tests for other backends for backend_name in backends.prefix_map: if backend_name == 'local': continue def setup_method(self, method, backend_name=backend_name): RemoteTest.setup_method(self, method, backend_name + '-test') test_class_name = 'TestFull' + backend_name globals()[test_class_name] = type(test_class_name, (RemoteTest, TestFull), { 'setup_method': setup_method }) s3ql-2.26/tests/pytest_checklogs.py0000664000175000017500000001116013227212113021070 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' pytest_checklogs.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. py.test plugin to look for suspicious phrases in messages emitted on stdout/stderr or via the logging module. False positives can be registered via a new `reg_output` fixture (for messages to stdout/stderr), and a `assert_logs` function (for logging messages). 
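A hypothetical test using both mechanisms (the test body and the message
patterns are purely illustrative):

    def test_example(reg_output):
        # expected noise on stderr that should not fail the output check
        reg_output(r'^WARNING: harmless warning$', count=1)
        # require exactly one matching WARNING-level log record
        with assert_logs(r'connection reset', count=1):
            do_something_that_warns()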
''' import pytest import re import functools import sys import logging from contextlib import contextmanager from distutils.version import LooseVersion def pytest_configure(config): # pytest-catchlog was integrated in pytest 3.3.0 if (LooseVersion(pytest.__version__) < "3.3.0" and not config.pluginmanager.hasplugin('pytest_catchlog')): raise ImportError('pytest catchlog plugin not found') # Fail tests if they result in log messages of severity WARNING or more. def check_test_log(caplog): for record in caplog.records: if (record.levelno >= logging.WARNING and not getattr(record, 'checklogs_ignore', False)): raise AssertionError('Logger received warning messages') class CountMessagesHandler(logging.Handler): def __init__(self, level=logging.NOTSET): super().__init__(level) self.count = 0 def emit(self, record): self.count += 1 @contextmanager def assert_logs(pattern, level=logging.WARNING, count=None): '''Assert that suite emits specified log message *pattern* is matched against the *unformatted* log message, i.e. before any arguments are merged. If *count* is not None, raise an exception unless exactly *count* matching messages are caught. Matched log records will also be flagged so that the caplog fixture does not generate exceptions for them (no matter their severity). ''' def filter(record): if (record.levelno == level and re.search(pattern, record.msg)): record.checklogs_ignore = True return True return False handler = CountMessagesHandler() handler.setLevel(level) handler.addFilter(filter) logger = logging.getLogger() logger.addHandler(handler) try: yield finally: logger.removeHandler(handler) if count is not None and handler.count != count: raise AssertionError('Expected to catch %d %r messages, but got only %d' % (count, pattern, handler.count)) def check_test_output(capfd, item): (stdout, stderr) = capfd.readouterr() # Write back what we've read (so that it will still be printed) sys.stdout.write(stdout) sys.stderr.write(stderr) # Strip out false positives try: false_pos = item.checklogs_fp except AttributeError: false_pos = () for (pattern, flags, count) in false_pos: cp = re.compile(pattern, flags) (stdout, cnt) = cp.subn('', stdout, count=count) if count == 0 or count - cnt > 0: stderr = cp.sub('', stderr, count=count - cnt) for pattern in ('exception', 'error', 'warning', 'fatal', 'traceback', 'fault', 'crash(?:ed)?', 'abort(?:ed)', 'fishy'): cp = re.compile(r'\b{}\b'.format(pattern), re.IGNORECASE | re.MULTILINE) hit = cp.search(stderr) if hit: raise AssertionError('Suspicious output to stderr (matched "%s")' % hit.group(0)) hit = cp.search(stdout) if hit: raise AssertionError('Suspicious output to stdout (matched "%s")' % hit.group(0)) def register_output(item, pattern, count=1, flags=re.MULTILINE): '''Register *pattern* as false positive for output checking This prevents the test from failing because the output otherwise appears suspicious. 
''' item.checklogs_fp.append((pattern, flags, count)) @pytest.fixture() def reg_output(request): assert not hasattr(request.node, 'checklogs_fp') request.node.checklogs_fp = [] return functools.partial(register_output, request.node) def check_output(item): pm = item.config.pluginmanager cm = pm.getplugin('capturemanager') capmethod = (getattr(cm, '_capturing', None) or getattr(item, '_capture_fixture', None) or getattr(cm, '_global_capturing', None)) check_test_output(capmethod, item) check_test_log(item.catch_log_handler) @pytest.hookimpl(trylast=True) def pytest_runtest_setup(item): check_output(item) @pytest.hookimpl(trylast=True) def pytest_runtest_call(item): check_output(item) @pytest.hookimpl(trylast=True) def pytest_runtest_teardown(item, nextitem): check_output(item) s3ql-2.26/tests/t3_fs_api.py0000755000175000017500000012235713177323070017412 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' t3_fs_api.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' if __name__ == '__main__': import pytest import sys sys.exit(pytest.main([__file__] + sys.argv[1:])) from llfuse import FUSEError from random import randint from s3ql import fs from s3ql.backends import local from s3ql.backends.pool import BackendPool from s3ql.backends.comprenc import ComprencBackend from s3ql.block_cache import BlockCache from s3ql import ROOT_INODE from s3ql.mkfs import init_tables from s3ql.metadata import create_tables from s3ql.database import Connection from s3ql.fsck import Fsck from s3ql.inode_cache import InodeCache from t2_block_cache import DummyQueue from common import CLOCK_GRANULARITY, safe_sleep from pytest_checklogs import assert_logs import errno import llfuse import os import shutil import stat import logging import tempfile import unittest # We need to access to protected members #pylint: disable=W0212 # The classes provided by llfuse have read-only attributes, # so we duck-type our own. class Ctx: def __init__(self): self.uid = randint(0, 2 ** 32) self.gid = randint(0, 2 ** 32) self.pid = randint(0, 2 ** 32) self.umask = 0 class SetattrFields: def __init__(self, **kw): self.update_atime = False self.update_mtime = False self.update_mode = False self.update_uid = False self.update_gid = False self.update_size = False self.__dict__.update(kw) some_ctx = Ctx() class fs_api_tests(unittest.TestCase): def setUp(self): self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-') plain_backend = local.Backend('local://' + self.backend_dir, None, None) self.backend_pool = BackendPool(lambda: ComprencBackend(b'schwubl', ('zlib', 6), plain_backend)) self.backend = self.backend_pool.pop_conn() self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-') self.max_obj_size = 1024 # Destructors are not guaranteed to run, and we can't unlink # the file immediately because apsw refers to it by name. 
# Therefore, we unlink the file manually in tearDown() self.dbfile = tempfile.NamedTemporaryFile(delete=False) self.db = Connection(self.dbfile.name) create_tables(self.db) init_tables(self.db) # Tested methods assume that they are called from # file system request handler llfuse.lock.acquire() cache = BlockCache(self.backend_pool, self.db, self.cachedir + "/cache", self.max_obj_size * 5) self.block_cache = cache self.server = fs.Operations(cache, self.db, self.max_obj_size, InodeCache(self.db, 0)) self.server.init() # Monkeypatch around the need for removal and upload threads cache.to_remove = DummyQueue(cache) class DummyDistributor: def put(self, arg, timeout=None): cache._do_upload(*arg) return True cache.to_upload = DummyDistributor() # Keep track of unused filenames self.name_cnt = 0 def tearDown(self): self.server.inodes.destroy() llfuse.lock.release() self.block_cache.destroy() shutil.rmtree(self.cachedir) shutil.rmtree(self.backend_dir) os.unlink(self.dbfile.name) self.dbfile.close() @staticmethod def random_data(len_): with open("/dev/urandom", "rb") as fd: return fd.read(len_) def fsck(self): self.block_cache.drop() self.server.inodes.flush() fsck = Fsck(self.cachedir + '/cache', self.backend, { 'max_obj_size': self.max_obj_size }, self.db) fsck.check() self.assertFalse(fsck.found_errors) def newname(self): self.name_cnt += 1 return ("s3ql_%d" % self.name_cnt).encode() def test_getattr_root(self): self.assertTrue(stat.S_ISDIR(self.server.getattr(ROOT_INODE, some_ctx).st_mode)) self.fsck() def test_create(self): ctx = Ctx() mode = self.dir_mode() name = self.newname() inode_p_old = self.server.getattr(ROOT_INODE, some_ctx) safe_sleep(CLOCK_GRANULARITY) self.server._create(ROOT_INODE, name, mode, ctx) id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON name_id = names.id ' 'WHERE name=? 
AND parent_inode = ?', (name, ROOT_INODE)) inode = self.server.getattr(id_, some_ctx) self.assertEqual(inode.st_mode, mode) self.assertEqual(inode.st_uid, ctx.uid) self.assertEqual(inode.st_gid, ctx.gid) self.assertEqual(inode.st_nlink, 1) self.assertEqual(inode.st_size, 0) inode_p_new = self.server.getattr(ROOT_INODE, some_ctx) self.assertGreater(inode_p_new.st_mtime_ns, inode_p_old.st_mtime_ns) self.assertGreater(inode_p_new.st_ctime_ns, inode_p_old.st_ctime_ns) self.server.forget([(id_, 1)]) self.fsck() def test_extstat(self): # Test with zero contents self.server.extstat() # Test with empty file (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), os.O_RDWR, some_ctx) self.server.release(fh) self.server.extstat() # Test with data in file fh = self.server.open(inode.st_ino, os.O_RDWR, some_ctx) self.server.write(fh, 0, b'foobar') self.server.release(fh) self.server.extstat() self.server.forget([(inode.st_ino, 1)]) self.fsck() @staticmethod def dir_mode(): return (randint(0, 0o7777) & ~stat.S_IFDIR) | stat.S_IFDIR @staticmethod def file_mode(): return (randint(0, 0o7777) & ~stat.S_IFREG) | stat.S_IFREG def test_getxattr(self): (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), os.O_RDWR, some_ctx) self.server.release(fh) self.assertRaises(FUSEError, self.server.getxattr, inode.st_ino, b'nonexistant-attr', some_ctx) self.server.setxattr(inode.st_ino, b'my-attr', b'strabumm!', some_ctx) self.assertEqual(self.server.getxattr(inode.st_ino, b'my-attr', some_ctx), b'strabumm!') self.server.forget([(inode.st_ino, 1)]) self.fsck() def test_link(self): name = self.newname() inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), some_ctx) inode_p_new_before = self.server.getattr(inode_p_new.st_ino, some_ctx) (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), os.O_RDWR, some_ctx) self.server.release(fh) safe_sleep(CLOCK_GRANULARITY) inode_before = self.server.getattr(inode.st_ino, some_ctx) self.server.link(inode.st_ino, inode_p_new.st_ino, name, some_ctx) inode_after = self.server.lookup(inode_p_new.st_ino, name, some_ctx) inode_p_new_after = self.server.getattr(inode_p_new.st_ino, some_ctx) id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? 
AND parent_inode = ?', (name, inode_p_new.st_ino)) self.assertEqual(inode_before.st_ino, id_) self.assertEqual(inode_after.st_nlink, 2) self.assertGreater(inode_after.st_ctime_ns, inode_before.st_ctime_ns) self.assertLess(inode_p_new_before.st_mtime_ns, inode_p_new_after.st_mtime_ns) self.assertLess(inode_p_new_before.st_ctime_ns, inode_p_new_after.st_ctime_ns) self.server.forget([(inode.st_ino, 1), (inode_p_new.st_ino, 1), (inode_after.st_ino, 1)]) self.fsck() def test_listxattr(self): (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), os.O_RDWR, some_ctx) self.server.release(fh) self.assertListEqual([], self.server.listxattr(inode.st_ino, some_ctx)) self.server.setxattr(inode.st_ino, b'key1', b'blub', some_ctx) self.assertListEqual([b'key1'], self.server.listxattr(inode.st_ino, some_ctx)) self.server.setxattr(inode.st_ino, b'key2', b'blub', some_ctx) self.assertListEqual(sorted([b'key1', b'key2']), sorted(self.server.listxattr(inode.st_ino, some_ctx))) self.server.forget([(inode.st_ino, 1)]) self.fsck() def test_read(self): len_ = self.max_obj_size data = self.random_data(len_) off = self.max_obj_size // 2 (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), os.O_RDWR, some_ctx) self.server.write(fh, off, data) inode_before = self.server.getattr(inode.st_ino, some_ctx) safe_sleep(CLOCK_GRANULARITY) self.assertTrue(self.server.read(fh, off, len_) == data) inode_after = self.server.getattr(inode.st_ino, some_ctx) self.assertGreater(inode_after.st_atime_ns, inode_before.st_atime_ns) self.assertTrue(self.server.read(fh, 0, len_) == b"\0" * off + data[:off]) self.assertTrue(self.server.read(fh, self.max_obj_size, len_) == data[off:]) self.server.release(fh) self.server.forget([(inode.st_ino, 1)]) self.fsck() def test_readdir(self): # Create a few entries names = [ ('entry_%2d' % i).encode() for i in range(20) ] for name in names: (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(), os.O_RDWR, some_ctx) self.server.release(fh) self.server.forget([(inode.st_ino, 1)]) # Delete some to make sure that we don't have continuous rowids remove_no = [0, 2, 3, 5, 9] for i in remove_no: self.server.unlink(ROOT_INODE, names[i], some_ctx) del names[i] # Read all fh = self.server.opendir(ROOT_INODE, some_ctx) self.assertListEqual(sorted(names + [b'lost+found']) , sorted(x[0] for x in self.server.readdir(fh, 0))) self.server.releasedir(fh) # Read in parts fh = self.server.opendir(ROOT_INODE, some_ctx) entries = list() try: next_ = 0 while True: gen = self.server.readdir(fh, next_) for _ in range(3): (name, _, next_) = next(gen) entries.append(name) except StopIteration: pass self.assertListEqual(sorted(names + [b'lost+found']) , sorted(entries)) self.server.releasedir(fh) self.fsck() def test_forget(self): name = self.newname() # Test that entries are deleted when they're no longer referenced (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(), os.O_RDWR, some_ctx) self.server.write(fh, 0, b'foobar') self.server.unlink(ROOT_INODE, name, some_ctx) self.assertFalse(self.db.has_val('SELECT 1 FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? 
AND parent_inode = ?', (name, ROOT_INODE))) self.assertTrue(self.server.getattr(inode.st_ino, some_ctx).st_ino) self.server.release(fh) self.server.forget([(inode.st_ino, 1)]) self.assertFalse(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.st_ino,))) self.fsck() def test_removexattr(self): (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), os.O_RDWR, some_ctx) self.server.release(fh) self.assertRaises(FUSEError, self.server.removexattr, inode.st_ino, b'some name', some_ctx) self.server.setxattr(inode.st_ino, b'key1', b'blub', some_ctx) self.server.removexattr(inode.st_ino, b'key1', some_ctx) self.assertListEqual([], self.server.listxattr(inode.st_ino, some_ctx)) self.server.forget([(inode.st_ino, 1)]) self.fsck() def test_rename(self): oldname = self.newname() newname = self.newname() inode = self.server.mkdir(ROOT_INODE, oldname, self.dir_mode(), some_ctx) inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), some_ctx) inode_p_new_before = self.server.getattr(inode_p_new.st_ino, some_ctx) inode_p_old_before = self.server.getattr(ROOT_INODE, some_ctx) safe_sleep(CLOCK_GRANULARITY) self.server.rename(ROOT_INODE, oldname, inode_p_new.st_ino, newname, some_ctx) inode_p_old_after = self.server.getattr(ROOT_INODE, some_ctx) inode_p_new_after = self.server.getattr(inode_p_new.st_ino, some_ctx) self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE))) id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id == name_id ' 'WHERE name=? AND parent_inode = ?', (newname, inode_p_new.st_ino)) self.assertEqual(inode.st_ino, id_) assert inode_p_new_before.st_mtime_ns < inode_p_new_after.st_mtime_ns assert inode_p_new_before.st_ctime_ns < inode_p_new_after.st_ctime_ns assert inode_p_old_before.st_mtime_ns < inode_p_old_after.st_mtime_ns assert inode_p_old_before.st_ctime_ns < inode_p_old_after.st_ctime_ns self.server.forget([(inode.st_ino, 1), (inode_p_new.st_ino, 1)]) self.fsck() def test_replace_file(self): oldname = self.newname() newname = self.newname() (fh, inode) = self.server.create(ROOT_INODE, oldname, self.file_mode(), os.O_RDWR, some_ctx) self.server.write(fh, 0, b'some data to deal with') self.server.release(fh) self.server.setxattr(inode.st_ino, b'test_xattr', b'42*8', some_ctx) inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), some_ctx) inode_p_new_before = self.server.getattr(inode_p_new.st_ino, some_ctx) inode_p_old_before = self.server.getattr(ROOT_INODE, some_ctx) (fh, inode2) = self.server.create(inode_p_new.st_ino, newname, self.file_mode(), os.O_RDWR, some_ctx) self.server.write(fh, 0, b'even more data to deal with') self.server.release(fh) self.server.setxattr(inode2.st_ino, b'test_xattr', b'42*8', some_ctx) self.server.forget([(inode2.st_ino, 1)]) safe_sleep(CLOCK_GRANULARITY) self.server.rename(ROOT_INODE, oldname, inode_p_new.st_ino, newname, some_ctx) inode_p_old_after = self.server.getattr(ROOT_INODE, some_ctx) inode_p_new_after = self.server.getattr(inode_p_new.st_ino, some_ctx) self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE))) id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? 
AND parent_inode = ?', (newname, inode_p_new.st_ino)) self.assertEqual(inode.st_ino, id_) self.assertLess(inode_p_new_before.st_mtime_ns, inode_p_new_after.st_mtime_ns) self.assertLess(inode_p_new_before.st_ctime_ns, inode_p_new_after.st_ctime_ns) self.assertLess(inode_p_old_before.st_mtime_ns, inode_p_old_after.st_mtime_ns) self.assertLess(inode_p_old_before.st_ctime_ns, inode_p_old_after.st_ctime_ns) self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode2.st_ino,))) self.server.forget([(inode.st_ino, 1), (inode_p_new.st_ino, 1)]) self.fsck() def test_replace_dir(self): oldname = self.newname() newname = self.newname() inode = self.server.mkdir(ROOT_INODE, oldname, self.dir_mode(), some_ctx) inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), some_ctx) inode_p_new_before = self.server.getattr(inode_p_new.st_ino, some_ctx) inode_p_old_before = self.server.getattr(ROOT_INODE, some_ctx) inode2 = self.server.mkdir(inode_p_new.st_ino, newname, self.dir_mode(), some_ctx) self.server.forget([(inode2.st_ino, 1)]) safe_sleep(CLOCK_GRANULARITY) self.server.rename(ROOT_INODE, oldname, inode_p_new.st_ino, newname, some_ctx) inode_p_old_after = self.server.getattr(ROOT_INODE, some_ctx) inode_p_new_after = self.server.getattr(inode_p_new.st_ino, some_ctx) self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE))) id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? AND parent_inode = ?', (newname, inode_p_new.st_ino)) self.assertEqual(inode.st_ino, id_) self.assertLess(inode_p_new_before.st_mtime_ns, inode_p_new_after.st_mtime_ns) self.assertLess(inode_p_new_before.st_ctime_ns, inode_p_new_after.st_ctime_ns) self.assertLess(inode_p_old_before.st_mtime_ns, inode_p_old_after.st_mtime_ns) self.assertLess(inode_p_old_before.st_ctime_ns, inode_p_old_after.st_ctime_ns) self.server.forget([(inode.st_ino, 1), (inode_p_new.st_ino, 1)]) self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode2.st_ino,))) self.fsck() def test_setattr_one(self): (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), os.O_RDWR, some_ctx) self.server.release(fh) inode_old = self.server.getattr(inode.st_ino, some_ctx) attr = self.server.getattr(inode.st_ino, some_ctx) # this is a fresh instance attr.st_mode = self.file_mode() attr.st_uid = randint(0, 2 ** 32) attr.st_gid = randint(0, 2 ** 32) # should be ignored attr.st_atime_ns = randint(0, 2 ** 50) attr.st_mtime_ns = randint(0, 2 ** 50) safe_sleep(CLOCK_GRANULARITY) sf = SetattrFields(update_mode=True, update_uid=True, update_atime=True, update_mtime=True) self.server.setattr(inode.st_ino, attr, sf, None, some_ctx) inode_new = self.server.getattr(inode.st_ino, some_ctx) for name in ('st_mode', 'st_uid', 'st_atime_ns', 'st_mtime_ns'): assert getattr(attr, name) == getattr(inode_new, name) for name in ('st_gid', 'st_size', 'st_nlink', 'st_rdev', 'st_blocks', 'st_blksize'): assert getattr(inode_old, name) == getattr(inode_new, name) assert inode_old.st_ctime_ns < inode_new.st_ctime_ns self.server.forget([(inode.st_ino, 1)]) self.fsck() def test_setattr_two(self): (fh, inode_old) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), os.O_RDWR, some_ctx) attr = self.server.getattr(inode_old.st_ino, some_ctx) attr.st_mode = self.file_mode() attr.st_uid = randint(0, 2 ** 32) attr.st_gid = randint(0, 2 ** 32) attr.st_mtime_ns = randint(0, 2 ** 50) 
attr.st_ctime_ns = 5e9 safe_sleep(CLOCK_GRANULARITY) sf = SetattrFields(update_gid=True, update_mtime=True) self.server.setattr(inode_old.st_ino, attr, sf, None, some_ctx) inode_new = self.server.getattr(inode_old.st_ino, some_ctx) for name in ('st_gid', 'st_mtime_ns'): assert getattr(attr, name) == getattr(inode_new, name) for name in ('st_uid', 'st_size', 'st_nlink', 'st_rdev', 'st_blocks', 'st_blksize', 'st_mode', 'st_atime_ns'): assert getattr(inode_old, name) == getattr(inode_new, name) assert inode_old.st_ctime_ns < inode_new.st_ctime_ns self.server.release(fh) self.server.forget([(inode_old.st_ino, 1)]) self.fsck() def test_truncate(self): len_ = int(2.7 * self.max_obj_size) data = self.random_data(len_) (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), os.O_RDWR, some_ctx) self.server.write(fh, 0, data) attr = self.server.getattr(inode.st_ino, some_ctx) attr.st_size = len_ // 2 sf = SetattrFields(update_size=True) self.server.setattr(inode.st_ino, attr, sf, None, some_ctx) self.assertTrue(self.server.read(fh, 0, len_) == data[:len_ // 2]) attr.st_size = len_ self.server.setattr(inode.st_ino, attr, sf, None, some_ctx) self.assertTrue(self.server.read(fh, 0, len_) == data[:len_ // 2] + b'\0' * (len_ // 2)) self.server.release(fh) self.server.forget([(inode.st_ino, 1)]) self.fsck() def test_truncate_0(self): len1 = 158 len2 = 133 (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), os.O_RDWR, some_ctx) self.server.write(fh, 0, self.random_data(len1)) self.server.release(fh) self.server.inodes.flush() attr = self.server.getattr(inode.st_ino, some_ctx) fh = self.server.open(inode.st_ino, os.O_RDWR, some_ctx) attr.st_size = 0 self.server.setattr(inode.st_ino, attr, SetattrFields(update_size=True), fh, some_ctx) self.server.write(fh, 0, self.random_data(len2)) self.server.release(fh) self.server.forget([(inode.st_ino, 1)]) self.fsck() def test_setxattr(self): (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), os.O_RDWR, some_ctx) self.server.release(fh) self.server.setxattr(inode.st_ino, b'my-attr', b'strabumm!', some_ctx) self.assertEqual(self.server.getxattr(inode.st_ino, b'my-attr', some_ctx), b'strabumm!') self.server.forget([(inode.st_ino, 1)]) self.fsck() def test_names(self): name1 = self.newname() name2 = self.newname() (fh, inode) = self.server.create(ROOT_INODE, name1, self.file_mode(), os.O_RDWR, some_ctx) self.server.release(fh) self.server.forget([(inode.st_ino, 1)]) (fh, inode) = self.server.create(ROOT_INODE, name2, self.file_mode(), os.O_RDWR, some_ctx) self.server.release(fh) self.server.setxattr(inode.st_ino, name1, b'strabumm!', some_ctx) self.fsck() self.server.removexattr(inode.st_ino, name1, some_ctx) self.fsck() self.server.setxattr(inode.st_ino, name1, b'strabumm karacho!!', some_ctx) self.server.unlink(ROOT_INODE, name1, some_ctx) self.server.forget([(inode.st_ino, 1)]) self.fsck() def test_statfs(self): # Test with zero contents self.server.statfs(some_ctx) # Test with empty file (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), os.O_RDWR, some_ctx) self.server.release(fh) self.server.statfs(some_ctx) # Test with data in file fh = self.server.open(inode.st_ino, os.O_RDWR, some_ctx) self.server.write(fh, 0, b'foobar') self.server.release(fh) self.server.forget([(inode.st_ino, 1)]) self.server.statfs(some_ctx) def test_symlink(self): target = self.newname() name = self.newname() inode_p_before = self.server.getattr(ROOT_INODE, some_ctx) 
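# Sleep for at least one clock tick so that the parent's mtime/ctime change is measurable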
safe_sleep(CLOCK_GRANULARITY) inode = self.server.symlink(ROOT_INODE, name, target, some_ctx) inode_p_after = self.server.getattr(ROOT_INODE, some_ctx) self.assertEqual(target, self.server.readlink(inode.st_ino, some_ctx)) id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)) self.assertEqual(inode.st_ino, id_) self.assertLess(inode_p_before.st_mtime_ns, inode_p_after.st_mtime_ns) self.assertLess(inode_p_before.st_ctime_ns, inode_p_after.st_ctime_ns) self.server.forget([(inode.st_ino, 1)]) self.fsck() def test_unlink(self): name = self.newname() (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(), os.O_RDWR, some_ctx) self.server.write(fh, 0, b'some data to deal with') self.server.release(fh) # Add extended attributes self.server.setxattr(inode.st_ino, b'test_xattr', b'42*8', some_ctx) self.server.forget([(inode.st_ino, 1)]) inode_p_before = self.server.getattr(ROOT_INODE, some_ctx) safe_sleep(CLOCK_GRANULARITY) self.server.unlink(ROOT_INODE, name, some_ctx) inode_p_after = self.server.getattr(ROOT_INODE, some_ctx) self.assertLess(inode_p_before.st_mtime_ns, inode_p_after.st_mtime_ns) self.assertLess(inode_p_before.st_ctime_ns, inode_p_after.st_ctime_ns) self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE))) self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.st_ino,))) self.fsck() def test_rmdir(self): name = self.newname() inode = self.server.mkdir(ROOT_INODE, name, self.dir_mode(), some_ctx) self.server.forget([(inode.st_ino, 1)]) inode_p_before = self.server.getattr(ROOT_INODE, some_ctx) safe_sleep(CLOCK_GRANULARITY) self.server.rmdir(ROOT_INODE, name, some_ctx) inode_p_after = self.server.getattr(ROOT_INODE, some_ctx) self.assertLess(inode_p_before.st_mtime_ns, inode_p_after.st_mtime_ns) self.assertLess(inode_p_before.st_ctime_ns, inode_p_after.st_ctime_ns) self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE))) self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.st_ino,))) self.fsck() def test_relink(self): name = self.newname() name2 = self.newname() data = b'some data to deal with' (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(), os.O_RDWR, some_ctx) self.server.write(fh, 0, data) self.server.release(fh) self.server.unlink(ROOT_INODE, name, some_ctx) self.server.inodes.flush() self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? 
AND parent_inode = ?', (name, ROOT_INODE))) self.assertTrue(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.st_ino,))) self.server.link(inode.st_ino, ROOT_INODE, name2, some_ctx) self.server.forget([(inode.st_ino, 2)]) inode = self.server.lookup(ROOT_INODE, name2, some_ctx) fh = self.server.open(inode.st_ino, os.O_RDONLY, some_ctx) self.assertTrue(self.server.read(fh, 0, len(data)) == data) self.server.release(fh) self.server.forget([(inode.st_ino, 1)]) self.fsck() def test_write(self): len_ = self.max_obj_size data = self.random_data(len_) off = self.max_obj_size // 2 (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), os.O_RDWR, some_ctx) inode_before = self.server.getattr(inode.st_ino, some_ctx) safe_sleep(CLOCK_GRANULARITY) self.server.write(fh, off, data) inode_after = self.server.getattr(inode.st_ino, some_ctx) self.assertGreater(inode_after.st_mtime_ns, inode_before.st_mtime_ns) self.assertGreater(inode_after.st_ctime_ns, inode_before.st_ctime_ns) self.assertEqual(inode_after.st_size, off + len_) self.server.write(fh, 0, data) inode_after = self.server.getattr(inode.st_ino, some_ctx) self.assertEqual(inode_after.st_size, off + len_) self.server.release(fh) self.server.forget([(inode.st_ino, 1)]) self.fsck() def test_failsafe(self): len_ = self.max_obj_size data = self.random_data(len_) (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), os.O_RDWR, some_ctx) self.server.write(fh, 0, data) self.server.cache.drop() self.assertTrue(self.server.failsafe is False) datafile = os.path.join(self.backend_dir, 's3ql_data_', 's3ql_data_1') shutil.copy(datafile, datafile + '.bak') # Modify contents with open(datafile, 'rb+') as rfh: rfh.seek(560) rfh.write(b'blrub!') with self.assertRaises(FUSEError) as cm: with assert_logs('^Backend returned malformed data for', count=1, level=logging.ERROR): self.server.read(fh, 0, len_) self.assertEqual(cm.exception.errno, errno.EIO) self.assertTrue(self.server.failsafe) # Restore contents, but should be marked as damaged now os.rename(datafile + '.bak', datafile) with self.assertRaises(FUSEError) as cm: self.server.read(fh, 0, len_) self.assertEqual(cm.exception.errno, errno.EIO) # Release and re-open, now we should be able to access again self.server.release(fh) self.server.forget([(inode.st_ino, 1)]) # ..but not write access since we are in failsafe mode with self.assertRaises(FUSEError) as cm: self.server.open(inode.st_ino, os.O_RDWR, some_ctx) self.assertEqual(cm.exception.errno, errno.EPERM) # ..read-only is fine. 
fh = self.server.open(inode.st_ino, os.O_RDONLY, some_ctx) self.server.read(fh, 0, len_) # Remove completely, should give error after cache flush os.unlink(datafile) self.server.read(fh, 3, len_//2) self.server.cache.drop() with self.assertRaises(FUSEError) as cm: with assert_logs('^Backend lost block', count=1, level=logging.ERROR): self.server.read(fh, 5, len_//2) self.assertEqual(cm.exception.errno, errno.EIO) # Don't call fsck, we're missing a block def test_create_open(self): name = self.newname() # Create a new file (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(), os.O_RDWR, some_ctx) self.server.release(fh) self.server.forget([(inode, 1)]) # Open it atomically (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(), os.O_RDWR, some_ctx) self.server.release(fh) self.server.forget([(inode, 1)]) self.fsck() def test_edit(self): len_ = self.max_obj_size data = self.random_data(len_) (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), os.O_RDWR, some_ctx) self.server.write(fh, 0, data) self.server.release(fh) self.block_cache.drop() fh = self.server.open(inode.st_ino, os.O_RDWR, some_ctx) attr = self.server.getattr(inode.st_ino, some_ctx) attr.st_size = 0 self.server.setattr(inode.st_ino, attr, SetattrFields(update_size=True), fh, some_ctx) self.server.write(fh, 0, data[50:]) self.server.release(fh) self.server.forget([(inode.st_ino, 1)]) self.fsck() def test_copy_tree(self): ext_attr_name = b'system.foo.brazl' ext_attr_val = b'schulla dku woumm bramp' src_inode = self.server.mkdir(ROOT_INODE, b'source', self.dir_mode(), some_ctx) dst_inode = self.server.mkdir(ROOT_INODE, b'dest', self.dir_mode(), some_ctx) # Create file (fh, f1_inode) = self.server.create(src_inode.st_ino, b'file1', self.file_mode(), os.O_RDWR, some_ctx) self.server.write(fh, 0, b'file1 contents') self.server.release(fh) self.server.setxattr(f1_inode.st_ino, ext_attr_name, ext_attr_val, some_ctx) # Create hardlink (fh, f2_inode) = self.server.create(src_inode.st_ino, b'file2', self.file_mode(), os.O_RDWR, some_ctx) self.server.write(fh, 0, b'file2 contents') self.server.release(fh) f2_inode = self.server.link(f2_inode.st_ino, src_inode.st_ino, b'file2_hardlink', some_ctx) # Create subdirectory d1_inode = self.server.mkdir(src_inode.st_ino, b'dir1', self.dir_mode(), some_ctx) d2_inode = self.server.mkdir(d1_inode.st_ino, b'dir2', self.dir_mode(), some_ctx) # ..with a 3rd hardlink f2_inode = self.server.link(f2_inode.st_ino, d1_inode.st_ino, b'file2_hardlink', some_ctx) # Replicate self.server.copy_tree(src_inode.st_ino, dst_inode.st_ino) # Change files fh = self.server.open(f1_inode.st_ino, os.O_RDWR, some_ctx) self.server.write(fh, 0, b'new file1 contents') self.server.release(fh) fh = self.server.open(f2_inode.st_ino, os.O_RDWR, some_ctx) self.server.write(fh, 0, b'new file2 contents') self.server.release(fh) # Get copy properties f1_inode_c = self.server.lookup(dst_inode.st_ino, b'file1', some_ctx) f2_inode_c = self.server.lookup(dst_inode.st_ino, b'file2', some_ctx) f2h_inode_c = self.server.lookup(dst_inode.st_ino, b'file2_hardlink', some_ctx) d1_inode_c = self.server.lookup(dst_inode.st_ino, b'dir1', some_ctx) d2_inode_c = self.server.lookup(d1_inode_c.st_ino, b'dir2', some_ctx) f2_h_inode_c = self.server.lookup(d1_inode_c.st_ino, b'file2_hardlink', some_ctx) # Check file1 fh = self.server.open(f1_inode_c.st_ino, os.O_RDWR, some_ctx) self.assertEqual(self.server.read(fh, 0, 42), b'file1 contents') self.server.release(fh) self.assertNotEqual(f1_inode.st_ino, 
f1_inode_c.st_ino) self.assertEqual(self.server.getxattr(f1_inode_c.st_ino, ext_attr_name, some_ctx), ext_attr_val) # Check file2 fh = self.server.open(f2_inode_c.st_ino, os.O_RDWR, some_ctx) self.assertTrue(self.server.read(fh, 0, 42) == b'file2 contents') self.server.release(fh) self.assertEqual(f2_inode_c.st_ino, f2h_inode_c.st_ino) self.assertEqual(f2_inode_c.st_nlink, 3) self.assertNotEqual(f2_inode.st_ino, f2_inode_c.st_ino) self.assertEqual(f2_h_inode_c.st_ino, f2_inode_c.st_ino) # Check subdir1 self.assertNotEqual(d1_inode.st_ino, d1_inode_c.st_ino) self.assertNotEqual(d2_inode.st_ino, d2_inode_c.st_ino) self.server.forget(list(self.server.open_inodes.items())) self.fsck() def test_copy_tree_2(self): src_inode = self.server.mkdir(ROOT_INODE, b'source', self.dir_mode(), some_ctx) dst_inode = self.server.mkdir(ROOT_INODE, b'dest', self.dir_mode(), some_ctx) # Create file (fh, inode) = self.server.create(src_inode.st_ino, b'file1', self.file_mode(), os.O_RDWR, some_ctx) self.server.write(fh, 0, b'block 1 contents') self.server.write(fh, self.max_obj_size, b'block 1 contents') self.server.release(fh) self.server.forget([(inode.st_ino, 1)]) self.server.copy_tree(src_inode.st_ino, dst_inode.st_ino) self.server.forget([(src_inode.st_ino, 1), (dst_inode.st_ino, 1)]) self.fsck() def test_lock_tree(self): inode1 = self.server.mkdir(ROOT_INODE, b'source', self.dir_mode(), some_ctx) # Create file (fh, inode1a) = self.server.create(inode1.st_ino, b'file1', self.file_mode(), os.O_RDWR, some_ctx) self.server.write(fh, 0, b'file1 contents') self.server.release(fh) # Create subdirectory inode2 = self.server.mkdir(inode1.st_ino, b'dir1', self.dir_mode(), some_ctx) (fh, inode2a) = self.server.create(inode2.st_ino, b'file2', self.file_mode(), os.O_RDWR, some_ctx) self.server.write(fh, 0, b'file2 contents') self.server.release(fh) # Another file (fh, inode3) = self.server.create(ROOT_INODE, b'file1', self.file_mode(), os.O_RDWR, some_ctx) self.server.release(fh) # Lock self.server.lock_tree(inode1.st_ino) for i in (inode1.st_ino, inode1a.st_ino, inode2.st_ino, inode2a.st_ino): self.assertTrue(self.server.inodes[i].locked) # Remove with self.assertRaises(FUSEError) as cm: self.server._remove(inode1.st_ino, b'file1', inode1a.st_ino) self.assertEqual(cm.exception.errno, errno.EPERM) # Rename / Replace with self.assertRaises(FUSEError) as cm: self.server.rename(ROOT_INODE, b'file1', inode1.st_ino, b'file2', some_ctx) self.assertEqual(cm.exception.errno, errno.EPERM) with self.assertRaises(FUSEError) as cm: self.server.rename(inode1.st_ino, b'file1', ROOT_INODE, b'file2', some_ctx) self.assertEqual(cm.exception.errno, errno.EPERM) # Open with self.assertRaises(FUSEError) as cm: self.server.open(inode2a.st_ino, os.O_RDWR, some_ctx) self.assertEqual(cm.exception.errno, errno.EPERM) with self.assertRaises(FUSEError) as cm: self.server.open(inode2a.st_ino, os.O_WRONLY, some_ctx) self.assertEqual(cm.exception.errno, errno.EPERM) self.server.release(self.server.open(inode3.st_ino, os.O_WRONLY, some_ctx)) # Write fh = self.server.open(inode2a.st_ino, os.O_RDONLY, some_ctx) with self.assertRaises(FUSEError) as cm: self.server.write(fh, 0, b'foo') self.assertEqual(cm.exception.errno, errno.EPERM) self.server.release(fh) # Create with self.assertRaises(FUSEError) as cm: self.server._create(inode2.st_ino, b'dir1', self.dir_mode(), os.O_RDWR, some_ctx) self.assertEqual(cm.exception.errno, errno.EPERM) # Setattr attr = self.server.getattr(inode2a.st_ino, some_ctx) with self.assertRaises(FUSEError) as cm: 
self.server.setattr(inode2a.st_ino, attr, SetattrFields(update_mtime=True), None, some_ctx) self.assertEqual(cm.exception.errno, errno.EPERM) # xattr with self.assertRaises(FUSEError) as cm: self.server.setxattr(inode2.st_ino, b'name', b'value', some_ctx) self.assertEqual(cm.exception.errno, errno.EPERM) with self.assertRaises(FUSEError) as cm: self.server.removexattr(inode2.st_ino, b'name', some_ctx) self.assertEqual(cm.exception.errno, errno.EPERM) self.server.forget(list(self.server.open_inodes.items())) self.fsck() def test_remove_tree(self): inode1 = self.server.mkdir(ROOT_INODE, b'source', self.dir_mode(), some_ctx) # Create file (fh, inode1a) = self.server.create(inode1.st_ino, b'file1', self.file_mode(), os.O_RDWR, some_ctx) self.server.write(fh, 0, b'file1 contents') self.server.release(fh) # Create subdirectory inode2 = self.server.mkdir(inode1.st_ino, b'dir1', self.dir_mode(), some_ctx) (fh, inode2a) = self.server.create(inode2.st_ino, b'file2', self.file_mode(), os.O_RDWR, some_ctx) self.server.write(fh, 0, b'file2 contents') self.server.release(fh) # Remove self.server.forget(list(self.server.open_inodes.items())) self.server.remove_tree(ROOT_INODE, b'source') for (id_p, name) in ((ROOT_INODE, b'source'), (inode1.st_ino, b'file1'), (inode1.st_ino, b'dir1'), (inode2.st_ino, b'file2')): self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id ' 'WHERE name=? AND parent_inode = ?', (name, id_p))) for id_ in (inode1.st_ino, inode1a.st_ino, inode2.st_ino, inode2a.st_ino): self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (id_,))) self.fsck() s3ql-2.26/tests/t1_serialization.py0000755000175000017500000000303512615000156021004 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' t1_serialization.py - this file is part of S3QL. Copyright © 2014 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' if __name__ == '__main__': import pytest import sys sys.exit(pytest.main([__file__] + sys.argv[1:])) from s3ql.common import ThawError, freeze_basic_mapping, thaw_basic_mapping from s3ql.backends.common import checksum_basic_mapping import pytest from collections import OrderedDict def test_simple(): d = { 'hello': 42, 'world': True, 'data': b'fooxyz', 'str': 'nice' } assert thaw_basic_mapping(freeze_basic_mapping(d)) == d def test_wrong_key(): d = { 'hello': 42, True: False } with pytest.raises(ValueError): freeze_basic_mapping(d) def test_cmplx_value(): d = { 'hello': [1,2] } with pytest.raises(ValueError): freeze_basic_mapping(d) def test_thaw_errors(): buf = freeze_basic_mapping({ 'hello': 'world' }) for s in (buf[1:], buf[:-1], b'"foo"[2]', b'open("/dev/null", "r")', b'"foo".__class__'): with pytest.raises(ThawError): thaw_basic_mapping(s) def test_checksum(): d1 = OrderedDict() d2 = OrderedDict() d1['foo'] = 1 d1['bar'] = None d2['bar'] = None d2['foo'] = 1 assert list(d1.keys()) != list(d2.keys()) assert checksum_basic_mapping(d1) == checksum_basic_mapping(d2) d2['foo'] += 1 assert checksum_basic_mapping(d1) != checksum_basic_mapping(d2) s3ql-2.26/tests/t2_block_cache.py0000755000175000017500000005265113223730045020360 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' t2_block_cache.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. 
''' if __name__ == '__main__': import pytest import sys sys.exit(pytest.main([__file__] + sys.argv[1:])) from contextlib import contextmanager from s3ql.backends import local from s3ql.backends.common import AbstractBackend from s3ql.backends.pool import BackendPool from s3ql.block_cache import BlockCache, QuitSentinel from s3ql.mkfs import init_tables from s3ql.metadata import create_tables from s3ql.database import Connection from s3ql.common import AsyncFn, time_ns import s3ql.block_cache from common import safe_sleep from pytest_checklogs import assert_logs from unittest.mock import patch import errno import os import logging import shutil import stat import tempfile import threading import unittest import queue import pytest log = logging.getLogger(__name__) # A dummy removal queue to monkeypatch around the need for removal and upload # threads class DummyQueue: def __init__(self, cache): self.obj = None self.cache = cache def get_nowait(self): return self.get(block=False) def put(self, obj, timeout=None): self.obj = obj self.cache._removal_loop() return True def get(self, block=True): if self.obj is None: raise queue.Empty() elif self.obj is QuitSentinel: self.obj = None return QuitSentinel else: tmp = self.obj self.obj = QuitSentinel return tmp def qsize(self): return 0 class cache_tests(unittest.TestCase): def setUp(self): self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-') self.backend_pool = BackendPool(lambda: local.Backend('local://' + self.backend_dir, None, None)) self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-') self.max_obj_size = 1024 # Destructors are not guaranteed to run, and we can't unlink # the file immediately because apsw refers to it by name. # Therefore, we unlink the file manually in tearDown() self.dbfile = tempfile.NamedTemporaryFile(delete=False) self.db = Connection(self.dbfile.name) create_tables(self.db) init_tables(self.db) # Create an inode we can work with self.inode = 42 now_ns = time_ns() self.db.execute("INSERT INTO inodes (id,mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) " "VALUES (?,?,?,?,?,?,?,?,?)", (self.inode, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH, os.getuid(), os.getgid(), now_ns, now_ns, now_ns, 1, 32)) cache = BlockCache(self.backend_pool, self.db, self.cachedir + "/cache", self.max_obj_size * 100) self.cache = cache # Monkeypatch around the need for removal and upload threads cache.to_remove = DummyQueue(cache) class DummyDistributor: def put(self, arg, timeout=None): cache._do_upload(*arg) return True cache.to_upload = DummyDistributor() # Tested methods assume that they are called from # file system request handler s3ql.block_cache.lock = MockLock() s3ql.block_cache.lock_released = MockLock() def tearDown(self): self.cache.backend_pool = self.backend_pool self.cache.destroy() shutil.rmtree(self.cachedir) shutil.rmtree(self.backend_dir) self.dbfile.close() os.unlink(self.dbfile.name) def test_thread_hang(self): # Make sure that we don't deadlock if upload threads or removal # threads have died and we try to expire or terminate # Monkeypatch to avoid error messages about uncaught exceptions # in other threads upload_exc = False removal_exc = False def _upload_loop(*a, fn=self.cache._upload_loop): try: return fn(*a) except NotADirectoryError: nonlocal upload_exc upload_exc = True def _removal_loop(*a, fn=self.cache._removal_loop): try: return fn(*a) except NotADirectoryError: nonlocal removal_exc removal_exc = True self.cache._upload_loop = 
_upload_loop self.cache._removal_loop = _removal_loop # Start threads self.cache.init(threads=3) # Create first object (we'll try to remove that) with self.cache.get(self.inode, 0) as fh: fh.write(b'bar wurfz!') self.cache.start_flush() self.cache.wait() # Make sure that upload and removal will fail os.rename(self.backend_dir, self.backend_dir + '-tmp') open(self.backend_dir, 'w').close() # Create second object (we'll try to upload that) with self.cache.get(self.inode, 1) as fh: fh.write(b'bar wurfz number two!') # Schedule a removal self.cache.remove(self.inode, 0) try: # Try to clean-up (implicitly calls expire) with assert_logs('Unable to drop cache, no upload threads left alive', level=logging.ERROR, count=1): with pytest.raises(OSError) as exc_info: self.cache.destroy() assert exc_info.value.errno == errno.ENOTEMPTY assert upload_exc assert removal_exc finally: # Fix backend dir os.unlink(self.backend_dir) os.rename(self.backend_dir + '-tmp', self.backend_dir) # Remove objects from cache and make final destroy # call into no-op. self.cache.remove(self.inode, 1) self.cache.destroy = lambda: None @staticmethod def random_data(len_): with open("/dev/urandom", "rb") as fh: return fh.read(len_) def test_get(self): inode = self.inode blockno = 11 data = self.random_data(int(0.5 * self.max_obj_size)) # Case 1: Object does not exist yet with self.cache.get(inode, blockno) as fh: fh.seek(0) fh.write(data) # Case 2: Object is in cache with self.cache.get(inode, blockno) as fh: fh.seek(0) self.assertEqual(data, fh.read(len(data))) # Case 3: Object needs to be downloaded self.cache.drop() with self.cache.get(inode, blockno) as fh: fh.seek(0) self.assertEqual(data, fh.read(len(data))) def test_expire(self): inode = self.inode # Define the 4 most recently accessed ones most_recent = [7, 11, 10, 8] for i in most_recent: safe_sleep(0.2) with self.cache.get(inode, i) as fh: fh.write(('%d' % i).encode()) # And some others for i in range(20): if i in most_recent: continue with self.cache.get(inode, i) as fh: fh.write(('%d' % i).encode()) # Flush the 2 most recently accessed ones start_flush(self.cache, inode, most_recent[-2]) start_flush(self.cache, inode, most_recent[-3]) # We want to expire 4 entries, 2 of which are already flushed self.cache.cache.max_entries = 16 self.cache.backend_pool = MockBackendPool(self.backend_pool, no_write=2) self.cache.expire() self.cache.backend_pool.verify() self.assertEqual(len(self.cache.cache), 16) for i in range(20): if i in most_recent: self.assertTrue((inode, i) not in self.cache.cache) else: self.assertTrue((inode, i) in self.cache.cache) def test_upload(self): inode = self.inode datalen = int(0.1 * self.cache.cache.max_size) blockno1 = 21 blockno2 = 25 blockno3 = 7 data1 = self.random_data(datalen) data2 = self.random_data(datalen) data3 = self.random_data(datalen) # Case 1: create new object self.cache.backend_pool = MockBackendPool(self.backend_pool, no_write=1) with self.cache.get(inode, blockno1) as fh: fh.seek(0) fh.write(data1) el1 = fh assert self.cache.upload_if_dirty(el1) self.cache.backend_pool.verify() # Case 2: Link new object self.cache.backend_pool = MockBackendPool(self.backend_pool) with self.cache.get(inode, blockno2) as fh: fh.seek(0) fh.write(data1) el2 = fh assert not self.cache.upload_if_dirty(el2) self.cache.backend_pool.verify() # Case 3: Upload old object, still has references self.cache.backend_pool = MockBackendPool(self.backend_pool, no_write=1) with self.cache.get(inode, blockno1) as fh: fh.seek(0) fh.write(data2) assert 
self.cache.upload_if_dirty(el1) self.cache.backend_pool.verify() # Case 4: Upload old object, no references left self.cache.backend_pool = MockBackendPool(self.backend_pool, no_del=1, no_write=1) with self.cache.get(inode, blockno2) as fh: fh.seek(0) fh.write(data3) assert self.cache.upload_if_dirty(el2) self.cache.backend_pool.verify() # Case 5: Link old object, no references left self.cache.backend_pool = MockBackendPool(self.backend_pool, no_del=1) with self.cache.get(inode, blockno2) as fh: fh.seek(0) fh.write(data2) assert not self.cache.upload_if_dirty(el2) self.cache.backend_pool.verify() # Case 6: Link old object, still has references # (Need to create another object first) self.cache.backend_pool = MockBackendPool(self.backend_pool, no_write=1) with self.cache.get(inode, blockno3) as fh: fh.seek(0) fh.write(data1) el3 = fh assert self.cache.upload_if_dirty(el3) self.cache.backend_pool.verify() self.cache.backend_pool = MockBackendPool(self.backend_pool) with self.cache.get(inode, blockno1) as fh: fh.seek(0) fh.write(data1) assert not self.cache.upload_if_dirty(el1) self.cache.drop() self.cache.backend_pool.verify() def test_remove_referenced(self): inode = self.inode datalen = int(0.1 * self.cache.cache.max_size) blockno1 = 21 blockno2 = 24 data = self.random_data(datalen) self.cache.backend_pool = MockBackendPool(self.backend_pool, no_write=1) with self.cache.get(inode, blockno1) as fh: fh.seek(0) fh.write(data) with self.cache.get(inode, blockno2) as fh: fh.seek(0) fh.write(data) self.cache.drop() self.cache.backend_pool.verify() self.cache.backend_pool = MockBackendPool(self.backend_pool) self.cache.remove(inode, blockno1) self.cache.backend_pool.verify() def test_remove_cache(self): inode = self.inode data1 = self.random_data(int(0.4 * self.max_obj_size)) # Case 1: Elements only in cache with self.cache.get(inode, 1) as fh: fh.seek(0) fh.write(data1) self.cache.remove(inode, 1) with self.cache.get(inode, 1) as fh: fh.seek(0) self.assertEqual(fh.read(42), b'') def test_upload_race(self): inode = self.inode blockno = 1 data1 = self.random_data(int(0.4 * self.max_obj_size)) with self.cache.get(inode, blockno) as fh: fh.seek(0) fh.write(data1) # Remove it self.cache.remove(inode, blockno) # Try to upload it, may happen if CommitThread is interrupted self.cache.upload_if_dirty(fh) def test_expire_race(self): # Create element inode = self.inode blockno = 1 data1 = self.random_data(int(0.4 * self.max_obj_size)) with self.cache.get(inode, blockno) as fh: fh.seek(0) fh.write(data1) assert self.cache.upload_if_dirty(fh) # Make sure entry will be expired self.cache.cache.max_entries = 0 # Lock it self.cache._lock_entry(inode, blockno, release_global=True) try: # Start expiration, will block on lock t1 = AsyncFn(self.cache.expire) t1.start() # Start second expiration, will block t2 = AsyncFn(self.cache.expire) t2.start() # Release lock self.cache._unlock_entry(inode, blockno) t1.join_and_raise() t2.join_and_raise() assert len(self.cache.cache) == 0 finally: self.cache._unlock_entry(inode, blockno, release_global=True, noerror=True) def test_parallel_expire(self): # Create elements inode = self.inode for i in range(5): data1 = self.random_data(int(0.4 * self.max_obj_size)) with self.cache.get(inode, i) as fh: fh.write(data1) # We want to expire just one element, but have # several threads running expire() simultaneously self.cache.cache.max_entries = 4 # Lock first element so that we have time to start threads self.cache._lock_entry(inode, 0, release_global=True) try: # Start expiration, 
will block on lock t1 = AsyncFn(self.cache.expire) t1.start() # Start second expiration, will block t2 = AsyncFn(self.cache.expire) t2.start() # Release lock self.cache._unlock_entry(inode, 0) t1.join_and_raise() t2.join_and_raise() assert len(self.cache.cache) == 4 finally: self.cache._unlock_entry(inode, 0, release_global=True, noerror=True) def test_remove_cache_db(self): inode = self.inode data1 = self.random_data(int(0.4 * self.max_obj_size)) # Case 2: Element in cache and db with self.cache.get(inode, 1) as fh: fh.seek(0) fh.write(data1) self.cache.backend_pool = MockBackendPool(self.backend_pool, no_write=1) start_flush(self.cache, inode) self.cache.backend_pool.verify() self.cache.backend_pool = MockBackendPool(self.backend_pool, no_del=1) self.cache.remove(inode, 1) self.cache.backend_pool.verify() with self.cache.get(inode, 1) as fh: fh.seek(0) self.assertEqual(fh.read(42), b'') def test_remove_db(self): inode = self.inode data1 = self.random_data(int(0.4 * self.max_obj_size)) # Case 3: Element only in DB with self.cache.get(inode, 1) as fh: fh.seek(0) fh.write(data1) self.cache.backend_pool = MockBackendPool(self.backend_pool, no_write=1) self.cache.drop() self.cache.backend_pool.verify() self.cache.backend_pool = MockBackendPool(self.backend_pool, no_del=1) self.cache.remove(inode, 1) self.cache.backend_pool.verify() with self.cache.get(inode, 1) as fh: fh.seek(0) self.assertEqual(fh.read(42), b'') def test_issue_241(self): inode = self.inode # Create block with self.cache.get(inode, 0) as fh: fh.write(self.random_data(500)) # "Fill" cache self.cache.cache.max_entries = 0 # Mock locking to reproduce race condition mlock = MockMultiLock(self.cache.mlock) with patch.object(self.cache, 'mlock', mlock): # Start first expiration run, will block in upload thread1 = AsyncFn(self.cache.expire) thread1.start() # Remove the object while the expiration thread waits # for it to become available. 
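# The removal runs in its own thread so that MockMultiLock can hand the entry lock over to it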
thread2 = AsyncFn(self.cache.remove, inode, 0, 1) thread2.start() mlock.yield_to(thread2) thread2.join_and_raise(timeout=10) assert not thread2.is_alive() # Create a new object for the same block with self.cache.get(inode, 0) as fh: fh.write(self.random_data(500)) # Continue first expiration run mlock.yield_to(thread1, block=False) thread1.join_and_raise(timeout=10) assert not thread1.is_alive() class MockMultiLock: def __init__(self, real_mlock): self.cond = real_mlock.cond self.cleared = set() self.real_mlock = real_mlock def yield_to(self, thread, block=True): '''Allow *thread* to proceed''' me = threading.current_thread() log.debug('%s blocked in yield_to(), phase 1', me.name) with self.cond: self.cleared.add(thread) self.cond.notify_all() if not block: return log.debug('%s blocked in yield_to(), phase 2', me.name) with self.cond: if not self.cond.wait_for(lambda: thread not in self.cleared, 10): pytest.fail('Timeout waiting for lock') log.debug('%s completed yield_to()', me.name) @contextmanager def __call__(self, *key): self.acquire(*key) try: yield finally: self.release(*key) def acquire(self, *key, timeout=None): me = threading.current_thread() log.debug('%s blocked in acquire()', me.name) with self.cond: if not self.cond.wait_for(lambda: me in self.cleared, 10): pytest.fail('Timeout waiting for lock') self.real_mlock.locked_keys.add(key) log.debug('%s got lock', me.name) def release(self, *key, noerror=False): me = threading.current_thread() log.debug('%s blocked in release()', me.name) with self.cond: self.cleared.remove(me) self.cond.notify_all() if noerror: self.real_mlock.locked_keys.discard(key) else: self.real_mlock.locked_keys.remove(key) log.debug('%s released lock', me.name) class MockBackendPool(AbstractBackend): has_native_rename = False def __init__(self, backend_pool, no_read=0, no_write=0, no_del=0): super().__init__() self.no_read = no_read self.no_write = no_write self.no_del = no_del self.backend_pool = backend_pool self.backend = backend_pool.pop_conn() self.lock = threading.Lock() def __del__(self): self.backend_pool.push_conn(self.backend) def verify(self): if self.no_read != 0: raise RuntimeError('Got too few open_read calls') if self.no_write != 0: raise RuntimeError('Got too few open_write calls') if self.no_del != 0: raise RuntimeError('Got too few delete calls') @contextmanager def __call__(self): '''Provide connection from pool (context manager)''' with self.lock: yield self def lookup(self, key): return self.backend.lookup(key) def open_read(self, key): self.no_read -= 1 if self.no_read < 0: raise RuntimeError('Got too many open_read calls') return self.backend.open_read(key) def open_write(self, key, metadata=None, is_compressed=False): self.no_write -= 1 if self.no_write < 0: raise RuntimeError('Got too many open_write calls') return self.backend.open_write(key, metadata, is_compressed) def is_temp_failure(self, exc): return self.backend.is_temp_failure(exc) def clear(self): return self.backend.clear() def contains(self, key): return self.backend.contains(key) def delete(self, key, force=False): self.no_del -= 1 if self.no_del < 0: raise RuntimeError('Got too many delete calls') return self.backend.delete(key, force) def list(self, prefix=''): '''List keys in backend Returns an iterator over all keys in the backend. 
''' return self.backend.list(prefix) def copy(self, src, dest, metadata=None): return self.backend.copy(src, dest, metadata) def rename(self, src, dest, metadata=None): return self.backend.rename(src, dest, metadata) def update_meta(self, key, metadata): return self.backend.update_meta(key, metadata) def get_size(self, key): '''Return size of object stored under *key*''' return self.backend.get_size(key) def start_flush(cache, inode, block=None): """Upload data for `inode` This is only for testing purposes, since the method blocks until all current uploads have been completed. """ for el in cache.cache.values(): if el.inode != inode: continue if not el.dirty: continue if block is not None and el.blockno != block: continue cache.upload_if_dirty(el) class MockLock(): def __call__(self): return self def acquire(self, timeout=None): pass def release(self): pass def __enter__(self): pass def __exit__(self, *args): pass s3ql-2.26/tests/t1_retry.py0000775000175000017500000000357213160156175017315 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' t1_retry.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' if __name__ == '__main__': import pytest import sys sys.exit(pytest.main([__file__] + sys.argv[1:])) from s3ql.backends.common import retry, retry_generator from pytest_checklogs import assert_logs import logging class TemporaryProblem(Exception): pass class NthAttempt: def __init__(self, succeed_on=3): self.count = 0 self.succeed_on = succeed_on @staticmethod def is_temp_failure(exc): return isinstance(exc, TemporaryProblem) @retry def do_stuff(self): if self.count == self.succeed_on: return True self.count += 1 raise TemporaryProblem() @retry_generator def list_stuff(self, upto=10, start_after=-1): for i in range(upto): if i <= start_after: continue if i == 2 and self.count < 1: self.count += 1 raise TemporaryProblem if i == 7 and self.count < 4: self.count += 1 raise TemporaryProblem yield i @retry def test_is_retry(self, is_retry=False): assert is_retry == (self.count != 0) if self.count == self.succeed_on: return True self.count += 1 raise TemporaryProblem() def test_retry(): inst = NthAttempt(3) assert inst.do_stuff() def test_retry_generator(): inst = NthAttempt(3) assert list(inst.list_stuff(10)) == list(range(10)) def test_is_retry(): inst = NthAttempt(3) assert inst.test_is_retry() def test_logging(): inst = NthAttempt(6) with assert_logs(r'^Encountered %s \(%s\), retrying ', count=2, level=logging.WARNING): inst.do_stuff() s3ql-2.26/tests/t1_backends.py0000755000175000017500000012075713160156175017725 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' t1_backends.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. 
''' if __name__ == '__main__': import pytest import sys sys.exit(pytest.main([__file__] + sys.argv[1:])) from s3ql.logging import logging import mock_server from dugong import ConnectionClosed from s3ql import backends, BUFSIZE from s3ql.backends.local import Backend as LocalBackend from s3ql.backends.gs import Backend as GSBackend from s3ql.backends.common import (NoSuchObject, CorruptedObjectError) from s3ql.backends.comprenc import ComprencBackend, ObjectNotEncrypted from s3ql.backends.s3c import BadDigestError, OperationAbortedError, HTTPError, S3Error from s3ql.backends.swift import Backend as SwiftBackend from argparse import Namespace from common import get_remote_test_info, NoTestSection, CLOCK_GRANULARITY from pytest_checklogs import assert_logs import s3ql.backends.common import tempfile import re import functools import time import pytest from pytest import raises as assert_raises import shutil import struct import threading log = logging.getLogger(__name__) empty_set = set() def brace_expand(s): hit = re.search(r'^(.*)\{(.+)\}(.*)$', s) if not hit: return [s] (p, e, s) = hit.groups() l = [] for el in e.split(','): l.append(p+el+s) return l def enable_temp_fail(backend): if isinstance(backend, ComprencBackend): backend = backend.backend backend.unittest_info.may_temp_fail = True # It'd be nice if we could use the setup_module hook instead, but # unfortunately that gets executed *after* pytest_generate_tests. def _get_backend_info(): '''Get information about raw backends available for testing''' info = [] # Local backend bi = Namespace() bi.name = 'local' bi.classname = 'local' info.append(bi) # Backends talking to actual remote servers (if available) for name in backends.prefix_map: if name == 'local': # local backend has already been handled continue try: (login, password, storage_url) = get_remote_test_info(name + '-test') except NoTestSection as exc: log.info('Not running remote tests for %s backend: %s', name, exc.reason) continue bi = Namespace() bi.name = 'remote-' + name bi.classname = name bi.storage_url = storage_url bi.login = login bi.password = password info.append(bi) # Backends talking to local mock servers for (request_handler, storage_url) in mock_server.handler_list: name = re.match(r'^([a-zA-Z0-9]+)://', storage_url).group(1) bi = Namespace() bi.name = 'mock-' + name bi.classname = name bi.request_handler = request_handler bi.storage_url = storage_url info.append(bi) return info def pytest_generate_tests(metafunc, _info_cache=[]): if _info_cache: backend_info = _info_cache[0] else: backend_info = _get_backend_info() _info_cache.append(backend_info) if 'backend' not in metafunc.fixturenames: return fn = metafunc.function assert hasattr(fn, 'with_backend') test_params = [] for spec in fn.with_backend.args: (backend_spec, comprenc_spec) = spec.split('/') # Expand compression/encryption specification # raw == don't use ComprencBackend at all # plain = use ComprencBackend without compression and encryption if comprenc_spec == '*': comprenc_kinds = [ 'aes', 'aes+zlib', 'plain', 'zlib', 'bzip2', 'lzma', 'raw' ] else: comprenc_kinds = brace_expand(comprenc_spec) # Expand backend specification if backend_spec == '*': test_bi = backend_info else: test_bi = [] for name in brace_expand(backend_spec): test_bi += [ x for x in backend_info if x.classname == name ] # Filter if fn.with_backend.kwargs.get('require_mock_server', False): test_bi = [ x for x in test_bi if 'request_handler' in x ] if fn.with_backend.kwargs.get('require_immediate_consistency', False): test_bi = [ x 
for x in test_bi if 'request_handler' in x or x.classname in ('local', 'gs') ] for comprenc_kind in comprenc_kinds: for bi in test_bi: test_params.append((bi, comprenc_kind)) metafunc.parametrize("backend", test_params, indirect=True, ids=[ '%s/%s' % (x[0].name, x[1]) for x in test_params] ) @pytest.yield_fixture() def backend(request): (backend_info, comprenc_kind) = request.param if backend_info.classname == 'local': gen = yield_local_backend(backend_info) elif 'request_handler' in backend_info: gen = yield_mock_backend(backend_info) else: gen = yield_remote_backend(backend_info) for raw_backend in gen: if comprenc_kind == 'raw': backend = raw_backend elif comprenc_kind == 'plain': backend = ComprencBackend(None, (None, 6), raw_backend) elif comprenc_kind == 'aes+zlib': backend = ComprencBackend(b'schlurz', ('zlib', 6), raw_backend) elif comprenc_kind == 'aes': backend = ComprencBackend(b'schlurz', (None, 6), raw_backend) else: backend = ComprencBackend(None, (comprenc_kind, 6), raw_backend) backend.unittest_info = raw_backend.unittest_info yield backend def yield_local_backend(bi): backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-') backend = LocalBackend('local://' + backend_dir, None, None) backend.unittest_info = Namespace() backend.unittest_info.retry_time = 0 try: yield backend finally: backend.close() shutil.rmtree(backend_dir) def yield_mock_backend(bi): backend_class = backends.prefix_map[bi.classname] server = mock_server.StorageServer(bi.request_handler, ('localhost', 0)) thread = threading.Thread(target=server.serve_forever) thread.daemon = True thread.start() storage_url = bi.storage_url % { 'host': server.server_address[0], 'port': server.server_address[1] } backend = backend_class(storage_url, 'joe', 'swordfish', { 'no-ssl': True }) # Enable OAuth when using Google Backend if isinstance(backend, GSBackend): backend.use_oauth2 = True backend.hdr_prefix = 'x-goog-' # Normally set in __init__ backend.access_token[backend.password] = 'foobar' backend.unittest_info = Namespace() backend.unittest_info.retry_time = 0 # Mock server should not have temporary failures by default is_temp_failure = backend.is_temp_failure @functools.wraps(backend.is_temp_failure) def wrap(exc): return backend.unittest_info.may_temp_fail and is_temp_failure(exc) backend.is_temp_failure = wrap backend.unittest_info.may_temp_fail = False try: yield backend finally: backend.close() server.shutdown() server.server_close() def yield_remote_backend(bi, _ctr=[0]): # Add timestamp + ctr to prefix so we don't have to deal with cruft from # previous tests _ctr[0] += 1 storage_url = bi.storage_url if storage_url[-1] != '/': storage_url += '/' storage_url += '%d_%d/' % (time.time(), _ctr[0]) backend_class = backends.prefix_map[bi.classname] backend = backend_class(storage_url, bi.login, bi.password, {}) backend.unittest_info = Namespace() backend.unittest_info.retry_time = 600 try: yield backend finally: backend.clear() backend.close() def newname(name_counter=[0]): '''Return random, unique string''' name_counter[0] += 1 return "random\\'name' %d" % name_counter[0] def newvalue(): return newname().encode() def fetch_object(backend, key, sleep_time=1): '''Read data and metadata for *key* from *backend* If `NoSuchObject` exception is encountered, retry for up to *retry_time* seconds. 
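(*sleep_time* is the poll interval between attempts; the maximum retry time is taken from ``backend.unittest_info.retry_time``.)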
''' waited=0 retry_time = backend.unittest_info.retry_time while True: try: return backend.fetch(key) except NoSuchObject: if waited >= retry_time: raise time.sleep(sleep_time) waited += sleep_time def lookup_object(backend, key, sleep_time=1): '''Read metadata for *key* from *backend* If `NoSuchObject` exception is encountered, retry for up to *retry_time* seconds. ''' retry_time = backend.unittest_info.retry_time waited=0 while True: try: return backend.lookup(key) except NoSuchObject: if waited >= retry_time: raise time.sleep(sleep_time) waited += sleep_time def assert_in_index(backend, keys, sleep_time=1): '''Assert that *keys* will appear in index Raises assertion error if *keys* do not show up within *retry_time* seconds. ''' waited=0 retry_time = backend.unittest_info.retry_time keys = set(keys) # copy while True: index = set(backend.list()) if not keys - index: return elif waited >= retry_time: assert keys - index == empty_set time.sleep(sleep_time) waited += sleep_time def assert_not_in_index(backend, keys, sleep_time=1): '''Assert that *keys* will disappear from index Raises assertion error if *keys* do not disappear within *retry_time* seconds. ''' retry_time = backend.unittest_info.retry_time waited=0 keys = set(keys) # copy while True: index = set(backend.list()) if keys - index == keys: return elif waited >= retry_time: assert keys - index == keys time.sleep(sleep_time) waited += sleep_time def assert_not_readable(backend, key, sleep_time=1): '''Assert that *key* does not exist in *backend* Asserts that a `NoSuchObject` exception will be raised when trying to read the object after at most *retry_time* seconds. ''' waited=0 retry_time = backend.unittest_info.retry_time while True: try: backend.fetch(key) except NoSuchObject: return if waited >= retry_time: pytest.fail('object %s still present in backend' % key) time.sleep(sleep_time) waited += sleep_time @pytest.mark.with_backend('*/*') def test_read_write(backend): key = newname() value = newvalue() metadata = { 'jimmy': 'jups@42' } assert key not in backend assert_raises(NoSuchObject, backend.lookup, key) assert_raises(NoSuchObject, backend.fetch, key) def do_write(fh): fh.write(value) backend.perform_write(do_write, key, metadata) assert_in_index(backend, [key]) (value2, metadata2) = fetch_object(backend, key) assert value == value2 assert metadata == metadata2 assert lookup_object(backend, key) == metadata @pytest.mark.with_backend('swift/raw') def test_issue114(backend, monkeypatch): key = newname() value = newvalue() monkeypatch.setitem(backend.options, 'disable-expect100', True) backend[key] = value assert_in_index(backend, [key]) @pytest.mark.with_backend('*/raw', 'local/{plain,aes,zlib}') def test_complex_meta(backend): key = newname() value = newvalue() metadata = { 'com\nplex: key': 42, 'farp_': False, 'non-farp': True, 'blu%rz': 23.283475, 'görp': b'heelo', 'sch.al': 'gorroobalp\nfurrö!', 'lo-ng': 'foobarz' * 40 } assert key not in backend backend.store(key, value, metadata) (value2, metadata2) = fetch_object(backend, key) assert value == value2 assert metadata == metadata2 assert lookup_object(backend, key) == metadata # No need to run with different encryption/compression settings, # ComprencBackend should just forward this 1:1 to the raw backend. 
@pytest.mark.with_backend('*/aes') def test_list(backend): keys = ([ 'prefixa' + newname() for dummy in range(6) ] + [ 'prefixb' + newname() for dummy in range(6) ]) values = [ newvalue() for dummy in range(12) ] assert set(backend.list()) == empty_set for i in range(12): backend[keys[i]] = values[i] assert_in_index(backend, keys) assert set(backend.list('prefixa')) == set(keys[:6]) assert set(backend.list('prefixb')) == set(keys[6:]) assert set(backend.list('prefixc')) == empty_set @pytest.mark.with_backend('*/raw', 'local/{plain,aes,zlib,aes+zlib}', require_immediate_consistency=True) def test_readslowly(backend): key = newname() value = newvalue() metadata = { 'jimmy': 'jups@42' } backend.store(key, value, metadata) s3ql.backends.common.BUFSIZE = 1 try: with backend.open_read(key) as fh: # Force slow reading from underlying layer if hasattr(fh, 'fh'): def read_slowly(size, *, real_read=fh.fh.read): return real_read(1) fh.fh.read = read_slowly buf = [] while True: buf.append(fh.read(1)) if not buf[-1]: break value2 = b''.join(buf) metadata2 = fh.metadata finally: s3ql.backends.common.BUFSIZE = BUFSIZE assert value == value2 assert metadata == metadata2 # No need to run with different encryption/compression settings, # ComprencBackend should just forward this 1:1 to the raw backend. @pytest.mark.with_backend('*/aes') def test_delete(backend): key = newname() value = newvalue() backend[key] = value # Wait for object to become visible assert_in_index(backend, [key]) fetch_object(backend, key) # Delete it del backend[key] # Make sure that it's truly gone assert_not_in_index(backend, [key]) assert_not_readable(backend, key) # No need to run with different encryption/compression settings, # ComprencBackend should just forward this 1:1 to the raw backend. @pytest.mark.with_backend('*/aes') def test_delete_multi(backend): keys = [ newname() for _ in range(30) ] value = newvalue() # Create objects for key in keys: backend[key] = value # Wait for them assert_in_index(backend, keys) for key in keys: fetch_object(backend, key) # Delete half of them # We don't use force=True but catch the exception to increase the # chance that some existing objects are not deleted because of the # error. to_delete = keys[::2] to_delete.insert(7, 'not_existing') try: backend.delete_multi(to_delete) except NoSuchObject: pass # Without full consistency, deleting a non-existing object # may not give an error # Swift backend does not return a list of actually deleted objects # so to_delete will always be empty for Swift and this assertion fails if not isinstance(backend.backend, SwiftBackend): assert backend.unittest_info.retry_time or len(to_delete) > 0 deleted = set(keys[::2]) - set(to_delete) assert len(deleted) > 0 remaining = set(keys) - deleted assert_not_in_index(backend, deleted) for key in deleted: assert_not_readable(backend, key) assert_in_index(backend, remaining) for key in remaining: fetch_object(backend, key) # No need to run with different encryption/compression settings, # ComprencBackend should just forward this 1:1 to the raw backend.
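# Sketch for illustration only (not part of the original suite): the
# bookkeeping in test_delete_multi above works because delete_multi() is
# expected to remove successfully deleted keys from the list it is given, so
# whatever is left in to_delete after an exception are the keys that were
# *not* confirmed as deleted.  Used on its own, that contract looks like:
#
#     to_delete = ['key1', 'key2', 'does-not-exist']   # placeholder keys
#     try:
#         backend.delete_multi(to_delete)
#     except NoSuchObject:
#         pass                        # some keys may remain in to_delete
#     not_deleted = set(to_delete)    # keys not confirmed as deleted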
@pytest.mark.with_backend('*/aes') def test_clear(backend): keys = [ newname() for _ in range(5) ] value = newvalue() # Create objects for key in keys: backend[key] = value # Wait for them assert_in_index(backend, keys) for key in keys: fetch_object(backend, key) # Delete everything backend.clear() assert_not_in_index(backend, keys) for key in keys: assert_not_readable(backend, key) @pytest.mark.with_backend('*/raw', 'local/{plain,aes,zlib}') def test_copy(backend): key1 = newname() key2 = newname() value = newvalue() metadata = { 'jimmy': 'jups@42' } backend.store(key1, value, metadata) # Wait for object to become visible assert_in_index(backend, [key1]) fetch_object(backend, key1) assert_not_in_index(backend, [key2]) assert_not_readable(backend, key2) backend.copy(key1, key2) assert_in_index(backend, [key2]) (value2, metadata2) = fetch_object(backend, key2) assert value == value2 assert metadata == metadata2 @pytest.mark.with_backend('*/raw') def test_copy_special(backend): key1 = 'with_+_char/orig' key2 = 'with_+_char/dest' value = b'Just a couple of random bytes' backend.store(key1, value) assert_not_in_index(backend, [key2]) assert_not_readable(backend, key2) # Wait for object to become visible assert_in_index(backend, [key1]) fetch_object(backend, key1) backend.copy(key1, key2) assert_in_index(backend, [key2]) value2 = backend[key2] assert value == value2 @pytest.mark.with_backend('*/raw', 'local/{aes,zlib}') def test_copy_newmeta(backend): key1 = newname() key2 = newname() value = newvalue() meta1 = { 'jimmy': 'jups@42' } meta2 = { 'jiy': 'jfobauske42' } backend.store(key1, value, meta1) # Wait for object to become visible assert_in_index(backend, [key1]) fetch_object(backend, key1) assert_not_in_index(backend, [key2]) assert_not_readable(backend, key2) backend.copy(key1, key2, meta2) assert_in_index(backend, [key2]) (value2, meta) = fetch_object(backend, key2) assert value == value2 assert meta == meta2 @pytest.mark.with_backend('*/raw', 'local/{aes,zlib}') def test_rename(backend): key1 = newname() key2 = newname() value = newvalue() metadata = { 'jimmy': 'jups@42' } backend.store(key1, value, metadata) # Wait for object to become visible assert_in_index(backend, [key1]) fetch_object(backend, key1) assert_not_in_index(backend, [key2]) assert_not_readable(backend, key2) backend.rename(key1, key2) assert_in_index(backend, [key2]) (value2, metadata2) = fetch_object(backend, key2) assert value == value2 assert metadata == metadata2 assert_not_in_index(backend, [key1]) assert_not_readable(backend, key1) @pytest.mark.with_backend('*/raw', 'local/{aes,zlib}') def test_rename_newmeta(backend): key1 = newname() key2 = newname() value = newvalue() meta1 = { 'jimmy': 'jups@42' } meta2 = { 'apple': 'potatoes' } backend.store(key1, value, meta1) # Wait for object to become visible assert_in_index(backend, [key1]) fetch_object(backend, key1) assert_not_in_index(backend, [key2]) assert_not_readable(backend, key2) backend.rename(key1, key2, meta2) assert_in_index(backend, [key2]) (value2, meta) = fetch_object(backend, key2) assert value == value2 assert meta == meta2 @pytest.mark.with_backend('*/raw', 'local/{aes,zlib}') def test_update_meta(backend): key = 'simple' value = b'not too hard' meta1 = { 'jimmy': 'jups@42' } meta2 = { 'apple': 'potatoes' } backend.store(key, value, meta1) # Wait for object to become visible assert_in_index(backend, [key]) fetch_object(backend, key) backend.update_meta(key, meta2) # Wait for updated metadata waited=0 sleep_time = 1 while True: (value2, meta) = 
fetch_object(backend, key) if meta != meta1: break elif waited >= backend.unittest_info.retry_time: pytest.fail('metadata for %s not updated after %d seconds' % (key, waited)) time.sleep(sleep_time) waited += sleep_time assert value == value2 assert meta == meta2 # Choice of compression algorithm should not make a difference @pytest.mark.with_backend('s3c/{raw,aes,zlib}', require_mock_server=True) def test_copy_error(backend, monkeypatch): value = b'hello there, let us see whats going on' key1 = 'object-key1' key2 = 'object-key2' backend[key1] = value # Monkeypatch request handler to produce error handler_class = mock_server.S3CRequestHandler def do_PUT(self, real_PUT=handler_class.do_PUT, count=[0]): count[0] += 1 if count[0] > 3: return real_PUT(self) else: self.send_error(200, code='OperationAborted') monkeypatch.setattr(handler_class, 'do_PUT', do_PUT) assert_raises(OperationAbortedError, backend.copy, key1, key2) enable_temp_fail(backend) backend.copy(key1, key2) @pytest.mark.with_backend('local/{aes,aes+zlib,zlib,bzip2,lzma}') def test_corruption(backend): plain_backend = backend.backend # Create compressed object key = newname() value = newvalue() backend[key] = value # Retrieve compressed data (compr_value, meta) = fetch_object(plain_backend, key) compr_value = bytearray(compr_value) # Overwrite with corrupted data # (this needs immediate consistency) compr_value[-3:] = b'000' plain_backend.store(key, compr_value, meta) with pytest.raises(CorruptedObjectError) as exc: fetch_object(backend, key) if backend.passphrase is None: # compression only assert exc.value.str == 'Invalid compressed stream' else: assert exc.value.str == 'HMAC mismatch' @pytest.mark.with_backend('local/{aes,aes+zlib,zlib,bzip2,lzma}') def test_extra_data(backend): plain_backend = backend.backend # Create compressed object key = newname() value = newvalue() backend[key] = value # Retrieve compressed data (compr_value, meta) = fetch_object(plain_backend, key) compr_value = bytearray(compr_value) # Overwrite with extended data # (this needs immediate consistency) compr_value += b'000' plain_backend.store(key, compr_value, meta) with pytest.raises(CorruptedObjectError) as exc: fetch_object(backend, key) if backend.passphrase is None: # compression only assert exc.value.str == 'Data after end of compressed stream' else: assert exc.value.str == 'Extraneous data at end of object' @pytest.mark.with_backend('*/{raw,plain,aes,aes+zlib,zlib}') def test_multi_packet(backend): '''Write and read packet extending over multiple chunks''' key = newname() def do_write(fh): for i in range(5): fh.write(b'\xFF' * BUFSIZE) backend.perform_write(do_write, key) def do_read(fh): buf = bytearray() while True: tmp = fh.read(BUFSIZE//2) if not tmp: break buf += tmp return buf res = backend.perform_read(do_read, key) assert res == b'\xFF' * (5*BUFSIZE) @pytest.mark.with_backend('local/{raw,plain,aes,aes+zlib,zlib}') def test_issue431(backend): key = newname() hdr_len = struct.calcsize(b' 3: return real_GET(self) else: self.send_error(503, code='OperationAborted') monkeypatch.setattr(handler_class, 'do_GET', do_GET) assert_raises(OperationAbortedError, backend.fetch, value) enable_temp_fail(backend) assert backend[key] == value @pytest.mark.with_backend('s3c/{raw,aes+zlib}', require_mock_server=True) def test_head_s3error(backend, monkeypatch): value = b'hello there, let us see whats going on' key = 'quote' meta = {'bar': 42, 'foo': 42**2} backend.store(key, value, metadata=meta) # Monkeypatch request handler to produce 3 errors 
handler_class = mock_server.S3CRequestHandler def do_HEAD(self, real_HEAD=handler_class.do_HEAD, count=[0]): count[0] += 1 if count[0] > 3: return real_HEAD(self) else: self.send_error(503, code='OperationAborted') monkeypatch.setattr(handler_class, 'do_HEAD', do_HEAD) with pytest.raises(HTTPError) as exc: backend.lookup(key) assert exc.value.status == 503 enable_temp_fail(backend) assert backend.lookup(key) == meta @pytest.mark.with_backend('s3c/raw', require_mock_server=True) def test_delete_s3error(backend, monkeypatch): value = b'hello there, let us see whats going on' key = 'quote' backend[key] = value # Monkeypatch request handler to produce 3 errors handler_class = mock_server.S3CRequestHandler def do_DELETE(self, real_DELETE=handler_class.do_DELETE, count=[0]): count[0] += 1 if count[0] > 3: return real_DELETE(self) else: self.send_error(503, code='OperationAborted') monkeypatch.setattr(handler_class, 'do_DELETE', do_DELETE) assert_raises(OperationAbortedError, backend.delete, key) enable_temp_fail(backend) backend.delete(key) @pytest.mark.with_backend('s3c/raw', require_mock_server=True) def test_backoff(backend, monkeypatch): value = b'hello there, let us see whats going on' key = 'quote' backend[key] = value # Monkeypatch request handler handler_class = mock_server.S3CRequestHandler timestamps = [] def do_DELETE(self, real_DELETE=handler_class.do_DELETE): timestamps.append(time.time()) if len(timestamps) < 3: self.send_error(503, code='SlowDown', extra_headers={'Retry-After': '1'}) else: return real_DELETE(self) monkeypatch.setattr(handler_class, 'do_DELETE', do_DELETE) enable_temp_fail(backend) backend.delete(key) assert timestamps[1] - timestamps[0] > 1 - CLOCK_GRANULARITY assert timestamps[2] - timestamps[1] > 1 - CLOCK_GRANULARITY assert timestamps[2] - timestamps[0] < 10 @pytest.mark.with_backend('s3c/raw', require_mock_server=True) def test_httperror(backend, monkeypatch): value = b'hello there, let us see whats going on' key = 'quote' backend[key] = value # Monkeypatch request handler to produce a HTTP Error handler_class = mock_server.S3CRequestHandler def do_DELETE(self, real_DELETE=handler_class.do_DELETE, count=[0]): count[0] += 1 if count[0] >= 3: return real_DELETE(self) content = "I'm a proxy, and I messed up!".encode('utf-8') self.send_response(502, "Bad Gateway") self.send_header("Content-Type", 'text/plain; charset="utf-8"') self.send_header("Content-Length", str(len(content))) self.end_headers() if self.command != 'HEAD': self.wfile.write(content) monkeypatch.setattr(handler_class, 'do_DELETE', do_DELETE) assert_raises(HTTPError, backend.delete, key) enable_temp_fail(backend) backend.delete(key) @pytest.mark.with_backend('s3c/{raw,aes+zlib}', require_mock_server=True) def test_put_s3error_early(backend, monkeypatch): '''Fail after expect-100''' data = b'hello there, let us see whats going on' key = 'borg' # Monkeypatch request handler to produce 3 errors handler_class = mock_server.S3CRequestHandler def handle_expect_100(self, real=handler_class.handle_expect_100, count=[0]): count[0] += 1 if count[0] > 3: return real(self) else: self.send_error(503, code='OperationAborted') return False monkeypatch.setattr(handler_class, 'handle_expect_100', handle_expect_100) fh = backend.open_write(key) fh.write(data) assert_raises(OperationAbortedError, fh.close) enable_temp_fail(backend) fh.close() @pytest.mark.with_backend('s3c/{raw,aes+zlib}', require_mock_server=True) def test_put_s3error_med(backend, monkeypatch): '''Fail as soon as data is received''' data = b'hello 
there, let us see whats going on' key = 'borg' # Monkeypatch request handler to produce 3 errors handler_class = mock_server.S3CRequestHandler def do_PUT(self, real_PUT=handler_class.do_PUT, count=[0]): count[0] += 1 # Note: every time we return an error, the request will be retried # *twice*: once because of the error, and a second time because the # connection has been closed by the server. if count[0] > 2: return real_PUT(self) else: self.send_error(503, code='OperationAborted') # Since we don't read all the data, we have to close # the connection self.close_connection = True monkeypatch.setattr(handler_class, 'do_PUT', do_PUT) fh = backend.open_write(key) fh.write(data) assert_raises(OperationAbortedError, fh.close) enable_temp_fail(backend) fh.close() @pytest.mark.with_backend('s3c/{raw,aes+zlib}', require_mock_server=True) def test_put_s3error_late(backend, monkeypatch): '''Fail after reading all data''' data = b'hello there, let us see whats going on' key = 'borg' # Monkeypatch request handler to produce 3 errors handler_class = mock_server.S3CRequestHandler def do_PUT(self, real_PUT=handler_class.do_PUT, count=[0]): count[0] += 1 if count[0] > 3: return real_PUT(self) else: self.rfile.read(int(self.headers['Content-Length'])) self.send_error(503, code='OperationAborted') monkeypatch.setattr(handler_class, 'do_PUT', do_PUT) fh = backend.open_write(key) fh.write(data) assert_raises(OperationAbortedError, fh.close) enable_temp_fail(backend) fh.close() @pytest.mark.with_backend('s3c/{raw,aes+zlib}', require_mock_server=True) def test_issue58(backend, monkeypatch): '''Send error while client is sending data''' # Monkeypatch request handler handler_class = mock_server.S3CRequestHandler def do_PUT(self, real=handler_class.do_PUT, count=[0]): count[0] += 1 if count[0] > 1: return real(self) # Read half the data self.rfile.read(min(BUFSIZE, int(self.headers['Content-Length'])//2)) # Then generate an error and close the connection self.send_error(401, code='MalformedXML') self.close_connection = True monkeypatch.setattr(handler_class, 'do_PUT', do_PUT) # Write a big object. We need to write random data, or # compression while make the payload too small with pytest.raises(S3Error) as exc_info: with backend.open_write('borg') as fh, \ open('/dev/urandom', 'rb') as rnd: for _ in range(5): fh.write(rnd.read(BUFSIZE)) assert exc_info.value.code == 'MalformedXML' enable_temp_fail(backend) fh.close() @pytest.mark.with_backend('s3c/{raw,aes+zlib}', require_mock_server=True) def test_issue58_b(backend, monkeypatch): '''Close connection while client is sending data''' # Monkeypatch request handler handler_class = mock_server.S3CRequestHandler def do_PUT(self, real=handler_class.do_PUT, count=[0]): count[0] += 1 if count[0] > 1: return real(self) # Read half the data self.rfile.read(min(BUFSIZE, int(self.headers['Content-Length'])//2)) # Then close the connection silently self.close_connection = True monkeypatch.setattr(handler_class, 'do_PUT', do_PUT) # Write a big object. 
We need to write random data, or # compression while make the payload too small with pytest.raises(ConnectionClosed): with backend.open_write('borg') as fh, \ open('/dev/urandom', 'rb') as rnd: for _ in range(5): fh.write(rnd.read(BUFSIZE)) enable_temp_fail(backend) fh.close() @pytest.mark.with_backend('gs/{raw,aes+zlib}', require_mock_server=True) def test_expired_token_get(backend, monkeypatch): '''Test handling of expired OAuth token''' key = 'borg' data = b'hello there, let us see whats going on' # Monkeypatch backend class to check if token is refreshed token_refreshed = False def _get_access_token(self): nonlocal token_refreshed token_refreshed = True self.access_token[self.password] = 'foobar' monkeypatch.setattr(GSBackend, '_get_access_token', _get_access_token) # Store some data backend[key] = data # Monkeypatch request handler to produce error handler_class = mock_server.GSRequestHandler def do_GET(self, real=handler_class.do_GET, count=[0]): count[0] += 1 if count[0] > 1: return real(self) else: self.send_error(401, code='AuthenticationRequired') monkeypatch.setattr(handler_class, 'do_GET', do_GET) token_refreshed = False assert backend[key] == data assert token_refreshed @pytest.mark.with_backend('gs/{raw,aes+zlib}', require_mock_server=True) def test_expired_token_put(backend, monkeypatch): '''Test handling of expired OAuth token''' key = 'borg' data = b'hello there, let us see whats going on' # Monkeypatch backend class to check if token is refreshed token_refreshed = False def _get_access_token(self): nonlocal token_refreshed token_refreshed = True self.access_token[self.password] = 'foobar' monkeypatch.setattr(GSBackend, '_get_access_token', _get_access_token) # Monkeypatch request handler to produce error handler_class = mock_server.GSRequestHandler def do_PUT(self, real=handler_class.do_PUT, count=[0]): count[0] += 1 if count[0] > 1: return real(self) else: self.rfile.read(int(self.headers['Content-Length'])) self.send_error(401, code='AuthenticationRequired') monkeypatch.setattr(handler_class, 'do_PUT', do_PUT) token_refreshed = False backend[key] = data assert token_refreshed @pytest.mark.with_backend('s3c/{raw,aes+zlib}', require_mock_server=True) def test_conn_abort(backend, monkeypatch): '''Close connection while sending data''' data = b'hello there, let us see whats going on' key = 'borg' backend[key] = data # Monkeypatch request handler handler_class = mock_server.S3CRequestHandler def send_data(self, data, count=[0]): count[0] += 1 if count[0] >= 3: self.wfile.write(data) else: self.wfile.write(data[:len(data)//2]) self.close_connection = True monkeypatch.setattr(handler_class, 'send_data', send_data) with pytest.raises(ConnectionClosed): with assert_logs("^Object closed prematurely, can't check MD5", count=1, level=logging.WARNING): backend.fetch(key) enable_temp_fail(backend) assert backend[key] == data s3ql-2.26/tests/pytest.ini0000644000175000017500000000034413223730045017202 0ustar nikrationikratio00000000000000[pytest] addopts = --verbose --assert=rewrite --exitfirst --tb=native python_files = t?_*.py log_cli_level = 100 log_format = %(asctime)s.%(msecs)03d %(threadName)s %(name)s.%(funcName)s: %(message)s log_date_format = %H:%M:%S s3ql-2.26/tests/t4_fuse.py0000775000175000017500000003015213160156175017107 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' t4_fuse.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. 
''' if __name__ == '__main__': import pytest import sys sys.exit(pytest.main([__file__] + sys.argv[1:])) from os.path import basename from s3ql import CTRL_NAME from s3ql.common import path2bytes from common import retry, skip_if_no_fusermount import filecmp import llfuse import os.path import shutil import platform import stat import subprocess import tempfile import pytest from pytest import raises as assert_raises # For debugging USE_VALGRIND = False @pytest.mark.usefixtures('s3ql_cmd_argv', 'pass_reg_output') class TestFuse: def setup_method(self, method): if platform.system() != 'Darwin': skip_if_no_fusermount() # We need this to test multi block operations self.src = __file__ if os.path.getsize(self.src) < 1048: raise RuntimeError("test file %s should be bigger than 1 KiB" % self.src) self.mnt_dir = tempfile.mkdtemp(prefix='s3ql-mnt-') self.cache_dir = tempfile.mkdtemp(prefix='s3ql-cache-') self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-') self.storage_url = 'local://%s/' % (self.backend_dir,) self.passphrase = 'oeut3d' self.backend_login = None self.backend_passphrase = None self.mount_process = None self.name_cnt = 0 def mkfs(self, max_obj_size=500): argv = (self.s3ql_cmd_argv('mkfs.s3ql') + [ '-L', 'test fs', '--max-obj-size', str(max_obj_size), '--cachedir', self.cache_dir, '--quiet', '--authfile', '/dev/null', self.storage_url ]) if self.passphrase is None: argv.append('--plain') proc = subprocess.Popen(argv, stdin=subprocess.PIPE, universal_newlines=True) if self.backend_login is not None: print(self.backend_login, file=proc.stdin) print(self.backend_passphrase, file=proc.stdin) if self.passphrase is not None: print(self.passphrase, file=proc.stdin) print(self.passphrase, file=proc.stdin) proc.stdin.close() assert proc.wait() == 0 self.reg_output(r'^WARNING: Maximum object sizes less than ' '1 MiB will degrade performance\.$', count=1) def mount(self, expect_fail=None): cmd = (self.s3ql_cmd_argv('mount.s3ql') + ["--fg", '--cachedir', self.cache_dir, '--log', 'none', '--compress', 'zlib', '--quiet', self.storage_url, self.mnt_dir, '--authfile', '/dev/null' ]) self.mount_process = subprocess.Popen(cmd, stdin=subprocess.PIPE, universal_newlines=True) if self.backend_login is not None: print(self.backend_login, file=self.mount_process.stdin) print(self.backend_passphrase, file=self.mount_process.stdin) if self.passphrase is not None: print(self.passphrase, file=self.mount_process.stdin) self.mount_process.stdin.close() if expect_fail: retry(30, self.mount_process.poll) assert self.mount_process.returncode == expect_fail else: def poll(): if os.path.ismount(self.mnt_dir): return True assert self.mount_process.poll() is None retry(30, poll) def umount(self): with open('/dev/null', 'wb') as devnull: retry(5, lambda: subprocess.call(['fuser', '-m', self.mnt_dir], stdout=devnull, stderr=devnull) == 1) proc = subprocess.Popen(self.s3ql_cmd_argv('umount.s3ql') + ['--quiet', self.mnt_dir]) retry(90, lambda : proc.poll() is not None) assert proc.wait() == 0 assert self.mount_process.poll() == 0 assert not os.path.ismount(self.mnt_dir) def fsck(self): # Use fsck to test authinfo reading with tempfile.NamedTemporaryFile('wt') as authinfo_fh: print('[entry1]', 'storage-url: %s' % self.storage_url[:6], 'fs-passphrase: clearly wrong', 'backend-login: bla', 'backend-password: not much better', '', '[entry2]', 'storage-url: %s' % self.storage_url, 'fs-passphrase: %s' % self.passphrase, 'backend-login: %s' % self.backend_login, 'backend-password:%s' % self.backend_passphrase, 
file=authinfo_fh, sep='\n') authinfo_fh.flush() proc = subprocess.Popen(self.s3ql_cmd_argv('fsck.s3ql') + [ '--force', '--quiet', '--log', 'none', '--cachedir', self.cache_dir, '--authfile', authinfo_fh.name, self.storage_url ], stdin=subprocess.PIPE, universal_newlines=True) proc.stdin.close() assert proc.wait() == 0 def teardown_method(self, method): with open('/dev/null', 'wb') as devnull: if platform.system() == 'Darwin': subprocess.call(['umount', '-l', self.mnt_dir], stderr=devnull) else: subprocess.call(['fusermount', '-z', '-u', self.mnt_dir], stderr=devnull) os.rmdir(self.mnt_dir) # Give mount process a little while to terminate if self.mount_process is not None: try: retry(90, lambda : self.mount_process.poll() is not None) except TimeoutError: # Ignore errors during teardown pass shutil.rmtree(self.cache_dir) shutil.rmtree(self.backend_dir) def test(self): # Run all tests in same environment, mounting and umounting # just takes too long otherwise self.mkfs() self.mount() self.tst_chown() self.tst_link() self.tst_mkdir() self.tst_mknod() self.tst_readdir() self.tst_statvfs() self.tst_symlink() self.tst_truncate() self.tst_truncate_nocache() self.tst_write() self.tst_bug382() self.umount() self.fsck() def newname(self): self.name_cnt += 1 return "s3ql_%d" % self.name_cnt def tst_mkdir(self): dirname = self.newname() fullname = self.mnt_dir + "/" + dirname os.mkdir(fullname) fstat = os.stat(fullname) assert stat.S_ISDIR(fstat.st_mode) assert llfuse.listdir(fullname) == [] assert fstat.st_nlink == 1 assert dirname in llfuse.listdir(self.mnt_dir) os.rmdir(fullname) assert_raises(FileNotFoundError, os.stat, fullname) assert dirname not in llfuse.listdir(self.mnt_dir) def tst_symlink(self): linkname = self.newname() fullname = self.mnt_dir + "/" + linkname os.symlink("/imaginary/dest", fullname) fstat = os.lstat(fullname) assert stat.S_ISLNK(fstat.st_mode) assert os.readlink(fullname) == "/imaginary/dest" assert fstat.st_nlink == 1 assert linkname in llfuse.listdir(self.mnt_dir) os.unlink(fullname) assert_raises(FileNotFoundError, os.lstat, fullname) assert linkname not in llfuse.listdir(self.mnt_dir) def tst_mknod(self): filename = os.path.join(self.mnt_dir, self.newname()) src = self.src shutil.copyfile(src, filename) fstat = os.lstat(filename) assert stat.S_ISREG(fstat.st_mode) assert fstat.st_nlink == 1 assert basename(filename) in llfuse.listdir(self.mnt_dir) assert filecmp.cmp(src, filename, False) os.unlink(filename) assert_raises(FileNotFoundError, os.stat, filename) assert basename(filename) not in llfuse.listdir(self.mnt_dir) def tst_chown(self): filename = os.path.join(self.mnt_dir, self.newname()) os.mkdir(filename) fstat = os.lstat(filename) uid = fstat.st_uid gid = fstat.st_gid uid_new = uid + 1 os.chown(filename, uid_new, -1) fstat = os.lstat(filename) assert fstat.st_uid == uid_new assert fstat.st_gid == gid gid_new = gid + 1 os.chown(filename, -1, gid_new) fstat = os.lstat(filename) assert fstat.st_uid == uid_new assert fstat.st_gid == gid_new os.rmdir(filename) assert_raises(FileNotFoundError, os.stat, filename) assert basename(filename) not in llfuse.listdir(self.mnt_dir) def tst_write(self): name = os.path.join(self.mnt_dir, self.newname()) src = self.src shutil.copyfile(src, name) assert filecmp.cmp(name, src, False) # Don't unlink file, we want to see if cache flushing # works def tst_statvfs(self): os.statvfs(self.mnt_dir) def tst_link(self): name1 = os.path.join(self.mnt_dir, self.newname()) name2 = os.path.join(self.mnt_dir, self.newname()) src = self.src 
shutil.copyfile(src, name1) assert filecmp.cmp(name1, src, False) os.link(name1, name2) fstat1 = os.lstat(name1) fstat2 = os.lstat(name2) assert fstat1 == fstat2 assert fstat1.st_nlink == 2 assert basename(name2) in llfuse.listdir(self.mnt_dir) assert filecmp.cmp(name1, name2, False) os.unlink(name2) fstat1 = os.lstat(name1) assert fstat1.st_nlink == 1 os.unlink(name1) def tst_readdir(self): dir_ = os.path.join(self.mnt_dir, self.newname()) file_ = dir_ + "/" + self.newname() subdir = dir_ + "/" + self.newname() subfile = subdir + "/" + self.newname() src = self.src os.mkdir(dir_) shutil.copyfile(src, file_) os.mkdir(subdir) shutil.copyfile(src, subfile) listdir_is = llfuse.listdir(dir_) listdir_is.sort() listdir_should = [ basename(file_), basename(subdir) ] listdir_should.sort() assert listdir_is == listdir_should os.unlink(file_) os.unlink(subfile) os.rmdir(subdir) os.rmdir(dir_) def tst_truncate(self): filename = os.path.join(self.mnt_dir, self.newname()) src = self.src shutil.copyfile(src, filename) assert filecmp.cmp(filename, src, False) fstat = os.stat(filename) size = fstat.st_size fd = os.open(filename, os.O_RDWR) os.ftruncate(fd, size + 1024) # add > 1 block assert os.stat(filename).st_size == size + 1024 os.ftruncate(fd, size - 1024) # Truncate > 1 block assert os.stat(filename).st_size == size - 1024 os.close(fd) os.unlink(filename) def tst_truncate_nocache(self): filename = os.path.join(self.mnt_dir, self.newname()) src = self.src shutil.copyfile(src, filename) assert filecmp.cmp(filename, src, False) fstat = os.stat(filename) size = fstat.st_size subprocess.check_call(self.s3ql_cmd_argv('s3qlctrl') + [ '--quiet', 'flushcache', self.mnt_dir ]) fd = os.open(filename, os.O_RDWR) os.ftruncate(fd, size + 1024) # add > 1 block assert os.stat(filename).st_size == size + 1024 os.ftruncate(fd, size - 1024) # Truncate > 1 block assert os.stat(filename).st_size == size - 1024 os.close(fd) os.unlink(filename) def tst_bug382(self): dirname = self.newname() fullname = self.mnt_dir + "/" + dirname os.mkdir(fullname) assert stat.S_ISDIR(os.stat(fullname).st_mode) assert dirname in llfuse.listdir(self.mnt_dir) cmd = ('(%d, %r)' % (llfuse.ROOT_INODE, path2bytes(dirname))).encode() llfuse.setxattr('%s/%s' % (self.mnt_dir, CTRL_NAME), 'rmtree', cmd) assert_raises(FileNotFoundError, os.stat, fullname) assert dirname not in llfuse.listdir(self.mnt_dir) s3ql-2.26/tests/common.py0000644000175000017500000002247413177323070017027 0ustar nikrationikratio00000000000000''' common.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. This module contains common functions used by multiple unit tests. ''' import time import os import subprocess import stat import random import configparser import pytest import functools def get_clock_granularity(): resolution = float('inf') for i in range(50): stamp1 = time.time() stamp2 = stamp1 while stamp1 == stamp2: stamp2 = time.time() resolution = min(resolution, stamp2 - stamp1) time.sleep(0.01) return resolution CLOCK_GRANULARITY = get_clock_granularity() # When testing, we want to make sure that we don't sleep for too short a time # (cause it may cause spurious test failures), and that the sleep interval # covers at least one timer update. We have to monkeypatch because we especially # want functions like s3ql.backends.common.retry to use the "safe" sleep # version. 
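# As a rough illustration (hypothetical numbers): time.sleep() may return
# before the requested interval has fully elapsed, e.g. when a signal is
# received, so a test that sleeps for one second and then checks that at
# least one second of wall-clock time has passed can fail spuriously.  With
# the monkeypatched version below, the following holds:
#
#     before = time.time()
#     time.sleep(0.2)      # actually safe_sleep() once the fixture has run
#     assert time.time() - before >= 0.2
#
# This also matters for the backoff assertions in the backend tests, which
# compare elapsed wall-clock time against 1 - CLOCK_GRANULARITY.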
@functools.wraps(time.sleep) def safe_sleep(secs, _sleep_real=time.sleep): '''Like time.sleep(), but sleep for at least *secs* `time.sleep` may sleep less than the given period if a signal is received. This function ensures that we sleep for at least the desired time. ''' now = time.time() end = now + secs while now < end: _sleep_real(max(end - now, CLOCK_GRANULARITY)) now = time.time() @pytest.fixture(autouse=True, scope='session') def install_safe_sleep(): time.sleep = safe_sleep def retry(timeout, fn, *a, **kw): """Wait for fn(*a, **kw) to return True. If fn() returns a true value, that value is returned. Otherwise, the function is called repeatedly for `timeout` seconds. If the timeout is reached, `RetryTimeoutError` is raised. """ step = 0.2 waited = 0 while waited < timeout: ret = fn(*a, **kw) if ret: return ret time.sleep(step) waited += step if step < waited / 30: step *= 2 raise RetryTimeoutError() class RetryTimeoutError(Exception): '''Raised by `retry()` when a timeout is reached.''' pass def skip_if_no_fusermount(): '''Raise SkipTest if fusermount is not available''' with subprocess.Popen(['which', 'fusermount'], stdout=subprocess.PIPE, universal_newlines=True) as which: fusermount_path = which.communicate()[0].strip() if not fusermount_path or which.returncode != 0: pytest.skip("Can't find fusermount executable") if not os.path.exists('/dev/fuse'): pytest.skip("FUSE kernel module does not seem to be loaded") if os.getuid() == 0: return mode = os.stat(fusermount_path).st_mode if mode & stat.S_ISUID == 0: pytest.skip('fusermount executable not setuid, and we are not root.') try: fd = os.open('/dev/fuse', os.O_RDWR) except OSError as exc: pytest.skip('Unable to open /dev/fuse: %s' % exc.strerror) else: os.close(fd) def skip_without_rsync(): try: with open('/dev/null', 'wb') as null: subprocess.call(['rsync', '--version'], stdout=null, stderr=subprocess.STDOUT,) except FileNotFoundError: pytest.skip('rsync not installed') def populate_dir(path, entries=1000, size=20*1024*1024, pooldir='/usr/bin', seed=None): '''Populate directory with random data *entries* specifies the total number of directory entries that are created in the tree. *size* specifies the size occupied by all files together. The files in *pooldir* are used as a source of directory names and file contents. *seed* is used to initialize the random number generator and can be used to make the created structure reproducible (provided that the contents of *pooldir* don't change).
''' poolnames = os.listdir(pooldir) if seed is None: # We want tests to be reproducible on a given system, so users # can report meaningful bugs seed = len(poolnames) random.seed(seed) # Entries in percentages subdir_cnt = random.randint(5, 10) file_cnt = random.randint(60, 70) fifo_cnt = random.randint(5, 10) symlink_cnt = random.randint(10, 20) hardlink_cnt = random.randint(5, 15) # Normalize to desired entry count scale = entries / sum((subdir_cnt, file_cnt, fifo_cnt, symlink_cnt, hardlink_cnt)) subdir_cnt = int(scale * subdir_cnt) file_cnt = int(scale * file_cnt) fifo_cnt = int(scale * fifo_cnt) symlink_cnt = int(scale * symlink_cnt) hardlink_cnt = int(scale * hardlink_cnt) # Sizes, make sure there is at least one big file file_sizes = [ random.randint(0, 100) for _ in range(file_cnt-1) ] scale = 0.5 * size / sum(file_sizes) file_sizes = [ int(scale * x) for x in file_sizes ] file_sizes.append(int(0.5 * size)) # Special characters for use in filenames special_chars = [ chr(x) for x in range(128) if x not in (0, ord('/')) ] def random_name(path): '''Get random, non-existing file name underneath *path* Returns a fully qualified path with a filename chosen from *poolnames*. ''' while True: name = poolnames[random.randrange(len(poolnames))] # Special characters len_ = random.randrange(4) if len_ > 0: pos = random.choice((-1,0,1)) # Prefix, Middle, Suffix s = ''.join(special_chars[random.randrange(len(special_chars))] for _ in range(len_)) if pos == -1: name = s + name elif pos == 1: name += s else: name += s + poolnames[random.randrange(len(poolnames))] fullname = os.path.join(path, name) if not os.path.lexists(fullname): break return fullname # # Step 1: create directory tree # dirs = [ path ] for _ in range(subdir_cnt): idx = random.randrange(len(dirs)) name = random_name(dirs[idx]) os.mkdir(name) dirs.append(name) # # Step 2: populate the tree with files # files = [] for size in file_sizes: idx = random.randrange(len(dirs)) name = random_name(dirs[idx]) with open(name, 'wb') as dst: while size > 0: idx = random.randrange(len(poolnames)) srcname = os.path.join(pooldir, poolnames[idx]) if not os.path.isfile(srcname): continue with open(srcname, 'rb') as src: buf = src.read(size) dst.write(buf) size -= len(buf) files.append(name) # # Step 3: Special files # for _ in range(fifo_cnt): name = random_name(dirs[random.randrange(len(dirs))]) os.mkfifo(name) files.append(name) # # Step 4: populate tree with symlinks # for _ in range(symlink_cnt): relative = random.choice((True, False)) existing = random.choice((True, False)) idx = random.randrange(len(dirs)) dir_ = dirs[idx] name = random_name(dir_) if existing: directory = random.choice((True, False)) if directory: target = dirs[random.randrange(len(dirs))] else: target = files[random.randrange(len(files))] else: target = random_name(dirs[random.randrange(len(dirs))]) if relative: target = os.path.relpath(target, dir_) else: target = os.path.abspath(target) os.symlink(target, name) # # Step 5: Create some hardlinks # for _ in range(hardlink_cnt): samedir = random.choice((True, False)) target = files[random.randrange(len(files))] if samedir: dir_ = os.path.dirname(target) else: dir_ = dirs[random.randrange(len(dirs))] name = random_name(dir_) os.link(target, name) files.append(name) class NoTestSection(Exception): ''' Raised by get_remote_test_info if no matching test section was found. 
''' def __init__(self, reason): self.reason = reason def get_remote_test_info(name): authfile = os.path.expanduser('~/.s3ql/authinfo2') if not os.path.exists(authfile): raise NoTestSection('No authentication file found.') mode = os.stat(authfile).st_mode if mode & (stat.S_IRGRP | stat.S_IROTH): raise NoTestSection("Authentication file has insecure permissions") config = configparser.ConfigParser() config.read(authfile) try: fs_name = config.get(name, 'test-fs') backend_login = config.get(name, 'backend-login') backend_password = config.get(name, 'backend-password') except (configparser.NoOptionError, configparser.NoSectionError): raise NoTestSection("Authentication file does not have %s section" % name) # Append prefix to make sure that we're starting with an empty bucket if fs_name[-1] != '/': fs_name += '/' fs_name += 's3ql_test_%d/' % time.time() return (backend_login, backend_password, fs_name) s3ql-2.26/tests/t3_inode_cache.py0000755000175000017500000000610412742247106020363 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' t2_inode_cache.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' if __name__ == '__main__': import pytest import sys sys.exit(pytest.main([__file__] + sys.argv[1:])) from s3ql import inode_cache from s3ql.mkfs import init_tables from s3ql.common import time_ns from s3ql.metadata import create_tables from s3ql.database import Connection import unittest import tempfile import os class cache_tests(unittest.TestCase): def setUp(self): # Destructors are not guaranteed to run, and we can't unlink # the file immediately because apsw refers to it by name. # Therefore, we unlink the file manually in tearDown() self.dbfile = tempfile.NamedTemporaryFile(delete=False) self.db = Connection(self.dbfile.name) create_tables(self.db) init_tables(self.db) self.cache = inode_cache.InodeCache(self.db, 0) def tearDown(self): self.cache.destroy() os.unlink(self.dbfile.name) def test_create(self): attrs = {'mode': 784, 'refcount': 3, 'uid': 7, 'gid': 2, 'size': 34674, 'rdev': 11, 'atime_ns': time_ns(), 'ctime_ns': time_ns(), 'mtime_ns': time_ns() } inode = self.cache.create_inode(**attrs) for key in list(attrs.keys()): self.assertEqual(attrs[key], getattr(inode, key)) self.assertTrue(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,))) def test_del(self): attrs = {'mode': 784, 'refcount': 3, 'uid': 7, 'gid': 2, 'size': 34674, 'rdev': 11, 'atime_ns': time_ns(), 'ctime_ns': time_ns(), 'mtime_ns': time_ns() } inode = self.cache.create_inode(**attrs) del self.cache[inode.id] self.assertFalse(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,))) self.assertRaises(KeyError, self.cache.__delitem__, inode.id) def test_get(self): attrs = {'mode': 784, 'refcount': 3, 'uid': 7, 'gid': 2, 'size': 34674, 'rdev': 11, 'atime_ns': time_ns(), 'ctime_ns': time_ns(), 'mtime_ns': time_ns() } inode = self.cache.create_inode(**attrs) for (key, val) in attrs.items(): self.assertEqual(getattr(inode, key), val) # Create another inode self.cache.create_inode(**attrs) self.db.execute('DELETE FROM inodes WHERE id=?', (inode.id,)) # Entry should still be in cache self.assertEqual(inode, self.cache[inode.id]) # Now it should be out of the cache for _ in range(inode_cache.CACHE_SIZE + 1): self.cache.create_inode(**attrs) self.assertRaises(KeyError, self.cache.__getitem__, inode.id) s3ql-2.26/tests/t5_lock_rm.py0000755000175000017500000000276412615000156017571 0ustar 
nikrationikratio00000000000000#!/usr/bin/env python3 ''' t5_lock_rm.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' if __name__ == '__main__': import pytest import sys sys.exit(pytest.main([__file__] + sys.argv[1:])) import llfuse import os.path import s3ql.lock import s3ql.remove import sys from pytest import raises as assert_raises import pytest import t4_fuse class TestLockRemove(t4_fuse.TestFuse): def test(self): self.mkfs() self.mount() self.tst_lock_rm() self.umount() self.fsck() def tst_lock_rm(self): # Extract tar tempdir = os.path.join(self.mnt_dir, 'lock_dir') filename = os.path.join(tempdir, 'myfile') os.mkdir(tempdir) with open(filename, 'w') as fh: fh.write('Hello, world') # copy try: s3ql.lock.main([tempdir]) except: sys.excepthook(*sys.exc_info()) pytest.fail("s3qllock raised exception") # Try to delete assert_raises(PermissionError, os.unlink, filename) # Try to write with pytest.raises(PermissionError): open(filename, 'w+').write('Hello') # delete properly try: s3ql.remove.main([tempdir]) except: sys.excepthook(*sys.exc_info()) pytest.fail("s3qlrm raised exception") assert 'lock_dir' not in llfuse.listdir(self.mnt_dir) s3ql-2.26/tests/t3_fsck.py0000755000175000017500000006505712742247106017104 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' t3_fsck.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' if __name__ == '__main__': import pytest import sys sys.exit(pytest.main([__file__] + sys.argv[1:])) from s3ql.backends import local from s3ql import ROOT_INODE from s3ql.mkfs import init_tables from s3ql.metadata import create_tables from s3ql.database import Connection, NoSuchRowError from s3ql.fsck import Fsck from s3ql.common import time_ns import os import shutil import hashlib import stat import tempfile import _thread import unittest def sha256(s): return hashlib.sha256(s).digest() class fsck_tests(unittest.TestCase): def setUp(self): self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-') self.backend = local.Backend('local://' + self.backend_dir, None, None) self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-') self.max_obj_size = 1024 self.dbfile = tempfile.NamedTemporaryFile() self.db = Connection(self.dbfile.name) create_tables(self.db) init_tables(self.db) self.fsck = Fsck(self.cachedir, self.backend, { 'max_obj_size': self.max_obj_size }, self.db) self.fsck.expect_errors = True def tearDown(self): shutil.rmtree(self.cachedir) shutil.rmtree(self.backend_dir) self.dbfile.close() def assert_fsck(self, fn): '''Check that fn detects and corrects an error''' self.fsck.found_errors = False fn() self.assertTrue(self.fsck.found_errors) self.fsck.found_errors = False self.fsck.check() self.assertFalse(self.fsck.found_errors) def test_cache(self): inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) " "VALUES (?,?,?,?,?,?,?,?)", (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH, os.getuid(), os.getgid(), time_ns(), time_ns(), time_ns(), 1, 8)) self._link(b'test-entry', inode) # Create new block fh = open(self.cachedir + '/%d-0' % inode, 'wb') fh.write(b'somedata') fh.close() self.assert_fsck(self.fsck.check_cache) self.assertEqual(self.backend['s3ql_data_1'], b'somedata') # Existing block self.db.execute('UPDATE inodes SET size=? 
WHERE id=?', (self.max_obj_size + 8, inode)) with open(self.cachedir + '/%d-1' % inode, 'wb') as fh: fh.write(b'somedata') self.assert_fsck(self.fsck.check_cache) # Old block preserved with open(self.cachedir + '/%d-0' % inode, 'wb') as fh: fh.write(b'somedat2') self.assert_fsck(self.fsck.check_cache) # Old block removed with open(self.cachedir + '/%d-1' % inode, 'wb') as fh: fh.write(b'somedat3') self.assert_fsck(self.fsck.check_cache) def test_lof1(self): # Make lost+found a file inode = self.db.get_val("SELECT inode FROM contents_v WHERE name=? AND parent_inode=?", (b"lost+found", ROOT_INODE)) self.db.execute('DELETE FROM contents WHERE parent_inode=?', (inode,)) self.db.execute('UPDATE inodes SET mode=?, size=? WHERE id=?', (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, inode)) def check(): self.fsck.check_lof() self.fsck.check_inodes_refcount() self.assert_fsck(check) def test_lof2(self): # Remove lost+found name_id = self.db.get_val('SELECT id FROM names WHERE name=?', (b'lost+found',)) inode = self.db.get_val('SELECT inode FROM contents WHERE name_id=? AND ' 'parent_inode=?', (name_id, ROOT_INODE)) self.db.execute('DELETE FROM inodes WHERE id=?', (inode,)) self.db.execute('DELETE FROM contents WHERE name_id=? and parent_inode=?', (name_id, ROOT_INODE)) self.db.execute('UPDATE names SET refcount = refcount-1 WHERE id=?', (name_id,)) self.assert_fsck(self.fsck.check_lof) def test_wrong_inode_refcount(self): inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) " "VALUES (?,?,?,?,?,?,?,?)", (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0, time_ns(), time_ns(), time_ns(), 1, 0)) self._link(b'name1', inode) self._link(b'name2', inode) self.assert_fsck(self.fsck.check_inodes_refcount) def test_orphaned_inode(self): self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) " "VALUES (?,?,?,?,?,?,?,?)", (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0, time_ns(), time_ns(), time_ns(), 1, 0)) self.assert_fsck(self.fsck.check_inodes_refcount) def test_name_refcount(self): inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) " "VALUES (?,?,?,?,?,?,?,?)", (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0, time_ns(), time_ns(), time_ns(), 2, 0)) self._link(b'name1', inode) self._link(b'name2', inode) self.db.execute('UPDATE names SET refcount=refcount+1 WHERE name=?', (b'name1',)) self.assert_fsck(self.fsck.check_names_refcount) def test_orphaned_name(self): self._add_name(b'zupbrazl') self.assert_fsck(self.fsck.check_names_refcount) def test_contents_inode(self): self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)', (self._add_name(b'foobar'), 124, ROOT_INODE)) self.assert_fsck(self.fsck.check_contents_inode) def test_contents_inode_p(self): inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) " "VALUES (?,?,?,?,?,?,?,?)", (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR, 0, 0, time_ns(), time_ns(), time_ns(), 1, 0)) self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)', (self._add_name(b'foobar'), inode, 123)) self.assert_fsck(self.fsck.check_contents_parent_inode) def test_contents_name(self): inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) " "VALUES (?,?,?,?,?,?,?,?)", (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR, 0, 0, time_ns(), time_ns(), time_ns(), 1, 0)) self.db.execute('INSERT INTO contents (name_id, inode, 
parent_inode) VALUES(?,?,?)', (42, inode, ROOT_INODE)) self.assert_fsck(self.fsck.check_contents_name) def _add_name(self, name): '''Get id for *name* and increase refcount Name is inserted in table if it does not yet exist. ''' try: name_id = self.db.get_val('SELECT id FROM names WHERE name=?', (name,)) except NoSuchRowError: name_id = self.db.rowid('INSERT INTO names (name, refcount) VALUES(?,?)', (name, 1)) else: self.db.execute('UPDATE names SET refcount=refcount+1 WHERE id=?', (name_id,)) return name_id def _link(self, name, inode, parent_inode=ROOT_INODE): '''Link /*name* to *inode*''' self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)', (self._add_name(name), inode, parent_inode)) def test_inodes_size(self): id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) " "VALUES (?,?,?,?,?,?,?,?)", (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0, time_ns(), time_ns(), time_ns(), 1, 128)) self._link(b'test-entry', id_) obj_id = self.db.rowid('INSERT INTO objects (refcount,size) VALUES(?,?)', (1, 36)) block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size, hash) ' 'VALUES(?,?,?,?)', (1, obj_id, 512, sha256(b'foo'))) self.backend['s3ql_data_%d' % obj_id] = b'foo' # Case 1 self.db.execute('UPDATE inodes SET size=? WHERE id=?', (self.max_obj_size + 120, id_)) self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)', (id_, 1, block_id)) self.assert_fsck(self.fsck.check_inodes_size) # Case 2 self.db.execute('DELETE FROM inode_blocks WHERE inode=?', (id_,)) self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)', (id_, 0, block_id)) self.db.execute('UPDATE inodes SET size=? WHERE id=?', (129, id_)) self.assert_fsck(self.fsck.check_inodes_size) # Case 3 self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)', (id_, 1, block_id)) self.db.execute('UPDATE inodes SET size=? 
WHERE id=?', (self.max_obj_size + 120, id_)) self.db.execute('UPDATE blocks SET refcount = refcount + 1 WHERE id = ?', (block_id,)) self.assert_fsck(self.fsck.check_inodes_size) def test_objects_id(self): # Create an object that only exists in the backend self.backend['s3ql_data_4364'] = b'Testdata' self.assert_fsck(self.fsck.check_objects_id) # Create an object that does not exist in the backend self.db.execute('INSERT INTO objects (id, refcount, size) VALUES(?, ?, ?)', (34, 1, 27)) self.assert_fsck(self.fsck.check_objects_id) def test_blocks_checksum(self): id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) " "VALUES (?,?,?,?,?,?,?,?)", (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0, time_ns(), time_ns(), time_ns(), 1, 8)) self._link(b'test-entry', id_) # Assume that due to a crash we did not write the hash for the block self.backend['s3ql_data_4364'] = b'Testdata' self.db.execute('INSERT INTO objects (id, refcount, size) VALUES(?, ?, ?)', (4364, 1, 8)) block_id = self.db.execute('INSERT INTO blocks (obj_id, refcount, size) VALUES(?, ?, ?)', (4364, 1, 8)) self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)', (id_, 0, block_id)) # Should pick up wrong hash and delete objects self.fsck.found_errors = False self.fsck.check_blocks_checksum() assert self.fsck.found_errors self.fsck.found_errors = False self.fsck.check_blocks_checksum() assert not self.fsck.found_errors # Should save files in lost+found self.fsck.found_errors = False self.fsck.check() assert self.fsck.found_errors # Now everything should be good self.fsck.found_errors = False self.fsck.check() assert not self.fsck.found_errors assert not self.db.has_val('SELECT block_id FROM inode_blocks WHERE inode=?', (id_,)) inode_p = self.db.get_val('SELECT parent_inode FROM contents_v WHERE inode=?', (id_,)) lof_id = self.db.get_val("SELECT inode FROM contents_v WHERE name=? 
AND parent_inode=?", (b"lost+found", ROOT_INODE)) assert inode_p == lof_id def test_blocks_obj_id(self): block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)', (1, 48, 128)) id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) " "VALUES (?,?,?,?,?,?,?,?)", (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0, time_ns(), time_ns(), time_ns(), 1, 128)) self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)', (id_, 0, block_id)) self._link(b'test-entry', id_) self.assert_fsck(self.fsck.check_blocks_obj_id) def test_missing_obj(self): obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 32)') block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)', (1, obj_id, 128)) id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) " "VALUES (?,?,?,?,?,?,?,?)", (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0, time_ns(), time_ns(), time_ns(), 1, 128)) self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)', (id_, 0, block_id)) self._link(b'test-entry', id_) self.assert_fsck(self.fsck.check_objects_id) def test_inode_blocks_inode(self): obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 42)') self.backend['s3ql_data_%d' % obj_id] = b'foo' block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)', (1, obj_id, 34)) self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)', (27, 0, block_id)) self.assert_fsck(self.fsck.check_inode_blocks_inode) def test_inode_blocks_block_id(self): id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) " "VALUES (?,?,?,?,?,?,?,?)", (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0, time_ns(), time_ns(), time_ns(), 1, 128)) self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)', (id_, 0, 35)) self._link(b'test-entry', id_) self.assert_fsck(self.fsck.check_inode_blocks_block_id) def test_symlinks_inode(self): self.db.execute('INSERT INTO symlink_targets (inode, target) VALUES(?,?)', (42, b'somewhere else')) self.assert_fsck(self.fsck.check_symlinks_inode) def test_ext_attrs_inode(self): self.db.execute('INSERT INTO ext_attributes (name_id, inode, value) VALUES(?,?,?)', (self._add_name(b'some name'), 34, b'some value')) self.assert_fsck(self.fsck.check_ext_attributes_inode) def test_ext_attrs_name(self): id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) " "VALUES (?,?,?,?,?,?,?,?)", (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0, time_ns(), time_ns(), time_ns(), 1, 128)) self._link(b'test-entry', id_) self.db.execute('INSERT INTO ext_attributes (name_id, inode, value) VALUES(?,?,?)', (34, id_, b'some value')) self.assert_fsck(self.fsck.check_ext_attributes_name) @staticmethod def random_data(len_): with open("/dev/urandom", "rb") as fd: return fd.read(len_) def test_loops(self): # Create some directory inodes inodes = [ self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) " "VALUES (?,?,?,?,?,?,?)", (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR, 0, 0, time_ns(), time_ns(), time_ns(), 1)) for dummy in range(3) ] inodes.append(inodes[0]) last = inodes[0] for inode in inodes[1:]: self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?, ?, ?)', (self._add_name(str(inode).encode()), inode, last)) last = inode 
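# The list wraps around: inodes[0] was appended again above, so the last
# directory created by this loop ends up containing the first one, and none
# of these directories is reachable from the root inode.  assert_fsck()
# expects check_loops() to detect this cycle and a full fsck run to fix it.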
self.assert_fsck(self.fsck.check_loops) def test_tmpfile(self): # Ensure that path exists objname = 's3ql_data_38375' self.backend[objname] = b'bla' del self.backend[objname] path = self.backend._key_to_path(objname) tmpname = '%s#%d-%d.tmp' % (path, os.getpid(), _thread.get_ident()) with open(tmpname, 'wb') as fh: fh.write(b'Hello, world') self.assert_fsck(self.fsck.check_objects_temp) def test_obj_refcounts(self): obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 42)') block_id_1 = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size, hash) ' 'VALUES(?,?,?,?)', (1, obj_id, 0, sha256(b'foo'))) block_id_2 = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size, hash) ' 'VALUES(?,?,?,?)', (1, obj_id, 0, sha256(b'bar'))) self.backend['s3ql_data_%d' % obj_id] = b'foo and bar' inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) " "VALUES (?,?,?,?,?,?,?,?)", (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, os.getuid(), os.getgid(), time_ns(), time_ns(), time_ns(), 1, 2048)) self._link(b'test-entry', inode) self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)', (inode, 1, block_id_1)) self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)', (inode, 2, block_id_2)) self.assert_fsck(self.fsck.check_objects_refcount) def test_orphaned_obj(self): self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 33)') self.assert_fsck(self.fsck.check_objects_refcount) def test_wrong_block_refcount(self): obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 23)') self.backend['s3ql_data_%d' % obj_id] = b'foo' block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size, hash) ' 'VALUES(?,?,?,?)', (1, obj_id, 0, sha256(b''))) inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) " "VALUES (?,?,?,?,?,?,?,?)", (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, os.getuid(), os.getgid(), time_ns(), time_ns(), time_ns(), 1, self.max_obj_size)) self._link(b'test-entry', inode) self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)', (inode, 0, block_id)) self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)', (inode, 1, block_id)) self.assert_fsck(self.fsck.check_blocks_refcount) def test_orphaned_block(self): obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 24)') self.backend['s3ql_data_%d' % obj_id] = b'foo' self.db.rowid('INSERT INTO blocks (refcount, obj_id, size, hash) VALUES(?,?,?,?)', (1, obj_id, 3, sha256(b'xyz'))) self.assert_fsck(self.fsck.check_blocks_refcount) def test_unix_size(self): inode = 42 self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) " "VALUES (?,?,?,?,?,?,?,?,?)", (inode, stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR, os.getuid(), os.getgid(), time_ns(), time_ns(), time_ns(), 1, 0)) self._link(b'test-entry', inode) self.fsck.found_errors = False self.fsck.check_unix() self.assertFalse(self.fsck.found_errors) self.db.execute('UPDATE inodes SET size = 1 WHERE id=?', (inode,)) self.fsck.check_unix() self.assertTrue(self.fsck.found_errors) def test_unix_size_symlink(self): inode = 42 target = b'some funny random string' self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) " "VALUES (?,?,?,?,?,?,?,?,?)", (inode, stat.S_IFLNK | stat.S_IRUSR | stat.S_IWUSR, os.getuid(), os.getgid(), time_ns(), time_ns(), time_ns(), 1, len(target))) 
self.db.execute('INSERT INTO symlink_targets (inode, target) VALUES(?,?)', (inode, target)) self._link(b'test-entry', inode) self.fsck.found_errors = False self.fsck.check_unix() self.assertFalse(self.fsck.found_errors) self.db.execute('UPDATE inodes SET size = 0 WHERE id=?', (inode,)) self.fsck.check_unix() self.assertTrue(self.fsck.found_errors) def test_unix_target(self): inode = 42 self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) " "VALUES (?,?,?,?,?,?,?,?)", (inode, stat.S_IFCHR | stat.S_IRUSR | stat.S_IWUSR, os.getuid(), os.getgid(), time_ns(), time_ns(), time_ns(), 1)) self._link(b'test-entry', inode) self.fsck.found_errors = False self.fsck.check_unix() self.assertFalse(self.fsck.found_errors) self.db.execute('INSERT INTO symlink_targets (inode, target) VALUES(?,?)', (inode, 'foo')) self.fsck.check_unix() self.assertTrue(self.fsck.found_errors) def test_unix_nomode_reg(self): perms = stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH | stat.S_IRGRP stamp = time_ns() inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) " "VALUES (?,?,?,?,?,?,?)", (perms, os.getuid(), os.getgid(), stamp, stamp, stamp, 1)) self._link(b'test-entry', inode) self.assert_fsck(self.fsck.check_unix) newmode = self.db.get_val('SELECT mode FROM inodes WHERE id=?', (inode,)) self.assertEqual(stat.S_IMODE(newmode), perms) self.assertEqual(stat.S_IFMT(newmode), stat.S_IFREG) def test_unix_nomode_dir(self): perms = stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH | stat.S_IRGRP stamp = time_ns() inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) " "VALUES (?,?,?,?,?,?,?)", (perms, os.getuid(), os.getgid(), stamp, stamp, stamp, 1)) inode2 = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) " "VALUES (?,?,?,?,?,?,?)", (perms | stat.S_IFREG, os.getuid(), os.getgid(), stamp, stamp, stamp, 1)) self._link(b'test-entry', inode) self._link(b'subentry', inode2, inode) self.assert_fsck(self.fsck.check_unix) newmode = self.db.get_val('SELECT mode FROM inodes WHERE id=?', (inode,)) self.assertEqual(stat.S_IMODE(newmode), perms) self.assertEqual(stat.S_IFMT(newmode), stat.S_IFDIR) def test_unix_symlink_no_target(self): inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) " "VALUES (?,?,?,?,?,?,?)", (stat.S_IFLNK | stat.S_IRUSR | stat.S_IWUSR, os.getuid(), os.getgid(), time_ns(), time_ns(), time_ns(), 1)) self._link(b'test-entry', inode) self.fsck.check_unix() self.assertTrue(self.fsck.found_errors) def test_unix_rdev(self): inode = 42 self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) " "VALUES (?,?,?,?,?,?,?,?)", (inode, stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR, os.getuid(), os.getgid(), time_ns(), time_ns(), time_ns(), 1)) self._link(b'test-entry', inode) self.fsck.found_errors = False self.fsck.check_unix() self.assertFalse(self.fsck.found_errors) self.db.execute('UPDATE inodes SET rdev=? 
WHERE id=?', (42, inode)) self.fsck.check_unix() self.assertTrue(self.fsck.found_errors) def test_unix_child(self): inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) " "VALUES (?,?,?,?,?,?,?)", (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, os.getuid(), os.getgid(), time_ns(), time_ns(), time_ns(), 1)) self._link(b'test-entry', inode) self.fsck.found_errors = False self.fsck.check_unix() self.assertFalse(self.fsck.found_errors) self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)', (self._add_name(b'foo'), ROOT_INODE, inode)) self.fsck.check_unix() self.assertTrue(self.fsck.found_errors) def test_unix_blocks(self): inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) " "VALUES (?,?,?,?,?,?,?)", (stat.S_IFSOCK | stat.S_IRUSR | stat.S_IWUSR, os.getuid(), os.getgid(), time_ns(), time_ns(), time_ns(), 1)) self._link(b'test-entry', inode) self.fsck.found_errors = False self.fsck.check_unix() self.assertFalse(self.fsck.found_errors) obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 32)') block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)', (1, obj_id, 0)) self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)', (inode, 1, block_id)) self.fsck.check_unix() self.assertTrue(self.fsck.found_errors) s3ql-2.26/tests/t3_verify.py0000775000175000017500000000677713160156175017470 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' t3_verify.py - this file is part of S3QL. Copyright © 2014 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' if __name__ == '__main__': import pytest import sys sys.exit(pytest.main([__file__] + sys.argv[1:])) from s3ql.backends import local from s3ql.backends.comprenc import ComprencBackend from s3ql.mkfs import init_tables from s3ql.metadata import create_tables from s3ql.database import Connection from s3ql import verify from pytest_checklogs import assert_logs import io import logging import shutil import tempfile import pytest @pytest.yield_fixture() def backend(): backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-') be = local.Backend('local://' + backend_dir, None, None) try: yield be finally: be.close() shutil.rmtree(backend_dir) @pytest.yield_fixture() def db(): dbfile = tempfile.NamedTemporaryFile() db = Connection(dbfile.name) create_tables(db) init_tables(db) try: yield db finally: db.close() dbfile.close() def test_retrieve(backend, db): plain_backend = backend backend = ComprencBackend(b'schnorz', ('zlib', 6), plain_backend) # Create a few objects in db obj_ids = (22, 25, 30, 31) for id_ in obj_ids: db.execute('INSERT INTO objects (id, refcount, size) VALUES(?, ?, ?)', (id_, 1, 27 * id_)) # Object one will be missing in backend # Object two will have a checksum error in the data key = 's3ql_data_%d' % obj_ids[1] backend[key] = b'some data that will be broken on a data check' (raw, meta) = plain_backend.fetch(key) raw = bytearray(raw) assert len(raw) > 20 raw[-10:-6] = b'forg' plain_backend.store(key, raw, meta) # Object three will have a checksum error in the metadata key = 's3ql_data_%d' % obj_ids[2] backend.store(key, b'some data that will be broken on a metadata check', { 'meta-key1': 'some textual data that just increases', 'meta-key2': 'the metadata size so that we can tamper with it' }) meta = plain_backend.lookup(key) raw = bytearray(meta['data']) assert len(raw) > 20 raw[-10:-6] = b'forg' meta['data'] = raw 
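    # Write the tampered metadata back through the plain backend, bypassing
    # ComprencBackend, so that the stored metadata no longer matches its
    # checksum. The assertions below expect this object to be reported as
    # corrupted even with full=False, while the object with tampered data
    # (obj_ids[1], above) is only detected when full=True.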
plain_backend.update_meta(key, meta) # Object four will be ok backend['s3ql_data_%d' % obj_ids[3]] = b'some data that is well' # When using a single thread, we can fake the backend factory def backend_factory(): return backend missing_fh = io.StringIO() corrupted_fh = io.StringIO() with assert_logs('^Backend seems to have lost', count=1, level=logging.WARNING), \ assert_logs('^Object %d is corrupted', count=1, level=logging.WARNING): verify.retrieve_objects(db, backend_factory, corrupted_fh, missing_fh, thread_count=1, full=False) assert missing_fh.getvalue() == 's3ql_data_%d\n' % obj_ids[0] assert corrupted_fh.getvalue() == 's3ql_data_%d\n' % obj_ids[2] missing_fh = io.StringIO() corrupted_fh = io.StringIO() with assert_logs('^Backend seems to have lost', count=1, level=logging.WARNING), \ assert_logs('^Object %d is corrupted', count=2, level=logging.WARNING): verify.retrieve_objects(db, backend_factory, corrupted_fh, missing_fh, thread_count=1, full=True) assert missing_fh.getvalue() == 's3ql_data_%d\n' % obj_ids[0] assert corrupted_fh.getvalue() == ('s3ql_data_%d\n'*2) % obj_ids[1:3] s3ql-2.26/tests/conftest.py0000664000175000017500000001057313160156175017365 0ustar nikrationikratio00000000000000''' conftest.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. This module is loaded automatically by py.test and is used to initialize logging and adjust the load path before running any tests. ''' # Python version check import sys if sys.version_info < (3,3): raise SystemExit('Python version is %d.%d.%d, but S3QL requires Python 3.3 or newer' % sys.version_info[:3]) import logging.handlers import sys import os.path import pytest import faulthandler import signal import gc import time # If a test fails, wait a moment before retrieving the captured # stdout/stderr. When using a server process (like in t4_fuse.py), this makes # sure that we capture any potential output of the server that comes *after* a # test has failed. For example, if a request handler raises an exception, the # server first signals an error to FUSE (causing the test to fail), and then # logs the exception. Without the extra delay, the exception will go into # nowhere. @pytest.mark.hookwrapper def pytest_pyfunc_call(pyfuncitem): outcome = yield failed = outcome.excinfo is not None if failed: time.sleep(1) @pytest.fixture(scope="class") def s3ql_cmd_argv(request): '''Provide argument list to execute s3ql commands in tests''' if request.config.getoption('installed'): request.cls.s3ql_cmd_argv = lambda self, cmd: [ cmd ] else: basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) request.cls.s3ql_cmd_argv = lambda self, cmd: [ sys.executable, os.path.join(basedir, 'bin', cmd) ] # Enable output checks pytest_plugins = ('pytest_checklogs') # Ignore DeprecationWarnings when running unit tests. They are # unfortunately quite often a result of indirect imports via third party # modules, so we can't actually fix them. @pytest.fixture(autouse=True) def ignore_depreciation_warnings(reg_output): reg_output(r'(Pending)?DeprecationWarning', count=0) @pytest.fixture() def pass_reg_output(request, reg_output): '''Provide reg_output function to UnitTest instances''' request.instance.reg_output = reg_output def pytest_addoption(parser): group = parser.getgroup("terminal reporting") group._addoption("--logdebug", action="append", metavar='', help="Activate debugging output from for tests. Use `all` " "to get debug messages from all modules. 
This option can be " "specified multiple times.") group = parser.getgroup("general") group._addoption("--installed", action="store_true", default=False, help="Test the installed package.") def pytest_configure(config): # If we are running from the S3QL source directory, make sure that we # load modules from here basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) if not config.getoption('installed'): if (os.path.exists(os.path.join(basedir, 'setup.py')) and os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))): sys.path = [os.path.join(basedir, 'src')] + sys.path # When running from HG repo, enable all warnings if os.path.exists(os.path.join(basedir, 'MANIFEST.in')): import warnings warnings.resetwarnings() warnings.simplefilter('default') # Enable faulthandler global faultlog_fh faultlog_fh = open(os.path.join(basedir, 'tests', 'test_crit.log'), 'a') faulthandler.enable(faultlog_fh) faulthandler.register(signal.SIGUSR1, file=faultlog_fh) # Configure logging. We don't set a default handler but rely on # the catchlog pytest plugin. logdebug = config.getoption('logdebug') root_logger = logging.getLogger() if logdebug is not None: logging.disable(logging.NOTSET) if 'all' in logdebug: root_logger.setLevel(logging.DEBUG) else: for module in logdebug: logging.getLogger(module).setLevel(logging.DEBUG) else: root_logger.setLevel(logging.INFO) logging.disable(logging.DEBUG) logging.captureWarnings(capture=True) # Run gc.collect() at the end of every test, so that we get ResourceWarnings # as early as possible. def pytest_runtest_teardown(item, nextitem): gc.collect() s3ql-2.26/Changes.txt0000644000175000017500000014625113246754327016146 0ustar nikrationikratio000000000000002018-03-04, S3QL 2.26 * Added support for py.test 3.3.0 * Improved metadata rotation performance. 2018-01-05, S3QL 2.25 * s3qlctrl now accepts a new *dropcache* command. * Fixed a race condition that resulted in mount.s3ql crashing with "I/O operation on closed file". Thanks to www.imCode.com for sponsoring this work! 2017-11-04, S3QL 2.24 * fsck.s3ql now accepts a new --force-remote parameter. It does dangerous things, and you should probably not use it. * When terminated by a signal (rather than by an unmount call), mount.s3ql now terminates as quickly as possible (i.e., without cleaning the cache), resulting in an unclean unmount. * The Swift backend feature detection is no longer affected by the no-ssl backend option (which is intended only for connections to the authentication server). * Accessing the cn-north-1 S3 storage region is now working. 2017-08-18, S3QL 2.23 * Accessing the us-east-1 S3 storage region is now working again. * Retry for some additional SSL Errors. Unfortunately we're playing whack-a-mole here. * The swift backend now dynamically detects the features supported by the backend server. * The swift backend now supports optimized object copy and bulk deletion (if supported by the server). 2017-06-23, S3QL 2.22 * The storage URL for Amazon S3 buckets has changed. It now includes the bucket's region. * Added support for AWS v4 authentication. Thanks to ibbchina.com for sponsoring this work! 2016-10-28, S3QL 2.21 * Added workaround for an issue where S3QL crashes with an "ssl.SSLError: [SSL: BAD_WRITE_RETRY]" exception. This problem has to be fixed in Python, but we now work around it by closing and re-opening the connection when it occurs. 2016-08-24, S3QL 2.20 * Fixed a problem with exporting S3QL file systems over NFS4, where directory contents would appear in duplicate. 
2016-06-27, S3QL 2.19 * Fixed a crash when running s3qlstat on an almost empty file system. 2016-04-29, S3QL 2.18 * The `runtests.py` file has been dropped in favor of requiring installation of py.test. * S3QL now uses semantic versioning. This means that backwards-incompatible versions (i.e., versions that require an upgrade of the file system revision) will be reflected in an increase of the major version number, i.e. the next backwards-incompatible version will have version 3.0. * Enabled several FUSE optimizations (most importantly, the ``big_writes`` feature) that got accidentally disabled at some point in the past. This should result in significant performance improvements. Thanks to Simon Brown for the report! * Fixed a crash with "BUG ALERT: Dirty inode was destroyed". 2016-03-08, S3QL 2.17.1 * Fixed a bug in the upgrade procedure that prevented file system modifications when successive commands used the local metadata cache. * Fixed a bug in the upgrade procedure that resulted in corrupted file [acm]times when there was no cached metadata available for both s3qladm and the first fsck.s3ql/mount.s3ql invocation after the upgrade. 2016-03-08, S3QL 2.17 * The internal file system revision has changed. File systems created with S3QL 2.17 or newer are not compatible with prior S3QL versions. To update an existing file system to the newest revision, use the `s3qladm upgrade` command. * Due to a bug in the FUSE kernel module (cf. https://github.com/libfuse/libfuse/issues/23) S3QL does not (and did not) honor the write restriction bit. This limitation is now documented. (It is unknown if the same problem exists when running under BSD or OS-X). * S3QL now honors the setgid bit on directories (as opposed to just storing them). * The `s3qlstat` command now accepts a --raw option to inhibit pretty-printing of sizes. * The `s3qlctrl log` command is now working again. * The `s3qlctrl cachesize` command is now working again. * The s3 backend now supports the "infrequent access" storage class (cf. https://aws.amazon.com/s3/storage-classes/) * The 's3qlstat' command now prints more information about cache usage, including the number of cache entries and pending object removals. 2016-02-23, S3QL 2.16 * S3QL now supports (and requires) Python-LLFUSE 0.43 or newer (but not never than 1.0). * S3QL now retries when receiving a "ServiceUnavailable" error from an S3 (or compatible) backend. * When retrying network requests, the back-off time now includes a random factor to avoid accidentally flooding the server with too many simultanuous requests. * Fixed a problem with fsck.s3ql aborting with a "ConstraintError" in some situations. * Fixed a problem with fsck.s3ql repeatedly renaming a file in /lost+found because it may lack data. * In the authinfo2 file, storage urls for the local backend may now be specified with a trailing slash. 2015-09-18, S3QL 2.15 * Fixed a compatibility problem with recent versions of Python-LLFUSE (0.41 and newer). * S3QL now supports using special characters in the prefix for S3 buckets. Previously, characters that would need to be percent-encoded when part of an URL (like + or %) would result in a crash. 2015-07-31, S3QL 2.14 * S3QL now longer crashes when the server sends a zero-length response that claims to be XML. * The default tcp-timeout has been increased from 10 seconds to 20 seconds. * Fixed test failures when FUSE is not available, tests should now again be skipped. 
* S3QL now tracks the frequency of server errors and logs warnings if they occur more often than 5 times per second over a period of at least 60 seconds. Previously, retries would only be logged if the same request has been retried more than 3 times. * The "s3qlctrl cachesize" command is now working again (previously it would appear to succeed, but not actually change the cache size). * 404 errors when retrying a delete request are now ignored - most likely they mean that the server actually received and executed the previous try. * Fixed an issue with fsck.s3ql crashing when trying to create a file with a very long name in /lost+found. 2015-01-31, S3QL 2.13 * The internal file system revision has changed. File systems created with S3QL 2.13 or newer are not compatible with prior S3QL versions. To update an existing file system to the newest revision, use the 's3qladm upgrade' command. During the upgrade, all storage objects that were created by S3QL 1.1 or earlier will be downloaded and re-uploaded. For objects created by S3QL 1.1.2 or later, the upgrade will only affect the storage object's metadata (so no contents need to be transferred). * SECURITY ADVISORY: Previous mkfs.s3ql versions used /dev/urandom to generate the master key when creating a new encrypted file system. However, /dev/urandom is not guaranteed to provide the 256-bits of entropy requested by S3QL. This may have allowed an attacker to predict parts of the master key in situations where little entropy is available (e.g. right after system boot, or if a different program has previously drained the entropy pool). Note that the master key is not affected when the file system passphrase is changed. The only way to recover from a potentially compromised master key is to create a new file system and copy over all data. * When creating new file systems, the master key is now generated by reading /dev/random rather than /dev/urandom to ensure sufficient entropy. * The 'no-ssl' option for the swift backend is now used only when connecting to the authentication server. Whether SSL is used when connecting to the storage server is determined by the authentication server. * Fixed a crash when using the "disable-expect100" swift backend option. * Fixed a race condition that could lead to a "KeyError" crash when refreshing a Google Storage OAuth2 access token. * Fixed a race condition that could lead to a hanging mount.s3ql process and hanging test_thread_hang() unit test. * Updated internal metadata storage and checksum format. The old format was difficult to secure against malicious backend data and could have resulted in false-positive checksum mismatches with future or different Python interpreters. * Sizes (e.g. in the s3qlstat output) are now always reported with three significant digits. * Fixed a bug that caused fsck.s3ql to either abort with a "apsw.ConstraintError" or to incorrectly consider storage objects as missing when the connection to remote server is interrupted while retrieving the object list. * Storage urls without any prefix (e.g. s3://foo, but also s3://foo/) are now using the same local cache directory. Previously they would use different directories, despite pointing to the same file system. (Note that this does not affect storage urls with a prefix, s3://foo/bar and s3://foo/bar/ refer to different locations in the bucket, and thus correctly use different cache directories). * Fixed a problem where mount.s3ql would crash when unmouting the file system because it could not delete the cache directory. 
This could happen when the file system was not unmounted cleanly, but fsck.s3ql was then run on a different system (or using a different cache directory). * S3QL now requires at least version 3.4 of the dugong Python module. 2014-11-09, S3QL 2.12 * The s3c backend has a new 'dumb-copy' option. If this option is enabled, copy operations are assumed to have succeeded as soon as the server returns a '200 OK' status, without checking the contents of the response body. * The swift and s3c backends have a new 'disable-expect100' to disable support for the 'Expect: continue' header. Using this option allows S3QL to work with storage servers without proper HTTP 1.1 support, but may decrease performance as object data will be transmitted to the server more than once in some circumstances. * contrib/benchmark.py is now working again. * The `tcp-timeout` backend option is now actually working instead of resulting in a "TypeError". * Fixed a problem where saving metadata would fail with "ValueError: Can't dump NULL values". * s3qlstat now also gives information about cache usage. 2014-09-04, S3QL 2.11.1 * By popular demand, mount.s3ql is now able to daemonize again and also does so by default. * Un-encrypted file systems (created with mkfs.s3ql --plain) can now be upgraded to the newest revision. * S3QL now automatically retries a request if a backend server reports a 408 (Request Timeout) status code. * The Google Storage backend now copes better with server errors when refreshing an OAuth2 access token. 2014-08-27, S3QL 2.11 * SECURITY UPDATE (CVE-2014-0485). A remote code execution vulnerability was fixed. An attacker with control over the communication with the storage backend or the ability to manipulate the data stored in the backend was able to trigger execution of arbitrary code by mount.s3ql, fsck.s3ql, mkfs.s3ql, s3qladm and s3ql_verify. Both encrypted and unencrypted file systems were vulnerable. * s3ql_verify no longer crashes when checking an empty file system. * Fixed a crash when using Google OAuth2 and the first request after the access token has expired is a write request. * mount.s3ql now supports systemd-style readyness notification. To use this feature, make sure that you have the 'systemd' module installed for Python 3. This feature has not been tested, if you use it or encounter problems, please report back. * Fixed a race condition that could cause tests/t5_failsafe.py to fail. * mount.s3ql no longer daemonizes on its own. With a modern init system this should no longer be necessary, and when running mount.s3ql from the command line the shell can be used to put the process into background. * There is a new --backend-options parameter that can be used to pass backend-specific options to any S3QL command that accepts a storage url. * The --no-ssl and --ssl-ca-path parameters have been removed. For those backends were these parameters make sense, you can use the backend options of the same name instead (e.g. instead of `--no-ssl` use `--backend-options no-ssl`). * Several backends now accept a `tcp-timeout` option. If S3QL is unable to communicate with the remote server for longer than this period, the TCP/IP connection is re-established. * The Amazon S3 backend now accepts a 'sse' option to enable server side encryption. Both costs & benefits of S3 server side encryption are probably very small, and this option does *not* affect any client side encryption performed by S3QL itself. 
* The Amazon S3 backend now accepts a 'rrs' option to enable reduced redundancy storage for any newly created objects. 2014-07-28, S3QL 2.10.1 * 'setup.py install' now also installs the s3ql_verify command. 2014-07-27, S3QL 2.10 * The internal file system revision has changed. File systems created with S3QL 2.10 or newer are not compatible with prior S3QL versions. To update an existing file system to the newest revision, use the 's3qladm upgrade' command. It is strongly recommended to run the (new) s3ql_verify command with the --data option at shortly after the upgrade. This is necessary to ensure that the upgrade to the next (2.11) S3QL release will run smoothly. * The User's Guide now contains a description of the possible failure modes of mount.s3ql. * The --debug command line parameter now generates a bit less output by default, and there is an additional --debug-module parameter to activate additional messages. * When using encryption, S3QL now checks that a storage object's key corresponds to the data stored under it. The lack of this check in previous versions allowed an attacker with control over the storage server to interchange blocks within the same file system (which would have resulted in apparent data corruption in the file system). Targeted modification of specific files, however, would have been very unlikely, because the interchange of blocks had to be done blindly (with the attacker not knowing which file any block belongs to, nor what its contents are). Fixes https://bitbucket.org/nikratio/s3ql/issue/52/. * S3QL now aborts immediately instead of retrying if the server storage server reports that the local clock is skewed. * There is a new 's3ql_verify' command. This program retrieves and checks every storage object to ensure that all objects are available and have not been tampered with. In contrast to fsck.s3ql, s3ql_verify does not trust the object list provided by the storage server and actually attempts to download the objects one by one. * S3QL now requires version 3.2 or newer of the dugong module. 2014-06-28, S3QL 2.9 * Fix crash when using swift backend and server uses an authentication URL other than /v1.0. * Fixed two test failures when running unit tests as root. * Fixed problems when receiving an HTTP error without a well-formed XML body from the remote server (this may happen e.g. due to failure in a transparent proxy or load balancer). * S3QL now depends on the defusedxml Python module (https://pypi.python.org/pypi/defusedxml/). * S3QL is no longer vulnerable to DOS attacks from malicious backend servers. Previously, a malicious server could induce S3QL to consume arbitrary amounts of memory by recursive XML entity expansion. * S3QL now supports Google OAuth2 authentication. To use it, specify 'oauth2' as backend login, and a valid OAuth2 refresh token as backend password. To obtain the refresh token, the (new) s3ql_oauth_client command may be used. * S3QL now requires version 3.1 or newer of the dugong module. * In some cases, transmission errors when reading storage objects from a backend may have been misreported as corrupted backend objects. This has been fixed. * S3QL no longer crashes when data corruption occurs in the first few bytes of an LZMA compressed storage object. * S3QL now honors the "Retry-After" HTTP header when receiving an XML error from a storage server. * Fixed a crash that could occur when the remote server (or some intermediate proxy) sends a non-XML error response. 
* mount.s3ql and fsck.s3ql now use different exit codes to indicate different failure conditions. 2014-03-29, S3QL 2.8.1 * No changes in S3QL itself. * The S3QL 2.8 tarball accidentally included a copy of the Python dugong module. This has been fixed in the 2.8.1 release. 2014-03-13, S3QL 2.8 * Fixed various problems with using a proxy server. * Sending SIGUSR1 to mount.s3ql now generates a stacktrace (debugging feature). * When passing --installed to the test runner, S3QL commands are now loaded from $PATH instead of the packages bin/ directory. * The runtest.py script now comes with the correct shebang (i.e., it can now be called as "./runtests.py" instead of "python3 runtests.py"). * S3QL now requires the python dugong module (https://bitbucket.org/nikratio/python-dugong) * Fixed a file system hang when all upload threads encounter unexpected backend problems. 2013-12-16, S3QL 2.7 * Fixed another race condition that could lead to mount.s3ql crashing with `ValueError: I/O operation on closed file`. * S3QL no longer generates warning messages for the first two times that it has to resend a request to the storage backend. If there is no success after the second try, messages are emitted as before. * S3QL now stores multiple copies of the master encryption key to allow recovery if the backend looses the object holding the primary copy. To take advantage of this functionality for existing file systems, change the file system passphrase with s3qladm. * Fixed problem with automatic cache size detection (mount.s3ql was treating bytes as kilobytes). Thanks to GVormayr@gmail.com for the patch! * Fixed "AttributeError in LegacyDecryptDecompressFilter" crash when reading objects written by old S3QL versions. * Fixed a problem with umount.s3ql giving a strange error when the mountpoint is still in use. 2013-12-03, S3QL 2.6 * Fixed a problem with mount.s3ql crashing with `KeyError` in `collections/abc.py`. * Fixed a problem with mount.s3ql crashing with `ValueError: I/O operation on closed file`. * Fixed a race condition that could lead to data corruption when multiple processes trigger download of the same data block simultaneously. * Fixed a problem with S3QL crashing with "SSLEOFError" when an SSL connection is terminated unexpectedly. * Increased deduplication performance. Previously, block checksums were accidentally computed without parallelism. * Fixed a problem with mount.s3ql (incorrectly) reporting corrupted data for compressed blocks of some specific sizes. Many thanks to Balázs for extensive debugging of this problem. * Specifying --debug when using syslog or file logging no longer suppresses informational messages to stdout. * Fixed build under MacOS X. Thanks to john@nachtimwald.com for the patch! * mount.s3ql now autodetects a sensible defaults for the --max-cache-entries and --cachesize options. 2013-10-19, S3QL 2.5 * Removed a race condition that resulted in a crash when a file was deleted while the upload was in progress, and the upload was then restarted because of a connection problem. * Specifying bzip2 as the compression algorithm for mount.s3ql no longer gives an error. * Fixed another race condition that could lead to the (apparent) vanishing of the file system metadata when the mount.s3ql is killed at an unfortunate time. * S3QL programs no longer create an empty file in the default log file location if a custom log file is specified with --log. * contrib/benchmark.py is now working again - thanks to davidthomson@gmail.com. 
* If a backend returns corrupted data, mount.s3ql no longer crashes but switches to read-only operation. * Fixed a problem with error messages from mount.s3ql being lost, resulting in a file system shutdown for apparently no reason. 2013-08-27, S3QL 2.4 * Attempts to upgrade a file system that is already at the newest revision no longer render the local metadata copy unusable. * Google storage bucket names containing dots no longer cause problems with SSL certificate validation. * Fixed crash when statvfs() is called on an almost empty file system (which may happen automatically when using some desktop environments, resulting in an immediate crash after mounting an empty filesystem). * There is a new "swiftks" backend for OpenStack/Swift servers that supports keystone authentication and allows specification of the desired storage region. * Eliminated a race condition that could lead to mount.s3ql crashing with a "KeyError in collections/abc.py" under some circumstances. * Fixed a crash that could occur when trying to clean up after receiving a connection or server error when uploading an object. 2013-07-18, S3QL 2.3 * Reading the file system passphrase from the authinfo2 file is now working again. * The precedence of the authinfo2 sections has been fixed to agree with the documentation: later entries override earlier entries rather than the other way around. * Fixed a problem with mount.s3ql not recognizing the upgraded revision when `s3qladm upgrade` was run with a different --cachedir argument. * Fixed a crash of the s3 backend when multiple objects are deleted and the s3 connection needs to be re-established. 2013-07-12, S3QL 2.2 * Eliminated a race condition that resulted in an apparent loss of remote metadata when the mount.s3ql process was interrupted after renaming the remote metadata but before having completely uploaded the local metadata. * Attempting to run mount.s3ql with --allow-user but without having the necessary permissions no longer results in the file system being marked as needing fsck. * The S3 backend now deletes multiple storage objects using one request if possible, reducing latency, traffic and number of requests to S3. * S3QL is now able to detect server errors and redirections before all data has been sent to the server. This avoids pointless data transfers and should fix a problem with S3QL operating very slow when operating on recently created storage buckets/containers. * There is a new 'rackspace' backend that allows specification of the object storage region in the storage URL. The rackspace backend also uses a different authentication API than the swift backend (which can still be used for Rackspace as well). When using the 'rackspace' backend, use your regular Rackspace password instead of the API key as the backend password. * The mount.s3ql --compress option now allows specification of the compression level in addition to the compression algorithm. 2013-06-14, S3QL 2.1 (beta) * S3QL now uses SSL connections by default. * S3QL now verifies server certificates when using SSL connections. * There is a new --no-ssl option that now works for every backend. * The Amazon S3 and Swift backends are now working again (regression was introduced in 2.0-beta). 2013-05-12, S3QL 2.0 (beta) * S3QL now requires Python 3.3 or newer. * S3QL now requires the pycrypto third-party Python module. * S3QL now requires at least version 0.39 of the llfuse Python module. * Cycling of metadata backups when using the local backend now takes much less time. 
* S3QL no longer requires the unittest2, pycryptopp and lzma third-party Python modules. * It is no longer required that S3QL and Python APSW be linked to the same SQLite shared library. * mount.s3ql: fixed crash when using --metadata-upload-interval 0. * Instead of completely unmounting the file system (and leaving the mount point inaccessible) when backend data is missing or corrupted, S3QL now returns an error and switches to read-only operation. 2013-04-09, S3QL 1.14 * Reduced CPU consumption when S3QL is idle. * S3QL now automatically retries requests when S3 reports an "internal error" (this functionaly got accidentally broken in S3QL 1.13). * Fixed a hang when using s3qlrm to delete a directory with a very large number of sub-directories. 2013-03-03, S3QL 1.13.2 * Allow S3 bucket names starting with a number. * Return EOPNOTSUP error on ACL operations. The previous value of EINVAL works with FUSE, but confuses several applications. 2013-02-24, S3QL 1.13.1 * Change error code returned to applications trying to set ACL extended attributes. The previous error code of ENOSYS had the unintended side effect that some FUSE versions gave up trying to get/set any extended attributes at all. 2013-02-23, S3QL 1.13 * Fixed issues when trying to access a directory at the root of the file system that has recently been removed with s3qlrm. (In that case the directory was no longer included in the readdir() output, but it was still posssible to stat() it because was still present in the kernel's dentry cache). * When trying to connect to the backend, S3QL now also retries at increasing intervals if DNS appears to be unavailable (i.e., if there's currently no network at all). This means that mount.s3ql will no longer bail out when called without a network connection but simply retry (printing warning messages in the process). * Work around undocumented "Timeout" reply from Google Storage and retry instead of crashing. * Instead of failing with a bogus "Invalid credentials or skewed system clock" error when trying to access a bucket with an invalid DNS name, S3QL now prints a proper error message. (Such bucket names are still allowed when using the US Standard regions, but generally not a good idea). * Removed ostensible ACL support. It was found that S3QL never kept ACLs and file permission bits synchronized as mandated by POSIX. This is due to a bug in the FUSE library and cannot easily be fixed in S3QL. Consequently, S3QL no longer reports ACL support unless a FUSE version with the necessary bugfix version is installed. * Fixed a bug that caused malformed metadata to be written out when using recent eglibc versions (the relevant change in eglibc was introduced between eglibc versions 2.13 and 2.17). 2012-09-03, S3QL 1.12 * Be more verbose about how to remedy the situation when attempting to upgrade file system needing fsck. * Fsck now detects and fixes directory entries which do not have an entry type stored in their mode field. 2012-05-04, S3QL 1.11.1 * Fixed crash when using S3 backend. (Regression introduced in release 1.11). * Increase minimum reported file system size to 1 TB, and work around df bug so that size is reported properly. * umount.s3ql now produces a more helpful error message if the file system has crashed. 2012-04-29, S3QL 1.11 * S3QL no longer uses the confusing "bucket" term (which is S3 specific). The "bucket-passphrase" option in the authentication file has been renamed to "fs-passphrase". 
* Metadata about stored objects is now split into multiple fields, so that no field is longer than 256 bytes. This makes S3QL compatible with a wider range of storage providers. Thanks to Stuart Wallace for reporting this issue. * The SWIFT backend now retries if it receives 5xx error codes from the remote server, and no longer fails when attempting to upload data with an expired auth token. Thanks to Ken for the report and initial patch! 2012-02-24, S3QL 1.10 * The s3s, s3cs and gss backends have been removed. Use the new --ssl option together with the s3, s3c and gs backends instead. * S3QL no longer keeps track of consistency guarantees for the different backends. The increasing number of different storage providers offering different regions, redundancy and availability levels makes this no longer feasible. * The User's Guide contains a new section "Important Rules to Avoid Losing Data". Reading it is strongly recommended. 2012-01-21, S3QL 1.9 * Fixed a problem with file and directory time stamps changing with the time zone of the mount.s3ql process. * Fixed a crash in contrib/benchmark.py and made output (hopefully) easier to understand. * Fixed "Too many chained redirections" bug when trying to access a new bucket immediately after creation. * When receiving 5xx errors from the server, S3QL now waits and retries instead of aborting immediately. * contrib/s3ql_upstart.conf now supports running S3QL as an ordinary user. * S3QL now has experimental, native support for OpenStack/SWIFT servers. Thanks to the guys from www.bitbackup.de for providing support and a test server! * S3QL now has experimental proxy support and will honor the http_proxy and https_proxy environment variables. 2011-12-06, S3QL 1.8.1 * Fixed direct updates from 1.6 to 1.8. 2011-12-05, S3QL 1.8 * Fixed "ValueError: Can't dump NULL values" crash when file system metadata was uploaded during heavy file system activity. * Deleting files no longer takes O(number of distinct filenames) time (bug introduced in 1.7). * Improved s3qlcp and directory listing performance for very large directories. * The --blocksize option of mkfs.s3ql has often been misunderstood, leading people to create file systems with block sizes significantly smaller than 1 MB. As a matter of fact, a small S3QL block size does *not* have any advantage over a large block size when storing lots of small files. A small block size, however, seriously degrades performance when storing larger files. This is because S3QL is effectively using a dynamic block size, and the --blocksize value merely specifies an upper limit. To make this more clear, the --blocksize option has been renamed to --max-obj-size. If you created your file system with a block size of less than 1 MB, it is strongly recommended to recreate it with a larger block size. Unfortunately, an in-place conversion of the block size is not possible. * mount.s3ql has a new --min-obj-size option. In the future, files smaller than the minimum object size may be combined into groups that are stored as single objects in the storage backend. * Depend on python-llfuse 0.37. This fixes a problem with the file system effectively blocking any other request while an s3qlrm, s3qllock or s3qlcp instance is running. * Fixed some crashes happening under heavily concurrent file system usage. 2011-11-27, S3QL 1.7 * Extended attribute names are now de-duplicated. * Metadata is now stored in a custom, delta-encoded binary format and then BZIP2 compressed, resulting in a 5-fold speedup when dumping and compressing. 
* Inodes are now assigned sequentially rather than randomly, and old inodes are not reused. This makes S3QL fully NFS compatible and allows metadata to be stored much more efficiently, resulting in a 4 to 8 fold decrease in compressed metadata size. * s3qlcp now also copies extended attributes. * s3qlcp no longer generates incorrect block reference counts when copying a file that has identical blocks (i.e., that can be de-duplicated within itself). * Eliminated a race condition in s3qlcp. When copying a file with s3qlcp immediately after it was modified or created, it was possible that s3qlcp would copy the new file attributes (size, modification time, etc.) but the old data blocks. A copy of a newly created file would then contain only zero bytes, while a copy of a modified file would look like the original but contain data from before the modification. * "mkfs.s3ql --force" and "s3qladm clear" are working again. 2011-11-20, S3QL 1.6 * fsck: gracefully recover if temporary indices already exist (e.g. if previous fsck was interrupted). * Due not fail with "BadDigest" error when objects are only partially retrieved from backend. 2011-10-20, S3QL 1.5 * Fixed parsing of storage urls, s3s:// no longer generates bogus error message. * Fixed support for prefix in storage urls. * Retry on timeout when transmitting data to remote server. * Do not free mount point when terminating due to unhandled exception in background thread. 2011-10-06, S3QL 1.4 * Metadata is now always LZMA compressed, mount.s3ql's --compress option only affects compression of file contents. * Network errors that occur in the middle of a read operation are now handled correctly as well. * s3qladm now uses cached metadata if available. 2011-10-04, S3QL 1.3 * Fixed an s3qlcp crash happening when source or destination inode number was a 64 bit value. * "Connection reset by peer" network errors are now handled correctly. * fsck.s3ql can now renumber inodes. This is useful if the file system was mounted using S3QL 1.2 or 1.1.x without the --nfs option and can now no longer be accessed on 32bit systems. * Use only 32 bits for inodes, even when --nfs is not specified. This ensures that file systems can be safely used by both 32 and 64 bit systems. 2011-09-28, S3QL 1.2 * Fixed a database problem that was responsible for file system access becomer slower and slower the more data was stored in the file system. * Fixed a race condition that could cause applications to get just zero bytes when trying to read from a file that has just been copied with s3qlcp. 2011-09-20, S3QL 1.1.4 * Fixed a typo that caused errors when trying to remove any blocks that have been committed to the backend. * Improved accuracy of s3qlstat during active file transfers (but still not 100% accurate). * Fixed some theoretical deadlocks. * contrib/benchmark.py is now working again and also takes into account the throughput from userspace to S3QL. 2011-09-18, S3QL 1.1.3 * Fixed a race condition in the local backend that resulted in errors of the form "[Errno 17] File exists: [bucket path]". * Added Google Storage backend. * Added backend for general, S3 compatible storage services. * Fixed a bug that caused S3QL to complain about the backend having lost objects when trying to download an object before its upload was completed. This could happen because locking was done based on inode and block number rather than object id, and the de-duplication feature can reuse an object for another inode before the upload is completed. * Fixed a data corruption bug. 
If a data block was changed while being uploaded to the backend, and a second, identical data block was flushed while the upload was in progress, but before the first block was changed, the second data block was linked to the *modified* data. This happened because changes to an object in transit were only checked for after the upload completed, leaving a window in which the contents of the upload object did not agree with its stored hash. This problem can be detected by verifying the hash values of all stored data blocks. This procedure will automatically be done when the file system is updated to the newest revision, but may take a longer time since every object needs to be downloaded and checked. 2011-09-08, S3QL 1.1.2 * The modules for communicating with the different storage providers have been completely rewritten, resulting in improved performance, more features and better maintainability. * S3 buckets can now be used with arbitrary prefixes, allowing to store more than one S3QL file system in a bucket. * The format of the --authinfo file has changed. See the documentation for details of the new format. To avoid breaking backwards compatibility, the default file is now called authinfo2. * Network errors are now handled much more consistently. * There is a new --nfs option for mount.s3ql that needs to be used when the S3QL file system will be exported over NFS. * The local backend now stores data and metadata in the same file, so it needs only half as many files as before. * The --homedir option has been replaced by the more finely grained --authfile, --cachedir and --log options. * S3QL can now log directly to syslog. * The sftp storage backend has been dropped. The recommended way to use S3QL over ssh is to use sshfs (http://fuse.sourceforge.net/sshfs.html) with S3QL's local backend. * fsck now checks if all indices have been created. This avoids a huge performance problem when mount.s3ql was interrupted after downloading metadata, but before creating the indices. 2011-07-23, S3QL 1.1 (development version) * Restructured metadata. This should also significantly reduce the size of the SQLite database file. * Fixed license typo in file header comments, license is GNU GPL Version 3, not LGPL. * Fixed problem with fsck.s3ql generating extraordinary long filenames in /lost+found and then crashing. * When called as root, use umount rather than fusermount for compatibility with FUSE4BSD. 2011-05-20, S3QL 1.0.1 * Disabled WAL mode again for now because of unexpected problems with s3qlcp, s3qllock and s3qlrm (performance going down orders of magnitude, and *very* large *.db-wal file in ~/.s3ql). 2011-05-13, S3QL 1.0 * S3QL has been declared stable after 2 years of beta-testing did not reveal any data-critical bugs. * Fixed occasional assertion error when calling s3qlctrl flushcache or unmounting the file system. * Fixed a race condition when a block is expired while it is in transit but has already been modified again. * expire_backups.py no longer has an --init option, the state file is created automatically if this operation is safe. Instead, there is a --reconstruct-state option that can be used to try to reconstruct a lost state file. * The size of symbolic links is now reported as the length of the target instead of zero. This used to confuse revision control systems like git and hg. * Added man pages for all S3QL commands. 2011-02-04, S3QL 0.30 * S3QL now defaults to use unencrypted HTTP connections, which significantly improves performance when using the S3 backend. 
For an encrypted file system, all data is already encrypted anyway, and authentication data is never transmitted in plain text even for unencrypted file systems. Therefore, the use of SSL brings little gain for most users. To force SSL usage, the new --ssl option can be used. * mkfs.s3ql now has a --force option to overwrite an existing file system. 2010-12-30, S3QL 0.29 * The FUSE interface has been rewritten using Cython and factored out into a separate package, http://code.google.com/p/python-llfuse/. This should result in easier installation, better performance and better maintainability. 2010-12-19, S3QL 0.28 * "s3qlctrl upload-meta" now works even if the file system has been mounted with --metadata-upload-interval=0. * File system metadata is now permanently cached locally. This significantly reduces the time required to mount the file system. * The documentation is now also included in PDF format. 2010-12-11, S3QL 0.27 * The authinfo file now supports passwords that include white space. * The s3qladm command can now be used to download metadata backups. * The --strip-meta option for mount.s3ql has been removed, redundant data is now always stripped before upload. * mount.s3ql now has a --upstart option so that it can easily run as an upstart job. An example job definition is included in contrib/s3ql.conf. * s3qlctrl now has an 'upload-meta' command to trigger a metadata upload while the file system is mounted. * Fixed a bug that preserved old data when truncating a file to zero. If you ever had fsck errors of the form Size of inode [ddd] ([filename]) does not agree with number of blocks then the affected files may contain additional, old data at the end that is not supposed to be there. 2010-10-30, S3QL 0.26 * Fixed a problem with the sftp backend failing because it tries to access a file locally. * Various minor bugfixes 2010-09-28, S3QL 0.25 * A critical bug that could cause backups to be deleted too early and potentially break the whole backup strategy has been fixed in contrib/expire_backups.py. The new version has changed semantics that and also copes significantly better when backups are made in irregular time intervals. * S3QL should now respond with more consistent performance when accessing the file system while s3qlcp, s3qllock or s3qlrm is running at the same time. * When enabling debug output for the `UploadManager` module, S3QL now logs detailed messages about de-duplication, upload and compression performance. 2010-09-18, S3QL 0.24 * Fixed a deadlock that could cause the mount.s3ql process to hang around after umount. * Fixed a bug that caused S3QL to consider all downloaded blocks as dirty and resulted in unneccessary checksum calculations on expiration. * s3qlctrl can now change the log level at runtime. * s3qladm delete now also deletes any local stale cache files. * Periodic metadata upload can now be disabled completely by specifying an interval of zero. 2010-09-03, S3QL 0.23 * Fixed problem with global_lock.py not being installed by setup.py 2010-08-31, S3QL 0.22 * Fixed a bug that could cause file system errors when calling s3qlrm on a lot of really large files. * The sftp backend is now significantly faster, thanks to skyang2009 for the patch. * s3qlctrl can now change the cache size of a mounted file system. 2010-08-09, S3QL 0.21 * The metadata is now only uploaded if there have been any changes. * mount.s3ql now supports several parallel compression and encryption threads with the --compression-threads option. * S3QL now supports "immutable directories". 
This important new feature can be used to protect backups from modification after they have completed. See the User's Guide for details. * Using S3 RRS is now deprecated, see User's Guide for details. * fsck.s3ql now moves damaged files due to data lost by a backend into /lost+found/ * expire_backups is no longer installed automatically and can now be found in the contrib/ directory. * S3QL now comes with sample backup script in contrib/s3_backup.sh * Added contrib/pcp.py, an rsync wrapper to recursively copy directory trees with several parallel rsync processes. 2010-08-01, S3QL 0.20.1 * Hotfix for s3qladm upgrade. 2010-08-01, S3QL 0.20 * Added contrib/make_dummy.py. This script creates a dummy copy of a bucket that contains only the file system metadata. The resulting dummy can be used for debugging. * Mounting with the local and sftp backends is now significantly faster, because the object tree is no longer traversed completely. * Fixed a race condition that occasionally produced errors when deleting large files (spanning multiple blocks). * The file system now stays responsive to other requests while compressing blocks. * s3qlstat is now much faster since the size after de-duplication does not need to be queried from the backend anymore but is stored in the metadata. 2010-07-25, S3QL 0.19 * Fixed various smaller bugs, see Mercurial changelog for details. 2010-07-11, S3QL 0.18 * Added --strip-meta option to mount.s3ql * Added --metadata-upload-interval option to mount.s3ql. This allows to periodically upload updated metadata even while the file system is mounted. * stat.s3ql, tune.s3ql and cp.s3ql have been renamed to s3qlstat, s3qladm and s3qlcp respectively. * sftp backend is working again * Added the s3qlctrl command. 2010-06-29, S3QL 0.17 * The local and sftp backends now spread their files into different sub-directories. * Amazon S3 Reduced Redundancy Storage (RRS) is now supported. To use it, use a storage url of the form s3rr:// instead of s3://. 2010-06-15, S3QL 0.16 * Fixed problem with readdir() returning entries twice or skipping them if files are added or removed while readdir() is in progress. * Fixed build problem on Gentoo. * fsck.s3ql now does a rudimentary check if the file system is still mounted to prevent checking a mounted file system. 2010-05-28, S3QL 0.15 * Fixed test cases to handle systems with low system clock resolution. * Corrected installation instructions for Debian * mount.s3ql: instead of --bzip2, --zlib and --no-compress, there is now just one option --compress=. * File system metadata is now uploaded with all indices. This makes mounting the file system much faster. Only if LZMA compression has been chosen, indices are stripped for storage and regenerated on the next mount. 2010-05-14, S3QL 0.14 * fsck now detects if a cached block is dirty and commits only dirty blocks to the backend. * Installation in Debian and Ubuntu is now much simpler, it can be done completely with aptitude. 2010-05-04, S3QL 0.13 * S3QL now works with Ubuntu Karmic / 10.04 LTS * The test command no longer produces bogus error messages after all tests have completed. * The umount.s3ql command now properly handles the 'fuser' output with Kernel 2.6.32 (before it always refused to unmount, claiming that the mount point was busy). * The compression can now be specified independently from the encryption, so it is possible to have an unencrypted, but compressed file system. * Apart from zlib, bzip2 and lzma, data can now also be stored without any compression. 
* S3QL no longer emulates the . and .. directory entries since this is not required by POSIX and makes the code much simpler. This also means that the st_nlink value of a directory is not longer 2 + number of subdirectories. * Fixed a bug that caused files to be listed with wrong sizes under certain conditions. * Added `expire_backups` command and instructions for a simple backup solution using S3QL and rsync. 2010-04-27, S3QL 0.12 * fsck.s3ql now accepts a --batch option to not prompt for any user input and requires a --force option to check the file system even if it is marked as clean. * Fixed a bug in cp.s3ql that caused incorrect st_nlink values in the copy. * fsck.s3ql has been even more optimized. * Fixed a problem with running out of file descriptors when lots of objects are deleted. * Bucket encryption passwords can now also be stored in the ~/.s3ql/authinfo file. * mount.s3ql doesn't complain any more if it receives any of the standard mount(8) mount options. 2010-04-24, S3QL 0.11 * S3QL file system can now be mounted from /etc/fstab * Metadata now takes significantly less space. * Files with extended attributes can now be deleted. * Extended attributes can now be listed. * It is now possible to choose between zlib, BZip2 and LZMA compression every time the file system is mounted. * Added contrib/benchmark.py to find out optimal compression method for given network bandwidth. * fsck.s3ql no longer uses mknod(3) , since that lead to errors on e.g. NFS mounted volumes under Fedora. * File access, modification and inode change times before 1972 are now supported. * Fixed a deadlock when removing or overwriting files. 2010-04-21, S3QL 0.10 * S3QL now depends on FUSE version 2.8 * Cached blocks are now flushed to the backend as soon as they have not been accessed for more than 10 seconds. * The setup script now automatically checks and, if necessary, installs the Python module dependencies. * mkfs.s3ql now creates compressed and encrypted file systems by default. * Data is now compressed with LZMA instead of Bzip2. * Data compression and data upload is now done in parallel, so the full network can bandwidth is continuously without breaks for data compression. * fsck.s3ql is now several orders of magnitude faster. The code has been optimized and some special database indices are now precalculated. * When running cp.s3ql, the attributes of the target directory are now immediately refreshed (so that e.g. ls immediately shows the correct number of hard links). * File removal is now significantly faster, since the network transactions are carried out asynchronously as part of the cache expiration. * mount.s3ql no longer tries to create files with mknod(), since that lead to errors on NFS mounted volumes under Fedora. * This releases includes a lot of new code, so it may be slightly more beta-ish than usual. 2010-04-04, S3QL 0.9 * The --cachedir, --logfile, --awskey and --credfile options are gone and have been replaced by --homedir. * ~/.awssecret is no longer read, instead there is a common file with authentication data for all backends in ~/.s3ql/authinfo * The syntax for mounting S3 buckets is now s3://bucketname instead of just the bucket name * There is now an SFTP backend. Thanks to Ron Knapp for most of the code. 2010-03-07, S3QL 0.8 * S3QL now uses Python's default unittest.py instead of shipping its own. * Most documentation has been moved from the Wiki into the tarball, so that it always corresponds to the correct version. * setuptools is now used for installation. 
This allows .egg creation, dependency resolution and generation of the HTML documentation. * The S3 part of boto has been integrated into S3QL. 2010-02-22, beta7 * mount.s3ql no longer chdirs into / when daemonizing but into the cache directory. * Added example backup script in contrib/backup.py * tune.s3ql --change-passphrase is working again * Added testcase for tune.s3ql --change-passphrase * Internal S3 errors are now handled properly by retrying the upload. 2010-02-19, beta6 * tune.s3ql --copy is now *really* working properly (brrr) 2010-02-19, beta5 * mkfs.s3ql now makes strict checks on the bucket name * Removed obsolete mount.s3ql_local from distribution * tune.s3ql --copy is now working properly 2010-02-19, beta4 * tune.s3ql now has a --copy option to copy buckets * Storage location for new buckets can be specified in mkfs.s3ql and tune.s3ql with --s3-location * Fixed a deadlock in mount.s3ql when using local buckets without --fg * The local: bucket specifier is no longer artificially slow. * mount.s3ql: --allow_other is now working 2010-02-04, beta3 * Fixed a deadlock that caused umount.s3ql to hang indefinitely when mount was called without --fg * The '.' and '..' directory entries are no longer explicitly stored in the database. * Rewrote the mechanism to handle delayed updates. Now it no longer depends on a small object being propagated relatively fast, but is able to handle arbitrary network outages. s3ql-2.26/setup.py0000755000175000017500000002445613227212420015532 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 # -*- coding: utf-8 -*- ''' setup.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' # Python version check import sys if sys.version_info < (3,3): raise SystemExit('Python version is %d.%d.%d, but S3QL requires Python 3.3 or newer' % sys.version_info[:3]) try: import setuptools except ImportError: raise SystemExit('Setuptools package not found. Please install from ' 'https://pypi.python.org/pypi/setuptools') from setuptools import Extension from distutils.version import LooseVersion import os import subprocess import re from glob import glob import faulthandler faulthandler.enable() basedir = os.path.abspath(os.path.dirname(sys.argv[0])) DEVELOPER_MODE = os.path.exists(os.path.join(basedir, 'MANIFEST.in')) if DEVELOPER_MODE: print('MANIFEST.in exists, running in developer mode') # Add S3QL sources sys.path.insert(0, os.path.join(basedir, 'src')) sys.path.insert(0, os.path.join(basedir, 'util')) import s3ql class build_docs(setuptools.Command): description = 'Build Sphinx documentation' user_options = [ ('fresh-env', 'E', 'discard saved environment'), ('all-files', 'a', 'build all files'), ] boolean_options = ['fresh-env', 'all-files'] def initialize_options(self): self.fresh_env = False self.all_files = False def finalize_options(self): pass def run(self): try: from sphinx.application import Sphinx from docutils.utils import SystemMessage except ImportError: raise SystemExit('This command requires Sphinx to be installed.') from None fix_docutils() dest_dir = os.path.join(basedir, 'doc') src_dir = os.path.join(basedir, 'rst') confoverrides = {} confoverrides['version'] = s3ql.VERSION confoverrides['release'] = s3ql.RELEASE for builder in ('html', 'latex', 'man'): print('Running %s builder...' 
% builder) self.mkpath(os.path.join(dest_dir, builder)) app = Sphinx(srcdir=src_dir, confdir=src_dir, outdir=os.path.join(dest_dir, builder), doctreedir=os.path.join(dest_dir, 'doctrees'), buildername=builder, confoverrides=confoverrides, freshenv=self.fresh_env) self.fresh_env = False self.all_files = False try: if self.all_files: app.builder.build_all() else: app.builder.build_update() except SystemMessage as err: print('reST markup error:', err.args[0].encode('ascii', 'backslashreplace'), file=sys.stderr) # These shouldn't be installed by default for name in ('expire_backups.1', 'pcp.1'): os.rename(os.path.join(dest_dir, 'man', name), os.path.join(basedir, 'contrib', name)) print('Running pdflatex...') for _ in range(3): with open('/dev/null', 'wb') as null: subprocess.check_call(['pdflatex', '-interaction', 'batchmode', 'manual.tex'], cwd=os.path.join(dest_dir, 'latex'), stdout=null) os.rename(os.path.join(dest_dir, 'latex', 'manual.pdf'), os.path.join(dest_dir, 'manual.pdf')) def main(): with open(os.path.join(basedir, 'README.rst'), 'r') as fh: long_desc = fh.read() compile_args = ['-Wall', '-Wextra', '-Wconversion', '-Wsign-compare'] # Value-changing conversions should always be explicit. compile_args.append('-Werror=conversion') # Note that (i > -1) is false if i is unsigned (-1 will be converted to # a large positive value). We certainly don't want to do this by # accident. compile_args.append('-Werror=sign-compare') # Enable all fatal warnings only when compiling from Mercurial tip. # (otherwise we break forward compatibility because compilation with newer # compiler may fail if additional warnings are added) if DEVELOPER_MODE: if os.environ.get('CI') != 'true': compile_args.append('-Werror') compile_args.append('-Wfatal-errors') compile_args.append('-Wno-unused-function') required_pkgs = ['apsw >= 3.7.0', 'pycrypto', 'requests', 'defusedxml', 'dugong >= 3.4, < 4.0', 'llfuse >= 1.0, < 2.0' ] setuptools.setup( name='s3ql', zip_safe=True, version=s3ql.VERSION, description='a full-featured file system for online data storage', long_description=long_desc, author='Nikolaus Rath', author_email='Nikolaus@rath.org', url='https://bitbucket.org/nikratio/s3ql/', download_url='https://bitbucket.org/nikratio/s3ql/downloads', license='GPLv3', classifiers=['Development Status :: 5 - Production/Stable', 'Environment :: No Input/Output (Daemon)', 'Environment :: Console', 'License :: OSI Approved :: GNU Library or Lesser General Public License (GPLv3)', 'Topic :: Internet', 'Operating System :: POSIX', 'Topic :: System :: Archiving'], platforms=[ 'POSIX', 'UNIX', 'Linux' ], keywords=['FUSE', 'backup', 'archival', 'compression', 'encryption', 'deduplication', 'aws', 's3' ], package_dir={'': 'src'}, packages=setuptools.find_packages('src'), provides=['s3ql'], ext_modules=[Extension('s3ql.deltadump', ['src/s3ql/deltadump.c'], extra_compile_args=compile_args, extra_link_args=[ '-lsqlite3'])], data_files=[ ('share/man/man1', [ os.path.join('doc/man/', x) for x in glob(os.path.join(basedir, 'doc', 'man', '*.1')) ]) ], entry_points={ 'console_scripts': [ 'mkfs.s3ql = s3ql.mkfs:main', 'fsck.s3ql = s3ql.fsck:main', 'mount.s3ql = s3ql.mount:main', 'umount.s3ql = s3ql.umount:main', 's3qlcp = s3ql.cp:main', 's3qlstat = s3ql.statfs:main', 's3qladm = s3ql.adm:main', 's3qlctrl = s3ql.ctrl:main', 's3qllock = s3ql.lock:main', 's3qlrm = s3ql.remove:main', 's3ql_oauth_client = s3ql.oauth_client:main', 's3ql_verify = s3ql.verify:main', ] }, install_requires=required_pkgs, cmdclass={'upload_docs': upload_docs, 
'build_cython': build_cython, 'build_sphinx': build_docs }, command_options={ 'sdist': { 'formats': ('setup.py', 'bztar') } }, ) class build_cython(setuptools.Command): user_options = [] boolean_options = [] description = "Compile .pyx to .c" def initialize_options(self): pass def finalize_options(self): # Attribute defined outside init #pylint: disable=W0201 self.extensions = self.distribution.ext_modules def run(self): cython = None for c in ('cython3', 'cython'): try: version = subprocess.check_output([c, '--version'], universal_newlines=True, stderr=subprocess.STDOUT) cython = c except FileNotFoundError: pass if cython is None: raise SystemExit('Cython needs to be installed for this command') from None hit = re.match('^Cython version (.+)$', version) if not hit or LooseVersion(hit.group(1)) < "0.17": raise SystemExit('Need Cython 0.17 or newer, found ' + version) cmd = [cython, '-Wextra', '-f', '-3', '-X', 'embedsignature=True' ] if DEVELOPER_MODE: cmd.append('-Werror') # Work around http://trac.cython.org/cython_trac/ticket/714 cmd += ['-X', 'warn.maybe_uninitialized=False' ] for extension in self.extensions: for file_ in extension.sources: (file_, ext) = os.path.splitext(file_) path = os.path.join(basedir, file_) if ext != '.c': continue if os.path.exists(path + '.pyx'): print('compiling %s to %s' % (file_ + '.pyx', file_ + ext)) if subprocess.call(cmd + [path + '.pyx']) != 0: raise SystemExit('Cython compilation failed') class upload_docs(setuptools.Command): user_options = [] boolean_options = [] description = "Upload documentation" def initialize_options(self): pass def finalize_options(self): pass def run(self): subprocess.check_call(['rsync', '-aHv', '--del', os.path.join(basedir, 'doc', 'html') + '/', 'ebox.rath.org:/srv/www.rath.org/s3ql-docs/']) subprocess.check_call(['rsync', '-aHv', '--del', os.path.join(basedir, 'doc', 'manual.pdf'), 'ebox.rath.org:/srv/www.rath.org/s3ql-docs/']) def fix_docutils(): '''Work around https://bitbucket.org/birkenfeld/sphinx/issue/1154/''' import docutils.parsers from docutils.parsers import rst old_getclass = docutils.parsers.get_parser_class # Check if bug is there try: old_getclass('rst') except AttributeError: pass else: return def get_parser_class(parser_name): """Return the Parser class from the `parser_name` module.""" if parser_name in ('rst', 'restructuredtext'): return rst.Parser else: return old_getclass(parser_name) docutils.parsers.get_parser_class = get_parser_class assert docutils.parsers.get_parser_class('rst') is rst.Parser if __name__ == '__main__': main() s3ql-2.26/src/0000775000175000017500000000000013246754372014615 5ustar nikrationikratio00000000000000s3ql-2.26/src/s3ql/0000775000175000017500000000000013246754372015477 5ustar nikrationikratio00000000000000s3ql-2.26/src/s3ql/fsck.py0000644000175000017500000017200513237312336016770 0ustar nikrationikratio00000000000000''' fsck.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from .logging import logging, setup_logging, QuietError from . 
import CURRENT_FS_REV, BUFSIZE, CTRL_INODE, ROOT_INODE from .backends.common import NoSuchObject from .backends.comprenc import ComprencBackend from .backends.local import Backend as LocalBackend from .common import (inode_for_path, sha256_fh, get_path, get_backend_cachedir, get_seq_no, is_mounted, get_backend, load_params, save_params, time_ns) from .database import NoSuchRowError, Connection from .metadata import create_tables, dump_and_upload_metadata, download_metadata from .parse_args import ArgumentParser from os.path import basename import apsw import os import re import shutil import itertools import stat import sys import textwrap import time import atexit log = logging.getLogger(__name__) S_IFMT = (stat.S_IFDIR | stat.S_IFREG | stat.S_IFSOCK | stat.S_IFBLK | stat.S_IFCHR | stat.S_IFIFO | stat.S_IFLNK) class Fsck(object): def __init__(self, cachedir_, backend_, param, conn): self.cachedir = cachedir_ self.backend = backend_ self.expect_errors = False self.found_errors = False self.uncorrectable_errors = False self.max_obj_size = param['max_obj_size'] self.conn = conn # Set of blocks that have been unlinked by check_cache. # check_block_refcounts() will not report errors if these blocks still # exist even though they have refcount=0 self.unlinked_blocks = set() # Similarly for objects self.unlinked_objects = set() # Set of inodes that have been moved to lost+found (so that we # don't move them there repeatedly) self.moved_inodes = set() def check(self): """Check file system Sets instance variable `found_errors`. """ # Create indices required for reference checking log.info('Creating temporary extra indices...') for idx in ('tmp1', 'tmp2', 'tmp3', 'tmp4', 'tmp5'): self.conn.execute('DROP INDEX IF EXISTS %s' % idx) self.conn.execute('CREATE INDEX tmp1 ON blocks(obj_id)') self.conn.execute('CREATE INDEX tmp2 ON inode_blocks(block_id)') self.conn.execute('CREATE INDEX tmp3 ON contents(inode)') self.conn.execute('CREATE INDEX tmp4 ON contents(name_id)') self.conn.execute('CREATE INDEX tmp5 ON ext_attributes(name_id)') try: self.check_lof() self.check_cache() self.check_names_refcount() self.check_contents_name() self.check_contents_inode() self.check_contents_parent_inode() self.check_objects_temp() self.check_objects_refcount() self.check_objects_id() self.check_objects_size() self.check_blocks_obj_id() self.check_blocks_refcount() self.check_blocks_checksum() self.check_inode_blocks_block_id() self.check_inode_blocks_inode() self.check_inodes_refcount() self.check_inodes_size() self.check_ext_attributes_name() self.check_ext_attributes_inode() self.check_symlinks_inode() self.check_loops() self.check_unix() self.check_foreign_keys() finally: log.info('Dropping temporary indices...') for idx in ('tmp1', 'tmp2', 'tmp3', 'tmp4', 'tmp5'): self.conn.execute('DROP INDEX %s' % idx) def log_error(self, *a, **kw): '''Log file system error if not expected''' if self.expect_errors: return log.debug(*a, **kw) else: return log.warning(*a, **kw) def check_foreign_keys(self): '''Check for referential integrity Checks that all foreign keys in the SQLite tables actually resolve. This is necessary, because we disable runtime checking by SQLite for performance reasons. Note: any problems should have already been caught by the more specific checkers. 
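        The check runs a LEFT JOIN for every relation reported by PRAGMA
        foreign_key_list: any source row whose referenced key has no match in
        the destination table is reported and flagged as uncorrectable.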
''' log.info("Checking referential integrity...") for (table,) in self.conn.query("SELECT name FROM sqlite_master WHERE type='table'"): for row in self.conn.query('PRAGMA foreign_key_list(%s)' % table): sql_objs = { 'src_table': table, 'dst_table': row[2], 'src_col': row[3], 'dst_col': row[4] } for (val,) in self.conn.query('SELECT %(src_table)s.%(src_col)s ' 'FROM %(src_table)s LEFT JOIN %(dst_table)s ' 'ON %(src_table)s.%(src_col)s = %(dst_table)s.%(dst_col)s ' 'WHERE %(dst_table)s.%(dst_col)s IS NULL ' 'AND %(src_table)s.%(src_col)s IS NOT NULL' % sql_objs): self.found_errors = True sql_objs['val'] = val self.log_error('%(src_table)s.%(src_col)s refers to non-existing key %(val)s ' 'in %(dst_table)s.%(dst_col)s, deleting.', sql_objs) log.error('This should not happen, please report a bug.') self.uncorrectable_errors = True def check_cache(self): """Commit uncommitted cache files""" log.info("Checking for dirty cache objects...") if not os.path.exists(self.cachedir): return candidates = os.listdir(self.cachedir) if sys.stdout.isatty(): stamp1 = 0 else: stamp1 = float('inf') for (i, filename) in enumerate(candidates): stamp2 = time.time() if stamp2 - stamp1 > 1: sys.stdout.write('\r..processed %d/%d files (%d%%)..' % (i, len(candidates), i/len(candidates)*100)) sys.stdout.flush() stamp1 = stamp2 match = re.match('^(\\d+)-(\\d+)$', filename) if match: inode = int(match.group(1)) blockno = int(match.group(2)) else: raise RuntimeError('Strange file in cache directory: %s' % filename) # Calculate block checksum with open(os.path.join(self.cachedir, filename), "rb") as fh: size = os.fstat(fh.fileno()).st_size hash_should = sha256_fh(fh) log.debug('%s has checksum %s', filename, hash_should) # Check if stored block has same checksum try: block_id = self.conn.get_val('SELECT block_id FROM inode_blocks ' 'WHERE inode=? AND blockno=?', (inode, blockno,)) hash_is = self.conn.get_val('SELECT hash FROM blocks WHERE id=?', (block_id,)) except NoSuchRowError: hash_is = None log.debug('Inode %d, block %d has checksum %s', inode, blockno, hash_is) if hash_should == hash_is: os.unlink(os.path.join(self.cachedir, filename)) continue self.found_errors = True self.log_error("Writing dirty block %d of inode %d to backend", blockno, inode) hash_ = hash_should try: (block_id, obj_id) = self.conn.get_row('SELECT id, obj_id FROM blocks WHERE hash=?', (hash_,)) except NoSuchRowError: obj_id = self.conn.rowid('INSERT INTO objects (refcount, size) VALUES(1, -1)') block_id = self.conn.rowid('INSERT INTO blocks (refcount, hash, obj_id, size) ' 'VALUES(?, ?, ?, ?)', (1, hash_, obj_id, size)) def do_write(obj_fh): with open(os.path.join(self.cachedir, filename), "rb") as fh: shutil.copyfileobj(fh, obj_fh, BUFSIZE) return obj_fh obj_size = self.backend.perform_write(do_write, 's3ql_data_%d' % obj_id).get_obj_size() self.conn.execute('UPDATE objects SET size=? WHERE id=?', (obj_size, obj_id)) else: self.conn.execute('UPDATE blocks SET refcount=refcount+1 WHERE id=?', (block_id,)) try: old_block_id = self.conn.get_val('SELECT block_id FROM inode_blocks ' 'WHERE inode=? AND blockno=?', (inode, blockno)) except NoSuchRowError: self.conn.execute('INSERT INTO inode_blocks (block_id, inode, blockno) VALUES(?,?,?)', (block_id, inode, blockno)) else: self.conn.execute('UPDATE inode_blocks SET block_id=? WHERE inode=? 
AND blockno=?', (block_id, inode, blockno)) # We just decrease the refcount, but don't take any action # because the reference count might be wrong self.conn.execute('UPDATE blocks SET refcount=refcount-1 WHERE id=?', (old_block_id,)) self.unlinked_blocks.add(old_block_id) os.unlink(os.path.join(self.cachedir, filename)) def check_lof(self): """Ensure that there is a lost+found directory""" log.info('Checking lost+found...') now_ns = time_ns() try: (inode_l, name_id) = self.conn.get_row("SELECT inode, name_id FROM contents_v " "WHERE name=? AND parent_inode=?", (b"lost+found", ROOT_INODE)) except NoSuchRowError: self.found_errors = True self.log_error("Recreating missing lost+found directory") inode_l = self.create_inode(mode=stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR, atime_ns=now_ns, ctime_ns=now_ns, mtime_ns=now_ns, refcount=1) self.conn.execute("INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)", (self._add_name(b"lost+found"), inode_l, ROOT_INODE)) mode = self.conn.get_val('SELECT mode FROM inodes WHERE id=?', (inode_l,)) if not stat.S_ISDIR(mode): self.found_errors = True self.log_error('/lost+found is not a directory! Old entry will be saved as ' '/lost+found/inode-%s*', inode_l) # We leave the old inode unassociated, so that it will be added # to lost+found later on. inode_l = self.create_inode(mode=stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR, atime_ns=now_ns, ctime_ns=now_ns, mtime_ns=now_ns, refcount=1) self.conn.execute('UPDATE contents SET inode=? WHERE name_id=? AND parent_inode=?', (inode_l, name_id, ROOT_INODE)) def check_contents_name(self): """Check contents.name_id""" log.info('Checking contents (names)...') for (rowid, name_id, inode_p, inode) in self.conn.query('SELECT contents.rowid, name_id, parent_inode, inode ' 'FROM contents LEFT JOIN names ' 'ON name_id = names.id WHERE names.id IS NULL'): self.found_errors = True try: path = get_path(inode_p, self.conn)[1:] except NoSuchRowError: newname = ('-%d' % inode).encode() else: newname = escape(path) + ('-%d' % inode).encode() (id_p_new, newname) = self.resolve_free(b"/lost+found", newname) self.log_error('Content entry for inode %d refers to non-existing name with id %d, ' 'moving to /lost+found/%s', inode, name_id, to_str(newname)) self.conn.execute('UPDATE contents SET name_id=?, parent_inode=? WHERE rowid=?', (self._add_name(newname), id_p_new, rowid)) def check_contents_parent_inode(self): """Check contents.parent_inode""" log.info('Checking contents (parent inodes)...') for (rowid, inode_p, name_id) in self.conn.query('SELECT contents.rowid, parent_inode, name_id ' 'FROM contents LEFT JOIN inodes ' 'ON parent_inode = inodes.id WHERE inodes.id IS NULL'): self.found_errors = True name = self.conn.get_val('SELECT name FROM names WHERE id = ?', (name_id,)) (id_p_new, newname) = self.resolve_free(b"/lost+found", ('[%d]-%s' % (inode_p, name)).encode()) self.log_error('Parent inode %d for "%s" vanished, moving to /lost+found', inode_p, to_str(name)) self._del_name(name_id) self.conn.execute('UPDATE contents SET name_id=?, parent_inode=? 
WHERE rowid=?', (self._add_name(newname), id_p_new, rowid)) def check_contents_inode(self): """Check contents.inode""" log.info('Checking contents (inodes)...') to_delete = list() for (rowid, inode_p, inode, name_id) in self.conn.query('SELECT contents.rowid, parent_inode, inode, ' 'name_id FROM contents LEFT JOIN inodes ' 'ON inode = inodes.id WHERE inodes.id IS NULL'): self.found_errors = True try: path = get_path(inode, self.conn)[1:] except NoSuchRowError: path = '[inode %d, parent %d]' % (inode, inode_p) self.log_error('Inode for %s vanished, deleting', to_str(path)) self._del_name(name_id) to_delete.append(rowid) for rowid in to_delete: self.conn.execute('DELETE FROM contents WHERE rowid=?', (rowid,)) def check_ext_attributes_name(self): """Check ext_attributes.name_id""" log.info('Checking extended attributes (names)...') for (rowid, name_id, inode) in self.conn.query('SELECT ext_attributes.rowid, name_id, inode ' 'FROM ext_attributes LEFT JOIN names ' 'ON name_id = names.id WHERE names.id IS NULL'): self.found_errors = True for (name, id_p) in self.conn.query('SELECT name, parent_inode ' 'FROM contents_v WHERE inode=?', (inode,)): path = get_path(id_p, self.conn, name) self.log_error('Extended attribute %d of %s refers to non-existing name %d, renaming..', rowid, to_str(path), name_id) while True: name_id = self._add_name('lost+found_%d' % rowid) if not self.conn.has_val("SELECT 1 FROM ext_attributes WHERE name_id=? AND inode=?", (name_id, inode)): self.conn.execute('UPDATE ext_attributes SET name_id=? WHERE rowid=?', (name_id, rowid)) break self._del_name('lost+found_%d' % rowid) rowid += 1 def check_ext_attributes_inode(self): """Check ext_attributes.inode""" log.info('Checking extended attributes (inodes)...') to_delete = list() for (rowid, inode, name_id) in self.conn.query('SELECT ext_attributes.rowid, inode, name_id ' 'FROM ext_attributes LEFT JOIN inodes ' 'ON inode = inodes.id WHERE inodes.id IS NULL'): self.found_errors = True self.log_error('Extended attribute %d refers to non-existing inode %d, deleting', rowid, inode) to_delete.append(rowid) self._del_name(name_id) for rowid in to_delete: self.conn.execute('DELETE FROM ext_attributes WHERE rowid=?', (rowid,)) def check_loops(self): """Ensure that all directories can be reached from root""" log.info('Checking directory reachability...') self.conn.execute('CREATE TEMPORARY TABLE loopcheck (inode INTEGER PRIMARY KEY, ' 'parent_inode INTEGER)') self.conn.execute('CREATE INDEX ix_loopcheck_parent_inode ON loopcheck(parent_inode)') self.conn.execute('INSERT INTO loopcheck (inode) ' 'SELECT parent_inode FROM contents GROUP BY parent_inode') self.conn.execute('UPDATE loopcheck SET parent_inode = ' '(SELECT contents.parent_inode FROM contents ' ' WHERE contents.inode = loopcheck.inode LIMIT 1)') self.conn.execute('CREATE TEMPORARY TABLE loopcheck2 (inode INTEGER PRIMARY KEY)') self.conn.execute('INSERT INTO loopcheck2 (inode) SELECT inode FROM loopcheck') def delete_tree(inode_p): for (inode,) in self.conn.query("SELECT inode FROM loopcheck WHERE parent_inode=?", (inode_p,)): delete_tree(inode) self.conn.execute('DELETE FROM loopcheck2 WHERE inode=?', (inode_p,)) root = ROOT_INODE while True: delete_tree(root) if not self.conn.has_val("SELECT 1 FROM loopcheck2"): break self.found_errors = True # Try obvious culprits first try: inode = self.conn.get_val('SELECT loopcheck2.inode FROM loopcheck2 JOIN contents ' 'ON loopcheck2.inode = contents.inode ' 'WHERE parent_inode = contents.inode LIMIT 1') except NoSuchRowError: inode = 
self.conn.get_val("SELECT inode FROM loopcheck2 ORDER BY inode ASC LIMIT 1") (name, name_id) = self.conn.get_row("SELECT name, name_id FROM contents_v " "WHERE inode=? LIMIT 1", (inode,)) (id_p, name) = self.resolve_free(b"/lost+found", name) self.log_error("Found unreachable filesystem entries, re-anchoring %s [%d] " "in /lost+found", to_str(name), inode) self.conn.execute('UPDATE contents SET parent_inode=?, name_id=? ' 'WHERE inode=? AND name_id=?', (id_p, self._add_name(name), inode, name_id)) self._del_name(name_id) self.conn.execute('UPDATE loopcheck SET parent_inode=? WHERE inode=?', (id_p, inode)) root = inode self.conn.execute("DROP TABLE loopcheck") self.conn.execute("DROP TABLE loopcheck2") def check_inodes_size(self): """Check inodes.size""" log.info('Checking inodes (sizes)...') self.conn.execute('CREATE TEMPORARY TABLE min_sizes ' '(id INTEGER PRIMARY KEY, min_size INTEGER NOT NULL)') try: self.conn.execute(''' INSERT INTO min_sizes (id, min_size) SELECT inode, MAX(blockno * ? + size) FROM inode_blocks JOIN blocks ON block_id == blocks.id GROUP BY inode''', (self.max_obj_size,)) self.conn.execute(''' CREATE TEMPORARY TABLE wrong_sizes AS SELECT id, size, min_size FROM inodes JOIN min_sizes USING (id) WHERE size < min_size''') for (id_, size_old, size) in self.conn.query('SELECT * FROM wrong_sizes'): self.found_errors = True self.log_error("Size of inode %d (%s) does not agree with number of blocks, " "setting from %d to %d", id_, to_str(get_path(id_, self.conn)), size_old, size) self.conn.execute("UPDATE inodes SET size=? WHERE id=?", (size, id_)) finally: self.conn.execute('DROP TABLE min_sizes') self.conn.execute('DROP TABLE IF EXISTS wrong_sizes') def check_inodes_refcount(self): """Check inodes.refcount""" log.info('Checking inodes (refcounts)...') self.conn.execute('CREATE TEMPORARY TABLE refcounts ' '(id INTEGER PRIMARY KEY, refcount INTEGER NOT NULL)') try: self.conn.execute('INSERT INTO refcounts (id, refcount) ' 'SELECT inode, COUNT(name_id) FROM contents GROUP BY inode') self.conn.execute(''' CREATE TEMPORARY TABLE wrong_refcounts AS SELECT id, refcounts.refcount, inodes.refcount FROM inodes LEFT JOIN refcounts USING (id) WHERE inodes.refcount != refcounts.refcount OR refcounts.refcount IS NULL''') for (id_, cnt, cnt_old) in self.conn.query('SELECT * FROM wrong_refcounts'): # No checks for root and control if id_ in (ROOT_INODE, CTRL_INODE): continue self.found_errors = True if cnt is None: (id_p, name) = self.resolve_free(b"/lost+found", ("inode-%d" % id_).encode()) self.log_error("Inode %d not referenced, adding as /lost+found/%s", id_, to_str(name)) self.conn.execute("INSERT INTO contents (name_id, inode, parent_inode) " "VALUES (?,?,?)", (self._add_name(basename(name)), id_, id_p)) self.conn.execute("UPDATE inodes SET refcount=? WHERE id=?", (1, id_)) else: self.log_error("Inode %d (%s) has wrong reference count, setting from %d to %d", id_, to_str(get_path(id_, self.conn)), cnt_old, cnt) self.conn.execute("UPDATE inodes SET refcount=? 
WHERE id=?", (cnt, id_)) finally: self.conn.execute('DROP TABLE refcounts') self.conn.execute('DROP TABLE IF EXISTS wrong_refcounts') def check_blocks_obj_id(self): """Check blocks.obj_id""" log.info('Checking blocks (referenced objects)...') for (block_id, obj_id) in self.conn.query('SELECT blocks.id, obj_id FROM blocks LEFT JOIN objects ' 'ON obj_id = objects.id WHERE objects.id IS NULL'): self.found_errors = True self.log_error('Block %d refers to non-existing object %d', block_id, obj_id) for (inode,) in self.conn.query('SELECT inode FROM inode_blocks WHERE block_id = ? ', (block_id,)): if inode in self.moved_inodes: continue self.moved_inodes.add(inode) affected_entries = list(self.conn.query('SELECT name, name_id, parent_inode ' 'FROM contents_v WHERE inode=?', (inode,))) for (name, name_id, id_p) in affected_entries: path = get_path(id_p, self.conn, name) self.log_error("File may lack data, moved to /lost+found: %s", to_str(path)) (lof_id, newname) = self.resolve_free(b"/lost+found", escape(path)) self.conn.execute('UPDATE contents SET name_id=?, parent_inode=? ' 'WHERE name_id=? AND parent_inode=?', (self._add_name(newname), lof_id, name_id, id_p)) self._del_name(name_id) self.conn.execute('DELETE FROM inode_blocks WHERE block_id=?', (block_id,)) self.conn.execute("DELETE FROM blocks WHERE id=?", (block_id,)) def check_inode_blocks_inode(self): """Check inode_blocks.inode""" log.info('Checking inode-block mapping (inodes)...') to_delete = list() for (rowid, inode, block_id) in self.conn.query('SELECT inode_blocks.rowid, inode, block_id ' 'FROM inode_blocks LEFT JOIN inodes ' 'ON inode = inodes.id WHERE inodes.id IS NULL'): self.found_errors = True self.log_error('Inode-block mapping %d refers to non-existing inode %d, deleting', rowid, inode) to_delete.append(rowid) self.unlinked_blocks.add(block_id) for rowid in to_delete: self.conn.execute('DELETE FROM inode_blocks WHERE rowid=?', (rowid,)) def check_inode_blocks_block_id(self): """Check inode_blocks.block_id""" log.info('Checking inode-block mapping (blocks)...') to_delete = list() for (rowid, block_id, inode) in self.conn.query('SELECT inode_blocks.rowid, block_id, inode FROM inode_blocks ' 'LEFT JOIN blocks ON block_id = blocks.id ' 'WHERE blocks.id IS NULL'): self.found_errors = True self.log_error('Inode-block mapping for inode %d refers to non-existing block %d', inode, block_id) to_delete.append(rowid) if inode in self.moved_inodes: continue self.moved_inodes.add(inode) affected_entries = list(self.conn.query('SELECT name, name_id, parent_inode ' 'FROM contents_v WHERE inode=?', (inode,))) for (name, name_id, id_p) in affected_entries: path = get_path(id_p, self.conn, name) self.log_error("File may lack data, moved to /lost+found: %s", to_str(path)) (lof_id, newname) = self.resolve_free(b"/lost+found", escape(path)) self.conn.execute('UPDATE contents SET name_id=?, parent_inode=? ' 'WHERE name_id=? 
AND parent_inode=?', (self._add_name(newname), lof_id, name_id, id_p)) self._del_name(name_id) for rowid in to_delete: self.conn.execute('DELETE FROM inode_blocks WHERE rowid=?', (rowid,)) def check_symlinks_inode(self): """Check symlinks.inode""" log.info('Checking symlinks (inodes)...') to_delete = list() for (rowid, inode) in self.conn.query('SELECT symlink_targets.rowid, inode FROM symlink_targets ' 'LEFT JOIN inodes ON inode = inodes.id WHERE inodes.id IS NULL'): self.found_errors = True self.log_error('Symlink %d refers to non-existing inode %d, deleting', rowid, inode) to_delete.append(rowid) for rowid in to_delete: self.conn.execute('DELETE FROM symlink_targets WHERE rowid=?', (rowid,)) def check_blocks_refcount(self): """Check blocks.refcount""" log.info('Checking blocks (refcounts)...') self.conn.execute('CREATE TEMPORARY TABLE refcounts ' '(id INTEGER PRIMARY KEY, refcount INTEGER NOT NULL)') try: self.conn.execute(''' INSERT INTO refcounts (id, refcount) SELECT block_id, COUNT(blockno) FROM inode_blocks GROUP BY block_id ''') self.conn.execute(''' CREATE TEMPORARY TABLE wrong_refcounts AS SELECT id, refcounts.refcount, blocks.refcount, obj_id FROM blocks LEFT JOIN refcounts USING (id) WHERE blocks.refcount != refcounts.refcount OR refcounts.refcount IS NULL''') for (id_, cnt, cnt_old, obj_id) in self.conn.query('SELECT * FROM wrong_refcounts'): if cnt is None and id_ in self.unlinked_blocks: # Block was unlinked by check_cache and can now really be # removed (since we have checked that there are truly no # other references) self.conn.execute('DELETE FROM blocks WHERE id=?', (id_,)) # We can't remove associated objects yet, because their refcounts # might be wrong, too. self.conn.execute('UPDATE objects SET refcount=refcount-1 WHERE id=?', (obj_id,)) self.unlinked_objects.add(obj_id) elif cnt is None: self.found_errors = True (id_p, name) = self.resolve_free(b"/lost+found", ("block-%d" % id_).encode()) self.log_error("Block %d not referenced, adding as /lost+found/%s", id_, to_str(name)) now_ns = time_ns() size = self.conn.get_val('SELECT size FROM blocks WHERE id=?', (id_,)) inode = self.create_inode(mode=stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR, mtime_ns=now_ns, atime_ns=now_ns, ctime_ns=now_ns, refcount=1, size=size) self.conn.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)', (inode, 0, id_)) self.conn.execute("INSERT INTO contents (name_id, inode, parent_inode) VALUES (?,?,?)", (self._add_name(basename(name)), inode, id_p)) self.conn.execute("UPDATE blocks SET refcount=? WHERE id=?", (1, id_)) else: self.found_errors = True self.log_error("Block %d has wrong reference count, setting from %d to %d", id_, cnt_old, cnt) self.conn.execute("UPDATE blocks SET refcount=? WHERE id=?", (cnt, id_)) finally: self.conn.execute('DROP TABLE refcounts') self.conn.execute('DROP TABLE IF EXISTS wrong_refcounts') def check_blocks_checksum(self): """Check blocks.hash""" log.info('Checking blocks (checksums)...') for (block_id, obj_id) in list(self.conn.query('SELECT id, obj_id FROM blocks ' 'WHERE hash IS NULL')): self.found_errors = True # This should only happen when there was an error during upload, # so the object must not have been stored correctly. We cannot # just recalculate the hash for the block, because then we may # be modifying the contents of the inode that refers to this # block. 
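            # Dropping the block and its object here is safe: any inode_blocks
            # entries that still reference the deleted block are picked up later
            # by check_inode_blocks_block_id(), which moves the affected files
            # to /lost+found.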
self.log_error("No checksum for block %d, removing from table...", block_id) # At the moment, we support only one block per object assert self.conn.get_val('SELECT refcount FROM objects WHERE id=?', (obj_id,)) == 1 self.conn.execute('DELETE FROM blocks WHERE id=?', (block_id,)) self.conn.execute('DELETE FROM objects WHERE id=?', (obj_id,)) def create_inode(self, mode, uid=os.getuid(), gid=os.getgid(), mtime_ns=None, atime_ns=None, ctime_ns=None, refcount=None, size=0): '''Create inode''' id_ = self.conn.rowid('INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,' 'refcount,size) VALUES (?,?,?,?,?,?,?,?)', (mode, uid, gid, mtime_ns, atime_ns, ctime_ns, refcount, size)) return id_ def check_names_refcount(self): """Check names.refcount""" log.info('Checking names (refcounts)...') self.conn.execute('CREATE TEMPORARY TABLE refcounts ' '(id INTEGER PRIMARY KEY, refcount INTEGER NOT NULL)') try: self.conn.execute('INSERT INTO refcounts (id, refcount) ' 'SELECT id, 0 FROM names') self.conn.execute('UPDATE refcounts SET refcount=' '(SELECT COUNT(name_id) FROM contents WHERE name_id = refcounts.id)' '+ (SELECT COUNT(name_id) FROM ext_attributes ' ' WHERE name_id = refcounts.id)') self.conn.execute(''' CREATE TEMPORARY TABLE wrong_refcounts AS SELECT id, refcounts.refcount, names.refcount FROM names LEFT JOIN refcounts USING (id) WHERE names.refcount != refcounts.refcount OR refcounts.refcount IS NULL''') for (id_, cnt, cnt_old) in self.conn.query('SELECT * FROM wrong_refcounts'): self.found_errors = True if cnt is None: self.log_error("Name %d not referenced, removing (old refcount: %d)", id_, cnt_old) self.conn.execute('DELETE FROM names WHERE id=?', (id_,)) else: self.log_error("Name %d has wrong reference count, setting from %d to %d", id_, cnt_old, cnt) self.conn.execute("UPDATE names SET refcount=? WHERE id=?", (cnt, id_)) finally: self.conn.execute('DROP TABLE refcounts') self.conn.execute('DROP TABLE IF EXISTS wrong_refcounts') def check_unix(self): """Check if file systems for agreement with UNIX conventions This means: - Only directories should have child entries - Only regular files should have data blocks and a size - Only symlinks should have a target - Only devices should have a device number - symlink size is length of target - names are not longer than 255 bytes - All directory entries have a valid mode Note that none of this is enforced by S3QL. However, as long as S3QL only communicates with the UNIX FUSE module, none of the above should happen (and if it does, it would probably confuse the system quite a lot). """ log.info('Checking unix conventions...') for (inode, mode, size, target, rdev) \ in self.conn.query("SELECT id, mode, size, target, rdev " "FROM inodes LEFT JOIN symlink_targets ON id = inode"): has_children = self.conn.has_val('SELECT 1 FROM contents WHERE parent_inode=? LIMIT 1', (inode,)) if stat.S_IFMT(mode) == 0: if has_children: mode = mode | stat.S_IFDIR made_to = 'directory' else: mode = mode | stat.S_IFREG made_to = 'regular file' self.found_errors = True self.log_error('Inode %d (%s): directory entry has no type, changed ' 'to %s.', inode, to_str(get_path(inode, self.conn)), made_to) self.conn.execute('UPDATE inodes SET mode=? WHERE id=?', (mode, inode)) if stat.S_ISLNK(mode) and target is None: self.found_errors = True self.log_error('Inode %d (%s): symlink does not have target. 
' 'This is probably going to confuse your system!', inode, to_str(get_path(inode, self.conn))) if stat.S_ISLNK(mode) and target is not None and size != len(target): self.found_errors = True self.log_error('Inode %d (%s): symlink size (%d) does not agree with target ' 'length (%d). This is probably going to confuse your system!', inode, to_str(get_path(inode, self.conn)), size, len(target)) if size != 0 and (not stat.S_ISREG(mode) and not stat.S_ISLNK(mode) and not stat.S_ISDIR(mode)): self.found_errors = True self.log_error('Inode %d (%s) is not regular file but has non-zero size. ' 'This is may confuse your system!', inode, to_str(get_path(inode, self.conn))) if target is not None and not stat.S_ISLNK(mode): self.found_errors = True self.log_error('Inode %d (%s) is not symlink but has symlink target. ' 'This is probably going to confuse your system!', inode, to_str(get_path(inode, self.conn))) if rdev != 0 and not (stat.S_ISBLK(mode) or stat.S_ISCHR(mode)): self.found_errors = True self.log_error('Inode %d (%s) is not device but has device number. ' 'This is probably going to confuse your system!', inode, to_str(get_path(inode, self.conn))) if has_children and not stat.S_ISDIR(mode): self.found_errors = True self.log_error('Inode %d (%s) is not a directory but has child entries. ' 'This is probably going to confuse your system!', inode, to_str(get_path(inode, self.conn))) if (not stat.S_ISREG(mode) and self.conn.has_val('SELECT 1 FROM inode_blocks WHERE inode=?', (inode,))): self.found_errors = True self.log_error('Inode %d (%s) is not a regular file but has data blocks. ' 'This is probably going to confuse your system!', inode, to_str(get_path(inode, self.conn))) for (name, id_p) in self.conn.query('SELECT name, parent_inode FROM contents_v ' 'WHERE LENGTH(name) > 255'): path = get_path(id_p, self.conn, name) self.log_error('Entry name %s... in %s has more than 255 characters, ' 'this could cause problems', to_str(name[:40]), to_str(path[:-len(name)])) self.found_errors = True def check_objects_refcount(self): """Check objects.refcount""" log.info('Checking objects (reference counts)...') self.conn.execute('CREATE TEMPORARY TABLE refcounts ' '(id INTEGER PRIMARY KEY, refcount INTEGER NOT NULL)') try: self.conn.execute('INSERT INTO refcounts (id, refcount) ' 'SELECT obj_id, COUNT(obj_id) FROM blocks GROUP BY obj_id') self.conn.execute(''' CREATE TEMPORARY TABLE wrong_refcounts AS SELECT id, refcounts.refcount, objects.refcount FROM objects LEFT JOIN refcounts USING (id) WHERE objects.refcount != refcounts.refcount OR refcounts.refcount IS NULL''') for (id_, cnt, cnt_old) in self.conn.query('SELECT * FROM wrong_refcounts'): if cnt is None and id_ in self.unlinked_objects and cnt_old == 0: # Object was unlinked by check_block_refcounts self.conn.execute('DELETE FROM objects WHERE id=?', (id_,)) else: self.found_errors = True self.log_error("Object %s has invalid refcount, setting from %d to %d", id_, cnt_old, cnt or 0) if cnt is not None: self.conn.execute("UPDATE objects SET refcount=? 
WHERE id=?", (cnt, id_)) else: # Orphaned object will be picked up by check_keylist self.conn.execute('DELETE FROM objects WHERE id=?', (id_,)) finally: self.conn.execute('DROP TABLE refcounts') self.conn.execute('DROP TABLE IF EXISTS wrong_refcounts') # Delete objects which (correctly had) refcount=0 for obj_id in self.conn.query('SELECT id FROM objects WHERE refcount = 0'): del self.backend['s3ql_data_%d' % obj_id] self.conn.execute("DELETE FROM objects WHERE refcount = 0") def check_objects_temp(self): """Remove temporary objects""" # Tests may provide a plain backend directly, but in regular operation # we'll always work with a ComprencBackend (even if there is neiter # compression nor encryption) if isinstance(self.backend, ComprencBackend): plain_backend = self.backend.backend else: assert isinstance(self.backend, LocalBackend) plain_backend = self.backend if not isinstance(plain_backend, LocalBackend): return log.info('Checking for temporary objects (backend)...') for (path, dirnames, filenames) in os.walk(plain_backend.prefix, topdown=True): for name in filenames: if not re.search(r'^[^#]+#[0-9]+--?[0-9]+\.tmp$', name): continue self.found_errors = True self.log_error("removing temporary file %s", name) os.unlink(os.path.join(path, name)) def check_objects_id(self): """Check objects.id""" log.info('Checking objects (backend)...') lof_id = self.conn.get_val("SELECT inode FROM contents_v " "WHERE name=? AND parent_inode=?", (b"lost+found", ROOT_INODE)) # We use this table to keep track of the objects that we have seen if sys.stdout.isatty(): stamp1 = 0 else: stamp1 = float('inf') self.conn.execute("CREATE TEMP TABLE obj_ids (id INTEGER PRIMARY KEY)") try: for (i, obj_name) in enumerate(self.backend.list('s3ql_data_')): stamp2 = time.time() if stamp2 - stamp1 > 1: sys.stdout.write('\r..processed %d objects so far..' % i) sys.stdout.flush() stamp1 = stamp2 # We only bother with data objects try: obj_id = int(obj_name[10:]) except ValueError: log.warning("Ignoring unexpected object %r", obj_name) continue self.conn.execute('INSERT INTO obj_ids VALUES(?)', (obj_id,)) for (obj_id,) in self.conn.query('SELECT id FROM obj_ids ' 'EXCEPT SELECT id FROM objects'): try: if obj_id in self.unlinked_objects: del self.backend['s3ql_data_%d' % obj_id] else: # TODO: Save the data in lost+found instead del self.backend['s3ql_data_%d' % obj_id] self.found_errors = True self.log_error("Deleted spurious object %d", obj_id) except NoSuchObject: pass self.conn.execute('CREATE TEMPORARY TABLE missing AS ' 'SELECT id FROM objects EXCEPT SELECT id FROM obj_ids') for (obj_id,) in self.conn.query('SELECT * FROM missing'): if ('s3ql_data_%d' % obj_id) in self.backend: # Object was just not in list yet continue self.found_errors = True self.log_error("object %s only exists in table but not in backend, deleting", obj_id) for (id_,) in self.conn.query('SELECT inode FROM inode_blocks JOIN blocks ON block_id = id ' 'WHERE obj_id=? 
', (obj_id,)): # Same file may lack several blocks, but we want to move it # only once if id_ in self.moved_inodes: continue self.moved_inodes.add(id_) # Copy the list, or we may pick up the same entry again and again # (first from the original location, then from lost+found) affected_entries = list(self.conn.query('SELECT name, name_id, parent_inode ' 'FROM contents_v WHERE inode=?', (id_,))) for (name, name_id, id_p) in affected_entries: path = get_path(id_p, self.conn, name) self.log_error("File may lack data, moved to /lost+found: %s", to_str(path)) (_, newname) = self.resolve_free(b"/lost+found", escape(path)) self.conn.execute('UPDATE contents SET name_id=?, parent_inode=? ' 'WHERE name_id=? AND parent_inode=?', (self._add_name(newname), lof_id, name_id, id_p)) self._del_name(name_id) # Unlink missing blocks for (block_id,) in self.conn.query('SELECT id FROM blocks WHERE obj_id=?', (obj_id,)): self.conn.execute('DELETE FROM inode_blocks WHERE block_id=?', (block_id,)) self.conn.execute("DELETE FROM blocks WHERE obj_id=?", (obj_id,)) self.conn.execute("DELETE FROM objects WHERE id=?", (obj_id,)) finally: if sys.stdout.isatty(): sys.stdout.write('\n') self.conn.execute('DROP TABLE obj_ids') self.conn.execute('DROP TABLE IF EXISTS missing') def check_objects_size(self): """Check objects.size""" log.info('Checking objects (sizes)...') for (obj_id,) in self.conn.query('SELECT id FROM objects WHERE size = -1 OR size IS NULL'): self.found_errors = True self.log_error("Object %d has no size information, retrieving from backend...", obj_id) self.conn.execute('UPDATE objects SET size=? WHERE id=?', (self.backend.get_size('s3ql_data_%d' % obj_id), obj_id)) def resolve_free(self, path, name): '''Return parent inode and name of an unused directory entry The directory entry will be in `path`. If an entry `name` already exists there, we append a numeric suffix. ''' if not isinstance(path, bytes): raise TypeError('path must be of type bytes') if not isinstance(name, bytes): raise TypeError('name must be of type bytes') inode_p = inode_for_path(path, self.conn) # Debugging http://code.google.com/p/s3ql/issues/detail?id=217 # and http://code.google.com/p/s3ql/issues/detail?id=261 if len(name) > 255 - 4: name = b''.join((name[0:120], b' ... ', name[-120:])) i = 0 newname = name name += b'-' try: while True: self.conn.get_val("SELECT inode FROM contents_v " "WHERE name=? AND parent_inode=?", (newname, inode_p)) i += 1 newname = name + str(i).encode() except NoSuchRowError: pass return (inode_p, newname) def _add_name(self, name): '''Get id for *name* and increase refcount Name is inserted in table if it does not yet exist. ''' try: name_id = self.conn.get_val('SELECT id FROM names WHERE name=?', (name,)) except NoSuchRowError: name_id = self.conn.rowid('INSERT INTO names (name, refcount) VALUES(?,?)', (name, 1)) else: self.conn.execute('UPDATE names SET refcount=refcount+1 WHERE id=?', (name_id,)) return name_id def _del_name(self, name_id): '''Decrease refcount for name_id, remove if it reaches 0''' self.conn.execute('UPDATE names SET refcount=refcount-1 WHERE id=?', (name_id,)) self.conn.execute('DELETE FROM names WHERE refcount=0 AND id=?', (name_id,)) class ROFsck(Fsck): ''' Check file system database only, and don't correct any errors. 
''' def __init__(self, path): db = Connection(path + '.db') db.execute('PRAGMA journal_mode = WAL') param = load_params(path) super().__init__(None, None, param, db) def check(self): self.conn.execute('BEGIN TRANSACTION') try: log.info('Creating temporary indices...') for idx in ('tmp1', 'tmp2', 'tmp3', 'tmp4', 'tmp5'): self.conn.execute('DROP INDEX IF EXISTS %s' % idx) self.conn.execute('CREATE INDEX tmp1 ON blocks(obj_id)') self.conn.execute('CREATE INDEX tmp2 ON inode_blocks(block_id)') self.conn.execute('CREATE INDEX tmp3 ON contents(inode)') self.conn.execute('CREATE INDEX tmp4 ON contents(name_id)') self.conn.execute('CREATE INDEX tmp5 ON ext_attributes(name_id)') self.check_lof() self.check_names_refcount() self.check_contents_name() self.check_contents_inode() self.check_contents_parent_inode() self.check_objects_refcount() self.check_objects_size() self.check_blocks_obj_id() self.check_blocks_refcount() self.check_blocks_checksum() self.check_inode_blocks_block_id() self.check_inode_blocks_inode() self.check_inodes_refcount() self.check_inodes_size() self.check_ext_attributes_name() self.check_ext_attributes_inode() self.check_symlinks_inode() self.check_loops() self.check_unix() self.check_foreign_keys() finally: log.info('Dropping temporary indices...') self.conn.execute('ROLLBACK') def check_blocks_checksum(self): """Check blocks.hash""" log.info('Checking blocks (checksums)...') for (block_id,) in self.conn.query('SELECT id FROM blocks WHERE hash IS NULL'): self.found_errors = True self.log_error("No cached checksum for block %d!", block_id) def check_objects_size(self): """Check objects.size""" log.info('Checking objects (sizes)...') for (obj_id,) in self.conn.query('SELECT id FROM objects WHERE size IS NULL'): self.found_errors = True self.log_error("Object %d has no size information!", obj_id) def parse_args(args): parser = ArgumentParser( description="Checks and repairs an S3QL filesystem.") parser.add_log('~/.s3ql/fsck.log') parser.add_cachedir() parser.add_authfile() parser.add_debug() parser.add_quiet() parser.add_backend_options() parser.add_version() parser.add_storage_url() parser.add_argument("--batch", action="store_true", default=False, help="If user input is required, exit without prompting.") parser.add_argument("--force", action="store_true", default=False, help="Force checking even if file system is marked clean.") parser.add_argument("--force-remote", action="store_true", default=False, help="Force use of remote metadata even when this would " "likely result in data loss.") options = parser.parse_args(args) return options def main(args=None): if args is None: args = sys.argv[1:] options = parse_args(args) setup_logging(options) # Check if fs is mounted on this computer # This is not foolproof but should prevent common mistakes if is_mounted(options.storage_url): raise QuietError('Can not check mounted file system.', exitcode=40) backend = get_backend(options) atexit.register(backend.close) log.info('Starting fsck of %s', options.storage_url) cachepath = get_backend_cachedir(options.storage_url, options.cachedir) seq_no = get_seq_no(backend) db = None # When there was a crash during metadata rotation, we may end up # without an s3ql_metadata object. 
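    # If the copy under the permanent name is missing, the freshly written
    # 's3ql_metadata_new' object from the interrupted rotation is used instead.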
meta_obj_name = 's3ql_metadata' if meta_obj_name not in backend: meta_obj_name += '_new' if os.path.exists(cachepath + '.params'): assert os.path.exists(cachepath + '.db') param = load_params(cachepath) if param['seq_no'] < seq_no: log.info('Ignoring locally cached metadata (outdated).') param = backend.lookup(meta_obj_name) else: log.info('Using cached metadata.') db = Connection(cachepath + '.db') assert not os.path.exists(cachepath + '-cache') or param['needs_fsck'] if param['seq_no'] > seq_no: log.warning('File system has not been unmounted cleanly.') param['needs_fsck'] = True elif backend.lookup(meta_obj_name)['seq_no'] != param['seq_no']: log.warning('Remote metadata is outdated.') param['needs_fsck'] = True else: param = backend.lookup(meta_obj_name) assert not os.path.exists(cachepath + '-cache') # .db might exist if mount.s3ql is killed at exactly the right instant # and should just be ignored. # Check revision if param['revision'] < CURRENT_FS_REV: raise QuietError('File system revision too old, please run `s3qladm upgrade` first.', exitcode=32) elif param['revision'] > CURRENT_FS_REV: raise QuietError('File system revision too new, please update your ' 'S3QL installation.', exitcode=33) if param['seq_no'] < seq_no: print(textwrap.fill(textwrap.dedent('''\ Backend reports that file system is still mounted elsewhere. Either the file system has not been unmounted cleanly or the data has not yet propagated through the backend. In the latter case, waiting for a while should fix the problem; in the former case you should try to run fsck on the computer where the file system has been mounted most recently. You may also continue and use whatever metadata is available in the backend. However, in that case YOU MAY LOSE ALL DATA THAT HAS BEEN UPLOADED OR MODIFIED SINCE THE LAST SUCCESSFUL METADATA UPLOAD. Moreover, files and directories that you have deleted since then MAY REAPPEAR WITH SOME OF THEIR CONTENT LOST. '''))) print('Enter "continue, I know what I am doing" to use the outdated data anyway:', '> ', sep='\n', end='') if options.force_remote: print('(--force-remote specified, continuing anyway)') elif options.batch: raise QuietError('(in batch mode, exiting)', exitcode=41) elif sys.stdin.readline().strip() != 'continue, I know what I am doing': raise QuietError(exitcode=42) param['seq_no'] = seq_no param['needs_fsck'] = True if not db and os.path.exists(cachepath + '-cache'): for i in itertools.count(): bak_name = '%s-cache.bak%d' % (cachepath, i) if not os.path.exists(bak_name): break log.warning('Found outdated cache directory (%s), renaming to .bak%d', cachepath + '-cache', i) log.warning('You should delete this directory once you are sure that ' 'everything is in order.') os.rename(cachepath + '-cache', bak_name) if (not param['needs_fsck'] and param['max_inode'] < 2 ** 31 and (time.time() - param['last_fsck']) < 60 * 60 * 24 * 31): # last check less than one month ago if options.force: log.info('File system seems clean, checking anyway.') else: log.info('File system is marked as clean. Use --force to force checking.') return # If using local metadata, check consistency if db: log.info('Checking DB integrity...') try: # get_list may raise CorruptError itself res = db.get_list('PRAGMA integrity_check(20)') if res[0][0] != 'ok': log.error('\n'.join(x[0] for x in res)) raise apsw.CorruptError() except apsw.CorruptError: raise QuietError('Local metadata is corrupted.
Remove or repair the following ' 'files manually and re-run fsck:\n' + cachepath + '.db (corrupted)\n' + cachepath + '.param (intact)', exitcode=43) else: db = download_metadata(backend, cachepath + '.db') # Increase metadata sequence no param['seq_no'] += 1 param['needs_fsck'] = True backend['s3ql_seq_no_%d' % param['seq_no']] = b'Empty' save_params(cachepath, param) fsck = Fsck(cachepath + '-cache', backend, param, db) fsck.check() param['max_inode'] = db.get_val('SELECT MAX(id) FROM inodes') if fsck.uncorrectable_errors: raise QuietError("Uncorrectable errors found, aborting.", exitcode=44+128) if os.path.exists(cachepath + '-cache'): os.rmdir(cachepath + '-cache') if param['max_inode'] >= 2 ** 31: renumber_inodes(db) param['inode_gen'] += 1 param['max_inode'] = db.get_val('SELECT MAX(id) FROM inodes') if fsck.found_errors and not param['needs_fsck']: log.error('File system was marked as clean, yet fsck found problems.') log.error('Please report this to the S3QL mailing list, http://groups.google.com/group/s3ql') param['needs_fsck'] = False param['last_fsck'] = time.time() param['last-modified'] = time.time() dump_and_upload_metadata(backend, db, param) save_params(cachepath, param) log.info('Cleaning up local metadata...') db.execute('ANALYZE') db.execute('VACUUM') db.close() log.info('Completed fsck of %s', options.storage_url) if fsck.found_errors: sys.exit(128) else: sys.exit(0) def renumber_inodes(db): '''Renumber inodes''' log.info('Renumbering inodes...') for table in ('inodes', 'inode_blocks', 'symlink_targets', 'contents', 'names', 'blocks', 'objects', 'ext_attributes'): db.execute('ALTER TABLE %s RENAME TO %s_old' % (table, table)) for table in ('contents_v', 'ext_attributes_v'): db.execute('DROP VIEW %s' % table) create_tables(db) for table in ('names', 'blocks', 'objects'): db.execute('DROP TABLE %s' % table) db.execute('ALTER TABLE %s_old RENAME TO %s' % (table, table)) log.info('..mapping..') db.execute('CREATE TEMPORARY TABLE inode_map (rowid INTEGER PRIMARY KEY AUTOINCREMENT, id INTEGER UNIQUE)') db.execute('INSERT INTO inode_map (rowid, id) VALUES(?,?)', (ROOT_INODE, ROOT_INODE)) db.execute('INSERT INTO inode_map (rowid, id) VALUES(?,?)', (CTRL_INODE, CTRL_INODE)) db.execute('INSERT INTO inode_map (id) SELECT id FROM inodes_old WHERE id > ? 
ORDER BY id ASC', (CTRL_INODE,)) log.info('..inodes..') db.execute('INSERT INTO inodes (id,mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size,locked,rdev) ' 'SELECT (SELECT rowid FROM inode_map WHERE inode_map.id = inodes_old.id), ' ' mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size,locked,rdev FROM inodes_old') log.info('..inode_blocks..') db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) ' 'SELECT (SELECT rowid FROM inode_map WHERE inode_map.id = inode_blocks_old.inode), ' ' blockno, block_id FROM inode_blocks_old') log.info('..contents..') db.execute('INSERT INTO contents (inode, parent_inode, name_id) ' 'SELECT (SELECT rowid FROM inode_map WHERE inode_map.id = contents_old.inode), ' ' (SELECT rowid FROM inode_map WHERE inode_map.id = contents_old.parent_inode), ' ' name_id FROM contents_old') log.info('..symlink_targets..') db.execute('INSERT INTO symlink_targets (inode, target) ' 'SELECT (SELECT rowid FROM inode_map WHERE inode_map.id = symlink_targets_old.inode), ' ' target FROM symlink_targets_old') log.info('..ext_attributes..') db.execute('INSERT INTO ext_attributes (inode, name_id, value) ' 'SELECT (SELECT rowid FROM inode_map WHERE inode_map.id = ext_attributes_old.inode), ' ' name_id, value FROM ext_attributes_old') for table in ('inodes', 'inode_blocks', 'symlink_targets', 'contents', 'ext_attributes'): db.execute('DROP TABLE %s_old' % table) db.execute('DROP TABLE inode_map') def escape(path): '''Escape slashes in path so that is usable as a file name''' return path[1:].replace(b'_', b'__').replace(b'/', b'_') def to_str(name): '''Decode path name for printing''' return str(name, encoding='utf-8', errors='replace') if __name__ == '__main__': main(sys.argv[1:]) s3ql-2.26/src/s3ql/backends/0000775000175000017500000000000013246754372017251 5ustar nikrationikratio00000000000000s3ql-2.26/src/s3ql/backends/comprenc.py0000644000175000017500000005273113123327520021420 0ustar nikrationikratio00000000000000''' comprenc.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from ..logging import logging # Ensure use of custom logger class from .. import BUFSIZE from .common import AbstractBackend, CorruptedObjectError, checksum_basic_mapping from ..common import ThawError, freeze_basic_mapping, thaw_basic_mapping from ..inherit_docstrings import (copy_ancestor_docstring, prepend_ancestor_docstring, ABCDocstMeta) from Crypto.Cipher import AES from Crypto.Util import Counter import bz2 import hashlib import hmac import lzma import io import struct import time import zlib log = logging.getLogger(__name__) HMAC_SIZE = 32 def sha256(s): return hashlib.sha256(s).digest() def aes_cipher(key): '''Return AES cipher in CTR mode for *key*''' return AES.new(key, AES.MODE_CTR, counter=Counter.new(128, initial_value=0)) class ComprencBackend(AbstractBackend, metaclass=ABCDocstMeta): ''' This class adds encryption, compression and integrity protection to a plain backend. 
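    When a passphrase is set, objects are stored together with signed
    metadata; on read, the signature and the embedded object key are verified
    before the payload is decrypted and decompressed.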
''' def __init__(self, passphrase, compression, backend): super().__init__() assert passphrase is None or isinstance(passphrase, (bytes, bytearray, memoryview)) self.passphrase = passphrase self.compression = compression self.backend = backend if (compression[0] not in ('bzip2', 'lzma', 'zlib', None) or compression[1] not in range(10)): raise ValueError('Unsupported compression: %s' % compression) @property @copy_ancestor_docstring def has_native_rename(self): return self.backend.has_native_rename @copy_ancestor_docstring def reset(self): self.backend.reset() @copy_ancestor_docstring def lookup(self, key): meta_raw = self.backend.lookup(key) return self._verify_meta(key, meta_raw)[1] @prepend_ancestor_docstring def get_size(self, key): ''' This method returns the compressed size, i.e. the storage space that's actually occupied by the object. ''' return self.backend.get_size(key) @copy_ancestor_docstring def is_temp_failure(self, exc): return self.backend.is_temp_failure(exc) def _verify_meta(self, key, metadata): '''Unwrap and authenticate metadata If the backend has a password set but the object is not encrypted, `ObjectNotEncrypted` is raised. Returns the object nonce and its metadata. If the object is not encrypted, the nonce is `None`. ''' if not isinstance(metadata, dict): raise CorruptedObjectError('metadata should be dict, not %s' % type(metadata)) format_version = metadata.get('format_version', 0) if format_version != 2: raise CorruptedObjectError('format_version %s unsupported' % format_version) for mkey in ('encryption', 'compression', 'data'): if mkey not in metadata: raise CorruptedObjectError('meta key %s is missing' % mkey) encr_alg = metadata['encryption'] encrypted = (encr_alg != 'None') if encrypted and self.passphrase is None: raise CorruptedObjectError('Encrypted object and no passphrase supplied') elif not encrypted and self.passphrase is not None: raise ObjectNotEncrypted() meta_buf = metadata['data'] if not encrypted: try: return (None, thaw_basic_mapping(meta_buf)) except ThawError: raise CorruptedObjectError('Invalid metadata') # Encrypted for mkey in ('nonce', 'signature', 'object_id'): if mkey not in metadata: raise CorruptedObjectError('meta key %s is missing' % mkey) nonce = metadata['nonce'] stored_key = metadata['object_id'] meta_key = sha256(self.passphrase + nonce + b'meta') meta_sig = checksum_basic_mapping(metadata, meta_key) if not hmac.compare_digest(metadata['signature'], meta_sig): raise CorruptedObjectError('HMAC mismatch') if stored_key != key: raise CorruptedObjectError('Object content does not match its key (%s vs %s)' % (stored_key, key)) buf = aes_cipher(meta_key).decrypt(meta_buf) try: return (nonce, thaw_basic_mapping(buf)) except ThawError: raise CorruptedObjectError('Invalid metadata') @prepend_ancestor_docstring def open_read(self, key): """ If the backend has a password set but the object is not encrypted, `ObjectNotEncrypted` is raised. """ fh = self.backend.open_read(key) try: meta_raw = fh.metadata (nonce, meta) = self._verify_meta(key, meta_raw) if nonce: data_key = sha256(self.passphrase + nonce) # The `payload_offset` key only exists if the storage object was # created with on old S3QL version. In order to avoid having to # download and re-upload the entire object during the upgrade, the # upgrade procedure adds this header to tell us how many bytes at # the beginning of the object we have to skip to get to the payload. 
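            # The skip loop below keeps calling fh.read() until the whole
            # payload offset has been consumed, so it also copes with short reads.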
if 'payload_offset' in meta_raw: to_skip = meta_raw['payload_offset'] while to_skip: to_skip -= len(fh.read(to_skip)) encr_alg = meta_raw['encryption'] if encr_alg == 'AES_v2': fh = DecryptFilter(fh, data_key) elif encr_alg != 'None': raise RuntimeError('Unsupported encryption: %s' % encr_alg) compr_alg = meta_raw['compression'] if compr_alg == 'BZIP2': fh = DecompressFilter(fh, bz2.BZ2Decompressor()) elif compr_alg == 'LZMA': fh = DecompressFilter(fh, lzma.LZMADecompressor()) elif compr_alg == 'ZLIB': fh = DecompressFilter(fh,zlib.decompressobj()) elif compr_alg != 'None': raise RuntimeError('Unsupported compression: %s' % compr_alg) fh.metadata = meta except: # Don't emit checksum warning, caller hasn't even # started reading anything. fh.close(checksum_warning=False) raise return fh @copy_ancestor_docstring def open_write(self, key, metadata=None, is_compressed=False): if metadata is None: metadata = dict() elif not isinstance(metadata, dict): raise TypeError('*metadata*: expected dict or None, got %s' % type(metadata)) meta_buf = freeze_basic_mapping(metadata) meta_raw = dict(format_version=2) if is_compressed or self.compression[0] is None: compr = None meta_raw['compression'] = 'None' elif self.compression[0] == 'zlib': compr = zlib.compressobj(self.compression[1]) meta_raw['compression'] = 'ZLIB' elif self.compression[0] == 'bzip2': compr = bz2.BZ2Compressor(self.compression[1]) meta_raw['compression'] = 'BZIP2' elif self.compression[0] == 'lzma': compr = lzma.LZMACompressor(preset=self.compression[1]) meta_raw['compression'] = 'LZMA' if self.passphrase is not None: nonce = struct.pack(' HMAC_SIZE or self.fh.read(1): # Read rest of stream, so that we raise MD5 error instead # if problem is on lower layer self.discard_input() raise CorruptedObjectError('Extraneous data at end of object') if not hmac.compare_digest(inbuf, self.hmac.digest()): raise CorruptedObjectError('HMAC mismatch') self.hmac_checked = True break return outbuf def close(self, *a, **kw): self.fh.close(*a, **kw) def __enter__(self): return self def __exit__(self, *a): self.close() return False def decompress(decomp, buf): '''Decompress *buf* using *decomp* This method encapsulates exception handling for different decompressors. ''' try: return decomp.decompress(buf) except IOError as exc: if exc.args[0].lower().startswith('invalid data stream'): raise CorruptedObjectError('Invalid compressed stream') raise except lzma.LZMAError as exc: if (exc.args[0].lower().startswith('corrupt input data') or exc.args[0].startswith('Input format not supported')): raise CorruptedObjectError('Invalid compressed stream') raise except zlib.error as exc: if exc.args[0].lower().startswith('error -3 while decompressing'): raise CorruptedObjectError('Invalid compressed stream') raise class ObjectNotEncrypted(Exception): ''' Raised by the backend if an object was requested from an encrypted backend, but the object was stored without encryption. We do not want to simply return the uncrypted object, because the caller may rely on the objects integrity being cryptographically verified. ''' pass s3ql-2.26/src/s3ql/backends/swiftks.py0000644000175000017500000001131013160156175021277 0ustar nikrationikratio00000000000000''' swiftks.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from ..logging import logging, QuietError # Ensure use of custom logger class from . 
import swift from dugong import HTTPConnection, CaseInsensitiveDict from .common import AuthorizationError, retry, DanglingStorageURLError from .s3c import HTTPError from ..inherit_docstrings import copy_ancestor_docstring from urllib.parse import urlsplit import json import re import urllib.parse log = logging.getLogger(__name__) class Backend(swift.Backend): def __init__(self, storage_url, login, password, options): self.region = None super().__init__(storage_url, login, password, options) @copy_ancestor_docstring def _parse_storage_url(self, storage_url, ssl_context): hit = re.match(r'^[a-zA-Z0-9]+://' # Backend r'([^/:]+)' # Hostname r'(?::([0-9]+))?' # Port r'/([a-zA-Z0-9._-]+):' # Region r'([^/]+)' # Bucketname r'(?:/(.*))?$', # Prefix storage_url) if not hit: raise QuietError('Invalid storage URL', exitcode=2) hostname = hit.group(1) if hit.group(2): port = int(hit.group(2)) elif ssl_context: port = 443 else: port = 80 region = hit.group(3) containername = hit.group(4) prefix = hit.group(5) or '' self.hostname = hostname self.port = port self.container_name = containername self.prefix = prefix self.region = region @retry def _get_conn(self): '''Obtain connection to server and authentication token''' log.debug('started') if 'no-ssl' in self.options: ssl_context = None else: ssl_context = self.ssl_context headers = CaseInsensitiveDict() headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json; charset="utf-8"' if ':' in self.login: (tenant,user) = self.login.split(':') else: tenant = None user = self.login auth_body = { 'auth': { 'passwordCredentials': { 'username': user, 'password': self.password } }} if tenant: auth_body['auth']['tenantName'] = tenant with HTTPConnection(self.hostname, port=self.port, proxy=self.proxy, ssl_context=ssl_context) as conn: conn.timeout = int(self.options.get('tcp-timeout', 20)) conn.send_request('POST', '/v2.0/tokens', headers=headers, body=json.dumps(auth_body).encode('utf-8')) resp = conn.read_response() if resp.status == 401: raise AuthorizationError(resp.reason) elif resp.status > 299 or resp.status < 200: raise HTTPError(resp.status, resp.reason, resp.headers) cat = json.loads(conn.read().decode('utf-8')) self.auth_token = cat['access']['token']['id'] avail_regions = [] for service in cat['access']['serviceCatalog']: if service['type'] != 'object-store': continue for endpoint in service['endpoints']: if endpoint['region'] != self.region: avail_regions.append(endpoint['region']) continue o = urlsplit(endpoint['publicURL']) self.auth_prefix = urllib.parse.unquote(o.path) if o.scheme == 'https': ssl_context = self.ssl_context elif o.scheme == 'http': ssl_context = None else: # fall through to scheme used for authentication pass self._detect_features(o.hostname, o.port, ssl_context) conn = HTTPConnection(o.hostname, o.port, proxy=self.proxy, ssl_context=ssl_context) conn.timeout = int(self.options.get('tcp-timeout', 20)) return conn if len(avail_regions) < 10: raise DanglingStorageURLError(self.container_name, 'No accessible object storage service found in region %s' ' (available regions: %s)' % (self.region, ', '.join(avail_regions))) else: raise DanglingStorageURLError(self.container_name, 'No accessible object storage service found in region %s' % self.region) s3ql-2.26/src/s3ql/backends/gs.py0000664000175000017500000002331413160156175020227 0ustar nikrationikratio00000000000000''' backends/gs.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. 
''' from ..logging import logging, QuietError # Ensure use of custom logger class from . import s3c from .s3c import C_DAY_NAMES, C_MONTH_NAMES, HTTPError, S3Error from .common import AuthenticationError, retry, NoSuchObject from .. import oauth_client from ..inherit_docstrings import copy_ancestor_docstring from dugong import CaseInsensitiveDict, HTTPConnection from urllib.parse import urlencode import re import json import threading import time # Pylint goes berserk with false positives #pylint: disable=E1002,E1101,W0201 log = logging.getLogger(__name__) class Backend(s3c.Backend): """A backend to store data in Google Storage This class uses standard HTTP connections to connect to GS. The backend guarantees immediate get consistency and eventual list consistency. """ xml_ns_prefix = '{http://doc.s3.amazonaws.com/2006-03-01}' known_options = (s3c.Backend.known_options - {'dumb-copy', 'disable-expect100' }) # We don't want to request an access token for each instance, # because there is a limit on the total number of valid tokens. # This class variable holds the mapping from refresh tokens to # access tokens. access_token = dict() _refresh_lock = threading.Lock() def __init__(self, storage_url, gs_key, gs_secret, options): super().__init__(storage_url, gs_key, gs_secret, options) self.use_oauth2 = (gs_key == 'oauth2') self.options['disable-expect100'] = True if self.use_oauth2: self.hdr_prefix = 'x-goog-' @staticmethod def _parse_storage_url(storage_url, ssl_context): # Special case for unit testing against local mock server hit = re.match(r'^gs://!unittest!' r'([^/:]+)' # Hostname r':([0-9]+)' # Port r'/([^/]+)' # Bucketname r'(?:/(.*))?$', # Prefix storage_url) if hit: hostname = hit.group(1) port = int(hit.group(2)) bucket_name = hit.group(3) prefix = hit.group(4) or '' return (hostname, port, bucket_name, prefix) hit = re.match(r'^gs://([^/]+)(?:/(.*))?$', storage_url) if not hit: raise QuietError('Invalid storage URL', exitcode=2) bucket_name = hit.group(1) # Dots in the bucket cause problems with SSL certificate validation, # because server certificate is for *.commondatastorage.googleapis.com # (which does not match e.g. a.b.commondatastorage.googleapis.com) if '.' in bucket_name and ssl_context: hostname = 'commondatastorage.googleapis.com' else: hostname = '%s.commondatastorage.googleapis.com' % bucket_name prefix = hit.group(2) or '' port = 443 if ssl_context else 80 return (hostname, port, bucket_name, prefix) def __str__(self): return 'Google Storage bucket %s, prefix %s' % (self.bucket_name, self.prefix) def _authorize_request(self, method, path, headers, subres, query_string): '''Add authorization information to *headers*''' if not self.use_oauth2: return super()._authorize_request(method, path, headers, subres, query_string) headers['Authorization'] = 'Bearer ' + self.access_token[self.password] now = time.gmtime() headers['Date'] = ('%s, %02d %s %04d %02d:%02d:%02d GMT' % (C_DAY_NAMES[now.tm_wday], now.tm_mday, C_MONTH_NAMES[now.tm_mon - 1], now.tm_year, now.tm_hour, now.tm_min, now.tm_sec)) # This method performs a different kind of HTTP request than the methods # decorated with `retry` that it is called by, so in theory it should do its # own retry handling (perhaps with a new `retry_on` decorator that allows to # specify a custom `is_temp_failure` function instead of calling the # instance method). 
However, in practice there is currently no difference in # the set of exceptions that are considered temporary when retrieving an # access token, and the set of exceptions checked for in the # `_is_temp_failure` method. Therefore, for now we avoid the additional # complexity of custom retry handling and rely on the @retry decorator of # the caller to handle temporary errors. This should be kept in mind # when modifying either method. def _get_access_token(self): log.info('Requesting new access token') headers = CaseInsensitiveDict() headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=utf-8' body = urlencode({'client_id': oauth_client.CLIENT_ID, 'client_secret': oauth_client.CLIENT_SECRET, 'refresh_token': self.password, 'grant_type': 'refresh_token' }) conn = HTTPConnection('accounts.google.com', 443, proxy=self.proxy, ssl_context=self.ssl_context) try: conn.send_request('POST', '/o/oauth2/token', headers=headers, body=body.encode('utf-8')) resp = conn.read_response() if resp.status > 299 or resp.status < 200: raise HTTPError(resp.status, resp.reason, resp.headers) content_type = resp.headers.get('Content-Type', None) if content_type: hit = re.match(r'application/json(?:; charset="(.+)")?$', resp.headers['Content-Type'], re.IGNORECASE) else: hit = None if not hit: log.error('Unexpected server reply when refreshing access token:\n%s', self._dump_response(resp)) raise RuntimeError('Unable to parse server response') charset = hit.group(1) or 'utf-8' body = conn.readall().decode(charset) resp_json = json.loads(body) if not isinstance(resp_json, dict): log.error('Invalid json server response. Expected dict, got:\n%s', body) raise RuntimeError('Unable to parse server response') if 'error' in resp_json: raise AuthenticationError(resp_json['error']) if 'access_token' not in resp_json: log.error('Unable to find access token in server response:\n%s', body) raise RuntimeError('Unable to parse server response') self.access_token[self.password] = resp_json['access_token'] finally: conn.disconnect() def _do_request(self, method, path, subres=None, query_string=None, headers=None, body=None): # When not using OAuth2, fall-through. if not self.use_oauth2: return super()._do_request(method, path, subres=subres, headers=headers, query_string=query_string, body=body) # If we have an access token, try to use it. token = self.access_token.get(self.password, None) if token is not None: try: return super()._do_request(method, path, subres=subres, headers=headers, query_string=query_string, body=body) except HTTPError as exc: if exc.status != 401: raise except S3Error as exc: if exc.code != 'AuthenticationRequired': raise # If we reach this point, then the access token must have # expired, so we try to get a new one. We use a lock to prevent # multiple threads from refreshing the token simultaneously. with self._refresh_lock: # Don't refresh if another thread has already done so while # we waited for the lock. if token is None or self.access_token.get(self.password, None) == token: self._get_access_token() # Reset body, so we can resend the request with the new access token if body and not isinstance(body, (bytes, bytearray, memoryview)): body.seek(0) # Try request again. If this still fails, propagate the error # (because we have just refreshed the access token). # FIXME: We can't rely on this if e.g. the system hibernated # after refreshing the token, but before reaching this line. 
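        # Resend the request once with the refreshed access token; if it still
        # fails, the error is propagated to the caller (see FIXME above).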
return super()._do_request(method, path, subres=subres, headers=headers, query_string=query_string, body=body) # Overwrite, because Google Storage does not return errors after # 200 OK. @retry @copy_ancestor_docstring def copy(self, src, dest, metadata=None): log.debug('started with %s, %s', src, dest) if not (metadata is None or isinstance(metadata, dict)): raise TypeError('*metadata*: expected dict or None, got %s' % type(metadata)) headers = CaseInsensitiveDict() headers[self.hdr_prefix + 'copy-source'] = \ '/%s/%s%s' % (self.bucket_name, self.prefix, src) if metadata is None: headers[self.hdr_prefix + 'metadata-directive'] = 'COPY' else: headers[self.hdr_prefix + 'metadata-directive'] = 'REPLACE' self._add_meta_headers(headers, metadata) try: self._do_request('PUT', '/%s%s' % (self.prefix, dest), headers=headers) self.conn.discard() except s3c.NoSuchKeyError: raise NoSuchObject(src) s3ql-2.26/src/s3ql/backends/s3.py0000644000175000017500000002073013160156175020140 0ustar nikrationikratio00000000000000''' s3.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from ..logging import logging, QuietError # Ensure use of custom logger class from . import s3c from .s3c import get_S3Error from .common import NoSuchObject, retry from ..inherit_docstrings import copy_ancestor_docstring from xml.sax.saxutils import escape as xml_escape import re import time import urllib.parse import hashlib import hmac log = logging.getLogger(__name__) # Maximum number of keys that can be deleted at once MAX_KEYS = 1000 # Pylint goes berserk with false positives #pylint: disable=E1002,E1101 class Backend(s3c.Backend): """A backend to store data in Amazon S3 This class uses standard HTTP connections to connect to S3. The backend guarantees get after create consistency, i.e. a newly created object will be immediately retrievable. Additional consistency guarantees may or may not be available and can be queried for with instance methods. 
""" known_options = ((s3c.Backend.known_options | { 'sse', 'rrs', 'ia' }) - {'dumb-copy', 'disable-expect100'}) def __init__(self, storage_url, login, password, options): self.region = None self.signing_key = None super().__init__(storage_url, login, password, options) def _parse_storage_url(self, storage_url, ssl_context): hit = re.match(r'^s3s?://([^/]+)/([^/]+)(?:/(.*))?$', storage_url) if not hit: raise QuietError('Invalid storage URL', exitcode=2) self.region = hit.group(1) bucket_name = hit.group(2) # http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/BucketRestrictions.html if not re.match('^[a-z0-9][a-z0-9.-]{1,60}[a-z0-9]$', bucket_name): raise QuietError('Invalid bucket name.', exitcode=2) if self.region == 'us-east-1': hostname = 's3.amazonaws.com' elif self.region.startswith('cn-'): hostname = 's3.%s.amazonaws.com.cn' % self.region else: hostname = 's3-%s.amazonaws.com' % self.region prefix = hit.group(3) or '' port = 443 if ssl_context else 80 return (hostname, port, bucket_name, prefix) def __str__(self): return 'Amazon S3 bucket %s, prefix %s' % (self.bucket_name, self.prefix) @copy_ancestor_docstring def delete_multi(self, keys, force=False): log.debug('started with %s', keys) while len(keys) > 0: tmp = keys[:MAX_KEYS] try: self._delete_multi(tmp, force=force) finally: keys[:MAX_KEYS] = tmp def _set_storage_options(self, headers): if 'sse' in self.options: headers['x-amz-server-side-encryption'] = 'AES256' if 'ia' in self.options: sc = 'STANDARD_IA' elif 'rrs' in self.options: sc = 'REDUCED_REDUNDANCY' else: sc = 'STANDARD' headers['x-amz-storage-class'] = sc @copy_ancestor_docstring def copy(self, src, dest, metadata=None): extra_headers = {} self._set_storage_options(extra_headers) return super().copy(src, dest, metadata=metadata, extra_headers=extra_headers) @copy_ancestor_docstring def open_write(self, key, metadata=None, is_compressed=False): extra_headers = {} self._set_storage_options(extra_headers) return super().open_write(key, metadata=metadata, is_compressed=is_compressed, extra_headers=extra_headers) @retry def _delete_multi(self, keys, force=False): body = [ '' ] esc_prefix = xml_escape(self.prefix) for key in keys: body.append('%s%s' % (esc_prefix, xml_escape(key))) body.append('') body = '\n'.join(body).encode('utf-8') headers = { 'content-type': 'text/xml; charset=utf-8' } resp = self._do_request('POST', '/', subres='delete', body=body, headers=headers) try: root = self._parse_xml_response(resp) ns_p = self.xml_ns_prefix error_tags = root.findall(ns_p + 'Error') if not error_tags: # No errors occured, everything has been deleted del keys[:] return # Some errors occured, so we need to determine what has # been deleted and what hasn't offset = len(self.prefix) for tag in root.findall(ns_p + 'Deleted'): fullkey = tag.find(ns_p + 'Key').text assert fullkey.startswith(self.prefix) keys.remove(fullkey[offset:]) if log.isEnabledFor(logging.DEBUG): for errtag in error_tags: log.debug('Delete %s failed with %s', errtag.findtext(ns_p + 'Key')[offset:], errtag.findtext(ns_p + 'Code')) # If *force*, just modify the passed list and return without # raising an exception, otherwise raise exception for the first error if force: return errcode = error_tags[0].findtext(ns_p + 'Code') errmsg = error_tags[0].findtext(ns_p + 'Message') errkey = error_tags[0].findtext(ns_p + 'Key')[offset:] if errcode == 'NoSuchKeyError': raise NoSuchObject(errkey) else: raise get_S3Error(errcode, 'Error deleting %s: %s' % (errkey, errmsg)) except: self.conn.discard() def 
_authorize_request(self, method, path, headers, subres, query_string): '''Add authorization information to *headers*''' # See http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html now = time.gmtime() #now = time.strptime('Fri, 24 May 2013 00:00:00 GMT', # '%a, %d %b %Y %H:%M:%S GMT') ymd = time.strftime('%Y%m%d', now) ymdhms = time.strftime('%Y%m%dT%H%M%SZ', now) headers['x-amz-date'] = ymdhms headers['x-amz-content-sha256'] = 'UNSIGNED-PAYLOAD' #headers['x-amz-content-sha256'] = hashlib.sha256(body).hexdigest() headers.pop('Authorization', None) auth_strs = [method] auth_strs.append(urllib.parse.quote(path)) if query_string: s = urllib.parse.urlencode(query_string, doseq=True).split('&') else: s = [] if subres: s.append(urllib.parse.quote_plus(subres) + '=') if s: s = '&'.join(sorted(s)) else: s = '' auth_strs.append(s) # Headers sig_hdrs = sorted(x.lower() for x in headers.keys()) for hdr in sig_hdrs: auth_strs.append('%s:%s' % (hdr, headers[hdr].strip())) auth_strs.append('') auth_strs.append(';'.join(sig_hdrs)) auth_strs.append(headers['x-amz-content-sha256']) can_req = '\n'.join(auth_strs) #log.debug('canonical request: %s', can_req) can_req_hash = hashlib.sha256(can_req.encode()).hexdigest() str_to_sign = ("AWS4-HMAC-SHA256\n" + ymdhms + '\n' + '%s/%s/s3/aws4_request\n' % (ymd, self.region) + can_req_hash) #log.debug('string to sign: %s', str_to_sign) if self.signing_key is None or self.signing_key[1] != ymd: self.update_signing_key(ymd) signing_key = self.signing_key[0] sig = hmac_sha256(signing_key, str_to_sign.encode(), hex=True) cred = ('%s/%04d%02d%02d/%s/s3/aws4_request' % (self.login, now.tm_year, now.tm_mon, now.tm_mday, self.region)) headers['Authorization'] = ( 'AWS4-HMAC-SHA256 ' 'Credential=%s,' 'SignedHeaders=%s,' 'Signature=%s' % (cred, ';'.join(sig_hdrs), sig)) def update_signing_key(self, ymd): date_key = hmac_sha256(("AWS4" + self.password).encode(), ymd.encode()) region_key = hmac_sha256(date_key, self.region.encode()) service_key = hmac_sha256(region_key, b's3') signing_key = hmac_sha256(service_key, b'aws4_request') self.signing_key = (signing_key, ymd) def hmac_sha256(key, msg, hex=False): d = hmac.new(key, msg, hashlib.sha256) if hex: return d.hexdigest() else: return d.digest() s3ql-2.26/src/s3ql/backends/__init__.py0000644000175000017500000000117212615000156021340 0ustar nikrationikratio00000000000000''' backends/__init__.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from . import local, s3, gs, s3c, swift, rackspace, swiftks #: Mapping from storage URL prefixes to backend classes prefix_map = { 's3': s3.Backend, 'local': local.Backend, 'gs': gs.Backend, 's3c': s3c.Backend, 'swift': swift.Backend, 'swiftks': swiftks.Backend, 'rackspace': rackspace.Backend } __all__ = [ 'common', 'pool', 'comprenc' ] + list(prefix_map.keys()) s3ql-2.26/src/s3ql/backends/rackspace.py0000644000175000017500000000232012615000156021531 0ustar nikrationikratio00000000000000''' rackspace.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from ..logging import logging, QuietError # Ensure use of custom logger class from . 
import swiftks from ..inherit_docstrings import copy_ancestor_docstring import re log = logging.getLogger(__name__) class Backend(swiftks.Backend): """A backend to store data in Rackspace CloudFiles""" @copy_ancestor_docstring def _parse_storage_url(self, storage_url, ssl_context): hit = re.match(r'^rackspace://' # Backend r'([^/:]+)' # Region r'/([^/]+)' # Bucketname r'(?:/(.*))?$', # Prefix storage_url) if not hit: raise QuietError('Invalid storage URL', exitcode=2) region = hit.group(1) containername = hit.group(2) prefix = hit.group(3) or '' if ssl_context: port = 443 else: port = 80 self.hostname = 'auth.api.rackspacecloud.com' self.port = port self.container_name = containername self.prefix = prefix self.region = region s3ql-2.26/src/s3ql/backends/pool.py0000644000175000017500000000352012615000156020551 0ustar nikrationikratio00000000000000''' pool.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from ..logging import logging # Ensure use of custom logger class import threading from contextlib import contextmanager log = logging.getLogger(__name__) class BackendPool: '''A pool of backends This class is threadsafe. All methods (except for internal methods starting with underscore) may be called concurrently by different threads. ''' def __init__(self, factory): '''Init pool *factory* should be a callable that provides new connections. ''' self.factory = factory self.pool = [] self.lock = threading.Lock() def pop_conn(self): '''Pop connection from pool''' with self.lock: if self.pool: return self.pool.pop() else: return self.factory() def push_conn(self, conn): '''Push connection back into pool''' conn.reset() with self.lock: self.pool.append(conn) def flush(self): '''Close all backends in pool This method calls the `close` method on all backends currently in the pool. ''' with self.lock: while self.pool: self.pool.pop().close() @contextmanager def __call__(self, close=False): '''Provide connection from pool (context manager) If *close* is True, the backend's close method is automatically called (which frees any allocated resources, but may slow down reuse of the backend object). ''' conn = self.pop_conn() try: yield conn finally: if close: conn.close() self.push_conn(conn) s3ql-2.26/src/s3ql/backends/swift.py0000644000175000017500000010220513160156175020745 0ustar nikrationikratio00000000000000''' swift.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from ..logging import logging, QuietError, LOG_ONCE # Ensure use of custom logger class from .. import BUFSIZE from .common import (AbstractBackend, NoSuchObject, retry, AuthorizationError, DanglingStorageURLError, retry_generator, get_proxy, get_ssl_context) from .s3c import HTTPError, ObjectR, ObjectW, md5sum_b64, BadDigestError from . 
import s3c from ..inherit_docstrings import (copy_ancestor_docstring, prepend_ancestor_docstring, ABCDocstMeta) from dugong import (HTTPConnection, BodyFollowing, is_temp_network_error, CaseInsensitiveDict, ConnectionClosed) from urllib.parse import urlsplit import json import shutil import re import os import urllib.parse import ssl from distutils.version import LooseVersion log = logging.getLogger(__name__) #: Suffix to use when creating temporary objects TEMP_SUFFIX = '_tmp$oentuhuo23986konteuh1062$' class Backend(AbstractBackend, metaclass=ABCDocstMeta): """A backend to store data in OpenStack Swift The backend guarantees get after create consistency, i.e. a newly created object will be immediately retrievable. """ hdr_prefix = 'X-Object-' known_options = {'no-ssl', 'ssl-ca-path', 'tcp-timeout', 'disable-expect100', 'no-feature-detection'} _add_meta_headers = s3c.Backend._add_meta_headers _extractmeta = s3c.Backend._extractmeta _assert_empty_response = s3c.Backend._assert_empty_response _dump_response = s3c.Backend._dump_response clear = s3c.Backend.clear reset = s3c.Backend.reset def __init__(self, storage_url, login, password, options): super().__init__() self.options = options self.hostname = None self.port = None self.container_name = None self.prefix = None self.auth_token = None self.auth_prefix = None self.conn = None self.password = password self.login = login self.features = Features() # We may need the context even if no-ssl has been specified, # because no-ssl applies only to the authentication URL. self.ssl_context = get_ssl_context(options.get('ssl-ca-path', None)) self._parse_storage_url(storage_url, self.ssl_context) self.proxy = get_proxy(self.ssl_context is not None) self._container_exists() def __str__(self): return 'swift container %s, prefix %s' % (self.container_name, self.prefix) @property @copy_ancestor_docstring def has_native_rename(self): return False @retry def _container_exists(self): '''Make sure that the container exists''' try: self._do_request('GET', '/', query_string={'limit': 1 }) self.conn.discard() except HTTPError as exc: if exc.status == 404: raise DanglingStorageURLError(self.container_name) raise def _parse_storage_url(self, storage_url, ssl_context): '''Init instance variables from storage url''' hit = re.match(r'^[a-zA-Z0-9]+://' # Backend r'([^/:]+)' # Hostname r'(?::([0-9]+))?' # Port r'/([^/]+)' # Bucketname r'(?:/(.*))?$', # Prefix storage_url) if not hit: raise QuietError('Invalid storage URL', exitcode=2) hostname = hit.group(1) if hit.group(2): port = int(hit.group(2)) elif ssl_context: port = 443 else: port = 80 containername = hit.group(3) prefix = hit.group(4) or '' self.hostname = hostname self.port = port self.container_name = containername self.prefix = prefix @copy_ancestor_docstring def is_temp_failure(self, exc): #IGNORE:W0613 if isinstance(exc, AuthenticationExpired): return True # In doubt, we retry on 5xx (Server error). However, there are some # codes where retry is definitely not desired. For 4xx (client error) we # do not retry in general, but for 408 (Request Timeout) RFC 2616 # specifies that the client may repeat the request without # modifications. 
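        # The excluded status codes (e.g. 501 Not Implemented, 505 HTTP Version
        # Not Supported) indicate permanent conditions that retrying cannot fix.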
elif (isinstance(exc, HTTPError) and ((500 <= exc.status <= 599 and exc.status not in (501,505,508,510,511,523)) or exc.status == 408 or 'client disconnected' in exc.msg.lower())): return True elif is_temp_network_error(exc): return True # Temporary workaround for # https://bitbucket.org/nikratio/s3ql/issues/87 and # https://bitbucket.org/nikratio/s3ql/issues/252 elif (isinstance(exc, ssl.SSLError) and (str(exc).startswith('[SSL: BAD_WRITE_RETRY]') or str(exc).startswith('[SSL: BAD_LENGTH]'))): return True return False def _get_conn(self): '''Obtain connection to server and authentication token''' log.debug('started') if 'no-ssl' in self.options: ssl_context = None else: ssl_context = self.ssl_context headers = CaseInsensitiveDict() headers['X-Auth-User'] = self.login headers['X-Auth-Key'] = self.password with HTTPConnection(self.hostname, self.port, proxy=self.proxy, ssl_context=ssl_context) as conn: conn.timeout = int(self.options.get('tcp-timeout', 20)) for auth_path in ('/v1.0', '/auth/v1.0'): log.debug('GET %s', auth_path) conn.send_request('GET', auth_path, headers=headers) resp = conn.read_response() if resp.status in (404, 412): log.debug('auth to %s failed, trying next path', auth_path) conn.discard() continue elif resp.status == 401: raise AuthorizationError(resp.reason) elif resp.status > 299 or resp.status < 200: raise HTTPError(resp.status, resp.reason, resp.headers) # Pylint can't infer SplitResult Types #pylint: disable=E1103 self.auth_token = resp.headers['X-Auth-Token'] o = urlsplit(resp.headers['X-Storage-Url']) self.auth_prefix = urllib.parse.unquote(o.path) if o.scheme == 'https': ssl_context = self.ssl_context elif o.scheme == 'http': ssl_context = None else: # fall through to scheme used for authentication pass # mock server can only handle one connection at a time # so we explicitly disconnect this connection before # opening the feature detection connection # (mock server handles both - storage and authentication) conn.disconnect() self._detect_features(o.hostname, o.port, ssl_context) conn = HTTPConnection(o.hostname, o.port, proxy=self.proxy, ssl_context=ssl_context) conn.timeout = int(self.options.get('tcp-timeout', 20)) return conn raise RuntimeError('No valid authentication path found') def _do_request(self, method, path, subres=None, query_string=None, headers=None, body=None): '''Send request, read and return response object This method modifies the *headers* dictionary. 
''' log.debug('started with %r, %r, %r, %r, %r, %r', method, path, subres, query_string, headers, body) if headers is None: headers = CaseInsensitiveDict() if isinstance(body, (bytes, bytearray, memoryview)): headers['Content-MD5'] = md5sum_b64(body) if self.conn is None: log.debug('no active connection, calling _get_conn()') self.conn = self._get_conn() # Construct full path path = urllib.parse.quote('%s/%s%s' % (self.auth_prefix, self.container_name, path)) if query_string: s = urllib.parse.urlencode(query_string, doseq=True) if subres: path += '?%s&%s' % (subres, s) else: path += '?%s' % s elif subres: path += '?%s' % subres headers['X-Auth-Token'] = self.auth_token try: resp = self._do_request_inner(method, path, body=body, headers=headers) except Exception as exc: if is_temp_network_error(exc) or isinstance(exc, ssl.SSLError): # We probably can't use the connection anymore self.conn.disconnect() raise # Success if resp.status >= 200 and resp.status <= 299: return resp # Expired auth token if resp.status == 401: self._do_authentication_expired(resp.reason) # raises AuthenticationExpired # If method == HEAD, server must not return response body # even in case of errors self.conn.discard() if method.upper() == 'HEAD': raise HTTPError(resp.status, resp.reason, resp.headers) else: raise HTTPError(resp.status, resp.reason, resp.headers) # Including this code directly in _do_request would be very messy since # we can't `return` the response early, thus the separate method def _do_request_inner(self, method, path, body, headers): '''The guts of the _do_request method''' log.debug('started with %s %s', method, path) use_expect_100c = not self.options.get('disable-expect100', False) if body is None or isinstance(body, (bytes, bytearray, memoryview)): self.conn.send_request(method, path, body=body, headers=headers) return self.conn.read_response() body_len = os.fstat(body.fileno()).st_size self.conn.send_request(method, path, expect100=use_expect_100c, headers=headers, body=BodyFollowing(body_len)) if use_expect_100c: log.debug('waiting for 100-continue') resp = self.conn.read_response() if resp.status != 100: return resp log.debug('writing body data') try: shutil.copyfileobj(body, self.conn, BUFSIZE) except ConnectionClosed: log.debug('interrupted write, server closed connection') # Server closed connection while we were writing body data - # but we may still be able to read an error response try: resp = self.conn.read_response() except ConnectionClosed: # No server response available log.debug('no response available in buffer') pass else: if resp.status >= 400: # error response return resp log.warning('Server broke connection during upload, but signaled ' '%d %s', resp.status, resp.reason) # Re-raise original error raise return self.conn.read_response() @retry @copy_ancestor_docstring def lookup(self, key): log.debug('started with %s', key) if key.endswith(TEMP_SUFFIX): raise ValueError('Keys must not end with %s' % TEMP_SUFFIX) try: resp = self._do_request('HEAD', '/%s%s' % (self.prefix, key)) self._assert_empty_response(resp) except HTTPError as exc: if exc.status == 404: raise NoSuchObject(key) else: raise return self._extractmeta(resp, key) @retry @copy_ancestor_docstring def get_size(self, key): if key.endswith(TEMP_SUFFIX): raise ValueError('Keys must not end with %s' % TEMP_SUFFIX) log.debug('started with %s', key) try: resp = self._do_request('HEAD', '/%s%s' % (self.prefix, key)) self._assert_empty_response(resp) except HTTPError as exc: if exc.status == 404: raise NoSuchObject(key) 
else: raise try: return int(resp.headers['Content-Length']) except KeyError: raise RuntimeError('HEAD request did not return Content-Length') @retry @copy_ancestor_docstring def open_read(self, key): if key.endswith(TEMP_SUFFIX): raise ValueError('Keys must not end with %s' % TEMP_SUFFIX) try: resp = self._do_request('GET', '/%s%s' % (self.prefix, key)) except HTTPError as exc: if exc.status == 404: raise NoSuchObject(key) raise try: meta = self._extractmeta(resp, key) except BadDigestError: # If there's less than 64 kb of data, read and throw # away. Otherwise re-establish connection. if resp.length is not None and resp.length < 64*1024: self.conn.discard() else: self.conn.disconnect() raise return ObjectR(key, resp, self, meta) @prepend_ancestor_docstring def open_write(self, key, metadata=None, is_compressed=False): """ The returned object will buffer all data and only start the upload when its `close` method is called. """ log.debug('started with %s', key) if key.endswith(TEMP_SUFFIX): raise ValueError('Keys must not end with %s' % TEMP_SUFFIX) headers = CaseInsensitiveDict() if metadata is None: metadata = dict() self._add_meta_headers(headers, metadata, chunksize=self.features.max_meta_len) return ObjectW(key, self, headers) @retry @copy_ancestor_docstring def delete(self, key, force=False, is_retry=False): if key.endswith(TEMP_SUFFIX): raise ValueError('Keys must not end with %s' % TEMP_SUFFIX) log.debug('started with %s', key) try: resp = self._do_request('DELETE', '/%s%s' % (self.prefix, key)) self._assert_empty_response(resp) except HTTPError as exc: # Server may have deleted the object even though we did not # receive the response. if exc.status == 404 and not (force or is_retry): raise NoSuchObject(key) elif exc.status != 404: raise @retry def _delete_multi(self, keys, force=False): """Doing bulk delete of multiple objects at a time. This is a feature of the configurable middleware "Bulk" so it can only be used after the middleware was detected. 
(Introduced in Swift 1.8.0.rc1) See https://docs.openstack.org/swift/latest/middleware.html#bulk-delete and https://github.com/openstack/swift/blob/master/swift/common/middleware/bulk.py A request example: 'POST /?bulk-delete Content-type: text/plain; charset=utf-8 Accept: application/json /container/prefix_key1 /container/prefix_key2' A successful response: 'HTTP/1.1 200 OK Content-Type: application/json {"Number Not Found": 0, "Response Status": "200 OK", "Response Body": "", "Errors": [], "Number Deleted": 2}' An error response: 'HTTP/1.1 200 OK Content-Type: application/json {"Number Not Found": 0, "Response Status": "500 Internal Server Error", "Response Body": "An error description", "Errors": [ ['/container/prefix_key2', 'An error description'] ], "Number Deleted": 1}' Response when some objects where not found: 'HTTP/1.1 200 OK Content-Type: application/json {"Number Not Found": 1, "Response Status": "400 Bad Request", "Response Body": "Invalid bulk delete.", "Errors": [], "Number Deleted": 1}' """ body = [] esc_prefix = "/%s/%s" % (urllib.parse.quote(self.container_name), urllib.parse.quote(self.prefix)) for key in keys: body.append('%s%s' % (esc_prefix, urllib.parse.quote(key))) body = '\n'.join(body).encode('utf-8') headers = { 'content-type': 'text/plain; charset=utf-8', 'accept': 'application/json' } resp = self._do_request('POST', '/', subres='bulk-delete', body=body, headers=headers) # bulk deletes should always return 200 if resp.status is not 200: raise HTTPError(resp.status, resp.reason, resp.headers) hit = re.match(r'^application/json(;\s*charset="?(.+?)"?)?$', resp.headers['content-type']) if not hit: log.error('Unexpected server response. Expected json, got:\n%s', self._dump_response(resp)) raise RuntimeError('Unexpected server reply') # there might be an arbitrary amount of whitespace before the # JSON response (to keep the connection from timing out) # but json.loads discards these whitespace characters automatically resp_dict = json.loads(self.conn.readall().decode(hit.group(2) or 'utf-8')) log.debug('Response %s', resp_dict) try: resp_status_code, resp_status_text = _split_response_status(resp_dict['Response Status']) except ValueError: raise RuntimeError('Unexpected server reply') if resp_status_code is 200: # No errors occured, everything has been deleted del keys[:] return # Some errors occured, so we need to determine what has # been deleted and what hasn't failed_keys = [] offset = len(esc_prefix) for error in resp_dict['Errors']: fullkey = error[0] # strangely the name is url encoded in JSON assert fullkey.startswith(esc_prefix) key = urllib.parse.unquote(fullkey[offset:]) failed_keys.append(key) log.debug('Delete %s failed with %s', key, error[1]) for key in keys[:]: if key not in failed_keys: keys.remove(key) if resp_status_code in (400, 404) and len(resp_dict['Errors']) == 0: # Swift returns 400 instead of 404 when files were not found. # (but we also accept the correct status code 404 if Swift # decides to correct this in the future) # ensure that we actually have objects that were not found # (otherwise there is a logic error that we need to know about) assert resp_dict['Number Not Found'] > 0 # Since AbstractBackend.delete_multi allows this, we just # swallow this error even when *force* is False. # N.B.: We removed even the keys from *keys* that are not found. # This is because Swift only returns a counter of deleted # objects, not the list of deleted objects (as S3 does). 
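            # Treat "object(s) already gone" like a successful delete.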
return # At this point it is clear that the server has sent some kind of error response. # Swift is not very consistent in returning errors. # We need to jump through these hoops to get something meaningful. error_msg = resp_dict['Response Body'] error_code = resp_status_code if not error_msg: if len(resp_dict['Errors']) > 0: error_code, error_msg = _split_response_status(resp_dict['Errors'][0][1]) else: error_msg = resp_status_text if error_code == 401: # Expired auth token self._do_authentication_expired(error_msg) # raises AuthenticationExpired if 'Invalid bulk delete.' in error_msg: error_code = 400 # change error message to something more meaningful error_msg = 'Sent a bulk delete with an empty list of keys to delete' elif 'Max delete failures exceeded' in error_msg: error_code = 502 elif 'Maximum Bulk Deletes: ' in error_msg: # Sent more keys in one bulk delete than allowed error_code = 413 elif 'Invalid File Name' in error_msg: # get returned when file name is too long error_code = 422 raise HTTPError(error_code, error_msg, {}) @copy_ancestor_docstring def delete_multi(self, keys, force=False): log.debug('started with %s', keys) if self.features.has_bulk_delete: while len(keys) > 0: tmp = keys[:self.features.max_deletes] try: self._delete_multi(tmp, force=force) finally: keys[:self.features.max_deletes] = tmp else: super().delete_multi(keys, force=force) # We cannot wrap the entire _copy_via_put_post() method into a retry() # decorator, because _copy_via_put_post() issues multiple requests. # If the server happens to regularly close the connection after a request # (even though it was processed correctly), we'd never make any progress # if we always restart from the first request. # We experimented with adding a retry(fn, args, kwargs) function to wrap # individual calls, but this doesn't really improve the code because we # typically also have to wrap e.g. a discard() or assert_empty() call, so we # end up with having to create an additional function anyway. Having # anonymous blocks in Python would be very nice here. @retry def _copy_helper(self, method, path, headers): self._do_request(method, path, headers=headers) self.conn.discard() def _copy_via_put_post(self, src, dest, metadata=None): """Fallback copy method for older Swift implementations.""" headers = CaseInsensitiveDict() headers['X-Copy-From'] = '/%s/%s%s' % (self.container_name, self.prefix, src) if metadata is not None: # We can't do a direct copy, because during copy we can only update the # metadata, but not replace it. Therefore, we have to make a full copy # followed by a separate request to replace the metadata. To avoid an # inconsistent intermediate state, we use a temporary object. 
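            # The X-Delete-After header set below makes the server expire the
            # temporary object automatically if the final rename step never runs.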
final_dest = dest dest = final_dest + TEMP_SUFFIX headers['X-Delete-After'] = '600' try: self._copy_helper('PUT', '/%s%s' % (self.prefix, dest), headers) except HTTPError as exc: if exc.status == 404: raise NoSuchObject(src) raise if metadata is None: return # Update metadata headers = CaseInsensitiveDict() self._add_meta_headers(headers, metadata, chunksize=self.features.max_meta_len) self._copy_helper('POST', '/%s%s' % (self.prefix, dest), headers) # Rename object headers = CaseInsensitiveDict() headers['X-Copy-From'] = '/%s/%s%s' % (self.container_name, self.prefix, dest) self._copy_helper('PUT', '/%s%s' % (self.prefix, final_dest), headers) @retry def _copy_via_copy(self, src, dest, metadata=None): """Copy for more modern Swift implementations that know the X-Fresh-Metadata option and the native COPY method.""" headers = CaseInsensitiveDict() headers['Destination'] = '/%s/%s%s' % (self.container_name, self.prefix, dest) if metadata is not None: self._add_meta_headers(headers, metadata, chunksize=self.features.max_meta_len) headers['X-Fresh-Metadata'] = 'true' resp = self._do_request('COPY', '/%s%s' % (self.prefix, src), headers=headers) self._assert_empty_response(resp) @copy_ancestor_docstring def copy(self, src, dest, metadata=None): log.debug('started with %s, %s', src, dest) if dest.endswith(TEMP_SUFFIX) or src.endswith(TEMP_SUFFIX): raise ValueError('Keys must not end with %s' % TEMP_SUFFIX) if self.features.has_copy: self._copy_via_copy(src, dest, metadata=metadata) else: self._copy_via_put_post(src, dest, metadata=metadata) @retry @copy_ancestor_docstring def update_meta(self, key, metadata): log.debug('started with %s', key) headers = CaseInsensitiveDict() self._add_meta_headers(headers, metadata, chunksize=self.features.max_meta_len) self._do_request('POST', '/%s%s' % (self.prefix, key), headers=headers) self.conn.discard() @retry_generator @copy_ancestor_docstring def list(self, prefix='', start_after='', batch_size=5000): log.debug('started with %s, %s', prefix, start_after) keys_remaining = True marker = self.prefix + start_after prefix = self.prefix + prefix while keys_remaining: log.debug('requesting with marker=%s', marker) try: resp = self._do_request('GET', '/', query_string={'prefix': prefix, 'format': 'json', 'marker': marker, 'limit': batch_size }) except HTTPError as exc: if exc.status == 404: raise DanglingStorageURLError(self.container_name) raise if resp.status == 204: return hit = re.match('application/json; charset="?(.+?)"?$', resp.headers['content-type']) if not hit: log.error('Unexpected server response. Expected json, got:\n%s', self._dump_response(resp)) raise RuntimeError('Unexpected server reply') strip = len(self.prefix) count = 0 try: # JSON does not have a streaming API, so we just read # the entire response in memory. for dataset in json.loads(self.conn.read().decode(hit.group(1))): count += 1 marker = dataset['name'] if marker.endswith(TEMP_SUFFIX): continue yield marker[strip:] except GeneratorExit: self.conn.discard() break keys_remaining = count == batch_size @copy_ancestor_docstring def close(self): self.conn.disconnect() def _detect_features(self, hostname, port, ssl_context): '''Try to figure out the Swift version and supported features by examining the /info endpoint of the storage server. 
See https://docs.openstack.org/swift/latest/middleware.html#discoverability ''' if 'no-feature-detection' in self.options: log.debug('Skip feature detection') return if not port: port = 443 if ssl_context else 80 detected_features = Features() with HTTPConnection(hostname, port, proxy=self.proxy, ssl_context=ssl_context) as conn: conn.timeout = int(self.options.get('tcp-timeout', 20)) log.debug('GET /info') conn.send_request('GET', '/info') resp = conn.read_response() # 200, 401, 403 and 404 are all OK since the /info endpoint # may not be accessible (misconfiguration) or may not # exist (old Swift version). if resp.status not in (200, 401, 403, 404): log.error("Wrong server response.\n%s", self._dump_response(resp, body=conn.read(2048))) raise HTTPError(resp.status, resp.reason, resp.headers) if resp.status is 200: hit = re.match(r'^application/json(;\s*charset="?(.+?)"?)?$', resp.headers['content-type']) if not hit: log.error("Wrong server response. Expected json. Got: \n%s", self._dump_response(resp, body=conn.read(2048))) raise RuntimeError('Unexpected server reply') info = json.loads(conn.readall().decode(hit.group(2) or 'utf-8')) swift_info = info.get('swift', {}) log.debug('%s:%s/info returns %s', hostname, port, info) swift_version_string = swift_info.get('version', None) if swift_version_string and \ LooseVersion(swift_version_string) >= LooseVersion('2.8'): detected_features.has_copy = True # Default metadata value length constrain is 256 bytes # but the provider could configure another value. # We only decrease the chunk size since 255 is a big enough chunk size. max_meta_len = swift_info.get('max_meta_value_length', None) if isinstance(max_meta_len, int) and max_meta_len < 256: detected_features.max_meta_len = max_meta_len if info.get('bulk_delete', False): detected_features.has_bulk_delete = True bulk_delete = info['bulk_delete'] assert bulk_delete.get('max_failed_deletes', 1000) <= \ bulk_delete.get('max_deletes_per_request', 10000) assert bulk_delete.get('max_failed_deletes', 1000) > 0 # The block cache removal queue has a capacity of 1000. # We do not need bigger values than that. # We use max_failed_deletes instead of max_deletes_per_request # because then we can be sure even when all our delete requests # get rejected we get a complete error list back from the server. # If we would set the value higher, _delete_multi() would maybe # delete some entries from the *keys* list that did not get # deleted and would miss them in a retry. detected_features.max_deletes = min(1000, int(bulk_delete.get('max_failed_deletes', 1000))) log.info('Detected Swift features for %s:%s: %s', hostname, port, detected_features, extra=LOG_ONCE) else: log.debug('%s:%s/info not found or not accessible. Skip feature detection.', hostname, port) self.features = detected_features def _do_authentication_expired(self, reason): '''Closes the current connection and raises AuthenticationExpired''' log.info('OpenStack auth token seems to have expired, requesting new one.') self.conn.disconnect() # Force constructing a new connection with a new token, otherwise # the connection will be reestablished with the same token. self.conn = None raise AuthenticationExpired(reason) def _split_response_status(line): '''Splits a HTTP response line into status code (integer) and status text. 
Returns 2-tuple (int, string) Raises ValueError when line is not parsable''' hit = re.match('^([0-9]{3})\s+(.*)$', line) if not hit: log.error('Expected valid Response Status, got: %s', line) raise ValueError('Expected valid Response Status, got: %s' % line) return (int(hit.group(1)), hit.group(2)) class AuthenticationExpired(Exception): '''Raised if the provided Authentication Token has expired''' def __init__(self, msg): super().__init__() self.msg = msg def __str__(self): return 'Auth token expired. Server said: %s' % self.msg class Features: """Set of configurable features for Swift servers. Swift is deployed in many different versions and configurations. To be able to use advanced features like bulk delete we need to make sure that the Swift server we are using can handle them. This is a value object.""" __slots__ = ['has_copy', 'has_bulk_delete', 'max_deletes', 'max_meta_len'] def __init__(self, has_copy=False, has_bulk_delete=False, max_deletes=1000, max_meta_len=255): self.has_copy = has_copy self.has_bulk_delete = has_bulk_delete self.max_deletes = max_deletes self.max_meta_len = max_meta_len def __str__(self): features = [] if self.has_copy: features.append('copy via COPY') if self.has_bulk_delete: features.append('Bulk delete %d keys at a time' % self.max_deletes) features.append('maximum meta value length is %d bytes' % self.max_meta_len) return ', '.join(features) def __repr__(self): init_kwargs = [p + '=' + repr(getattr(self, p)) for p in self.__slots__] return 'Features(%s)' % ', '.join(init_kwargs) def __hash__(self): return hash(repr(self)) def __eq__(self, other): return repr(self) == repr(other) def __ne__(self, other): return repr(self) != repr(other) s3ql-2.26/src/s3ql/backends/local.py0000664000175000017500000002322613237313252020706 0ustar nikrationikratio00000000000000''' local.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from ..logging import logging # Ensure use of custom logger class from .. import BUFSIZE from ..inherit_docstrings import (copy_ancestor_docstring, ABCDocstMeta) from .common import (AbstractBackend, DanglingStorageURLError, NoSuchObject, CorruptedObjectError) from ..common import ThawError, freeze_basic_mapping, thaw_basic_mapping import _thread import struct import io import os import shutil log = logging.getLogger(__name__) class Backend(AbstractBackend, metaclass=ABCDocstMeta): ''' A backend that stores data on the local hard disk ''' needs_login = False known_options = set() def __init__(self, storage_url, backend_login=None, backend_pw=None, options=None): '''Initialize local backend Login and password are ignored. 
''' # Unused argument #pylint: disable=W0613 super().__init__() self.prefix = storage_url[len('local://'):].rstrip('/') if not os.path.exists(self.prefix): raise DanglingStorageURLError(self.prefix) @property @copy_ancestor_docstring def has_native_rename(self): return False def __str__(self): return 'local directory %s' % self.prefix @copy_ancestor_docstring def is_temp_failure(self, exc): #IGNORE:W0613 return False @copy_ancestor_docstring def lookup(self, key): path = self._key_to_path(key) try: with open(path, 'rb') as src: return _read_meta(src) except FileNotFoundError: raise NoSuchObject(key) @copy_ancestor_docstring def get_size(self, key): return os.path.getsize(self._key_to_path(key)) @copy_ancestor_docstring def open_read(self, key): path = self._key_to_path(key) try: fh = ObjectR(path) except FileNotFoundError: raise NoSuchObject(key) try: fh.metadata = _read_meta(fh) except ThawError: fh.close() raise CorruptedObjectError('Invalid metadata') return fh @copy_ancestor_docstring def open_write(self, key, metadata=None, is_compressed=False): if metadata is None: metadata = dict() elif not isinstance(metadata, dict): raise TypeError('*metadata*: expected dict or None, got %s' % type(metadata)) path = self._key_to_path(key) buf = freeze_basic_mapping(metadata) if len(buf).bit_length() > 16: raise ValueError('Metadata too large') # By renaming, we make sure that there are no # conflicts between parallel reads, the last one wins tmpname = '%s#%d-%d.tmp' % (path, os.getpid(), _thread.get_ident()) dest = ObjectW(tmpname) os.rename(tmpname, path) dest.write(b's3ql_1\n') dest.write(struct.pack(' 16: raise ValueError('Metadata too large') path_src = self._key_to_path(src) path_dest = self._key_to_path(dest) try: src = open(path_src, 'rb') except FileNotFoundError: raise NoSuchObject(src) dest = None try: # By renaming, we make sure that there are no conflicts between # parallel writes, the last one wins tmpname = '%s#%d-%d.tmp' % (path_dest, os.getpid(), _thread.get_ident()) dest = ObjectW(tmpname) if metadata is not None: try: _read_meta(src) except ThawError: raise CorruptedObjectError('Invalid metadata') dest.write(b's3ql_1\n') dest.write(struct.pack(' This work can be distributed under the terms of the GNU GPLv3. ''' from ..logging import logging, QuietError # Ensure use of custom logger class from .. 
import BUFSIZE from .common import (AbstractBackend, NoSuchObject, retry, AuthorizationError, AuthenticationError, DanglingStorageURLError, retry_generator, get_proxy, get_ssl_context, CorruptedObjectError, checksum_basic_mapping) from ..inherit_docstrings import (copy_ancestor_docstring, prepend_ancestor_docstring, ABCDocstMeta) from io import BytesIO from shutil import copyfileobj from dugong import (HTTPConnection, is_temp_network_error, BodyFollowing, CaseInsensitiveDict, UnsupportedResponse, ConnectionClosed) from base64 import b64encode, b64decode from email.utils import parsedate_tz, mktime_tz from ast import literal_eval from urllib.parse import urlsplit, quote, unquote import defusedxml.cElementTree as ElementTree from itertools import count import hashlib import os import binascii import hmac import re import tempfile import time import ssl import urllib.parse C_DAY_NAMES = [ 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun' ] C_MONTH_NAMES = [ 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec' ] XML_CONTENT_RE = re.compile(r'^(?:application|text)/xml(?:;|$)', re.IGNORECASE) log = logging.getLogger(__name__) class Backend(AbstractBackend, metaclass=ABCDocstMeta): """A backend to stored data in some S3 compatible storage service. The backend guarantees only immediate get after create consistency. """ xml_ns_prefix = '{http://s3.amazonaws.com/doc/2006-03-01/}' hdr_prefix = 'x-amz-' known_options = {'no-ssl', 'ssl-ca-path', 'tcp-timeout', 'dumb-copy', 'disable-expect100'} def __init__(self, storage_url, login, password, options): '''Initialize backend object *ssl_context* may be a `ssl.SSLContext` instance or *None*. ''' super().__init__() if 'no-ssl' in options: self.ssl_context = None else: self.ssl_context = get_ssl_context(options.get('ssl-ca-path', None)) (host, port, bucket_name, prefix) = self._parse_storage_url(storage_url, self.ssl_context) self.options = options self.bucket_name = bucket_name self.prefix = prefix self.hostname = host self.port = port self.proxy = get_proxy(self.ssl_context is not None) self.conn = self._get_conn() self.password = password self.login = login @property @copy_ancestor_docstring def has_native_rename(self): return False # NOTE: ! This function is also used by the swift backend ! @copy_ancestor_docstring def reset(self): if (self.conn is not None and (self.conn.response_pending() or self.conn._out_remaining)): log.debug('Resetting state of http connection %d', id(self.conn)) self.conn.disconnect() @staticmethod def _parse_storage_url(storage_url, ssl_context): '''Extract information from storage URL Return a tuple * (host, port, bucket_name, prefix) * . ''' hit = re.match(r'^[a-zA-Z0-9]+://' # Backend r'([^/:]+)' # Hostname r'(?::([0-9]+))?' 
# Port r'/([^/]+)' # Bucketname r'(?:/(.*))?$', # Prefix storage_url) if not hit: raise QuietError('Invalid storage URL', exitcode=2) hostname = hit.group(1) if hit.group(2): port = int(hit.group(2)) elif ssl_context: port = 443 else: port = 80 bucketname = hit.group(3) prefix = hit.group(4) or '' return (hostname, port, bucketname, prefix) def _get_conn(self): '''Return connection to server''' conn = HTTPConnection(self.hostname, self.port, proxy=self.proxy, ssl_context=self.ssl_context) conn.timeout = int(self.options.get('tcp-timeout', 20)) return conn @staticmethod def _tag_xmlns_uri(elem): '''Extract the XML namespace (xmlns) URI from an element''' if elem.tag[0] == '{': uri, ignore, tag = elem.tag[1:].partition("}") else: uri = None return uri # This method is also used implicitly for the retry handling of # `gs.Backend._get_access_token`. When modifying this method, do not forget # to check if this makes it unsuitable for use by `_get_access_token` (in # that case we will have to implement a custom retry logic there). @copy_ancestor_docstring def is_temp_failure(self, exc): #IGNORE:W0613 if isinstance(exc, (InternalError, BadDigestError, IncompleteBodyError, RequestTimeoutError, OperationAbortedError, SlowDownError, ServiceUnavailableError)): return True elif is_temp_network_error(exc): return True # In doubt, we retry on 5xx (Server error). However, there are some # codes where retry is definitely not desired. For 4xx (client error) we # do not retry in general, but for 408 (Request Timeout) RFC 2616 # specifies that the client may repeat the request without # modifications. elif (isinstance(exc, HTTPError) and ((500 <= exc.status <= 599 and exc.status not in (501,505,508,510,511,523)) or exc.status == 408)): return True # Temporary workaround for # https://bitbucket.org/nikratio/s3ql/issues/87 and # https://bitbucket.org/nikratio/s3ql/issues/252 elif (isinstance(exc, ssl.SSLError) and (str(exc).startswith('[SSL: BAD_WRITE_RETRY]') or str(exc).startswith('[SSL: BAD_LENGTH]'))): return True return False # NOTE: ! This function is also used by the swift backend. ! def _dump_response(self, resp, body=None): '''Return string representation of server response Only the beginning of the response body is read, so this is mostly useful for debugging. ''' if body is None: try: body = self.conn.read(2048) if body: self.conn.discard() except UnsupportedResponse: log.warning('Unsupported response, trying to retrieve data from raw socket!') body = self.conn.read_raw(2048) self.conn.close() else: body = body[:2048] return '%d %s\n%s\n\n%s' % (resp.status, resp.reason, '\n'.join('%s: %s' % x for x in resp.headers.items()), body.decode('utf-8', errors='backslashreplace')) # NOTE: ! This function is also used by the swift backend. ! def _assert_empty_response(self, resp): '''Assert that current response body is empty''' buf = self.conn.read(2048) if not buf: return # expected # Log the problem self.conn.discard() log.error('Unexpected server response. Expected nothing, got:\n' '%d %s\n%s\n\n%s', resp.status, resp.reason, '\n'.join('%s: %s' % x for x in resp.headers.items()), buf) raise RuntimeError('Unexpected server response') @retry @copy_ancestor_docstring def delete(self, key, force=False, is_retry=False): log.debug('started with %s', key) try: resp = self._do_request('DELETE', '/%s%s' % (self.prefix, key)) self._assert_empty_response(resp) except NoSuchKeyError: # Server may have deleted the object even though we did not # receive the response. 
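            # The @retry wrapper sets *is_retry* on repeated attempts, so an
            # object that was already removed in a previous (timed-out) attempt
            # is not reported as an error.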
if force or is_retry: pass else: raise NoSuchObject(key) @retry_generator @copy_ancestor_docstring def list(self, prefix='', start_after=''): log.debug('started with %s, %s', prefix, start_after) keys_remaining = True # Without this, a call to list('foo') would result # in *prefix* being longer than *marker* - which causes # trouble for some S3 implementions (minio). if start_after: marker = self.prefix + start_after else: marker = '' prefix = self.prefix + prefix while keys_remaining: log.debug('requesting with marker=%s', marker) keys_remaining = None resp = self._do_request('GET', '/', query_string={ 'prefix': prefix, 'marker': marker, 'max-keys': 1000 }) if not XML_CONTENT_RE.match(resp.headers['Content-Type']): raise RuntimeError('unexpected content type: %s' % resp.headers['Content-Type']) try: itree = iter(ElementTree.iterparse(self.conn, events=("start", "end"))) (event, root) = next(itree) root_xmlns_uri = self._tag_xmlns_uri(root) if root_xmlns_uri is None: root_xmlns_prefix = '' else: # Validate the XML namespace root_xmlns_prefix = '{%s}' % (root_xmlns_uri, ) if root_xmlns_prefix != self.xml_ns_prefix: log.error('Unexpected server reply to list operation:\n%s', self._dump_response(resp, body=None)) raise RuntimeError('List response has %s as root tag, unknown namespace' % root.tag) for (event, el) in itree: if event != 'end': continue if el.tag == root_xmlns_prefix + 'IsTruncated': keys_remaining = (el.text == 'true') elif el.tag == root_xmlns_prefix + 'Contents': marker = el.findtext(root_xmlns_prefix + 'Key') yield marker[len(self.prefix):] root.clear() except Exception as exc: if is_temp_network_error(exc) or isinstance(exc, ssl.SSLError): # We probably can't use the connection anymore self.conn.disconnect() raise except GeneratorExit: # Need to read rest of response self.conn.discard() break if keys_remaining is None: raise RuntimeError('Could not parse body') @retry @copy_ancestor_docstring def lookup(self, key): log.debug('started with %s', key) try: resp = self._do_request('HEAD', '/%s%s' % (self.prefix, key)) self._assert_empty_response(resp) except HTTPError as exc: if exc.status == 404: raise NoSuchObject(key) else: raise return self._extractmeta(resp, key) @retry @copy_ancestor_docstring def get_size(self, key): log.debug('started with %s', key) try: resp = self._do_request('HEAD', '/%s%s' % (self.prefix, key)) self._assert_empty_response(resp) except HTTPError as exc: if exc.status == 404: raise NoSuchObject(key) else: raise try: return int(resp.headers['Content-Length']) except KeyError: raise RuntimeError('HEAD request did not return Content-Length') @retry @copy_ancestor_docstring def open_read(self, key): try: resp = self._do_request('GET', '/%s%s' % (self.prefix, key)) except NoSuchKeyError: raise NoSuchObject(key) try: meta = self._extractmeta(resp, key) except (BadDigestError, CorruptedObjectError): # If there's less than 64 kb of data, read and throw # away. Otherwise re-establish connection. if resp.length is not None and resp.length < 64*1024: self.conn.discard() else: self.conn.disconnect() raise return ObjectR(key, resp, self, meta) @prepend_ancestor_docstring def open_write(self, key, metadata=None, is_compressed=False, extra_headers=None): """ The returned object will buffer all data and only start the upload when its `close` method is called. 
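
        A minimal usage sketch (key name and metadata are illustrative only)::

            with backend.open_write('some_key', {'size': 42}) as fh:
                fh.write(b'some data')
            # the upload happens when the with-block closes the handle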
""" log.debug('started with %s', key) headers = CaseInsensitiveDict() if extra_headers is not None: headers.update(extra_headers) if metadata is None: metadata = dict() self._add_meta_headers(headers, metadata) return ObjectW(key, self, headers) # NOTE: ! This function is also used by the swift backend. ! def _add_meta_headers(self, headers, metadata, chunksize=255): hdr_count = 0 length = 0 for key in metadata.keys(): if not isinstance(key, str): raise ValueError('dict keys must be str, not %s' % type(key)) val = metadata[key] if (not isinstance(val, (str, bytes, int, float, complex, bool)) and val is not None): raise ValueError('value for key %s (%s) is not elementary' % (key, val)) if isinstance(val, (bytes, bytearray)): val = b64encode(val) buf = ('%s: %s,' % (repr(key), repr(val))) buf = quote(buf, safe='!@#$^*()=+/?-_\'"><\\| `.,;:~') if len(buf) < chunksize: headers['%smeta-%03d' % (self.hdr_prefix, hdr_count)] = buf hdr_count += 1 length += 4 + len(buf) else: i = 0 while i*chunksize < len(buf): k = '%smeta-%03d' % (self.hdr_prefix, hdr_count) v = buf[i*chunksize:(i+1)*chunksize] headers[k] = v i += 1 hdr_count += 1 length += 4 + len(buf) if length > 2048: raise ValueError('Metadata too large') assert hdr_count <= 999 md5 = b64encode(checksum_basic_mapping(metadata)).decode('ascii') headers[self.hdr_prefix + 'meta-format'] = 'raw2' headers[self.hdr_prefix + 'meta-md5'] = md5 @retry @copy_ancestor_docstring def copy(self, src, dest, metadata=None, extra_headers=None): log.debug('started with %s, %s', src, dest) headers = CaseInsensitiveDict() if extra_headers is not None: headers.update(extra_headers) headers[self.hdr_prefix + 'copy-source'] = \ urllib.parse.quote('/%s/%s%s' % (self.bucket_name, self.prefix, src)) if metadata is None: headers[self.hdr_prefix + 'metadata-directive'] = 'COPY' else: headers[self.hdr_prefix + 'metadata-directive'] = 'REPLACE' self._add_meta_headers(headers, metadata) try: resp = self._do_request('PUT', '/%s%s' % (self.prefix, dest), headers=headers) except NoSuchKeyError: raise NoSuchObject(src) # When copying, S3 may return error despite a 200 OK status # http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html # https://doc.s3.amazonaws.com/proposals/copy.html if self.options.get('dumb-copy', False): self.conn.discard() return body = self.conn.readall() root = self._parse_xml_response(resp, body) # Some S3 implemenentations do not have a namespace on # CopyObjectResult. 
if root.tag in [self.xml_ns_prefix + 'CopyObjectResult', 'CopyObjectResult']: return elif root.tag in [self.xml_ns_prefix + 'Error', 'Error']: raise get_S3Error(root.findtext('Code'), root.findtext('Message'), resp.headers) else: log.error('Unexpected server reply to copy operation:\n%s', self._dump_response(resp, body)) raise RuntimeError('Copy response has %s as root tag' % root.tag) @copy_ancestor_docstring def update_meta(self, key, metadata): self.copy(key, key, metadata) def _do_request(self, method, path, subres=None, query_string=None, headers=None, body=None): '''Send request, read and return response object''' log.debug('started with %s %s?%s, qs=%s', method, path, subres, query_string) if headers is None: headers = CaseInsensitiveDict() if isinstance(body, (bytes, bytearray, memoryview)): headers['Content-MD5'] = md5sum_b64(body) redirect_count = 0 this_method = method while True: resp = self._send_request(this_method, path, headers=headers, subres=subres, query_string=query_string, body=body) if (resp.status < 300 or resp.status > 399): break # Assume redirect redirect_count += 1 if redirect_count > 10: raise RuntimeError('Too many chained redirections') # First try location header... new_url = resp.headers['Location'] if new_url: # Discard body self.conn.discard() # Pylint can't infer SplitResult Types #pylint: disable=E1103 o = urlsplit(new_url) if o.scheme: if self.ssl_context and o.scheme != 'https': raise RuntimeError('Redirect to non-https URL') elif not self.ssl_context and o.scheme != 'http': raise RuntimeError('Redirect to non-http URL') if o.hostname != self.hostname or o.port != self.port: self.hostname = o.hostname self.port = o.port self.conn.disconnect() self.conn = self._get_conn() else: raise RuntimeError('Redirect to different path on same host') # ..but endpoint may also be hidden in message body. # If we have done a HEAD request, we have to change to GET # to actually retrieve the body. elif resp.method == 'HEAD': log.debug('Switching from HEAD to GET to read redirect body') this_method = 'GET' # Try to read new URL from request body else: tree = self._parse_xml_response(resp) new_url = tree.findtext('Endpoint') if not new_url: raise get_S3Error(tree.findtext('Code'), tree.findtext('Message'), resp.headers) self.hostname = new_url self.conn.disconnect() self.conn = self._get_conn() # Update method this_method = method log.info('_do_request(): redirected to %s', self.conn.hostname) if body and not isinstance(body, (bytes, bytearray, memoryview)): body.seek(0) # At the end, the request should have gone out with the right # method if this_method != method: raise RuntimeError('Dazed and confused - HEAD fails but GET works?') # Success if resp.status >= 200 and resp.status <= 299: return resp # Error self._parse_error_response(resp) def _parse_error_response(self, resp): '''Handle error response from server Try to raise most-specific exception. ''' # Note that even though the final server backend may guarantee to always # deliver an XML document body with a detailed error message, we may # also get errors from intermediate proxies. 
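        # A well-formed S3 error body looks roughly like
        #   <Error><Code>NoSuchKey</Code><Message>...</Message></Error>
        # but the exact shape is not guaranteed, especially for errors
        # generated by intermediate proxies.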
content_type = resp.headers['Content-Type'] # If method == HEAD, server must not return response body # even in case of errors if resp.method.upper() == 'HEAD': assert self.conn.read(1) == b'' raise HTTPError(resp.status, resp.reason, resp.headers) # If not XML, do the best we can if not XML_CONTENT_RE.match(content_type) or resp.length == 0: self.conn.discard() raise HTTPError(resp.status, resp.reason, resp.headers) # We don't stream the data into the parser because we want # to be able to dump a copy if the parsing fails. body = self.conn.readall() try: tree = ElementTree.parse(BytesIO(body)).getroot() except: log.error('Unable to parse server response as XML:\n%s', self._dump_response(resp, body)) raise raise get_S3Error(tree.findtext('Code'), tree.findtext('Message'), resp.headers) def _parse_xml_response(self, resp, body=None): '''Return element tree for XML response''' content_type = resp.headers['Content-Type'] # AWS S3 sometimes "forgets" to send a Content-Type # when responding to a multiple delete request. # https://forums.aws.amazon.com/thread.jspa?threadID=134372 if content_type is None: log.error('Server did not provide Content-Type, assuming XML') elif not XML_CONTENT_RE.match(content_type): log.error('Unexpected server reply: expected XML, got:\n%s', self._dump_response(resp)) raise RuntimeError('Unexpected server response') # We don't stream the data into the parser because we want # to be able to dump a copy if the parsing fails. if body is None: body = self.conn.readall() try: tree = ElementTree.parse(BytesIO(body)).getroot() except: log.error('Unable to parse server response as XML:\n%s', self._dump_response(resp, body)) raise return tree # NOTE: ! This function is also used by the swift backend. ! @prepend_ancestor_docstring def clear(self): """ This method may not be able to see (and therefore also not delete) recently uploaded objects. """ # We have to cache keys, because otherwise we can't use the # http connection to delete keys. 
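        # Hence list(self) below materializes the complete key list first;
        # only afterwards is the connection reused for the DELETE requests.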
for (no, s3key) in enumerate(list(self)): if no != 0 and no % 1000 == 0: log.info('clear(): deleted %d objects so far..', no) log.debug('started with %s', s3key) # Ignore missing objects when clearing bucket self.delete(s3key, True) def __str__(self): return 's3c://%s/%s/%s' % (self.hostname, self.bucket_name, self.prefix) def _authorize_request(self, method, path, headers, subres, query_string): '''Add authorization information to *headers*''' # See http://docs.amazonwebservices.com/AmazonS3/latest/dev/RESTAuthentication.html # Date, can't use strftime because it's locale dependent now = time.gmtime() headers['Date'] = ('%s, %02d %s %04d %02d:%02d:%02d GMT' % (C_DAY_NAMES[now.tm_wday], now.tm_mday, C_MONTH_NAMES[now.tm_mon - 1], now.tm_year, now.tm_hour, now.tm_min, now.tm_sec)) auth_strs = [method, '\n'] for hdr in ('Content-MD5', 'Content-Type', 'Date'): if hdr in headers: auth_strs.append(headers[hdr]) auth_strs.append('\n') for hdr in sorted(x for x in headers if x.lower().startswith('x-amz-')): val = ' '.join(re.split(r'\s*\n\s*', headers[hdr].strip())) auth_strs.append('%s:%s\n' % (hdr, val)) # Always include bucket name in path for signing if self.hostname.startswith(self.bucket_name): path = '/%s%s' % (self.bucket_name, path) sign_path = urllib.parse.quote(path) auth_strs.append(sign_path) if subres: auth_strs.append('?%s' % subres) # False positive, hashlib *does* have sha1 member #pylint: disable=E1101 auth_str = ''.join(auth_strs).encode() signature = b64encode(hmac.new(self.password.encode(), auth_str, hashlib.sha1).digest()).decode() headers['Authorization'] = 'AWS %s:%s' % (self.login, signature) def _send_request(self, method, path, headers, subres=None, query_string=None, body=None): '''Add authentication and send request Returns the response object. 
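
        If *body* is a seekable file-like object (rather than a bytes-like
        object), the data is sent using an "Expect: 100-continue" handshake
        unless the ``disable-expect100`` backend option is set.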
''' if not isinstance(headers, CaseInsensitiveDict): headers = CaseInsensitiveDict(headers) if not self.hostname.startswith(self.bucket_name): path = '/%s%s' % (self.bucket_name, path) headers['host'] = self.hostname self._authorize_request(method, path, headers, subres, query_string) path = urllib.parse.quote(path) if query_string: s = urllib.parse.urlencode(query_string, doseq=True) if subres: path += '?%s&%s' % (subres, s) else: path += '?%s' % s elif subres: path += '?%s' % subres # We can probably remove the assertions at some point and # call self.conn.read_response() directly def read_response(): resp = self.conn.read_response() assert resp.method == method assert resp.path == path return resp use_expect_100c = not self.options.get('disable-expect100', False) try: log.debug('sending %s %s', method, path) if body is None or isinstance(body, (bytes, bytearray, memoryview)): self.conn.send_request(method, path, body=body, headers=headers) else: body_len = os.fstat(body.fileno()).st_size self.conn.send_request(method, path, expect100=use_expect_100c, headers=headers, body=BodyFollowing(body_len)) if use_expect_100c: resp = read_response() if resp.status != 100: # Error return resp try: copyfileobj(body, self.conn, BUFSIZE) except ConnectionClosed: # Server closed connection while we were writing body data - # but we may still be able to read an error response try: resp = read_response() except ConnectionClosed: # No server response available pass else: if resp.status >= 400: # Got error response return resp log.warning('Server broke connection during upload, but signaled ' '%d %s', resp.status, resp.reason) # Re-raise first ConnectionClosed exception raise return read_response() except Exception as exc: if is_temp_network_error(exc) or isinstance(exc, ssl.SSLError): # We probably can't use the connection anymore self.conn.disconnect() raise @copy_ancestor_docstring def close(self): self.conn.disconnect() # NOTE: ! This function is also used by the swift backend ! def _extractmeta(self, resp, obj_key): '''Extract metadata from HTTP response object''' format_ = resp.headers.get('%smeta-format' % self.hdr_prefix, 'raw') if format_ != 'raw2': # Current raise CorruptedObjectError('Invalid metadata format: %s' % format_) parts = [] for i in count(): # Headers is an email.message object, so indexing it # would also give None instead of KeyError part = resp.headers.get('%smeta-%03d' % (self.hdr_prefix, i), None) if part is None: break parts.append(part) buf = unquote(''.join(parts)) meta = literal_eval('{ %s }' % buf) # Decode bytes values for (k,v) in meta.items(): if not isinstance(v, bytes): continue try: meta[k] = b64decode(v) except binascii.Error: # This should trigger a MD5 mismatch below meta[k] = None # Check MD5. There is a case to be made for treating a mismatch as a # `CorruptedObjectError` rather than a `BadDigestError`, because the MD5 # sum is not calculated on-the-fly by the server but stored with the # object, and therefore does not actually verify what the server has # sent over the wire. However, it seems more likely for the data to get # accidentally corrupted in transit than to get accidentally corrupted # on the server (which hopefully checksums its storage devices). 
md5 = b64encode(checksum_basic_mapping(meta)).decode('ascii') if md5 != resp.headers.get('%smeta-md5' % self.hdr_prefix, None): log.warning('MD5 mismatch in metadata for %s', obj_key) raise BadDigestError('BadDigest', 'Meta MD5 for %s does not match' % obj_key) return meta class ObjectR(object): '''An S3 object open for reading''' # NOTE: This class is used as a base class for the swift backend, # so changes here should be checked for their effects on other # backends. def __init__(self, key, resp, backend, metadata=None): self.key = key self.resp = resp self.closed = False self.md5_checked = False self.backend = backend self.metadata = metadata # False positive, hashlib *does* have md5 member #pylint: disable=E1101 self.md5 = hashlib.md5() def read(self, size=None): '''Read up to *size* bytes of object data For integrity checking to work, this method has to be called until it returns an empty string, indicating that all data has been read (and verified). ''' if size == 0: return b'' # This may raise an exception, in which case we probably can't # re-use the connection. However, we rely on the caller # to still close the file-like object, so that we can do # cleanup in close(). buf = self.backend.conn.read(size) self.md5.update(buf) # Check MD5 on EOF # (size == None implies EOF) if (not buf or size is None) and not self.md5_checked: etag = self.resp.headers['ETag'].strip('"') self.md5_checked = True if etag != self.md5.hexdigest(): log.warning('MD5 mismatch for %s: %s vs %s', self.key, etag, self.md5.hexdigest()) raise BadDigestError('BadDigest', 'ETag header does not agree with calculated MD5') return buf def __enter__(self): return self def __exit__(self, *a): self.close() return False def close(self, checksum_warning=True): '''Close object If *checksum_warning* is true, this will generate a warning message if the object has not been fully read (because in that case the MD5 checksum cannot be checked). ''' if self.closed: return self.closed = True # If we have not read all the data, close the entire # connection (otherwise we loose synchronization) if not self.md5_checked: if checksum_warning: log.warning("Object closed prematurely, can't check MD5, and have to " "reset connection") self.backend.conn.disconnect() class ObjectW(object): '''An S3 object open for writing All data is first cached in memory, upload only starts when the close() method is called. ''' # NOTE: This class is used as a base class for the swift backend, # so changes here should be checked for their effects on other # backends. def __init__(self, key, backend, headers): self.key = key self.backend = backend self.headers = headers self.closed = False self.obj_size = 0 # According to http://docs.python.org/3/library/functions.html#open # the buffer size is typically ~8 kB. We process data in much # larger chunks, so buffering would only hurt performance. 
self.fh = tempfile.TemporaryFile(buffering=0) # False positive, hashlib *does* have md5 member #pylint: disable=E1101 self.md5 = hashlib.md5() def write(self, buf): '''Write object data''' self.fh.write(buf) self.md5.update(buf) self.obj_size += len(buf) def is_temp_failure(self, exc): return self.backend.is_temp_failure(exc) @retry def close(self): '''Close object and upload data''' # Access to protected member ok #pylint: disable=W0212 log.debug('started with %s', self.key) if self.closed: # still call fh.close, may have generated an error before self.fh.close() return self.fh.seek(0) self.headers['Content-Type'] = 'application/octet-stream' resp = self.backend._do_request('PUT', '/%s%s' % (self.backend.prefix, self.key), headers=self.headers, body=self.fh) etag = resp.headers['ETag'].strip('"') self.backend._assert_empty_response(resp) if etag != self.md5.hexdigest(): # delete may fail, but we don't want to loose the BadDigest exception try: self.backend.delete(self.key) finally: raise BadDigestError('BadDigest', 'MD5 mismatch for %s (received: %s, sent: %s)' % (self.key, etag, self.md5.hexdigest())) self.closed = True self.fh.close() def __enter__(self): return self def __exit__(self, *a): self.close() return False def get_obj_size(self): if not self.closed: raise RuntimeError('Object must be closed first.') return self.obj_size def get_S3Error(code, msg, headers=None): '''Instantiate most specific S3Error subclass''' # Special case # http://code.google.com/p/s3ql/issues/detail?id=369 if code == 'Timeout': code = 'RequestTimeout' if code.endswith('Error'): name = code else: name = code + 'Error' class_ = globals().get(name, S3Error) if not issubclass(class_, S3Error): return S3Error(code, msg, headers) return class_(code, msg, headers) def md5sum_b64(buf): '''Return base64 encoded MD5 sum''' return b64encode(hashlib.md5(buf).digest()).decode('ascii') def _parse_retry_after(header): '''Parse headers for Retry-After value''' hit = re.match(r'^\s*([0-9]+)\s*$', header) if hit: val = int(header) else: date = parsedate_tz(header) if date is None: log.warning('Unable to parse retry-after value: %s', header) return None val = mktime_tz(*date) - time.time() if val > 300 or val < 0: log.warning('Ignoring retry-after value of %.3f s, using 1 s instead', val) val = 1 return val class HTTPError(Exception): ''' Represents an HTTP error returned by S3. ''' def __init__(self, status, msg, headers=None): super().__init__() self.status = status self.msg = msg self.headers = headers if headers and 'Retry-After' in headers: self.retry_after = _parse_retry_after(headers['Retry-After']) else: self.retry_after = None def __str__(self): return '%d %s' % (self.status, self.msg) class S3Error(Exception): ''' Represents an error returned by S3. 
For possible codes, see http://docs.amazonwebservices.com/AmazonS3/latest/API/ErrorResponses.html ''' def __init__(self, code, msg, headers=None): super().__init__(msg) self.code = code self.msg = msg if headers and 'Retry-After' in headers: self.retry_after = _parse_retry_after(headers['Retry-After']) else: self.retry_after = None def __str__(self): return '%s: %s' % (self.code, self.msg) class NoSuchKeyError(S3Error): pass class AccessDeniedError(S3Error, AuthorizationError): pass class BadDigestError(S3Error): pass class IncompleteBodyError(S3Error): pass class InternalError(S3Error): pass class InvalidAccessKeyIdError(S3Error, AuthenticationError): pass class InvalidSecurityError(S3Error, AuthenticationError): pass class SignatureDoesNotMatchError(S3Error, AuthenticationError): pass class OperationAbortedError(S3Error): pass class RequestTimeoutError(S3Error): pass class SlowDownError(S3Error): pass class ServiceUnavailableError(S3Error): pass class RequestTimeTooSkewedError(S3Error): pass class NoSuchBucketError(S3Error, DanglingStorageURLError): pass s3ql-2.26/src/s3ql/backends/common.py0000644000175000017500000005470213233104455021104 0ustar nikrationikratio00000000000000''' common.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from ..logging import logging, QuietError, LOG_ONCE # Ensure use of custom logger class from abc import abstractmethod, ABCMeta from functools import wraps import time import textwrap import hashlib import struct import hmac import random import inspect import ssl import os import re import threading log = logging.getLogger(__name__) class RateTracker: ''' Maintain an average occurence rate for events over a configurable time window. The rate is computed with one second resolution. ''' def __init__(self, window_length): if not isinstance(window_length, int): raise ValueError('only integer window lengths are supported') self.buckets = [0] * window_length self.window_length = window_length self.last_update = int(time.monotonic()) self.lock = threading.Lock() def register(self, _not_really=False): '''Register occurence of an event. The keyword argument is for class-internal use only. ''' buckets = self.buckets bucket_count = len(self.buckets) now = int(time.monotonic()) elapsed = min(now - self.last_update, bucket_count) for i in range(elapsed): buckets[(now - i) % bucket_count] = 0 if _not_really: return with self.lock: buckets[now % bucket_count] += 1 self.last_update = now def get_rate(self): '''Return average rate of event occurance''' self.register(_not_really=True) return sum(self.buckets) / len(self.buckets) def get_count(self): '''Return total number of events in window''' self.register(_not_really=True) return sum(self.buckets) # We maintain a (global) running average of temporary errors, so # that we can log a warning if this number becomes large. We # use a relatively large window to prevent bogus spikes if # multiple threads all have to retry after a long period of # inactivity. RETRY_TIMEOUT = 60 * 60 * 24 def retry(method, _tracker=RateTracker(60)): '''Wrap *method* for retrying on some exceptions If *method* raises an exception for which the instance's `is_temp_failure(exc)` method is true, the *method* is called again at increasing intervals. If this persists for more than `RETRY_TIMEOUT` seconds, the most-recently caught exception is re-raised. 
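    Between attempts, the method sleeps for a randomized, exponentially
    increasing interval, capped at five minutes (or set to the
    server-supplied *retry_after* value, when available).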
If the method defines a keyword parameter *is_retry*, then this parameter will be set to True whenever the function is retried. ''' if inspect.isgeneratorfunction(method): raise TypeError('Wrapping a generator function is pointless') sig = inspect.signature(method) has_is_retry = 'is_retry' in sig.parameters @wraps(method) def wrapped(*a, **kw): self = a[0] interval = 1 / 50 waited = 0 retries = 0 while True: if has_is_retry: kw['is_retry'] = (retries > 0) try: return method(*a, **kw) except Exception as exc: # Access to protected member ok #pylint: disable=W0212 if not self.is_temp_failure(exc): raise _tracker.register() rate = _tracker.get_rate() if rate > 5: log.warning('Had to retry %d times over the last %d seconds, ' 'server or network problem?', rate * _tracker.window_length, _tracker.window_length) else: log.debug('Average retry rate: %.2f Hz', rate) if waited > RETRY_TIMEOUT: log.error('%s.%s(*): Timeout exceeded, re-raising %r exception', self.__class__.__name__, method.__name__, exc) raise retries += 1 if retries <= 2: log_fn = log.debug elif retries <= 4: log_fn = log.info else: log_fn = log.warning log_fn('Encountered %s (%s), retrying %s.%s (attempt %d)...', type(exc).__name__, exc, self.__class__.__name__, method.__name__, retries) if hasattr(exc, 'retry_after') and exc.retry_after: log.debug('retry_after is %.2f seconds', exc.retry_after) interval = exc.retry_after # Add some random variation to prevent flooding the # server with too many concurrent requests. time.sleep(interval * random.uniform(1, 1.5)) waited += interval interval = min(5*60, 2*interval) extend_docstring(wrapped, 'This method has been wrapped and will automatically re-execute in ' 'increasing intervals for up to `s3ql.backends.common.RETRY_TIMEOUT` ' 'seconds if it raises an exception for which the instance\'s ' '`is_temp_failure` method returns True.') return wrapped def extend_docstring(fun, s): '''Append *s* to *fun*'s docstring with proper wrapping and indentation''' if fun.__doc__ is None: fun.__doc__ = '' # Figure out proper indentation indent = 60 for line in fun.__doc__.splitlines()[1:]: stripped = line.lstrip() if stripped: indent = min(indent, len(line) - len(stripped)) indent_s = '\n' + ' ' * indent fun.__doc__ += ''.join(indent_s + line for line in textwrap.wrap(s, width=80 - indent)) fun.__doc__ += '\n' class RetryIterator: ''' A RetryIterator instance iterates over the elements produced by any generator function passed to its constructor, i.e. it wraps the iterator obtained by calling the generator function. When retrieving elements from the wrapped iterator, exceptions may occur. Most such exceptions are propagated. However, exceptions for which the *is_temp_failure_fn* function returns True are caught. If that happens, the wrapped iterator is replaced by a new one obtained by calling the generator function again with the *start_after* parameter set to the last element that was retrieved before the exception occured. If attempts to retrieve the next element fail repeatedly, the iterator is replaced only after sleeping for increasing intervals. If no new element can be obtained after `RETRY_TIMEOUT` seconds, the last exception is no longer caught but propagated to the caller. This behavior is implemented by wrapping the __next__ method with the `retry` decorator. 
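
    Instances are normally not created directly but through the
    `retry_generator` decorator, e.g. (sketch)::

        @retry_generator
        def list(self, prefix='', start_after=''):
            ...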
''' def __init__(self, generator, is_temp_failure_fn, args=(), kwargs=None): if not inspect.isgeneratorfunction(generator): raise TypeError('*generator* must be generator function') self.generator = generator self.iterator = None self.is_temp_failure = is_temp_failure_fn if kwargs is None: kwargs = {} self.kwargs = kwargs self.args = args def __iter__(self): return self @retry def __next__(self): if self.iterator is None: self.iterator = self.generator(*self.args, **self.kwargs) try: el = next(self.iterator) except Exception as exc: if self.is_temp_failure(exc): self.iterator = None raise self.kwargs['start_after'] = el return el def retry_generator(method): '''Wrap *method* in a `RetryIterator` *method* must return a generator, and accept a keyword argument *start_with*. The RetryIterator's `is_temp_failure` attribute will be set to the `is_temp_failure` method of the instance to which *method* is bound. ''' @wraps(method) def wrapped(*a, **kw): return RetryIterator(method, a[0].is_temp_failure, args=a, kwargs=kw) extend_docstring(wrapped, 'This generator method has been wrapped and will return a ' '`RetryIterator` instance.') return wrapped class AbstractBackend(object, metaclass=ABCMeta): '''Functionality shared between all backends. Instances behave similarly to dicts. They can be iterated over and indexed into, but raise a separate set of exceptions. The backend guarantees get after create consistency, i.e. a newly created object will be immediately retrievable. Additional consistency guarantees may or may not be available and can be queried for with instance methods. ''' needs_login = True def __init__(self): super().__init__() def __getitem__(self, key): return self.fetch(key)[0] def __setitem__(self, key, value): self.store(key, value) def __delitem__(self, key): self.delete(key) def __iter__(self): return self.list() def __contains__(self, key): return self.contains(key) def __enter__(self): return self def __exit__(self, exc_type, exc_value, tb): self.close() return False def iteritems(self): for key in self.list(): yield (key, self[key]) @property @abstractmethod def has_native_rename(self): '''True if the backend has a native, atomic rename operation''' pass def reset(self): '''Reset backend This resets the backend and ensures that it is ready to process requests. In most cases, this method does nothing. However, if e.g. a file handle returned by a previous call to `open_read` was not properly closed (e.g. because an exception happened during reading), the `reset` method will make sure that any underlying connection is properly closed. Obviously, this method must not be called while any file handles returned by the backend are still in use. ''' pass @retry def perform_read(self, fn, key): '''Read object data using *fn*, retry on temporary failure Open object for reading, call `fn(fh)` and close object. If a temporary error (as defined by `is_temp_failure`) occurs during opening, closing or execution of *fn*, the operation is retried. ''' fh = self.open_read(key) try: res = fn(fh) except Exception as exc: # If this is a temporary failure, we now that the call will be # retried, so we don't need to warn that the object was not read # completely. fh.close(checksum_warning=not self.is_temp_failure(exc)) raise else: fh.close() return res @retry def perform_write(self, fn, key, metadata=None, is_compressed=False): '''Read object data using *fn*, retry on temporary failure Open object for writing, call `fn(fh)` and close object. 
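
        For example (this mirrors how `store` is implemented)::

            backend.perform_write(lambda fh: fh.write(val), key, metadata)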
If a temporary error (as defined by `is_temp_failure`) occurs during opening, closing or execution of *fn*, the operation is retried. ''' with self.open_write(key, metadata, is_compressed) as fh: return fn(fh) def fetch(self, key): """Return data stored under `key`. Returns a tuple with the data and metadata. If only the data itself is required, ``backend[key]`` is a more concise notation for ``backend.fetch(key)[0]``. """ def do_read(fh): data = fh.read() return (data, fh.metadata) return self.perform_read(do_read, key) def store(self, key, val, metadata=None): """Store data under `key`. `metadata` can be mapping with additional attributes to store with the object. Keys have to be of type `str`, values have to be of elementary type (`str`, `bytes`, `int`, `float` or `bool`). If no metadata is required, one can simply assign to the subscripted backend instead of using this function: ``backend[key] = val`` is equivalent to ``backend.store(key, val)``. """ self.perform_write(lambda fh: fh.write(val), key, metadata) @abstractmethod def is_temp_failure(self, exc): '''Return true if exc indicates a temporary error Return true if the given exception indicates a temporary problem. Most instance methods automatically retry the request in this case, so the caller does not need to worry about temporary failures. However, in same cases (e.g. when reading or writing an object), the request cannot automatically be retried. In these case this method can be used to check for temporary problems and so that the request can be manually restarted if applicable. ''' pass @abstractmethod def lookup(self, key): """Return metadata for given key. If the key does not exist, `NoSuchObject` is raised. """ pass @abstractmethod def get_size(self, key): '''Return size of object stored under *key*''' pass @abstractmethod def open_read(self, key): """Open object for reading Return a file-like object. Data can be read using the `read` method. Metadata is returned in the file-like object's *metadata* attribute and can be modified by the caller at will. The object must be closed explicitly. """ pass @abstractmethod def open_write(self, key, metadata=None, is_compressed=False): """Open object for writing `metadata` can be mapping with additional attributes to store with the object. Keys have to be of type `str`, values have to be of elementary type (`str`, `bytes`, `int`, `float` or `bool`). Returns a file- like object. The object must be closed closed explicitly. After closing, the *get_obj_size* may be used to retrieve the size of the stored object (which may differ from the size of the written data). The *is_compressed* parameter indicates that the caller is going to write compressed data, and may be used to avoid recompression by the backend. """ pass @abstractmethod def clear(self): """Delete all objects in backend""" pass def contains(self, key): '''Check if `key` is in backend''' try: self.lookup(key) except NoSuchObject: return False else: return True @abstractmethod def delete(self, key, force=False): """Delete object stored under `key` ``backend.delete(key)`` can also be written as ``del backend[key]``. If `force` is true, do not return an error if the key does not exist. Note, however, that even if *force* is False, it is not guaranteed that an attempt to delete a non-existing object will raise an error. 
""" pass def delete_multi(self, keys, force=False): """Delete objects stored under `keys` Deleted objects are removed from the *keys* list, so that the caller can determine which objects have not yet been processed if an exception is occurs. If *force* is True, attempts to delete non-existing objects will succeed. Note, however, that even if *force* is False, it is not guaranteed that an attempt to delete a non-existing object will raise an error. """ if not isinstance(keys, list): raise TypeError('*keys* parameter must be a list') for (i, key) in enumerate(keys): try: self.delete(key, force=force) except: del keys[:i] raise del keys[:] @abstractmethod def list(self, prefix=''): '''List keys in backend Returns an iterator over all keys in the backend. ''' pass @abstractmethod def copy(self, src, dest, metadata=None): """Copy data stored under key `src` to key `dest` If `dest` already exists, it will be overwritten. If *metadata* is `None` metadata will be copied from the source as well, otherwise *metadata* becomes the metadata for the new object. Copying will be done on the remote side without retrieving object data. """ pass @abstractmethod def update_meta(self, key, metadata): """Replace metadata of *key* with *metadata* `metadata` must be a mapping with keys of type `str`, and values of an elementary type (`str`, `bytes`, `int`, `float` or `bool`). """ pass def rename(self, src, dest, metadata=None): """Rename key `src` to `dest` If `dest` already exists, it will be overwritten. If *metadata* is `None` metadata will be preserved, otherwise *metadata* becomes the metadata for the renamed object. Rename done remotely without retrieving object data. """ self.copy(src, dest, metadata) self.delete(src) def close(self): '''Close any opened resources This method closes any resources allocated by the backend (e.g. network sockets). This method should be called explicitly before a backend object is garbage collected. The backend object may be re-used after `close` has been called, in this case the necessary resources are transparently allocated again. ''' pass class NoSuchObject(Exception): '''Raised if the requested object does not exist in the backend''' def __init__(self, key): super().__init__() self.key = key def __str__(self): return 'Backend does not have anything stored under key %r' % self.key class DanglingStorageURLError(Exception): '''Raised if the backend can't store data at the given location''' def __init__(self, loc, msg=None): super().__init__() self.loc = loc self.msg = msg def __str__(self): if self.msg is None: return '%r does not exist' % self.loc else: return self.msg class AuthorizationError(Exception): '''Raised if the credentials don't give access to the requested backend''' def __init__(self, msg): super().__init__() self.msg = msg def __str__(self): return 'Access denied. Server said: %s' % self.msg class AuthenticationError(Exception): '''Raised if the credentials are invalid''' def __init__(self, msg): super().__init__() self.msg = msg def __str__(self): return 'Access denied. Server said: %s' % self.msg class CorruptedObjectError(Exception): """ Raised if a storage object is corrupted. Note that this is different from BadDigest error, which is raised if a transmission error has been detected. 
""" def __init__(self, str_): super().__init__() self.str = str_ def __str__(self): return self.str def get_ssl_context(path): '''Construct SSLContext object''' # Best practice according to http://docs.python.org/3/library/ssl.html#protocol-versions context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) context.options |= ssl.OP_NO_SSLv2 context.verify_mode = ssl.CERT_REQUIRED if path is None: log.debug('Reading default CA certificates.') context.set_default_verify_paths() elif os.path.isfile(path): log.debug('Reading CA certificates from file %s', path) context.load_verify_locations(cafile=path) else: log.debug('Reading CA certificates from directory %s', path) context.load_verify_locations(capath=path) return context def get_proxy(ssl): '''Read system proxy settings Returns either `None`, or a tuple ``(host, port)``. This function may raise `QuietError`. ''' if ssl: proxy_env = 'https_proxy' else: proxy_env = 'http_proxy' if proxy_env in os.environ: proxy = os.environ[proxy_env] hit = re.match(r'^(https?://)?([a-zA-Z0-9.-]+)(:[0-9]+)?/?$', proxy) if not hit: raise QuietError('Unable to parse proxy setting %s=%r' % (proxy_env, proxy), exitcode=13) if hit.group(1) == 'https://': log.warning('HTTPS connection to proxy is probably pointless and not supported, ' 'will use standard HTTP', extra=LOG_ONCE) if hit.group(3): proxy_port = int(hit.group(3)[1:]) else: proxy_port = 80 proxy_host = hit.group(2) log.info('Using proxy %s:%d', proxy_host, proxy_port, extra=LOG_ONCE) proxy = (proxy_host, proxy_port) else: proxy = None return proxy def checksum_basic_mapping(metadata, key=None): '''Compute checksum for mapping of elementary types Keys of *d* must be strings. Values of *d* must be of elementary type (i.e., `str`, `bytes`, `int`, `float`, `complex`, `bool` or None). If there is a key named ``signature``, then it is excluded from the checksum computation. If *key* is None, compute MD5. Otherwise compute HMAC using *key*. ''' # In order to compute a safe checksum, we need to convert each object to a # unique representation (i.e, we can't use repr(), as it's output may change # in the future). Furthermore, we have to make sure that the representation # is theoretically reversible, or there is a potential for collision # attacks. if key is None: chk = hashlib.md5() else: chk = hmac.new(key, digestmod=hashlib.sha256) for mkey in sorted(metadata.keys()): assert isinstance(mkey, str) if mkey == 'signature': continue chk.update(mkey.encode('utf-8')) val = metadata[mkey] if isinstance(val, str): val = b'\0s' + val.encode('utf-8') + b'\0' elif val is None: val = b'\0n' elif isinstance(val, int): val = b'\0i' + ('%d' % val).encode() + b'\0' elif isinstance(val, bool): val = b'\0t' if val else b'\0f' elif isinstance(val, float): val = b'\0d' + struct.pack(' This work can be distributed under the terms of the GNU GPLv3. ''' from .logging import logging, setup_logging, QuietError from .common import assert_fs_owner from .parse_args import ArgumentParser import llfuse import os import stat import sys import textwrap log = logging.getLogger(__name__) def parse_args(args): '''Parse command line''' parser = ArgumentParser( description=textwrap.dedent('''\ Replicates the contents of the directory in the directory . has to be an existing directory and must not exist. Both directories have to be within the same S3QL file system. The replication will not take any additional space. Only if one of directories is modified later on, the modified data will take additional storage space. 
''')) parser.add_debug() parser.add_quiet() parser.add_version() parser.add_argument('source', help='source directory', type=(lambda x: x.rstrip('/'))) parser.add_argument('target', help='target directory', type=(lambda x: x.rstrip('/'))) options = parser.parse_args(args) return options def main(args=None): '''Efficiently copy a directory tree''' if args is None: args = sys.argv[1:] options = parse_args(args) setup_logging(options) if not os.path.exists(options.source): raise QuietError('Source directory %r does not exist' % options.source) if os.path.exists(options.target): raise QuietError('Target directory %r must not yet exist.' % options.target) parent = os.path.dirname(os.path.abspath(options.target)) if not os.path.exists(parent): raise QuietError('Target parent %r does not exist' % parent) fstat_s = os.stat(options.source) fstat_p = os.stat(parent) if not stat.S_ISDIR(fstat_s.st_mode): raise QuietError('Source %r is not a directory' % options.source) if not stat.S_ISDIR(fstat_p.st_mode): raise QuietError('Target parent %r is not a directory' % parent) if fstat_p.st_dev != fstat_s.st_dev: raise QuietError('Source and target are not on the same file system.') if os.path.ismount(options.source): raise QuietError('%s is a mount point.' % options.source) ctrlfile = assert_fs_owner(options.source) try: os.mkdir(options.target) except PermissionError: raise QuietError('No permission to create target directory') fstat_t = os.stat(options.target) llfuse.setxattr(ctrlfile, 'copy', ('(%d, %d)' % (fstat_s.st_ino, fstat_t.st_ino)).encode()) if __name__ == '__main__': main(sys.argv[1:]) s3ql-2.26/src/s3ql/parse_args.py0000644000175000017500000002250612742247106020172 0ustar nikrationikratio00000000000000''' argparse.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. This module provides a customized ArgumentParser class. Differences are: * a --version argument is added by default * convenience functions are available for adding --quiet, --debug, --cachedir, --log and --authfile options. * instead of the usage string one can pass a usage list. The first element will be prefixed with ``usage: `` as usual. Additional elements will be printed on separate lines and prefixed with `` or: ``. * When element of an usage list, the ``DEFAULT_USAGE`` object will be replaced by the automatically generated usage message, excluding any --help arguments. * When specified on its own, the replacement will be done including any --help arguments. * The ``usage`` and ``add_help`` settings are inherited from the parent parser to the subparsers. ''' # Pylint really gets confused by this module #pylint: disable-all from . import RELEASE from argparse import ArgumentTypeError, ArgumentError import argparse from .logging import logging # Ensure use of custom logger class import os import re DEFAULT_USAGE = object() log = logging.getLogger(__name__) class HelpFormatter(argparse.HelpFormatter): def _format_usage(self, usage, actions, groups, prefix): '''Special handling for usage lists If usage is a list object, its elements will be printed on separate lines. DEFAULT_USAGE will be replaced by the default usage string of the parser (but, if `usage`` is a list, excluding any --help arguments)). 
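
        For example (illustrative only), a parser created with::

            ArgumentParser(usage=['%(prog)s [options] <storage-url>', DEFAULT_USAGE])

        prints the given string first and the auto-generated usage below it.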
''' if isinstance(usage, list): # Omit help argument actions = [ x for x in actions if not isinstance(x, argparse._HelpAction) ] res = [] for s in usage: if not res: res.append('usage: ') else: res.append(' or: ') if s is DEFAULT_USAGE: res.append(super()._format_usage(None, actions, groups, '')[:-1]) else: res.append(s % dict(prog=self._prog)) res.append('\n') return '%s\n\n' % ''.join(res) elif usage is DEFAULT_USAGE: return super()._format_usage(None, actions, groups, prefix) else: return super()._format_usage(usage, actions, groups, prefix) def format_help(self): help_ = super().format_help() if help_.count('\n') > 2: return help_ + '\n' else: return help_ class SubParsersAction(argparse._SubParsersAction): '''A replacement for _SubParsersAction that keeps track of the parent parser''' def __init__(self, **kw): self.parent = kw.pop('parent') super().__init__(**kw) def add_parser(self, *a, **kwargs): '''Pass parent usage and add_help attributes to new parser''' if 'usage' not in kwargs: # Inherit, but preserve old progs attribute usage = self.parent.usage repl = dict(prog=self.parent.prog) if isinstance(usage, list): usage = [ (x % repl if isinstance(x, str) else x) for x in usage ] elif usage: usage = usage % repl kwargs['usage'] = usage if 'help' in kwargs: kwargs.setdefault('description', kwargs['help'].capitalize() + '.') kwargs.setdefault('add_help', self.parent.add_help) kwargs.setdefault('formatter_class', self.parent.formatter_class) if 'parents' in kwargs: for p in kwargs['parents']: if p.epilog: kwargs.setdefault('epilog', p.epilog % dict(prog=self.parent.prog)) return super().add_parser(*a, **kwargs) class ArgumentParser(argparse.ArgumentParser): def __init__(self, *a, **kw): if 'formatter_class' not in kw: kw['formatter_class'] = HelpFormatter super().__init__(*a, **kw) self.register('action', 'parsers', SubParsersAction) def add_version(self): self.add_argument('--version', action='version', help="just print program version and exit", version='S3QL %s' % RELEASE) def add_quiet(self): self.add_argument("--quiet", action="store_true", default=False, help="be really quiet") def add_backend_options(self): self.add_argument("--backend-options", default={}, type=suboptions_type, metavar='', help="Backend specific options (separate by commas). See " "backend documentation for available options.") def add_debug(self): destnote = ('Debug messages will be written to the target ' 'specified by the ``--log`` option.') self.add_argument("--debug-modules", metavar='', type=lambda s: s.split(','), dest='debug', help="Activate debugging output from specified modules " "(use commas to separate multiple modules). " + destnote) self.add_argument("--debug", action='append_const', const='s3ql', help="Activate debugging output from all S3QL modules. " + destnote) def add_authfile(self): self.add_argument("--authfile", type=str, metavar='', default=os.path.expanduser("~/.s3ql/authinfo2"), help='Read authentication credentials from this file ' '(default: `~/.s3ql/authinfo2)`') def add_cachedir(self): self.add_argument("--cachedir", type=str, metavar='', default=os.path.expanduser("~/.s3ql"), help='Store cached data in this directory ' '(default: `~/.s3ql)`') def add_log(self, default=None): self.add_argument("--log", type=str_or_None_type, metavar='', default=default, help='Destination for log messages. Specify ``none`` for standard ' 'output or ``syslog`` for the system logging daemon. ' 'Anything else will be interpreted as a file name. 
Log files ' 'will be rotated when they reach 1 MiB, and at most 5 old log ' 'files will be kept. Default: ``%(default)s``') def add_storage_url(self): self.add_argument("storage_url", metavar='', type=storage_url_type, help='Storage URL of the backend that contains the file system') def add_subparsers(self, **kw): '''Pass parent and set prog to default usage message''' kw.setdefault('parser_class', argparse.ArgumentParser) kw['parent'] = self # prog defaults to the usage message of this parser, skipping # optional arguments and with no "usage:" prefix if kw.get('prog') is None: formatter = self._get_formatter() positionals = self._get_positional_actions() groups = self._mutually_exclusive_groups formatter.add_usage(None, positionals, groups, '') kw['prog'] = formatter.format_help().strip() return super().add_subparsers(**kw) def parse_args(self, *args, **kwargs): try: return super().parse_args(*args, **kwargs) except ArgumentError as exc: self.exit(str(exc)) def storage_url_type(s): '''Validate and canonicalize storage url''' if not re.match(r'^([a-zA-Z0-9]+)://(.+)$', s): raise ArgumentTypeError('%s is not a valid storage url.' % s) if s.startswith('local://'): # Append trailing slash so that we can match patterns with # trailing slash in authinfo2 file. return 'local://%s/' % os.path.abspath(s[len('local://'):]) # If there is no prefix specified, then e.g. s3://foo and s3://foo/ point to # the same location (even though s3://foo/bar and s3://foo/bar/ are pointing # to *different* locations). However, the first two storage urls would # nevertheless get different cache directories, which is undesired. # Therefore, we handle these special cases right when parsing the command # line. In retrospect, it would have been better to always add an implicit # slash (even when using a prefix), but we can't do that now because it # would make file systems created without trailing slash inaccessible. if (re.match(r'^(s3|gs)://[^/]+$', s) or re.match(r'^(s3c|swift(ks)?|rackspace)://[^/]+/[^/]+$', s)): s += '/' return s def str_or_None_type(s): if s.lower() == 'none': return None return s def suboptions_type(s): '''An argument converter for suboptions A suboption takes a comma separated list of additional options, e.g. --backend-options ssl,timeout=42,sse ''' assert isinstance(s, str) opts = dict() for opt in s.split(','): if '=' in opt: (key, val) = opt.split('=', 1) else: key = opt val = True opts[key] = val return opts s3ql-2.26/src/s3ql/calc_mro.py0000644000175000017500000000711212615000156017606 0ustar nikrationikratio00000000000000''' calc_mro.py - this file is part of S3QL. Copyright © 2011 Steven D'Aprano This file may be distributed under the terms of the MIT License. The original source code was retrieved from http://code.activestate.com/recipes/577748-calculate-the-mro-of-a-class/ ''' def calc_mro(*bases): """Calculate the Method Resolution Order of bases using the C3 algorithm. Suppose you intended creating a class K with the given base classes. This function returns the MRO which K would have, *excluding* K itself (since it doesn't yet exist), as if you had actually created the class. Another way of looking at this, if you pass a single class K, this will return the linearization of K (the MRO of K, *including* itself). """ seqs = [list(C.__mro__) for C in bases] + [list(bases)] res = [] while True: non_empty = list(filter(None, seqs)) if not non_empty: # Nothing left to process, we're done. return tuple(res) for seq in non_empty: # Find merge candidates among seq heads. 
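            # A head is only a valid candidate if it does not appear in the
            # tail (anything after position 0) of some other sequence.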
candidate = seq[0] not_head = [s for s in non_empty if candidate in s[1:]] if not_head: # Reject the candidate. candidate = None else: break if not candidate: raise TypeError("inconsistent hierarchy, no C3 MRO is possible") res.append(candidate) for seq in non_empty: # Remove candidate. if seq[0] == candidate: del seq[0] if __name__ == '__main__': # Run self-tests. Prints nothing if they succeed. O = object class SeriousOrderDisagreement: class X(O): pass class Y(O): pass class A(X, Y): pass class B(Y, X): pass bases = (A, B) try: x = calc_mro(*SeriousOrderDisagreement.bases) except TypeError: pass else: print("failed test, mro should have raised but got %s instead" % (x,)) class Example0: # Trivial single inheritance case. class A(O): pass class B(A): pass class C(B): pass class D(C): pass tester = D expected = (D, C, B, A, O) class Example1: class F(O): pass class E(O): pass class D(O): pass class C(D, F): pass class B(D, E): pass class A(B, C): pass tester = A expected = (A, B, C, D, E, F, O) class Example2: class F(O): pass class E(O): pass class D(O): pass class C(D, F): pass class B(E, D): pass class A(B, C): pass tester = A expected = (A, B, E, C, D, F, O) class Example3: class A(O): pass class B(O): pass class C(O): pass class D(O): pass class E(O): pass class K1(A, B, C): pass class K2(D, B, E): pass class K3(D, A): pass class Z(K1, K2, K3): pass assert calc_mro(A) == (A, O) assert calc_mro(B) == (B, O) assert calc_mro(C) == (C, O) assert calc_mro(D) == (D, O) assert calc_mro(E) == (E, O) assert calc_mro(K1) == (K1, A, B, C, O) assert calc_mro(K2) == (K2, D, B, E, O) assert calc_mro(K3) == (K3, D, A, O) tester = Z expected = (Z, K1, K2, K3, D, A, B, C, E, O) for example in [Example0, Example1, Example2, Example3]: # First test that the expected result is the same as what Python # actually generates. assert example.expected == example.tester.__mro__ # Now check the calculated MRO. assert calc_mro(example.tester) == example.expected s3ql-2.26/src/s3ql/daemonize.py0000644000175000017500000000536312615000156020010 0ustar nikrationikratio00000000000000# -*- coding: utf-8 -*- ''' daemonize.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. The functions in this file are based on the python-daemon module by Ben Finney . The reason for not simply using the module instead is that it does a lot of staff that we don't need (not a real problem) and some stuff that we must not do (the real problem). This main issue is that python-daemon unconditionally closes all open file descriptors. We don't want this for S3QL, because we have already opened the database and log files when we daemonize. I think this is good design, because it allows us to give a meaningful error message to the user if these files cannot be opened (if we open them after daemonizing, the user will only see a vanishing daemon process without any indication what went wrong). According to “Advanced Programming in the Unix Environment”, the point of closing all open file descriptors is only to "prevent the daemon from holding open any descriptors that it may have inherited from its parent (which could be a shell or some other process)". In this case the user will have to live with that. 
''' from .logging import logging # Ensure use of custom logger class import os import sys log = logging.getLogger(__name__) def daemonize(workdir='/'): '''Daemonize the process''' os.chdir(workdir) detach_process_context() redirect_stream(sys.stdin, None) redirect_stream(sys.stdout, None) redirect_stream(sys.stderr, None) def detach_process_context(): '''Detach the process context from parent and session. Detach from the parent process and session group, allowing the parent to exit while this process continues running. Reference: “Advanced Programming in the Unix Environment”, section 13.3, by W. Richard Stevens, published 1993 by Addison-Wesley. ''' # Protected member #pylint: disable=W0212 pid = os.fork() if pid > 0: os._exit(0) os.setsid() pid = os.fork() if pid > 0: log.info('Daemonizing, new PID is %d', pid) os._exit(0) def redirect_stream(system_stream, target_stream): '''Redirect *system_stream* to *target_stream* `system_stream` is a standard system stream such as ``sys.stdout``. `target_stream` is an open file object that should replace the corresponding system stream object. If `target_stream` is ``None``, defaults to opening the operating system's null device and using its file descriptor. ''' if target_stream is None: target_fd = os.open(os.devnull, os.O_RDWR) else: target_fd = target_stream.fileno() os.dup2(target_fd, system_stream.fileno()) s3ql-2.26/src/s3ql/mkfs.py0000644000175000017500000001574313177323070017007 0ustar nikrationikratio00000000000000''' mkfs.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from .logging import logging, setup_logging, QuietError from . import CURRENT_FS_REV, CTRL_INODE, ROOT_INODE from .backends.comprenc import ComprencBackend from .backends import s3 from .common import (get_backend_cachedir, get_backend, split_by_n, freeze_basic_mapping, time_ns) from .database import Connection from .metadata import dump_and_upload_metadata, create_tables from .parse_args import ArgumentParser from getpass import getpass from base64 import b64encode import os import shutil import stat import sys import time import atexit log = logging.getLogger(__name__) def parse_args(args): parser = ArgumentParser( description="Initializes an S3QL file system") parser.add_cachedir() parser.add_authfile() parser.add_debug() parser.add_quiet() parser.add_backend_options() parser.add_version() parser.add_storage_url() parser.add_argument("-L", default='', help="Filesystem label", dest="label", metavar='',) parser.add_argument("--max-obj-size", type=int, default=10240, metavar='', help="Maximum size of storage objects in KiB. Files bigger than this " "will be spread over multiple objects in the storage backend. 
" "Default: %(default)d KiB.") parser.add_argument("--plain", action="store_true", default=False, help="Create unencrypted file system.") parser.add_argument("--force", action="store_true", default=False, help="Overwrite any existing data.") options = parser.parse_args(args) return options def init_tables(conn): # Insert root directory now_ns = time_ns() conn.execute("INSERT INTO inodes (id,mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) " "VALUES (?,?,?,?,?,?,?,?)", (ROOT_INODE, stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH, os.getuid(), os.getgid(), now_ns, now_ns, now_ns, 1)) # Insert control inode, the actual values don't matter that much conn.execute("INSERT INTO inodes (id,mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) " "VALUES (?,?,?,?,?,?,?,?)", (CTRL_INODE, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0, now_ns, now_ns, now_ns, 42)) # Insert lost+found directory inode = conn.rowid("INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) " "VALUES (?,?,?,?,?,?,?)", (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR, os.getuid(), os.getgid(), now_ns, now_ns, now_ns, 1)) name_id = conn.rowid('INSERT INTO names (name, refcount) VALUES(?,?)', (b'lost+found', 1)) conn.execute("INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)", (name_id, inode, ROOT_INODE)) def main(args=None): if args is None: args = sys.argv[1:] options = parse_args(args) setup_logging(options) if options.max_obj_size < 1024: # This warning should never be converrted to an exception log.warning('Maximum object sizes less than 1 MiB will degrade ' 'performance.', extra={ 'force_log': True }) plain_backend = get_backend(options, raw=True) atexit.register(plain_backend.close) log.info("Before using S3QL, make sure to read the user's guide, especially\n" "the 'Important Rules to Avoid Loosing Data' section.") if isinstance(plain_backend, s3.Backend) and '.' in plain_backend.bucket_name: log.warning('S3 Buckets with names containing dots cannot be ' 'accessed using SSL!' '(cf. https://forums.aws.amazon.com/thread.jspa?threadID=130560)') if 's3ql_metadata' in plain_backend: if not options.force: raise QuietError("Found existing file system! Use --force to overwrite") log.info('Purging existing file system data..') plain_backend.clear() log.info('Please note that the new file system may appear inconsistent\n' 'for a while until the removals have propagated through the backend.') if not options.plain: if sys.stdin.isatty(): wrap_pw = getpass("Enter encryption password: ") if not wrap_pw == getpass("Confirm encryption password: "): raise QuietError("Passwords don't match.") else: wrap_pw = sys.stdin.readline().rstrip() wrap_pw = wrap_pw.encode('utf-8') # Generate data encryption passphrase log.info('Generating random encryption key...') fh = open('/dev/urandom', "rb", 0) # No buffering data_pw = fh.read(32) fh.close() backend = ComprencBackend(wrap_pw, ('lzma', 2), plain_backend) backend['s3ql_passphrase'] = data_pw backend['s3ql_passphrase_bak1'] = data_pw backend['s3ql_passphrase_bak2'] = data_pw backend['s3ql_passphrase_bak3'] = data_pw else: data_pw = None backend = ComprencBackend(data_pw, ('lzma', 2), plain_backend) atexit.unregister(plain_backend.close) atexit.register(backend.close) # Setup database cachepath = get_backend_cachedir(options.storage_url, options.cachedir) # There can't be a corresponding backend, so we can safely delete # these files. 
if os.path.exists(cachepath + '.db'): os.unlink(cachepath + '.db') if os.path.exists(cachepath + '-cache'): shutil.rmtree(cachepath + '-cache') log.info('Creating metadata tables...') db = Connection(cachepath + '.db') create_tables(db) init_tables(db) param = dict() param['revision'] = CURRENT_FS_REV param['seq_no'] = int(time.time()) param['label'] = options.label param['max_obj_size'] = options.max_obj_size * 1024 param['needs_fsck'] = False param['inode_gen'] = 0 param['max_inode'] = db.get_val('SELECT MAX(id) FROM inodes') param['last_fsck'] = time.time() param['last-modified'] = time.time() log.info('Dumping metadata...') dump_and_upload_metadata(backend, db, param) backend.store('s3ql_seq_no_%d' % param['seq_no'], b'Empty') with open(cachepath + '.params', 'wb') as fh: fh.write(freeze_basic_mapping(param)) if data_pw is not None: print('Please store the following master key in a safe location. It allows ', 'decryption of the S3QL file system in case the storage objects holding ', 'this information get corrupted:', '---BEGIN MASTER KEY---', ' '.join(split_by_n(b64encode(data_pw).decode(), 4)), '---END MASTER KEY---', sep='\n') if __name__ == '__main__': main(sys.argv[1:]) s3ql-2.26/src/s3ql/mount.py0000644000175000017500000007030313237312454017203 0ustar nikrationikratio00000000000000''' mount.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from .logging import logging, setup_logging, QuietError from . import fs, CURRENT_FS_REV from .backends.pool import BackendPool from .block_cache import BlockCache from .common import (get_backend_cachedir, get_seq_no, get_backend_factory, load_params, save_params) from .daemonize import daemonize from .database import Connection from .inode_cache import InodeCache from .metadata import (download_metadata, upload_metadata, dump_and_upload_metadata, dump_metadata) from .parse_args import ArgumentParser from .exit_stack import ExitStack from threading import Thread import _thread import argparse import faulthandler import llfuse import itertools import os import platform import subprocess import re import signal import resource import sys import tempfile import threading import time import shutil import atexit try: from systemd.daemon import notify as sd_notify except ImportError: sd_notify = None log = logging.getLogger(__name__) def install_thread_excepthook(): """work around sys.excepthook thread bug See http://bugs.python.org/issue1230540. Call once from __main__ before creating any threads. If using psyco, call psyco.cannotcompile(threading.Thread.run) since this replaces a new-style class method. 
""" init_old = threading.Thread.__init__ def init(self, *args, **kwargs): init_old(self, *args, **kwargs) run_old = self.run def run_with_except_hook(*args, **kw): try: run_old(*args, **kw) except SystemExit: raise except: sys.excepthook(*sys.exc_info()) self.run = run_with_except_hook threading.Thread.__init__ = init install_thread_excepthook() def main(args=None): '''Mount S3QL file system''' if args is None: args = sys.argv[1:] options = parse_args(args) # Save handler so that we can remove it when daemonizing stdout_log_handler = setup_logging(options) if not os.path.exists(options.mountpoint): raise QuietError('Mountpoint does not exist.', exitcode=36) if options.threads is None: options.threads = determine_threads(options) avail_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] if avail_fd == resource.RLIM_INFINITY: avail_fd = 4096 resource.setrlimit(resource.RLIMIT_NOFILE, (avail_fd, avail_fd)) # Subtract some fd's for random things we forgot, and a fixed number for # each upload thread (because each thread is using at least one socket and # at least one temporary file) avail_fd -= 32 + 3 * options.threads if options.max_cache_entries is None: if avail_fd <= 64: raise QuietError("Not enough available file descriptors.", exitcode=37) log.info('Autodetected %d file descriptors available for cache entries', avail_fd) options.max_cache_entries = avail_fd else: if options.max_cache_entries > avail_fd: log.warning("Up to %d cache entries requested, but detected only %d " "available file descriptors.", options.max_cache_entries, avail_fd) options.max_cache_entries = avail_fd if options.profile: import cProfile import pstats prof = cProfile.Profile() backend_factory = get_backend_factory(options.storage_url, options.backend_options, options.authfile, options.compress) backend_pool = BackendPool(backend_factory) atexit.register(backend_pool.flush) # Get paths cachepath = get_backend_cachedir(options.storage_url, options.cachedir) # Retrieve metadata with backend_pool() as backend: (param, db) = get_metadata(backend, cachepath) #if param['max_obj_size'] < options.min_obj_size: # raise QuietError('Maximum object size must be bigger than minimum object size.', # exitcode=2) # Handle --cachesize rec_cachesize = options.max_cache_entries * param['max_obj_size'] / 2 avail_cache = shutil.disk_usage(os.path.dirname(cachepath))[2] / 1024 if options.cachesize is None: options.cachesize = min(rec_cachesize, 0.8 * avail_cache) log.info('Setting cache size to %d MB', options.cachesize / 1024) elif options.cachesize > avail_cache: log.warning('Requested cache size %d MB, but only %d MB available', options.cachesize / 1024, avail_cache / 1024) if options.nfs: # NFS may try to look up '..', so we have to speed up this kind of query log.info('Creating NFS indices...') db.execute('CREATE INDEX IF NOT EXISTS ix_contents_inode ON contents(inode)') else: db.execute('DROP INDEX IF EXISTS ix_contents_inode') metadata_upload_thread = MetadataUploadThread(backend_pool, param, db, options.metadata_upload_interval) block_cache = BlockCache(backend_pool, db, cachepath + '-cache', options.cachesize * 1024, options.max_cache_entries) commit_thread = CommitThread(block_cache) operations = fs.Operations(block_cache, db, max_obj_size=param['max_obj_size'], inode_cache=InodeCache(db, param['inode_gen']), upload_event=metadata_upload_thread.event) block_cache.fs = operations metadata_upload_thread.fs = operations with ExitStack() as cm: log.info('Mounting %s at %s...', options.storage_url, options.mountpoint) try: 
llfuse.init(operations, options.mountpoint, get_fuse_opts(options)) except RuntimeError as exc: raise QuietError(str(exc), exitcode=39) unmount_clean = False def unmount(): log.info("Unmounting file system...") llfuse.close(unmount=unmount_clean) cm.callback(unmount) if options.fg: faulthandler.enable() faulthandler.register(signal.SIGUSR1) else: if stdout_log_handler: logging.getLogger().removeHandler(stdout_log_handler) global crit_log_fh crit_log_fh = open(os.path.join(options.cachedir, 'mount.s3ql_crit.log'), 'a') faulthandler.enable(crit_log_fh) faulthandler.register(signal.SIGUSR1, file=crit_log_fh) daemonize(options.cachedir) mark_metadata_dirty(backend, cachepath, param) block_cache.init(options.threads) cm.callback(block_cache.destroy) metadata_upload_thread.start() cm.callback(metadata_upload_thread.join) cm.callback(metadata_upload_thread.stop) commit_thread.start() cm.callback(commit_thread.join) cm.callback(commit_thread.stop) if options.upstart: os.kill(os.getpid(), signal.SIGSTOP) if sd_notify is not None: sd_notify('READY=1') sd_notify('MAINPID=%d' % os.getpid()) exc_info = setup_exchook() workers = 1 if options.single else None # use default if options.profile: ret = prof.runcall(llfuse.main, workers) else: ret = llfuse.main(workers) if ret is not None: raise RuntimeError('Received signal %d, terminating' % (ret,)) # Allow operations to terminate while block_cache is still available # (destroy() will be called again when from llfuse.close(), but at that # point the block cache is no longer available). with llfuse.lock: operations.destroy() # Re-raise if main loop terminated due to exception in other thread if exc_info: (exc_inst, exc_tb) = exc_info raise exc_inst.with_traceback(exc_tb) log.info("FUSE main loop terminated.") unmount_clean = True # At this point, there should be no other threads left # Do not update .params yet, dump_metadata() may fail if the database is # corrupted, in which case we want to force an fsck. 
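    # Record the highest inode number in use so that the next mount can
    # detect (approaching) inode number exhaustion, see get_metadata().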
param['max_inode'] = db.get_val('SELECT MAX(id) FROM inodes') if operations.failsafe: log.warning('File system errors encountered, marking for fsck.') param['needs_fsck'] = True with backend_pool() as backend: seq_no = get_seq_no(backend) if metadata_upload_thread.db_mtime == os.stat(cachepath + '.db').st_mtime: log.info('File system unchanged, not uploading metadata.') del backend['s3ql_seq_no_%d' % param['seq_no']] param['seq_no'] -= 1 save_params(cachepath, param) elif seq_no == param['seq_no']: param['last-modified'] = time.time() dump_and_upload_metadata(backend, db, param) save_params(cachepath, param) else: log.error('Remote metadata is newer than local (%d vs %d), ' 'refusing to overwrite!', seq_no, param['seq_no']) log.error('The locally cached metadata will be *lost* the next time the file system ' 'is mounted or checked and has therefore been backed up.') for name in (cachepath + '.params', cachepath + '.db'): for i in range(4)[::-1]: if os.path.exists(name + '.%d' % i): os.rename(name + '.%d' % i, name + '.%d' % (i + 1)) os.rename(name, name + '.0') log.info('Cleaning up local metadata...') db.execute('ANALYZE') db.execute('VACUUM') db.close() if options.profile: with tempfile.NamedTemporaryFile() as tmp, \ open('s3ql_profile.txt', 'w') as fh: prof.dump_stats(tmp.name) p = pstats.Stats(tmp.name, stream=fh) p.strip_dirs() p.sort_stats('cumulative') p.print_stats(50) p.sort_stats('time') p.print_stats(50) log.info('All done.') def get_system_memory(): '''Attempt to determine total system memory If amount cannot be determined, emits warning and returns -1. ''' # MacOS X doesn't support sysconf('SC_PHYS_PAGES') if platform.system() == 'Darwin': try: out = subprocess.check_output(['sysctl', 'hw.memsize'], universal_newlines=True) except subprocess.CalledProcessError as exc: log.warning('Cannot determine system memory, sysctl failed with %s', exc.output) return -1 # output of sysctl is 'hw.memsize: #'. Strip the prefix. 
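        # e.g. out == 'hw.memsize: 17179869184\n' on a machine with 16 GiB
        # of RAM (illustrative value).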
hit = re.match(r'^hw.memsize: ([0-9]+)$', out) if not hit: log.warning('Cannot determine system memory, unable to parse sysctl output.') return -1 return int(hit.group(1)) else: try: return os.sysconf('SC_PHYS_PAGES') * os.sysconf('SC_PAGESIZE') except ValueError: log.warning('Unable to determine number of CPU cores (sysconf failed).') return -1 # Memory required for LZMA compression in MB (from xz(1)) LZMA_MEMORY = { 0: 3, 1: 9, 2: 17, 3: 32, 4: 48, 5: 94, 6: 94, 7: 186, 8: 370, 9: 674 } def determine_threads(options): '''Return optimum number of upload threads''' try: cores = os.sysconf('SC_NPROCESSORS_ONLN') except ValueError: log.warning('Unable to determine number of CPU cores (sysconf failed).') cores = -1 memory = get_system_memory() if options.compress[0] == 'lzma': # Keep this in sync with compression level in backends/common.py # Memory usage according to man xz(1) mem_per_thread = LZMA_MEMORY[options.compress[1]] * 1024 ** 2 else: # Only check LZMA memory usage mem_per_thread = 0 if cores == -1: log.warning("Can't determine number of cores, using 2 upload threads.") return 2 elif memory == -1 and mem_per_thread != 0: log.warning("Can't determine available memory, using 2 upload threads.") return 2 elif 2 * cores * mem_per_thread > (memory / 2): threads = min(int((memory / 2) // mem_per_thread), 10) if threads > 0: log.info('Using %d upload threads (memory limited).', threads) else: log.warning('Compression will require %d MiB memory ' '(%d%% of total system memory', mem_per_thread / 1024 ** 2, mem_per_thread * 100 / memory) threads = 1 return threads else: threads = min(2 * cores, 10) log.info("Using %d upload threads.", threads) return threads def get_metadata(backend, cachepath): '''Retrieve metadata''' seq_no = get_seq_no(backend) # When there was a crash during metadata rotation, we may end up # without an s3ql_metadata object. 
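    # The uploader writes the new dump under 's3ql_metadata_new' before
    # moving it into place, so after such a crash only the '_new' object may
    # be present; fall back to it in that case.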
meta_obj_name = 's3ql_metadata' if meta_obj_name not in backend: meta_obj_name += '_new' # Check for cached metadata db = None if os.path.exists(cachepath + '.params'): param = load_params(cachepath) if param['seq_no'] < seq_no: log.info('Ignoring locally cached metadata (outdated).') param = backend.lookup(meta_obj_name) elif param['seq_no'] > seq_no: raise QuietError("File system not unmounted cleanly, run fsck!", exitcode=30) else: log.info('Using cached metadata.') db = Connection(cachepath + '.db') else: param = backend.lookup(meta_obj_name) # Check for unclean shutdown if param['seq_no'] < seq_no: raise QuietError('Backend reports that fs is still mounted elsewhere, aborting.', exitcode=31) # Check revision if param['revision'] < CURRENT_FS_REV: raise QuietError('File system revision too old, please run `s3qladm upgrade` first.', exitcode=32) elif param['revision'] > CURRENT_FS_REV: raise QuietError('File system revision too new, please update your ' 'S3QL installation.', exitcode=33) # Check that the fs itself is clean if param['needs_fsck']: raise QuietError("File system damaged or not unmounted cleanly, run fsck!", exitcode=30) if time.time() - param['last_fsck'] > 60 * 60 * 24 * 31: log.warning('Last file system check was more than 1 month ago, ' 'running fsck.s3ql is recommended.') if param['max_inode'] > 2 ** 32 - 50000: raise QuietError('Insufficient free inodes, fsck run required.', exitcode=34) elif param['max_inode'] > 2 ** 31: log.warning('Few free inodes remaining, running fsck is recommended') if os.path.exists(cachepath + '-cache'): for i in itertools.count(): bak_name = '%s-cache.bak%d' % (cachepath, i) if not os.path.exists(bak_name): break log.warning('Found outdated cache directory (%s), renaming to .bak%d', cachepath + '-cache', i) log.warning('You should delete this directory once you are sure that ' 'everything is in order.') os.rename(cachepath + '-cache', bak_name) # Download metadata if not db: db = download_metadata(backend, cachepath + '.db') save_params(cachepath, param) return (param, db) def mark_metadata_dirty(backend, cachepath, param): '''Mark metadata as dirty and increase sequence number''' param['seq_no'] += 1 param['needs_fsck'] = True save_params(cachepath, param) backend['s3ql_seq_no_%d' % param['seq_no']] = b'Empty' param['needs_fsck'] = False def get_fuse_opts(options): '''Return fuse options for given command line options''' fuse_opts = [ "nonempty", 'fsname=%s' % options.storage_url, 'subtype=s3ql', 'big_writes', 'max_write=131072', 'no_remote_lock' ] if platform.system() == 'Darwin': # FUSE4X and OSXFUSE claim to support nonempty, but # neither of them actually do. 
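        # Since the option is not honoured there anyway, drop it rather than
        # rely on behaviour that only exists on paper.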
fuse_opts.remove('nonempty') if options.allow_other: fuse_opts.append('allow_other') if options.allow_root: fuse_opts.append('allow_root') if options.allow_other or options.allow_root: fuse_opts.append('default_permissions') return fuse_opts def parse_args(args): '''Parse command line''' # Parse fstab-style -o options if '--' in args: max_idx = args.index('--') else: max_idx = len(args) if '-o' in args[:max_idx]: pos = args.index('-o') val = args[pos + 1] del args[pos] del args[pos] for opt in reversed(val.split(',')): if '=' in opt: (key, val) = opt.split('=') args.insert(pos, val) args.insert(pos, '--' + key) else: if opt in ('rw', 'defaults', 'auto', 'noauto', 'user', 'nouser', 'dev', 'nodev', 'suid', 'nosuid', 'atime', 'diratime', 'exec', 'noexec', 'group', 'mand', 'nomand', '_netdev', 'nofail', 'norelatime', 'strictatime', 'owner', 'users', 'nobootwait'): continue elif opt == 'ro': raise QuietError('Read-only mounting not supported.', exitcode=35) args.insert(pos, '--' + opt) def compression_type(s): hit = re.match(r'^([a-z0-9]+)(?:-([0-9]))?$', s) if not hit: raise argparse.ArgumentTypeError('%s is not a valid --compress value' % s) alg = hit.group(1) lvl = hit.group(2) if alg not in ('none', 'zlib', 'bzip2', 'lzma'): raise argparse.ArgumentTypeError('Invalid compression algorithm: %s' % alg) if lvl is None: lvl = 6 else: lvl = int(lvl) if alg == 'none': alg = None return (alg, lvl) parser = ArgumentParser( description="Mount an S3QL file system.") parser.add_log('~/.s3ql/mount.log') parser.add_cachedir() parser.add_authfile() parser.add_debug() parser.add_quiet() parser.add_backend_options() parser.add_version() parser.add_storage_url() parser.add_argument("mountpoint", metavar='', type=os.path.abspath, help='Where to mount the file system') parser.add_argument("--cachesize", type=int, default=None, metavar='', help="Cache size in KiB (default: autodetect).") parser.add_argument("--max-cache-entries", type=int, default=None, metavar='', help="Maximum number of entries in cache (default: autodetect). " 'Each cache entry requires one file descriptor, so if you increase ' 'this number you have to make sure that your process file descriptor ' 'limit (as set with `ulimit -n`) is high enough (at least the number ' 'of cache entries + 100).') parser.add_argument("--allow-other", action="store_true", default=False, help= 'Normally, only the user who called `mount.s3ql` can access the mount ' 'point. This user then also has full access to it, independent of ' 'individual file permissions. If the `--allow-other` option is ' 'specified, other users can access the mount point as well and ' 'individual file permissions are taken into account for all users.') parser.add_argument("--allow-root", action="store_true", default=False, help='Like `--allow-other`, but restrict access to the mounting ' 'user and the root user.') parser.add_argument("--fg", action="store_true", default=False, help="Do not daemonize, stay in foreground") parser.add_argument("--upstart", action="store_true", default=False, help="Stay in foreground and raise SIGSTOP once mountpoint " "is up.") parser.add_argument("--compress", action="store", default='lzma-6', metavar='', type=compression_type, help="Compression algorithm and compression level to use when " "storing new data. *algorithm* may be any of `lzma`, `bzip2`, " "`zlib`, or none. *lvl* may be any integer from 0 (fastest) " "to 9 (slowest). 
Default: `%(default)s`") parser.add_argument("--metadata-upload-interval", action="store", type=int, default=24 * 60 * 60, metavar='', help='Interval in seconds between complete metadata uploads. ' 'Set to 0 to disable. Default: 24h.') parser.add_argument("--threads", action="store", type=int, default=None, metavar='', help='Number of parallel upload threads to use (default: auto).') parser.add_argument("--nfs", action="store_true", default=False, help='Enable some optimizations for exporting the file system ' 'over NFS. (default: %(default)s)') parser.add_argument("--single", action="store_true", default=False, help=argparse.SUPPRESS) parser.add_argument("--profile", action="store_true", default=False, help=argparse.SUPPRESS) # Not yet implemented. When implementing this, don't forget to # uncomment check against param['max_obj_size'] in main(). #parser.add_argument("--min-obj-size", type=int, default=512, metavar='', # help="Minimum size of storage objects in KiB. Files smaller than this " # "may be combined into groups that are stored as single objects " # "in the storage backend. Default: %(default)d KB.") options = parser.parse_args(args) if options.allow_other and options.allow_root: parser.error("--allow-other and --allow-root are mutually exclusive.") if not options.log and not options.fg: parser.error("Please activate logging to a file or syslog, or use the --fg option.") if options.profile: options.single = True if options.upstart: options.fg = True if options.metadata_upload_interval == 0: options.metadata_upload_interval = None return options class MetadataUploadThread(Thread): ''' Periodically upload metadata. Upload is done every `interval` seconds, and whenever `event` is set. To terminate thread, set `quit` attribute as well as `event` event. This class uses the llfuse global lock. When calling objects passed in the constructor, the global lock is acquired first. ''' def __init__(self, backend_pool, param, db, interval): super().__init__() self.backend_pool = backend_pool self.param = param self.db = db self.interval = interval self.daemon = True self.db_mtime = os.stat(db.file).st_mtime self.event = threading.Event() self.quit = False self.name = 'Metadata-Upload-Thread' # Can't assign in constructor, because Operations instance needs # access to self.event as well. 
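        # main() wires this up after constructing fs.Operations: the thread
        # is created first (Operations needs self.event as its upload_event),
        # and the back-reference is filled in afterwards via
        # 'metadata_upload_thread.fs = operations'.  run() asserts that this
        # has happened before doing any work.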
self.fs = None def run(self): log.debug('started') assert self.fs is not None while not self.quit: self.event.wait(self.interval) self.event.clear() if self.quit: break with llfuse.lock: if self.quit: break new_mtime = os.stat(self.db.file).st_mtime if self.db_mtime == new_mtime: log.info('File system unchanged, not uploading metadata.') continue log.info('Dumping metadata...') fh = tempfile.TemporaryFile() dump_metadata(self.db, fh) with self.backend_pool() as backend: seq_no = get_seq_no(backend) if seq_no > self.param['seq_no']: log.error('Remote metadata is newer than local (%d vs %d), ' 'refusing to overwrite and switching to failsafe mode!', seq_no, self.param['seq_no']) self.fs.failsafe = True fh.close() break fh.seek(0) self.param['last-modified'] = time.time() # Temporarily decrease sequence no, this is not the final upload self.param['seq_no'] -= 1 upload_metadata(backend, fh, self.param) self.param['seq_no'] += 1 fh.close() self.db_mtime = new_mtime # Break reference loop self.fs = None log.debug('finished') def stop(self): '''Signal thread to terminate''' log.debug('started') self.quit = True self.event.set() def setup_exchook(): '''Send SIGTERM if any other thread terminates with an exception The exc_info will be saved in the list object returned by this function. ''' main_thread = _thread.get_ident() old_exchook = sys.excepthook exc_info = [] def exchook(exc_type, exc_inst, tb): reporting_thread = _thread.get_ident() if reporting_thread != main_thread: if exc_info: log.warning("Unhandled top-level exception during shutdown " "(will not be re-raised)") else: log.debug('recording exception %s', exc_inst) os.kill(os.getpid(), signal.SIGTERM) exc_info.append(exc_inst) exc_info.append(tb) old_exchook(exc_type, exc_inst, tb) # If the main thread re-raised exception, there is no need to call # excepthook again elif exc_info and exc_info[0] is exc_inst: log.debug('Suppressing exception hook for re-raised %s', exc_inst) else: old_exchook(exc_type, exc_inst, tb) sys.excepthook = exchook return exc_info class CommitThread(Thread): ''' Periodically upload dirty blocks. This class uses the llfuse global lock. When calling objects passed in the constructor, the global lock is acquired first. ''' def __init__(self, block_cache): super().__init__() self.block_cache = block_cache self.stop_event = threading.Event() self.name = 'CommitThread' def run(self): log.debug('started') while not self.stop_event.is_set(): did_sth = False with llfuse.lock: stamp = time.time() # Need to make copy, since we aren't allowed to change # dict while iterating through it. The performance hit doesn't seem # to be that bad: # >>> from timeit import timeit # >>> timeit("k=0\nfor el in list(d.values()):\n k += el", # ... setup='\nfrom collections import OrderedDict\nd = OrderedDict()\nfor i in range(5000):\n d[i]=i\n', # ... number=500)/500 * 1e3 # 1.3769531380003173 # >>> timeit("k=0\nfor el in d.values(n:\n k += el", # ... setup='\nfrom collections import OrderedDict\nd = OrderedDict()\nfor i in range(5000):\n d[i]=i\n', # ... 
number=500)/500 * 1e3 # 1.456586996000624 for el in list(self.block_cache.cache.values()): if self.stop_event.is_set(): break if stamp - el.last_access < 10: break if self.block_cache.upload_if_dirty(el): did_sth = True if not did_sth: self.stop_event.wait(5) log.debug('finished') def stop(self): '''Signal thread to terminate''' log.debug('started') self.stop_event.set() if __name__ == '__main__': main(sys.argv[1:]) s3ql-2.26/src/s3ql/lock.py0000644000175000017500000000271412742247106016773 0ustar nikrationikratio00000000000000''' lock.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from .logging import logging, setup_logging, QuietError from .common import assert_fs_owner from .parse_args import ArgumentParser import llfuse import os import sys import textwrap log = logging.getLogger(__name__) def parse_args(args): '''Parse command line''' parser = ArgumentParser( description=textwrap.dedent('''\ Makes the given directory tree(s) immutable. No changes of any sort can be performed on the tree after that. Immutable entries can only be deleted with s3qlrm. ''')) parser.add_debug() parser.add_quiet() parser.add_version() parser.add_argument('path', metavar='', nargs='+', help='Directories to make immutable.', type=(lambda x: x.rstrip('/'))) return parser.parse_args(args) def main(args=None): '''Make directory tree immutable''' if args is None: args = sys.argv[1:] options = parse_args(args) setup_logging(options) for name in options.path: if os.path.ismount(name): raise QuietError('%s is a mount point.' % name) ctrlfile = assert_fs_owner(name) fstat = os.stat(name) llfuse.setxattr(ctrlfile, 'lock', ('%d' % fstat.st_ino).encode()) if __name__ == '__main__': main(sys.argv[1:]) s3ql-2.26/src/s3ql/inode_cache.py0000644000175000017500000002111513223730045020252 0ustar nikrationikratio00000000000000''' inode_cache.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from .logging import logging # Ensure use of custom logger class from .database import NoSuchRowError import llfuse import sys log = logging.getLogger(__name__) CACHE_SIZE = 100 ATTRIBUTES = ('mode', 'refcount', 'uid', 'gid', 'size', 'locked', 'rdev', 'atime_ns', 'mtime_ns', 'ctime_ns', 'id') ATTRIBUTE_STR = ', '.join(ATTRIBUTES) UPDATE_ATTRS = ('mode', 'refcount', 'uid', 'gid', 'size', 'locked', 'rdev', 'atime_ns', 'mtime_ns', 'ctime_ns') UPDATE_STR = ', '.join('%s=?' 
% x for x in UPDATE_ATTRS) MAX_INODE = 2 ** 32 - 1 class _Inode: '''An inode with its attributes''' __slots__ = ATTRIBUTES + ('dirty', 'generation') def __init__(self, generation): super().__init__() self.dirty = False self.generation = generation def entry_attributes(self): attr = llfuse.EntryAttributes() attr.st_nlink = self.refcount attr.st_blocks = (self.size + 511) // 512 attr.st_ino = self.id # Timeout, can effectively be infinite since attribute changes # are only triggered by the kernel's own requests attr.attr_timeout = 3600 attr.entry_timeout = 3600 # We want our blocksize for IO as large as possible to get large # write requests attr.st_blksize = 128 * 1024 attr.st_mode = self.mode attr.st_uid = self.uid attr.st_gid = self.gid attr.st_size = self.size attr.st_rdev = self.rdev attr.st_atime_ns = self.atime_ns attr.st_mtime_ns = self.mtime_ns attr.st_ctime_ns = self.ctime_ns attr.generation = self.generation return attr def __eq__(self, other): # Ill defined - should we compare the inode id or all the attributes? # What does it even mean to have the same id but different attributes? # Maybe we should we raise an Exception in that case? return NotImplemented def __hash__(self): return self.id def copy(self): copy = _Inode(self.generation) for attr in ATTRIBUTES: setattr(copy, attr, getattr(self, attr)) return copy def __setattr__(self, name, value): if name != 'dirty': object.__setattr__(self, 'dirty', True) object.__setattr__(self, name, value) def __del__(self): if not self.dirty: return # Force execution of sys.excepthook (exceptions raised # by __del__ are ignored) try: raise RuntimeError('BUG ALERT: Dirty inode was destroyed!') except RuntimeError: exc_info = sys.exc_info() sys.excepthook(*exc_info) class InodeCache(object): ''' This class maps the `inode` SQL table to a dict, caching the rows. If the cache is full and a row is not in the cache, the least-recently retrieved row is deleted from the cache. This means that accessing cached rows will *not* change the order of their expiration. Attributes: ----------- :attrs: inode indexed dict holding the attributes :cached_rows: list of the inodes that are in cache :pos: position of the most recently retrieved inode in 'cached_rows'. Notes ----- Callers should keep in mind that the changes of the returned inode object will only be written to the database if the inode is still in the cache when its attributes are updated: it is possible for the caller to keep a reference to an inode when that inode has already been expired from the InodeCache. Modifications to this inode object will be lost(!). Callers should therefore use the returned inode objects only as long as they can guarantee that no other calls to InodeCache are made that may result in expiration of inodes from the cache. Moreover, the caller must make sure that he does not call InodeCache methods while a database transaction is active that may be rolled back. This would rollback database updates performed by InodeCache, which are generally for inodes that are expired from the cache and therefore *not* directly related to the effects of the current method call. 
''' def __init__(self, db, inode_gen): self.attrs = dict() self.cached_rows = list() self.db = db self.generation = inode_gen # Fill the cache with dummy data, so that we don't have to # check if the cache is full or not (it will always be full) for _ in range(CACHE_SIZE): self.cached_rows.append(None) self.pos = 0 def __delitem__(self, inode): if self.db.execute('DELETE FROM inodes WHERE id=?', (inode,)) != 1: raise KeyError('No such inode') inode = self.attrs.pop(inode, None) if inode is not None: inode.dirty = False def __getitem__(self, id_): try: return self.attrs[id_] except KeyError: try: inode = self.getattr(id_) except NoSuchRowError: raise KeyError('No such inode: %d' % id_) old_id = self.cached_rows[self.pos] self.cached_rows[self.pos] = id_ self.pos = (self.pos + 1) % CACHE_SIZE if old_id is not None: try: old_inode = self.attrs[old_id] except KeyError: # We may have deleted that inode pass else: del self.attrs[old_id] self.setattr(old_inode) self.attrs[id_] = inode return inode def getattr(self, id_): #@ReservedAssignment attrs = self.db.get_row("SELECT %s FROM inodes WHERE id=? " % ATTRIBUTE_STR, (id_,)) inode = _Inode(self.generation) for (i, id_) in enumerate(ATTRIBUTES): setattr(inode, id_, attrs[i]) inode.dirty = False return inode def create_inode(self, **kw): bindings = tuple(kw[x] for x in ATTRIBUTES if x in kw) columns = ', '.join(x for x in ATTRIBUTES if x in kw) values = ', '.join('?' * len(kw)) id_ = self.db.rowid('INSERT INTO inodes (%s) VALUES(%s)' % (columns, values), bindings) if id_ > MAX_INODE - 1: self.db.execute('DELETE FROM inodes WHERE id=?', (id_,)) raise OutOfInodesError() return self[id_] def setattr(self, inode): if not inode.dirty: return inode.dirty = False self.db.execute("UPDATE inodes SET %s WHERE id=?" % UPDATE_STR, [ getattr(inode, x) for x in UPDATE_ATTRS ] + [inode.id]) def flush_id(self, id_): if id_ in self.attrs: self.setattr(self.attrs[id_]) def destroy(self): '''Flush all entries and empty cache''' # Note: this method is currently also used for dropping the cache for i in range(len(self.cached_rows)): id_ = self.cached_rows[i] self.cached_rows[i] = None if id_ is not None: try: inode = self.attrs[id_] except KeyError: # We may have deleted that inode pass else: del self.attrs[id_] self.setattr(inode) assert len(self.attrs) == 0 def flush(self): '''Flush all entries to database''' # We don't want to use dict.itervalues() since # the dict may change while we iterate for i in range(len(self.cached_rows)): id_ = self.cached_rows[i] if id_ is not None: try: inode = self.attrs[id_] except KeyError: # We may have deleted that inode pass else: self.setattr(inode) def drop(self): '''Drop cache (after flushing)''' self.destroy() def __del__(self): if len(self.attrs) == 0: return # Force execution of sys.excepthook (exceptions raised # by __del__ are ignored) try: raise RuntimeError('InodeCache instance was destroyed without calling destroy()') except RuntimeError: exc_info = sys.exc_info() sys.excepthook(*exc_info) class OutOfInodesError(Exception): def __str__(self): return 'Could not find free rowid in inode table' s3ql-2.26/src/s3ql/endian_indep.h0000644000175000017500000000166312742247106020261 0ustar nikrationikratio00000000000000/* endian_indep.h - this file is part of S3QL (http://s3ql.googlecode.com) Copyright (C) Nikolaus Rath This program can be distributed under the terms of the GNU GPLv3. 
*/ #ifndef __ENDIAN_INDEP_H__ #define __ENDIAN_INDEP_H__ #ifdef __APPLE__ #include #define htobe16(x) OSSwapHostToBigInt16(x) #define htole16(x) OSSwapHostToLittleInt16(x) #define be16toh(x) OSSwapBigToHostInt16(x) #define le16toh(x) OSSwapLittleToHostInt16(x) #define htobe32(x) OSSwapHostToBigInt32(x) #define htole32(x) OSSwapHostToLittleInt32(x) #define be32toh(x) OSSwapBigToHostInt32(x) #define le32toh(x) OSSwapLittleToHostInt32(x) #define htobe64(x) OSSwapHostToBigInt64(x) #define htole64(x) OSSwapHostToLittleInt64(x) #define be64toh(x) OSSwapBigToHostInt64(x) #define le64toh(x) OSSwapLittleToHostInt64(x) #elif __FreeBSD__ #include #else #include #endif #endif /* __ENDIAN_INDEP_H__ */ s3ql-2.26/src/s3ql/__init__.py0000644000175000017500000000301213246754204017574 0ustar nikrationikratio00000000000000''' __init__.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' # We must not import s3ql.logging.logging as s3ql.logging, # otherwise future imports of s3ql.logging will incorrectly # use s3ql.logging.logging. from . import logging # Ensure use of custom logger class from llfuse import ROOT_INODE # False positives, pylint doesn't grok that these are module names #pylint: disable=E0603 __all__ = [ 'adm', 'backends', 'block_cache', 'common', 'calc_mro', 'cp', 'ctrl', 'daemonize', 'database', 'deltadump', 'fs', 'fsck', 'inherit_docstrings', 'inode_cache', 'lock', 'logging', 'metadata', 'mkfs', 'mount', 'parse_args', 'remove', 'statfs', 'umount', 'VERSION', 'CURRENT_FS_REV', 'REV_VER_MAP', 'RELEASE', 'BUFSIZE', 'CTRL_NAME', 'CTRL_INODE' ] VERSION = '2.26' RELEASE = '%s' % VERSION # TODO: On next revision bump, consider removing support for TIME # values from deltadump.pyx. It is only present to allow upgrades # from revisions <= 22. CURRENT_FS_REV = 23 # Buffer size when writing objects BUFSIZE = 64 * 1024 # Name and inode of the special s3ql control file CTRL_NAME = '.__s3ql__ctrl__' CTRL_INODE = 2 # Maps file system revisions to the last S3QL version that # supported this revision. REV_VER_MAP = { 22: '2.16', 21: '2.13', 20: '2.9', 16: '1.15', 15: '1.10', 14: '1.8.1', 13: '1.6', 12: '1.3', 11: '1.0.1', } s3ql-2.26/src/s3ql/logging.py0000644000175000017500000001354512742247106017475 0ustar nikrationikratio00000000000000''' logging.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' import logging import logging.handlers import warnings import sys import os.path class QuietError(Exception): ''' QuietError is the base class for exceptions that should not result in a stack trace being printed. It is typically used for exceptions that are the result of the user supplying invalid input data. The exception argument should be a string containing sufficient information about the problem. 
''' def __init__(self, msg='', exitcode=1): super().__init__() self.msg = msg #: Exit code to use when terminating process self.exitcode = exitcode def __str__(self): return self.msg class MyFormatter(logging.Formatter): '''Prepend severity to log message if it exceeds threshold''' def format(self, record): s = super().format(record) if record.levelno > logging.INFO: s = '%s: %s' % (record.levelname, s) return s def create_handler(target): '''Create logging handler for given target''' if target.lower() == 'syslog': handler = logging.handlers.SysLogHandler('/dev/log') formatter = logging.Formatter(os.path.basename(sys.argv[0]) + '[%(process)s:%(threadName)s] ' + '%(name)s.%(funcName)s: %(message)s') else: fullpath = os.path.expanduser(target) dirname = os.path.dirname(fullpath) if dirname and not os.path.exists(dirname): try: os.makedirs(dirname) except PermissionError: raise QuietError('No permission to create log file %s' % fullpath, exitcode=10) try: handler = logging.handlers.RotatingFileHandler(fullpath, maxBytes=10 * 1024**2, backupCount=5) except PermissionError: raise QuietError('No permission to write log file %s' % fullpath, exitcode=10) formatter = logging.Formatter('%(asctime)s.%(msecs)03d %(process)s:%(threadName)s ' '%(name)s.%(funcName)s: %(message)s', datefmt="%Y-%m-%d %H:%M:%S") handler.setFormatter(formatter) return handler def setup_logging(options): # We want to be able to detect warnings and higher severities # in the captured test output. 'critical' has too many potential # false positives, so we rename this level to "FATAL". logging.addLevelName(logging.CRITICAL, 'FATAL') root_logger = logging.getLogger() if root_logger.handlers: root_logger.debug("Logging already initialized.") return stdout_handler = add_stdout_logging(options.quiet) if hasattr(options, 'log') and options.log: root_logger.addHandler(create_handler(options.log)) elif options.debug and (not hasattr(options, 'log') or not options.log): # When we have debugging enabled but no separate log target, # make stdout logging more detailed. formatter = logging.Formatter('%(asctime)s.%(msecs)03d %(process)s %(levelname)-8s ' '%(threadName)s %(name)s.%(funcName)s: %(message)s', datefmt="%Y-%m-%d %H:%M:%S") stdout_handler.setFormatter(formatter) stdout_handler.setLevel(logging.NOTSET) setup_excepthook() if options.debug: if 'all' in options.debug: root_logger.setLevel(logging.DEBUG) else: root_logger.setLevel(logging.INFO) for module in options.debug: logging.getLogger(module).setLevel(logging.DEBUG) logging.disable(logging.NOTSET) else: root_logger.setLevel(logging.INFO) logging.disable(logging.DEBUG) logging.captureWarnings(capture=True) return stdout_handler def setup_excepthook(): '''Modify sys.excepthook to log exceptions Also makes sure that exceptions derived from `QuietException` do not result in stacktraces. 
''' def excepthook(type_, val, tb): root_logger = logging.getLogger() if isinstance(val, QuietError): root_logger.error(val.msg) sys.exit(val.exitcode) else: root_logger.error('Uncaught top-level exception:', exc_info=(type_, val, tb)) sys.exit(1) sys.excepthook = excepthook def add_stdout_logging(quiet=False): '''Add stdout logging handler to root logger''' root_logger = logging.getLogger() formatter = MyFormatter('%(message)s') handler = logging.StreamHandler(sys.stderr) if quiet: handler.setLevel(logging.WARNING) else: handler.setLevel(logging.INFO) handler.setFormatter(formatter) root_logger.addHandler(handler) return handler class Logger(logging.getLoggerClass()): ''' This class has the following features in addition to `logging.Logger`: * Log messages that are emitted with an *log_once* attribute in the *extra* parameter are only emitted once per logger. ''' def __init__(self, name): super().__init__(name) self.log_cache = set() def handle(self, record): if hasattr(record, 'log_once') and record.log_once: id_ = hash((record.name, record.levelno, record.msg, record.args, record.exc_info)) if id_ in self.log_cache: return self.log_cache.add(id_) return super().handle(record) logging.setLoggerClass(Logger) # Convenience object for use in logging calls, e.g. # log.warning('This will be printed only once', extra=LOG_ONCE) LOG_ONCE = { 'log_once': True } s3ql-2.26/src/s3ql/deltadump.pyx0000644000175000017500000005530312654275324020221 0ustar nikrationikratio00000000000000''' deltadump.pyx - this file is part of S3QL (http://s3ql.googlecode.com) Copyright © 2008 Nikolaus Rath This program can be distributed under the terms of the GNU GPLv3. ''' # Analysis of Cython code not really working yet #@PydevCodeAnalysisIgnore from cpython.long cimport PyLong_AsVoidPtr from cpython.exc cimport PyErr_NoMemory from libc.stdio cimport (FILE, const_char, const_void, fclose as fclose_c, fwrite as fwrite_c, fread as fread_c, ftell) from libc.string cimport strerror from libc.errno cimport errno from libc.stdlib cimport calloc as calloc_c, free as free_c from libc.stdint cimport (int64_t, uint8_t, uint16_t, uint32_t, uint64_t) from posix.unistd cimport dup, lseek, SEEK_SET # These import are not (yet?) 
in the Cython provided cpython module cdef extern from *: object PyUnicode_FromString(const_char *u) cdef extern from 'stdint.h' nogil: int UINT8_MAX int UINT16_MAX int UINT32_MAX int INT_MAX cdef extern from 'stdio.h' nogil: FILE * fdopen(int fd, const_char * mode) int fflush(FILE * stream) int fileno(FILE * stream) cdef extern from 'endian_indep.h' nogil: uint64_t htole64(uint64_t host_64bits) uint64_t le64toh(uint64_t little_endian_64bits) cdef extern from 'sqlite3.h' nogil: ctypedef int sqlite3 ctypedef int sqlite3_stmt ctypedef int64_t sqlite3_int64 const_char *sqlite3_errmsg(sqlite3*) int sqlite3_prepare_v2(sqlite3 * db, char * zSql, int nByte, sqlite3_stmt ** ppStmt, char ** pzTail) int sqlite3_step(sqlite3_stmt *) sqlite3_int64 sqlite3_column_int64(sqlite3_stmt * , int iCol) const_void * sqlite3_column_blob(sqlite3_stmt * , int iCol) int sqlite3_column_bytes(sqlite3_stmt * , int iCol) int sqlite3_bind_blob(sqlite3_stmt * , int iCol, const_void * , int n, void(*)(void *)) int sqlite3_bind_int64(sqlite3_stmt * , int iCol, sqlite3_int64) int sqlite3_reset(sqlite3_stmt * pStmt) int sqlite3_finalize(sqlite3_stmt * pStmt) int sqlite3_column_type(sqlite3_stmt * , int iCol) double sqlite3_column_double(sqlite3_stmt * , int iCol) int sqlite3_bind_double(sqlite3_stmt * , int, double) const_char *sqlite3_compileoption_get(int N) const_char *sqlite3_libversion() int sqlite3_close(sqlite3*) int sqlite3_open_v2(const_char *filename, sqlite3 **ppDb, int flags, const_char *zVfs) int sqlite3_extended_result_codes(sqlite3*, int onoff) void SQLITE_TRANSIENT(void *) enum: SQLITE_OK SQLITE_DONE SQLITE_ROW SQLITE_NULL SQLITE_OPEN_READWRITE SQLITE_OPEN_READONLY from contextlib import ExitStack import apsw import os from .logging import logging # Ensure use of custom logger class import itertools import sys log = logging.getLogger(__name__) # Column types cdef int _INTEGER = 1 cdef int _BLOB = 2 cdef int _TIME = 3 # Make column types available as Python objects INTEGER = _INTEGER BLOB = _BLOB TIME = _TIME # Integer length codes cdef uint8_t INT8 = 127 cdef uint8_t INT16 = 126 cdef uint8_t INT32 = 125 cdef uint8_t INT64 = 124 # Maximum size of BLOBs MAX_BLOB_SIZE = 4096 # Scale factor from time floats to integers. 1e9 would give nanosecond # resolution but introduces rounding errors, so we use 1 << 30 (which is # ~1.074e9, i.e. we get a little more precision than nanoseconds). cdef double time_scale = 1 << 30 cdef inline int fwrite(const_void * buf, size_t len_, FILE * fp) except -1: '''Call libc's fwrite() and raise exception on failure''' if fwrite_c(buf, len_, 1, fp) != 1: raise_from_errno(IOError) return 0 cdef inline int fread(void * buf, size_t len_, FILE * fp) except -1: '''Call libc's fread() and raise exception on failure''' if fread_c(buf, len_, 1, fp) != 1: raise_from_errno(IOError) return 0 cdef free(void * ptr): '''Call libc.free() This is a Python wrapper, so that we can call free in e.g. a lambda expression. ''' free_c(ptr) return None cdef int raise_from_errno(err_class=OSError) except -1: '''Raise OSError for current errno value''' raise err_class(errno, PyUnicode_FromString(strerror(errno))) cdef int fclose(FILE * fp) except -1: '''Call libc.fclose() and raise exception on failure''' cdef ssize_t off # Explicitly flush data that needs to be written. 
This is # important, so that we can safely reposition the fd position # below (which is necessary in case there is cached input data) if fflush(fp) != 0: raise_from_errno() # Reposition FD to position of FILE*, otherwise next read from FD will miss # data currently in stream buffer. It seems that call to fflush() achieves # the same thing, but this does not seem to be documented so we don't rely # on it. off = ftell(fp) if off == -1: raise_from_errno() if lseek(fileno(fp), off, SEEK_SET) != off: raise_from_errno() if fclose_c(fp) != 0: raise_from_errno() return 0 cdef void * calloc(size_t cnt, size_t size) except NULL: '''Call libc.calloc and raise exception on failure''' cdef void * ptr ptr = calloc_c(cnt, size) if ptr is NULL: PyErr_NoMemory() return ptr cdef int SQLITE_CHECK_RC(int rc, int success, sqlite3* db) except -1: '''Raise correct exception if *rc* != *success*''' if rc != success: exc = apsw.exceptionfor(rc) raise type(exc)(PyUnicode_FromString(sqlite3_errmsg(db))) return 0 cdef int prep_columns(columns, int** col_types_p, int** col_args_p) except -1: '''Allocate col_types and col_args, return number of columns Both arrays are allocated dynamically, caller has to ensure that they're freed again. ''' cdef size_t col_count cdef int *col_types cdef int *col_args col_count = len(columns) # guaranteed positive col_types = < int *> calloc(col_count, sizeof(int)) col_args = < int *> calloc(col_count, sizeof(int)) # Initialize col_args and col_types for i in range(col_count): if columns[i][1] not in (BLOB, INTEGER, TIME): raise ValueError("Invalid type for column %d" % i) col_types[i] = columns[i][1] if len(columns[i]) == 3: col_args[i] = columns[i][2] else: col_args[i] = 0 col_types_p[0] = col_types col_args_p[0] = col_args # We can safely assume that this fits into an int return col_count cdef FILE* dup_to_fp(fh, const_char* mode) except NULL: '''Duplicate fd from *fh* and open as FILE*''' cdef int fd fd = dup(fh.fileno()) if fd == -1: raise_from_errno() fp = fdopen(fd, mode) if fp == NULL: raise_from_errno() return fp def check_sqlite(): '''Check if deltadump and apsw module use compatible SQLite code. This functions look at versions and compile options of the SQLite code used by the *apsw* module and the *deltadump* module. If they do not match exactly, a `RuntimeError` is raised. Only if both modules use the same SQLite version compiled with the same options can the database object be shared between *apsw* and *deltadump*. ''' cdef const_char *buf apsw_sqlite_version = apsw.sqlitelibversion() s3ql_sqlite_version = PyUnicode_FromString(sqlite3_libversion()) log.debug('apsw sqlite version: %s, ' 's3ql sqlite version: %s', apsw_sqlite_version, s3ql_sqlite_version) if apsw_sqlite_version != s3ql_sqlite_version: raise RuntimeError('SQLite version mismatch between APSW and S3QL ' '(%s vs %s)' % (apsw_sqlite_version, s3ql_sqlite_version)) apsw_sqlite_options = set(apsw.compile_options) s3ql_sqlite_options = set() for idx in itertools.count(0): buf = sqlite3_compileoption_get(idx) if buf is NULL: break s3ql_sqlite_options.add(PyUnicode_FromString(buf)) log.debug('apsw sqlite compile options: %s, ' 's3ql sqlite compile options: %s', apsw_sqlite_options, s3ql_sqlite_options) if apsw_sqlite_options != s3ql_sqlite_options: raise RuntimeError('SQLite code used by APSW was compiled with different ' 'options than SQLite code available to S3QL! 
' 'Differing settings: + %s, - %s' % (apsw_sqlite_options - s3ql_sqlite_options, s3ql_sqlite_options - apsw_sqlite_options)) def dump_table(table, order, columns, db, fh): '''Dump *columns* of *table* into *fh* *order* specifies the order in which the rows are written and must be a string that can be inserted after the "ORDER BY" clause in an SQL SELECT statement. *db* is an `s3ql.Connection` instance for the database. *columns* must a list of 3-tuples, one for each column that should be stored. The first element of the tuple must contain the column name and the second element the type of data stored in the column (`INTEGER`, `TIME` or `BLOB`). Times will be converted to nanosecond integers. For integers and seconds, the third tuple element specifies the expected change of the values between rows. For blobs it can be either zero (indicating variable length columns) or an integer specifying the length of the column values in bytes. This function will open a separate connection to the database, so the *db* connection should not be in EXCLUSIVE locking mode. (Using a separate connection avoids the requirement on the *apsw* and *deltadump* modules be linked against against binary compatible SQLite libraries). ''' cdef sqlite3 *sqlite3_db cdef sqlite3_stmt *stmt cdef int *col_types cdef int *col_args cdef int col_count, rc, i cdef size_t len_ cdef int64_t *int64_prev cdef int64_t int64, tmp cdef FILE *fp cdef const_void *buf cdef int64_t row_count if db.file == ':memory:': raise ValueError("Can't access in-memory databases") with ExitStack() as cm: # Get SQLite connection log.debug('Opening connection to %s', db.file) dbfile_b = db.file.encode(sys.getfilesystemencoding(), 'surrogateescape') SQLITE_CHECK_RC(sqlite3_open_v2(dbfile_b, &sqlite3_db, SQLITE_OPEN_READONLY, NULL), SQLITE_OK, sqlite3_db) cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_close(sqlite3_db), SQLITE_OK, sqlite3_db)) SQLITE_CHECK_RC(sqlite3_extended_result_codes(sqlite3_db, 1), SQLITE_OK, sqlite3_db) # Get FILE* for buffered reading from *fh* fp = dup_to_fp(fh, b'wb') cm.callback(lambda: fclose(fp)) # Allocate col_args and col_types col_count = prep_columns(columns, &col_types, &col_args) cm.callback(lambda: free(col_args)) cm.callback(lambda: free(col_types)) # Allocate int64_prev int64_prev = calloc( len(columns), sizeof(int64_t)) cm.callback(lambda: free(int64_prev)) # Prepare statement col_names = [ x[0] for x in columns ] query = ("SELECT %s FROM %s ORDER BY %s " % (', '.join(col_names), table, order)).encode('utf-8') SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &stmt, NULL), SQLITE_OK, sqlite3_db) cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(stmt), SQLITE_OK, sqlite3_db)) row_count = db.get_val("SELECT COUNT(rowid) FROM %s" % table) log.debug('dump_table(%s): writing %d rows', table, row_count) write_integer(row_count, fp) # Iterate through rows while True: rc = sqlite3_step(stmt) if rc == SQLITE_DONE: break SQLITE_CHECK_RC(rc, SQLITE_ROW, sqlite3_db) for i in range(col_count): if sqlite3_column_type(stmt, i) is SQLITE_NULL: raise ValueError("Can't dump NULL values") if col_types[i] == _INTEGER: int64 = sqlite3_column_int64(stmt, i) tmp = int64 int64 -= int64_prev[i] + col_args[i] int64_prev[i] = tmp write_integer(int64, fp) elif col_types[i] == _TIME: int64 = (sqlite3_column_double(stmt, i) * time_scale) tmp = int64 int64 -= int64_prev[i] + col_args[i] int64_prev[i] = tmp write_integer(int64, fp) elif col_types[i] == _BLOB: buf = sqlite3_column_blob(stmt, i) rc = sqlite3_column_bytes(stmt, i) if rc > 
MAX_BLOB_SIZE: raise ValueError('Can not dump BLOB of size %d (max: %d)', rc, MAX_BLOB_SIZE) # Safe to cast now len_ = rc if col_args[i] == 0: write_integer(rc - int64_prev[i], fp) int64_prev[i] = rc elif rc != col_args[i]: raise ValueError("Length %d != %d in column %d" % (len_, col_args[i], i)) if len_ != 0: fwrite(buf, len_, fp) def load_table(table, columns, db, fh, trx_rows=5000): '''Load *columns* of *table* from *fh* *db* is an `s3ql.Connection` instance for the database. *columns* must be the same list of 3-tuples that was passed to `dump_table` when creating the dump stored in *fh*. This function will open a separate connection to the database, so the *db* connection should not be in EXCLUSIVE locking mode. (Using a separate connection avoids the requirement on the *apsw* and *deltadump* modules be linked against against binary compatible SQLite libraries). When writing into the table, a new transaction will be started every *trx_rows* rows. ''' cdef sqlite3 *sqlite3_db cdef sqlite3_stmt *stmt cdef sqlite3_stmt *begin_stmt cdef sqlite3_stmt *commit_stmt cdef int *col_types cdef int *col_args cdef int col_count, rc, len_, i, j cdef int64_t *int64_prev cdef FILE *fp cdef void *buf cdef int64_t row_count, int64, tmp if db.file == ':memory:': raise ValueError("Can't access in-memory databases") with ExitStack() as cm: # Get SQLite connection log.debug('Opening connection to %s', db.file) dbfile_b = db.file.encode(sys.getfilesystemencoding(), 'surrogateescape') SQLITE_CHECK_RC(sqlite3_open_v2(dbfile_b, &sqlite3_db, SQLITE_OPEN_READWRITE, NULL), SQLITE_OK, sqlite3_db) cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_close(sqlite3_db), SQLITE_OK, sqlite3_db)) SQLITE_CHECK_RC(sqlite3_extended_result_codes(sqlite3_db, 1), SQLITE_OK, sqlite3_db) # Copy settings for pragma in ('synchronous', 'foreign_keys'): val = db.get_val('PRAGMA %s' % pragma) cmd = ('PRAGMA %s = %s' % (pragma, val)).encode('utf-8') SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, cmd, -1, &stmt, NULL), SQLITE_OK, sqlite3_db) try: rc = sqlite3_step(stmt) if rc == SQLITE_ROW: rc = sqlite3_step(stmt) SQLITE_CHECK_RC(rc, SQLITE_DONE, sqlite3_db) finally: SQLITE_CHECK_RC(sqlite3_finalize(stmt), SQLITE_OK, sqlite3_db) # Get FILE* for buffered reading from *fh* fp = dup_to_fp(fh, b'rb') cm.callback(lambda: fclose(fp)) # Allocate col_args and col_types col_count = prep_columns(columns, &col_types, &col_args) cm.callback(lambda: free(col_args)) cm.callback(lambda: free(col_types)) # Allocate int64_prev int64_prev = calloc( len(columns), sizeof(int64_t)) cm.callback(lambda: free(int64_prev)) # Prepare INSERT statement col_names = [ x[0] for x in columns ] query = ("INSERT INTO %s (%s) VALUES(%s)" % (table, ', '.join(col_names), ', '.join('?' 
* col_count))).encode('utf-8') SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &stmt, NULL), SQLITE_OK, sqlite3_db) cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(stmt), SQLITE_OK, sqlite3_db)) # Prepare BEGIN statement query = b'BEGIN TRANSACTION' SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &begin_stmt, NULL), SQLITE_OK, sqlite3_db) cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(begin_stmt), SQLITE_OK, sqlite3_db)) # Prepare COMMIT statement query = b'COMMIT TRANSACTION' SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &commit_stmt, NULL), SQLITE_OK, sqlite3_db) cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(commit_stmt), SQLITE_OK, sqlite3_db)) buf = calloc(MAX_BLOB_SIZE, 1) cm.callback(lambda: free(buf)) read_integer(&row_count, fp) log.debug('load_table(%s): reading %d rows', table, row_count) # Start transaction SQLITE_CHECK_RC(sqlite3_step(begin_stmt), SQLITE_DONE, sqlite3_db) cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_step(commit_stmt), SQLITE_DONE, sqlite3_db)) SQLITE_CHECK_RC(sqlite3_reset(begin_stmt), SQLITE_OK, sqlite3_db) # Iterate through rows for i in range(row_count): for j in range(col_count): if col_types[j] == _INTEGER: read_integer(&int64, fp) int64 += col_args[j] + int64_prev[j] int64_prev[j] = int64 SQLITE_CHECK_RC(sqlite3_bind_int64(stmt, j + 1, int64), SQLITE_OK, sqlite3_db) if col_types[j] == _TIME: read_integer(&int64, fp) int64 += col_args[j] + int64_prev[j] int64_prev[j] = int64 # Cast is safe, we know that the integer was converted from # double at dump time. SQLITE_CHECK_RC(sqlite3_bind_double(stmt, j + 1, int64 / time_scale), SQLITE_OK, sqlite3_db) elif col_types[j] == _BLOB: if col_args[j] == 0: read_integer(&int64, fp) tmp = int64_prev[j] + int64 if tmp < 0 or tmp > INT_MAX: raise RuntimeError('Corrupted input') len_ = tmp int64_prev[j] = tmp else: len_ = col_args[j] if len_ > MAX_BLOB_SIZE: raise RuntimeError('BLOB too large to read (%d vs %d)', len_, MAX_BLOB_SIZE) if len_ > 0: fread(buf, len_, fp) SQLITE_CHECK_RC(sqlite3_bind_blob(stmt, j + 1, buf, len_, SQLITE_TRANSIENT), SQLITE_OK, sqlite3_db) SQLITE_CHECK_RC(sqlite3_step(stmt), SQLITE_DONE, sqlite3_db) SQLITE_CHECK_RC(sqlite3_reset(stmt), SQLITE_OK, sqlite3_db) # Commit every once in a while if i % trx_rows == 0: # This isn't 100% ok -- if we have an exception in step(begin_stmt), # we the cleanup handler will execute the commit statement again # without an active transaction. SQLITE_CHECK_RC(sqlite3_step(commit_stmt), SQLITE_DONE, sqlite3_db) SQLITE_CHECK_RC(sqlite3_step(begin_stmt), SQLITE_DONE, sqlite3_db) SQLITE_CHECK_RC(sqlite3_reset(commit_stmt), SQLITE_OK, sqlite3_db) SQLITE_CHECK_RC(sqlite3_reset(begin_stmt), SQLITE_OK, sqlite3_db) cdef inline int write_integer(int64_t int64, FILE * fp) except -1: '''Write *int64* into *fp*, using as little space as possible Return the number of bytes written, or -1 on error. ''' # This is meant to be a `uint8_t`. However, due to integer promotion # any expression always has at least type `int`. So we would need an # explicit cast for every assignment to this value. It's easier to # declare it as `int` instead, and just cast once at the end. 
See also # https://stackoverflow.com/questions/32574514/ cdef unsigned int8 cdef uint8_t int8_real cdef size_t len_ cdef uint64_t uint64 if int64 < 0: uint64 = -int64 int8 = 0x80 # Highest bit set else: uint64 = int64 int8 = 0 if uint64 < 0x80 and uint64 not in (INT8, INT16, INT32, INT64): len_ = 0 int8 += uint64 elif uint64 < UINT8_MAX: len_ = 1 int8 += INT8 elif uint64 < UINT16_MAX: len_ = 2 int8 += INT16 elif uint64 < UINT32_MAX: len_ = 4 int8 += INT32 else: len_ = 8 int8 += INT64 # Cast int8_real = int8 fwrite(&int8_real, 1, fp) if len_ != 0: uint64 = htole64(uint64) fwrite(&uint64, len_, fp) # len <= 8, safe to cast return len_ + 1 cdef inline int read_integer(int64_t * out, FILE * fp) except -1: '''Read integer written using `write_integer` from *fp* Return the number of bytes read, or -1 on error. ''' cdef uint8_t int8 cdef size_t len_ cdef uint64_t uint64 cdef char negative fread(&int8, 1, fp) if int8 & 0x80 != 0: negative = 1 # Need to cast again due to integer promotion int8 = (int8 & (~ 0x80)) else: negative = 0 if int8 == INT8: len_ = 1 elif int8 == INT16: len_ = 2 elif int8 == INT32: len_ = 4 elif int8 == INT64: len_ = 8 else: len_ = 0 uint64 = int8 if len_ != 0: uint64 = 0 fread(&uint64, len_, fp) uint64 = le64toh(uint64) if negative == 1: out[0] = - < int64_t > uint64 else: out[0] = < int64_t > uint64 # len <= 8, safe to cast return len_ + 1 s3ql-2.26/src/s3ql/adm.py0000644000175000017500000003133613003765620016603 0ustar nikrationikratio00000000000000''' adm.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from .logging import logging, QuietError, setup_logging from . import CURRENT_FS_REV, REV_VER_MAP from .backends.comprenc import ComprencBackend from .database import Connection from .deltadump import TIME, INTEGER from .common import (get_backend_cachedir, get_seq_no, is_mounted, get_backend, load_params, save_params) from .metadata import dump_and_upload_metadata, download_metadata from . import metadata from .parse_args import ArgumentParser from datetime import datetime as Datetime from getpass import getpass from contextlib import contextmanager import os import shutil import functools import sys import textwrap import time log = logging.getLogger(__name__) def parse_args(args): '''Parse command line''' parser = ArgumentParser( description="Manage S3QL File Systems.", epilog=textwrap.dedent('''\ Hint: run `%(prog)s --help` to get help on the additional arguments that the different actions take.''')) pparser = ArgumentParser(add_help=False, epilog=textwrap.dedent('''\ Hint: run `%(prog)s --help` to get help on other available actions and optional arguments that can be used with all actions.''')) pparser.add_storage_url() subparsers = parser.add_subparsers(metavar='', dest='action', help='may be either of') subparsers.add_parser("passphrase", help="change file system passphrase", parents=[pparser]) subparsers.add_parser("clear", help="delete file system and all data", parents=[pparser]) subparsers.add_parser("download-metadata", help="Interactively download metadata backups. 
" "Use only if you know what you are doing.", parents=[pparser]) sparser = subparsers.add_parser("upgrade", help="upgrade file system to newest revision", parents=[pparser]) sparser.add_argument("--threads", type=int, default=20, help='Number of threads to use') parser.add_debug() parser.add_quiet() parser.add_log() parser.add_authfile() parser.add_backend_options() parser.add_cachedir() parser.add_version() options = parser.parse_args(args) return options def main(args=None): '''Change or show S3QL file system parameters''' if args is None: args = sys.argv[1:] options = parse_args(args) setup_logging(options) # Check if fs is mounted on this computer # This is not foolproof but should prevent common mistakes if is_mounted(options.storage_url): raise QuietError('Can not work on mounted file system.') if options.action == 'clear': with get_backend(options, raw=True) as backend: return clear(backend, options) with get_backend(options) as backend: if options.action == 'upgrade': return upgrade(backend, get_backend_cachedir(options.storage_url, options.cachedir)) elif options.action == 'passphrase': return change_passphrase(backend) elif options.action == 'download-metadata': return download_metadata_cmd(backend, options.storage_url) def download_metadata_cmd(backend, storage_url): '''Download old metadata backups''' backups = sorted(backend.list('s3ql_metadata')) if not backups: raise QuietError('No metadata backups found.') log.info('The following backups are available:') log.info('%3s %-23s %-15s', 'No', 'Name', 'Date') for (i, name) in enumerate(backups): try: params = backend.lookup(name) except: log.error('Error retrieving information about %s, skipping', name) continue if 'last-modified' in params: date = Datetime.fromtimestamp(params['last-modified']).strftime('%Y-%m-%d %H:%M:%S') else: # (metadata might from an older fs revision) date = '(unknown)' log.info('%3d %-23s %-15s', i, name, date) name = None while name is None: buf = input('Enter no to download: ') try: name = backups[int(buf.strip())] except: log.warning('Invalid input') cachepath = get_backend_cachedir(storage_url, '.') for i in ('.db', '.params'): if os.path.exists(cachepath + i): raise QuietError('%s already exists, aborting.' % cachepath + i) param = backend.lookup(name) download_metadata(backend, cachepath + ".db", name) # Raise sequence number so that fsck.s3ql actually uses the # downloaded backup seq_nos = [ int(x[len('s3ql_seq_no_'):]) for x in backend.list('s3ql_seq_no_') ] param['seq_no'] = max(seq_nos) + 1 save_params(cachepath, param) def change_passphrase(backend): '''Change file system passphrase''' if not isinstance(backend, ComprencBackend) and backend.passphrase: raise QuietError('File system is not encrypted.') data_pw = backend.passphrase if sys.stdin.isatty(): wrap_pw = getpass("Enter new encryption password: ") if not wrap_pw == getpass("Confirm new encryption password: "): raise QuietError("Passwords don't match") else: wrap_pw = sys.stdin.readline().rstrip() wrap_pw = wrap_pw.encode('utf-8') backend.passphrase = wrap_pw backend['s3ql_passphrase'] = data_pw backend['s3ql_passphrase_bak1'] = data_pw backend['s3ql_passphrase_bak2'] = data_pw backend['s3ql_passphrase_bak3'] = data_pw backend.passphrase = data_pw def clear(backend, options): print('I am about to delete all data in %s.' 
% backend, 'This includes any S3QL file systems as well as any other stored objects.', 'Please enter "yes" to continue.', '> ', sep='\n', end='') sys.stdout.flush() if sys.stdin.readline().strip().lower() != 'yes': raise QuietError() log.info('Deleting...') cachepath = get_backend_cachedir(options.storage_url, options.cachedir) for suffix in ('.db', '.params'): name = cachepath + suffix if os.path.exists(name): os.unlink(name) name = cachepath + '-cache' if os.path.exists(name): shutil.rmtree(name) backend.clear() log.info('File system deleted.') log.info('Note: it may take a while for the removals to propagate through the backend.') def get_old_rev_msg(rev, prog): return textwrap.dedent('''\ The last S3QL version that supported this file system revision was %(version)s. To run this version's %(prog)s, proceed along the following steps: $ wget http://s3ql.googlecode.com/files/s3ql-%(version)s.tar.bz2 \ || wget https://bitbucket.org/nikratio/s3ql/downloads/s3ql-%(version)s.tar.bz2 $ tar xjf s3ql-%(version)s.tar.bz2 $ (cd s3ql-%(version)s; ./setup.py build_ext --inplace) $ s3ql-%(version)s/bin/%(prog)s ''' % { 'version': REV_VER_MAP[rev], 'prog': prog }) def upgrade(backend, cachepath): '''Upgrade file system to newest revision''' log.info('Getting file system parameters..') # Check for cached metadata db = None seq_no = get_seq_no(backend) if os.path.exists(cachepath + '.params'): param = load_params(cachepath) if param['seq_no'] < seq_no: log.info('Ignoring locally cached metadata (outdated).') param = backend.lookup('s3ql_metadata') elif param['seq_no'] > seq_no: print('File system not unmounted cleanly, need to run fsck before upgrade.') print(get_old_rev_msg(param['revision'], 'fsck.s3ql')) raise QuietError() else: log.info('Using cached metadata.') db = Connection(cachepath + '.db') else: param = backend.lookup('s3ql_metadata') # Check for unclean shutdown if param['seq_no'] < seq_no: print(textwrap.fill(textwrap.dedent('''\ Backend reports that fs is still mounted. If this is not the case, the file system may have not been unmounted cleanly or the data from the most-recent mount may have not yet propagated through the backend. In the later case, waiting for a while should fix the problem, in the former case you should try to run fsck on the computer where the file system has been mounted most recently. '''))) print(get_old_rev_msg(param['revision'], 'fsck.s3ql')) raise QuietError() # Check that the fs itself is clean if param['needs_fsck']: print('File system is damaged, need to run fsck before upgrade.') print(get_old_rev_msg(param['revision'], 'fsck.s3ql')) raise QuietError() # Check revision if param['revision'] < CURRENT_FS_REV-1: print(textwrap.dedent(''' File system revision too old to upgrade! You need to use an older S3QL version to upgrade to a more recent revision before you can use this version to upgrade to the newest revision. ''')) print(get_old_rev_msg(param['revision'] + 1, 's3qladm')) raise QuietError() elif param['revision'] >= CURRENT_FS_REV: print('File system already at most-recent revision') return print(textwrap.dedent(''' I am about to update the file system to the newest revision. You will not be able to access the file system with any older version of S3QL after this operation. You should make very sure that this command is not interrupted and that no one else tries to mount, fsck or upgrade the file system at the same time. 
''')) print('Please enter "yes" to continue.', '> ', sep='\n', end='') sys.stdout.flush() if sys.stdin.readline().strip().lower() != 'yes': raise QuietError() if not db: with monkeypatch_metadata_retrieval(): db = download_metadata(backend, cachepath + '.db') log.info('Upgrading from revision %d to %d...', param['revision'], CURRENT_FS_REV) param['revision'] = CURRENT_FS_REV param['last-modified'] = time.time() param['seq_no'] += 1 # Upgrade for name in ('atime', 'mtime', 'ctime'): db.execute('ALTER TABLE inodes ADD COLUMN {time}_ns ' 'INT NOT NULL DEFAULT 0'.format(time=name)) db.execute('UPDATE inodes SET {time}_ns = {time} * 1e9'.format(time=name)) dump_and_upload_metadata(backend, db, param) backend['s3ql_seq_no_%d' % param['seq_no']] = b'Empty' # Declare local metadata as outdated so that it won't be used. It still # contains the old [acm]time columns which are NON NULL, and would prevent # us from inserting new rows. param['seq_no'] = 0 save_params(cachepath, param) print('File system upgrade complete.') @contextmanager def monkeypatch_metadata_retrieval(): create_tables_bak = metadata.create_tables @functools.wraps(metadata.create_tables) def create_tables(conn): create_tables_bak(conn) conn.execute('DROP TABLE inodes') conn.execute(""" CREATE TABLE inodes ( id INTEGER PRIMARY KEY AUTOINCREMENT, uid INT NOT NULL, gid INT NOT NULL, mode INT NOT NULL, mtime REAL NOT NULL, atime REAL NOT NULL, ctime REAL NOT NULL, refcount INT NOT NULL, size INT NOT NULL DEFAULT 0, rdev INT NOT NULL DEFAULT 0, locked BOOLEAN NOT NULL DEFAULT 0 )""") metadata.create_tables = create_tables DUMP_SPEC_bak = metadata.DUMP_SPEC[2] metadata.DUMP_SPEC[2] = ('inodes', 'id', (('id', INTEGER, 1), ('uid', INTEGER), ('gid', INTEGER), ('mode', INTEGER), ('mtime', TIME), ('atime', TIME), ('ctime', TIME), ('size', INTEGER), ('rdev', INTEGER), ('locked', INTEGER), ('refcount', INTEGER))) try: yield finally: metadata.DUMP_SPEC[2] = DUMP_SPEC_bak metadata.create_tables = create_tables_bak if __name__ == '__main__': main(sys.argv[1:]) s3ql-2.26/src/s3ql/deltadump.c0000664000175000017500000203201213246754344017621 0ustar nikrationikratio00000000000000/* Generated by Cython 0.25.2 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000) #error Cython requires Python 2.6+ or Python 3.2+. 
#else #define CYTHON_ABI "0_25_2" #include #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x03030000 || (PY_MAJOR_VERSION == 2 && PY_VERSION_HEX >= 0x02070000) #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define 
CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject **args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : PyInstanceMethod_New(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_ERR(f_index, lineno, Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__s3ql__deltadump #define __PYX_HAVE_API__s3ql__deltadump #include #include #include #include #include #include #include #include "stdint.h" #include "stdio.h" #include "endian_indep.h" #include "sqlite3.h" #ifdef _OPENMP #include #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define 
__PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) #if defined (__cplusplus) && __cplusplus >= 201103L #include #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) && defined (_M_X64) #define __Pyx_sst_abs(value) _abs64(value) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? -value : value) #endif static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) #if PY_MAJOR_VERSION < 3 static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #else #define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen #endif #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c)); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || 
(__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; static const char *__pyx_f[] = { "src/s3ql/deltadump.pyx", "type.pxd", }; /*--- Type declarations ---*/ struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table; struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table; struct __pyx_opt_args_4s3ql_9deltadump_raise_from_errno; /* "s3ql/deltadump.pyx":136 * return None * * cdef int raise_from_errno(err_class=OSError) except -1: # <<<<<<<<<<<<<< * '''Raise OSError for current errno value''' * */ struct __pyx_opt_args_4s3ql_9deltadump_raise_from_errno { int __pyx_n; PyObject *err_class; }; /* "s3ql/deltadump.pyx":280 * s3ql_sqlite_options - apsw_sqlite_options)) * * def dump_table(table, order, columns, db, fh): # <<<<<<<<<<<<<< * '''Dump *columns* of *table* into *fh* * */ struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table { PyObject_HEAD int *__pyx_v_col_args; int *__pyx_v_col_types; FILE *__pyx_v_fp; int64_t *__pyx_v_int64_prev; sqlite3 *__pyx_v_sqlite3_db; sqlite3_stmt *__pyx_v_stmt; }; /* "s3ql/deltadump.pyx":401 * fwrite(buf, len_, fp) * * def load_table(table, columns, db, fh, trx_rows=5000): # <<<<<<<<<<<<<< * '''Load *columns* of *table* from *fh* * */ struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table { PyObject_HEAD sqlite3_stmt *__pyx_v_begin_stmt; void *__pyx_v_buf; int *__pyx_v_col_args; int *__pyx_v_col_types; sqlite3_stmt *__pyx_v_commit_stmt; FILE *__pyx_v_fp; int64_t *__pyx_v_int64_prev; sqlite3 *__pyx_v_sqlite3_db; sqlite3_stmt *__pyx_v_stmt; }; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) 
__Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #endif /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = PyThreadState_GET(); #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject 
*type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* GetModuleGlobalName.proto */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* PyObjectCallNoArg.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); #else #define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL) #endif /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* IncludeStringH.proto */ #include /* BytesEquals.proto */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /* UnicodeEquals.proto */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /* PyObjectLookupSpecial.proto */ #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x02070000 static CYTHON_INLINE PyObject* 
__Pyx_PyObject_LookupSpecial(PyObject* obj, PyObject* attr_name) { PyObject *res; PyTypeObject *tp = Py_TYPE(obj); #if PY_MAJOR_VERSION < 3 if (unlikely(PyInstance_Check(obj))) return __Pyx_PyObject_GetAttrStr(obj, attr_name); #endif res = _PyType_Lookup(tp, attr_name); if (likely(res)) { descrgetfunc f = Py_TYPE(res)->tp_descr_get; if (!f) { Py_INCREF(res); } else { res = f(res, obj, (PyObject *)tp); } } else { PyErr_SetObject(PyExc_AttributeError, attr_name); } return res; } #else #define __Pyx_PyObject_LookupSpecial(o,n) __Pyx_PyObject_GetAttrStr(o,n) #endif /* FetchCommonType.proto */ static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type); /* CythonFunction.proto */ #define __Pyx_CyFunction_USED 1 #include #define __Pyx_CYFUNCTION_STATICMETHOD 0x01 #define __Pyx_CYFUNCTION_CLASSMETHOD 0x02 #define __Pyx_CYFUNCTION_CCLASS 0x04 #define __Pyx_CyFunction_GetClosure(f)\ (((__pyx_CyFunctionObject *) (f))->func_closure) #define __Pyx_CyFunction_GetClassObj(f)\ (((__pyx_CyFunctionObject *) (f))->func_classobj) #define __Pyx_CyFunction_Defaults(type, f)\ ((type *)(((__pyx_CyFunctionObject *) (f))->defaults)) #define __Pyx_CyFunction_SetDefaultsGetter(f, g)\ ((__pyx_CyFunctionObject *) (f))->defaults_getter = (g) typedef struct { PyCFunctionObject func; #if PY_VERSION_HEX < 0x030500A0 PyObject *func_weakreflist; #endif PyObject *func_dict; PyObject *func_name; PyObject *func_qualname; PyObject *func_doc; PyObject *func_globals; PyObject *func_code; PyObject *func_closure; PyObject *func_classobj; void *defaults; int defaults_pyobjects; int flags; PyObject *defaults_tuple; PyObject *defaults_kwdict; PyObject *(*defaults_getter)(PyObject *); PyObject *func_annotations; } __pyx_CyFunctionObject; static PyTypeObject *__pyx_CyFunctionType = 0; #define __Pyx_CyFunction_NewEx(ml, flags, qualname, self, module, globals, code)\ __Pyx_CyFunction_New(__pyx_CyFunctionType, ml, flags, qualname, self, module, globals, code) static PyObject *__Pyx_CyFunction_New(PyTypeObject *, PyMethodDef *ml, int flags, PyObject* qualname, PyObject *self, PyObject *module, PyObject *globals, PyObject* code); static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *m, size_t size, int pyobjects); static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m, PyObject *tuple); static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m, PyObject *dict); static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m, PyObject *dict); static int __pyx_CyFunction_init(void); /* ListCompAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, 
value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* SwapException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, long intval, int inplace); #else #define __Pyx_PyInt_EqObjC(op1, op2, intval, inplace)\ PyObject_RichCompare(op1, op2, Py_EQ) #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int64_t(int64_t value); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE int64_t __Pyx_PyInt_As_int64_t(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE size_t __Pyx_PyInt_As_size_t(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* PyIdentifierFromString.proto */ #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif /* ModuleImport.proto */ static PyObject *__Pyx_ImportModule(const char *name); /* TypeImport.proto */ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /* Module declarations from 'cpython.long' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'cpython' */ /* Module declarations from 'cpython.object' */ /* Module declarations from 
'cpython.exc' */ /* Module declarations from 'libc.errno' */ /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'libc.stdint' */ /* Module declarations from 'posix.types' */ /* Module declarations from 'posix.unistd' */ /* Module declarations from 's3ql.deltadump' */ static PyTypeObject *__pyx_ptype_4s3ql_9deltadump___pyx_scope_struct__dump_table = 0; static PyTypeObject *__pyx_ptype_4s3ql_9deltadump___pyx_scope_struct_1_load_table = 0; static int __pyx_v_4s3ql_9deltadump__INTEGER; static int __pyx_v_4s3ql_9deltadump__BLOB; static int __pyx_v_4s3ql_9deltadump__TIME; static uint8_t __pyx_v_4s3ql_9deltadump_INT8; static uint8_t __pyx_v_4s3ql_9deltadump_INT16; static uint8_t __pyx_v_4s3ql_9deltadump_INT32; static uint8_t __pyx_v_4s3ql_9deltadump_INT64; static double __pyx_v_4s3ql_9deltadump_time_scale; static CYTHON_INLINE int __pyx_f_4s3ql_9deltadump_fwrite(const void *, size_t, FILE *); /*proto*/ static CYTHON_INLINE int __pyx_f_4s3ql_9deltadump_fread(void *, size_t, FILE *); /*proto*/ static PyObject *__pyx_f_4s3ql_9deltadump_free(void *); /*proto*/ static int __pyx_f_4s3ql_9deltadump_raise_from_errno(struct __pyx_opt_args_4s3ql_9deltadump_raise_from_errno *__pyx_optional_args); /*proto*/ static int __pyx_f_4s3ql_9deltadump_fclose(FILE *); /*proto*/ static void *__pyx_f_4s3ql_9deltadump_calloc(size_t, size_t); /*proto*/ static int __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(int, int, sqlite3 *); /*proto*/ static int __pyx_f_4s3ql_9deltadump_prep_columns(PyObject *, int **, int **); /*proto*/ static FILE *__pyx_f_4s3ql_9deltadump_dup_to_fp(PyObject *, const char *); /*proto*/ static CYTHON_INLINE int __pyx_f_4s3ql_9deltadump_write_integer(int64_t, FILE *); /*proto*/ static CYTHON_INLINE int __pyx_f_4s3ql_9deltadump_read_integer(int64_t *, FILE *); /*proto*/ #define __Pyx_MODULE_NAME "s3ql.deltadump" int __pyx_module_is_main_s3ql__deltadump = 0; /* Implementation of 's3ql.deltadump' */ static PyObject *__pyx_builtin_OSError; static PyObject *__pyx_builtin_IOError; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_RuntimeError; static const char __pyx_k_i[] = "i"; static const char __pyx_k_j[] = "j"; static const char __pyx_k__4[] = ", "; static const char __pyx_k__9[] = "?"; static const char __pyx_k_cm[] = "cm"; static const char __pyx_k_db[] = "db"; static const char __pyx_k_fh[] = "fh"; static const char __pyx_k_fp[] = "fp"; static const char __pyx_k_os[] = "os"; static const char __pyx_k_rc[] = "rc"; static const char __pyx_k_buf[] = "buf"; static const char __pyx_k_cmd[] = "cmd"; static const char __pyx_k_idx[] = "idx"; static const char __pyx_k_len[] = "len_"; static const char __pyx_k_log[] = "log"; static const char __pyx_k_sys[] = "sys"; static const char __pyx_k_tmp[] = "tmp"; static const char __pyx_k_val[] = "val"; static const char __pyx_k_BLOB[] = "BLOB"; static const char __pyx_k_TIME[] = "TIME"; static const char __pyx_k_apsw[] = "apsw"; static const char __pyx_k_exit[] = "__exit__"; static const char __pyx_k_file[] = "file"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_name[] = "__name__"; static const char __pyx_k_stmt[] = "stmt"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_count[] = "count"; static const char __pyx_k_debug[] = "debug"; static const char __pyx_k_enter[] = "__enter__"; static const char __pyx_k_int64[] = "int64"; static const char __pyx_k_order[] = "order"; static const char __pyx_k_query[] = "query"; static const char 
__pyx_k_range[] = "range"; static const char __pyx_k_table[] = "table"; static const char __pyx_k_encode[] = "encode"; static const char __pyx_k_fileno[] = "fileno"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_memory[] = ":memory:"; static const char __pyx_k_pragma[] = "pragma"; static const char __pyx_k_INTEGER[] = "INTEGER"; static const char __pyx_k_IOError[] = "IOError"; static const char __pyx_k_OSError[] = "OSError"; static const char __pyx_k_columns[] = "columns"; static const char __pyx_k_get_val[] = "get_val"; static const char __pyx_k_logging[] = "logging"; static const char __pyx_k_PRAGMA_s[] = "PRAGMA %s"; static const char __pyx_k_callback[] = "callback"; static const char __pyx_k_col_args[] = "col_args"; static const char __pyx_k_dbfile_b[] = "dbfile_b"; static const char __pyx_k_trx_rows[] = "trx_rows"; static const char __pyx_k_ExitStack[] = "ExitStack"; static const char __pyx_k_col_count[] = "col_count"; static const char __pyx_k_col_names[] = "col_names"; static const char __pyx_k_col_types[] = "col_types"; static const char __pyx_k_getLogger[] = "getLogger"; static const char __pyx_k_itertools[] = "itertools"; static const char __pyx_k_row_count[] = "row_count"; static const char __pyx_k_PRAGMA_s_s[] = "PRAGMA %s = %s"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_begin_stmt[] = "begin_stmt"; static const char __pyx_k_contextlib[] = "contextlib"; static const char __pyx_k_dump_table[] = "dump_table"; static const char __pyx_k_int64_prev[] = "int64_prev"; static const char __pyx_k_load_table[] = "load_table"; static const char __pyx_k_sqlite3_db[] = "sqlite3_db"; static const char __pyx_k_commit_stmt[] = "commit_stmt"; static const char __pyx_k_synchronous[] = "synchronous"; static const char __pyx_k_RuntimeError[] = "RuntimeError"; static const char __pyx_k_check_sqlite[] = "check_sqlite"; static const char __pyx_k_exceptionfor[] = "exceptionfor"; static const char __pyx_k_foreign_keys[] = "foreign_keys"; static const char __pyx_k_MAX_BLOB_SIZE[] = "MAX_BLOB_SIZE"; static const char __pyx_k_s3ql_deltadump[] = "s3ql.deltadump"; static const char __pyx_k_Corrupted_input[] = "Corrupted input"; static const char __pyx_k_compile_options[] = "compile_options"; static const char __pyx_k_surrogateescape[] = "surrogateescape"; static const char __pyx_k_sqlitelibversion[] = "sqlitelibversion"; static const char __pyx_k_BEGIN_TRANSACTION[] = "BEGIN TRANSACTION"; static const char __pyx_k_COMMIT_TRANSACTION[] = "COMMIT TRANSACTION"; static const char __pyx_k_apsw_sqlite_options[] = "apsw_sqlite_options"; static const char __pyx_k_apsw_sqlite_version[] = "apsw_sqlite_version"; static const char __pyx_k_s3ql_sqlite_options[] = "s3ql_sqlite_options"; static const char __pyx_k_s3ql_sqlite_version[] = "s3ql_sqlite_version"; static const char __pyx_k_getfilesystemencoding[] = "getfilesystemencoding"; static const char __pyx_k_Can_t_dump_NULL_values[] = "Can't dump NULL values"; static const char __pyx_k_Length_d_d_in_column_d[] = "Length %d != %d in column %d"; static const char __pyx_k_Opening_connection_to_s[] = "Opening connection to %s"; static const char __pyx_k_INSERT_INTO_s_s_VALUES_s[] = "INSERT INTO %s (%s) VALUES(%s)"; static const char __pyx_k_dump_table_locals_lambda[] = "dump_table.."; static const char __pyx_k_load_table_locals_lambda[] = "load_table.."; static const char __pyx_k_Invalid_type_for_column_d[] = "Invalid type for column %d"; static const char __pyx_k_SELECT_COUNT_rowid_FROM_s[] = "SELECT 
COUNT(rowid) FROM %s"; static const char __pyx_k_SELECT_s_FROM_s_ORDER_BY_s[] = "SELECT %s FROM %s ORDER BY %s "; static const char __pyx_k_dump_table_s_writing_d_rows[] = "dump_table(%s): writing %d rows"; static const char __pyx_k_load_table_s_reading_d_rows[] = "load_table(%s): reading %d rows"; static const char __pyx_k_BLOB_too_large_to_read_d_vs_d[] = "BLOB too large to read (%d vs %d)"; static const char __pyx_k_home_nikratio_in_progress_s3ql[] = "/home/nikratio/in-progress/s3ql/src/s3ql/deltadump.pyx"; static const char __pyx_k_Can_not_dump_BLOB_of_size_d_max[] = "Can not dump BLOB of size %d (max: %d)"; static const char __pyx_k_SQLite_version_mismatch_between[] = "SQLite version mismatch between APSW and S3QL (%s vs %s)"; static const char __pyx_k_deltadump_pyx_this_file_is_part[] = "\ndeltadump.pyx - this file is part of S3QL (http://s3ql.googlecode.com)\n\nCopyright \302\251 2008 Nikolaus Rath \n\nThis program can be distributed under the terms of the GNU GPLv3.\n"; static const char __pyx_k_Can_t_access_in_memory_databases[] = "Can't access in-memory databases"; static const char __pyx_k_SQLite_code_used_by_APSW_was_com[] = "SQLite code used by APSW was compiled with different options than SQLite code available to S3QL! Differing settings: + %s, - %s"; static const char __pyx_k_apsw_sqlite_compile_options_s_s3[] = "apsw sqlite compile options: %s, s3ql sqlite compile options: %s"; static const char __pyx_k_apsw_sqlite_version_s_s3ql_sqlit[] = "apsw sqlite version: %s, s3ql sqlite version: %s"; static PyObject *__pyx_kp_b_BEGIN_TRANSACTION; static PyObject *__pyx_n_s_BLOB; static PyObject *__pyx_kp_u_BLOB_too_large_to_read_d_vs_d; static PyObject *__pyx_kp_b_COMMIT_TRANSACTION; static PyObject *__pyx_kp_u_Can_not_dump_BLOB_of_size_d_max; static PyObject *__pyx_kp_u_Can_t_access_in_memory_databases; static PyObject *__pyx_kp_u_Can_t_dump_NULL_values; static PyObject *__pyx_kp_u_Corrupted_input; static PyObject *__pyx_n_s_ExitStack; static PyObject *__pyx_kp_u_INSERT_INTO_s_s_VALUES_s; static PyObject *__pyx_n_s_INTEGER; static PyObject *__pyx_n_s_IOError; static PyObject *__pyx_kp_u_Invalid_type_for_column_d; static PyObject *__pyx_kp_u_Length_d_d_in_column_d; static PyObject *__pyx_n_s_MAX_BLOB_SIZE; static PyObject *__pyx_n_s_OSError; static PyObject *__pyx_kp_u_Opening_connection_to_s; static PyObject *__pyx_kp_u_PRAGMA_s; static PyObject *__pyx_kp_u_PRAGMA_s_s; static PyObject *__pyx_n_s_RuntimeError; static PyObject *__pyx_kp_u_SELECT_COUNT_rowid_FROM_s; static PyObject *__pyx_kp_u_SELECT_s_FROM_s_ORDER_BY_s; static PyObject *__pyx_kp_u_SQLite_code_used_by_APSW_was_com; static PyObject *__pyx_kp_u_SQLite_version_mismatch_between; static PyObject *__pyx_n_s_TIME; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_kp_u__4; static PyObject *__pyx_kp_u__9; static PyObject *__pyx_n_s_apsw; static PyObject *__pyx_kp_u_apsw_sqlite_compile_options_s_s3; static PyObject *__pyx_n_s_apsw_sqlite_options; static PyObject *__pyx_n_s_apsw_sqlite_version; static PyObject *__pyx_kp_u_apsw_sqlite_version_s_s3ql_sqlit; static PyObject *__pyx_n_s_begin_stmt; static PyObject *__pyx_n_s_buf; static PyObject *__pyx_n_s_callback; static PyObject *__pyx_n_s_check_sqlite; static PyObject *__pyx_n_s_cm; static PyObject *__pyx_n_s_cmd; static PyObject *__pyx_n_s_col_args; static PyObject *__pyx_n_s_col_count; static PyObject *__pyx_n_s_col_names; static PyObject *__pyx_n_s_col_types; static PyObject *__pyx_n_s_columns; static PyObject *__pyx_n_s_commit_stmt; static PyObject 
*__pyx_n_s_compile_options; static PyObject *__pyx_n_s_contextlib; static PyObject *__pyx_n_s_count; static PyObject *__pyx_n_s_db; static PyObject *__pyx_n_s_dbfile_b; static PyObject *__pyx_n_s_debug; static PyObject *__pyx_n_s_dump_table; static PyObject *__pyx_n_s_dump_table_locals_lambda; static PyObject *__pyx_kp_u_dump_table_s_writing_d_rows; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_enter; static PyObject *__pyx_n_s_exceptionfor; static PyObject *__pyx_n_s_exit; static PyObject *__pyx_n_s_fh; static PyObject *__pyx_n_s_file; static PyObject *__pyx_n_s_fileno; static PyObject *__pyx_n_u_foreign_keys; static PyObject *__pyx_n_s_fp; static PyObject *__pyx_n_s_getLogger; static PyObject *__pyx_n_s_get_val; static PyObject *__pyx_n_s_getfilesystemencoding; static PyObject *__pyx_kp_s_home_nikratio_in_progress_s3ql; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_idx; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_int64; static PyObject *__pyx_n_s_int64_prev; static PyObject *__pyx_n_s_itertools; static PyObject *__pyx_n_s_j; static PyObject *__pyx_n_s_len; static PyObject *__pyx_n_s_load_table; static PyObject *__pyx_n_s_load_table_locals_lambda; static PyObject *__pyx_kp_u_load_table_s_reading_d_rows; static PyObject *__pyx_n_s_log; static PyObject *__pyx_n_s_logging; static PyObject *__pyx_n_s_main; static PyObject *__pyx_kp_u_memory; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_order; static PyObject *__pyx_n_s_os; static PyObject *__pyx_n_s_pragma; static PyObject *__pyx_n_s_query; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_rc; static PyObject *__pyx_n_s_row_count; static PyObject *__pyx_n_s_s3ql_deltadump; static PyObject *__pyx_n_s_s3ql_sqlite_options; static PyObject *__pyx_n_s_s3ql_sqlite_version; static PyObject *__pyx_n_s_sqlite3_db; static PyObject *__pyx_n_s_sqlitelibversion; static PyObject *__pyx_n_s_stmt; static PyObject *__pyx_n_u_surrogateescape; static PyObject *__pyx_n_u_synchronous; static PyObject *__pyx_n_s_sys; static PyObject *__pyx_n_s_table; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_tmp; static PyObject *__pyx_n_s_trx_rows; static PyObject *__pyx_n_s_val; static PyObject *__pyx_pf_4s3ql_9deltadump_check_sqlite(CYTHON_UNUSED PyObject *__pyx_self); /* proto */ static PyObject *__pyx_lambda_funcdef_lambda(PyObject *__pyx_self); /* proto */ static PyObject *__pyx_lambda_funcdef_lambda1(PyObject *__pyx_self); /* proto */ static PyObject *__pyx_lambda_funcdef_lambda2(PyObject *__pyx_self); /* proto */ static PyObject *__pyx_lambda_funcdef_lambda3(PyObject *__pyx_self); /* proto */ static PyObject *__pyx_lambda_funcdef_lambda4(PyObject *__pyx_self); /* proto */ static PyObject *__pyx_lambda_funcdef_lambda5(PyObject *__pyx_self); /* proto */ static PyObject *__pyx_pf_4s3ql_9deltadump_2dump_table(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_table, PyObject *__pyx_v_order, PyObject *__pyx_v_columns, PyObject *__pyx_v_db, PyObject *__pyx_v_fh); /* proto */ static PyObject *__pyx_lambda_funcdef_lambda6(PyObject *__pyx_self); /* proto */ static PyObject *__pyx_lambda_funcdef_lambda7(PyObject *__pyx_self); /* proto */ static PyObject *__pyx_lambda_funcdef_lambda8(PyObject *__pyx_self); /* proto */ static PyObject *__pyx_lambda_funcdef_lambda9(PyObject *__pyx_self); /* proto */ static PyObject *__pyx_lambda_funcdef_lambda10(PyObject *__pyx_self); /* proto */ static PyObject *__pyx_lambda_funcdef_lambda11(PyObject *__pyx_self); /* proto */ static PyObject 
*__pyx_lambda_funcdef_lambda12(PyObject *__pyx_self); /* proto */ static PyObject *__pyx_lambda_funcdef_lambda13(PyObject *__pyx_self); /* proto */ static PyObject *__pyx_lambda_funcdef_lambda14(PyObject *__pyx_self); /* proto */ static PyObject *__pyx_lambda_funcdef_lambda15(PyObject *__pyx_self); /* proto */ static PyObject *__pyx_pf_4s3ql_9deltadump_4load_table(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_table, PyObject *__pyx_v_columns, PyObject *__pyx_v_db, PyObject *__pyx_v_fh, PyObject *__pyx_v_trx_rows); /* proto */ static PyObject *__pyx_tp_new_4s3ql_9deltadump___pyx_scope_struct__dump_table(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_4s3ql_9deltadump___pyx_scope_struct_1_load_table(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_int_0; static PyObject *__pyx_int_4096; static PyObject *__pyx_int_5000; static PyObject *__pyx_k_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject *__pyx_tuple__14; static PyObject *__pyx_tuple__16; static PyObject *__pyx_codeobj__13; static PyObject *__pyx_codeobj__15; static PyObject *__pyx_codeobj__17; /* "s3ql/deltadump.pyx":112 * cdef double time_scale = 1 << 30 * * cdef inline int fwrite(const_void * buf, size_t len_, FILE * fp) except -1: # <<<<<<<<<<<<<< * '''Call libc's fwrite() and raise exception on failure''' * */ static CYTHON_INLINE int __pyx_f_4s3ql_9deltadump_fwrite(const void *__pyx_v_buf, size_t __pyx_v_len_, FILE *__pyx_v_fp) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; struct __pyx_opt_args_4s3ql_9deltadump_raise_from_errno __pyx_t_3; __Pyx_RefNannySetupContext("fwrite", 0); /* "s3ql/deltadump.pyx":115 * '''Call libc's fwrite() and raise exception on failure''' * * if fwrite_c(buf, len_, 1, fp) != 1: # <<<<<<<<<<<<<< * raise_from_errno(IOError) * return 0 */ __pyx_t_1 = ((fwrite(__pyx_v_buf, __pyx_v_len_, 1, __pyx_v_fp) != 1) != 0); if (__pyx_t_1) { /* "s3ql/deltadump.pyx":116 * * if fwrite_c(buf, len_, 1, fp) != 1: * raise_from_errno(IOError) # <<<<<<<<<<<<<< * return 0 * */ __pyx_t_3.__pyx_n = 1; __pyx_t_3.err_class = __pyx_builtin_IOError; __pyx_t_2 = __pyx_f_4s3ql_9deltadump_raise_from_errno(&__pyx_t_3); if (unlikely(__pyx_t_2 == -1)) __PYX_ERR(0, 116, __pyx_L1_error) /* "s3ql/deltadump.pyx":115 * '''Call libc's fwrite() and raise exception on failure''' * * if fwrite_c(buf, len_, 1, fp) != 1: # <<<<<<<<<<<<<< * raise_from_errno(IOError) * return 0 */ } /* "s3ql/deltadump.pyx":117 * if fwrite_c(buf, len_, 1, fp) != 1: * raise_from_errno(IOError) * return 0 # <<<<<<<<<<<<<< * * cdef inline int fread(void * buf, size_t len_, FILE * fp) except -1: */ __pyx_r = 0; goto __pyx_L0; /* "s3ql/deltadump.pyx":112 * cdef double time_scale = 1 << 30 * * cdef inline int fwrite(const_void * buf, size_t len_, FILE * fp) except -1: # <<<<<<<<<<<<<< * '''Call libc's fwrite() and raise exception on failure''' * */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("s3ql.deltadump.fwrite", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":119 * return 0 * * cdef inline int fread(void * buf, size_t len_, FILE * fp) except -1: # <<<<<<<<<<<<<< * '''Call libc's fread() and raise 
exception on failure''' * */ static CYTHON_INLINE int __pyx_f_4s3ql_9deltadump_fread(void *__pyx_v_buf, size_t __pyx_v_len_, FILE *__pyx_v_fp) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; struct __pyx_opt_args_4s3ql_9deltadump_raise_from_errno __pyx_t_3; __Pyx_RefNannySetupContext("fread", 0); /* "s3ql/deltadump.pyx":122 * '''Call libc's fread() and raise exception on failure''' * * if fread_c(buf, len_, 1, fp) != 1: # <<<<<<<<<<<<<< * raise_from_errno(IOError) * return 0 */ __pyx_t_1 = ((fread(__pyx_v_buf, __pyx_v_len_, 1, __pyx_v_fp) != 1) != 0); if (__pyx_t_1) { /* "s3ql/deltadump.pyx":123 * * if fread_c(buf, len_, 1, fp) != 1: * raise_from_errno(IOError) # <<<<<<<<<<<<<< * return 0 * */ __pyx_t_3.__pyx_n = 1; __pyx_t_3.err_class = __pyx_builtin_IOError; __pyx_t_2 = __pyx_f_4s3ql_9deltadump_raise_from_errno(&__pyx_t_3); if (unlikely(__pyx_t_2 == -1)) __PYX_ERR(0, 123, __pyx_L1_error) /* "s3ql/deltadump.pyx":122 * '''Call libc's fread() and raise exception on failure''' * * if fread_c(buf, len_, 1, fp) != 1: # <<<<<<<<<<<<<< * raise_from_errno(IOError) * return 0 */ } /* "s3ql/deltadump.pyx":124 * if fread_c(buf, len_, 1, fp) != 1: * raise_from_errno(IOError) * return 0 # <<<<<<<<<<<<<< * * cdef free(void * ptr): */ __pyx_r = 0; goto __pyx_L0; /* "s3ql/deltadump.pyx":119 * return 0 * * cdef inline int fread(void * buf, size_t len_, FILE * fp) except -1: # <<<<<<<<<<<<<< * '''Call libc's fread() and raise exception on failure''' * */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("s3ql.deltadump.fread", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":126 * return 0 * * cdef free(void * ptr): # <<<<<<<<<<<<<< * '''Call libc.free() * */ static PyObject *__pyx_f_4s3ql_9deltadump_free(void *__pyx_v_ptr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("free", 0); /* "s3ql/deltadump.pyx":133 * ''' * * free_c(ptr) # <<<<<<<<<<<<<< * return None * */ free(__pyx_v_ptr); /* "s3ql/deltadump.pyx":134 * * free_c(ptr) * return None # <<<<<<<<<<<<<< * * cdef int raise_from_errno(err_class=OSError) except -1: */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; /* "s3ql/deltadump.pyx":126 * return 0 * * cdef free(void * ptr): # <<<<<<<<<<<<<< * '''Call libc.free() * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":136 * return None * * cdef int raise_from_errno(err_class=OSError) except -1: # <<<<<<<<<<<<<< * '''Raise OSError for current errno value''' * */ static int __pyx_f_4s3ql_9deltadump_raise_from_errno(struct __pyx_opt_args_4s3ql_9deltadump_raise_from_errno *__pyx_optional_args) { PyObject *__pyx_v_err_class = __pyx_k_; int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; PyObject *__pyx_t_7 = NULL; __Pyx_RefNannySetupContext("raise_from_errno", 0); if (__pyx_optional_args) { if (__pyx_optional_args->__pyx_n > 0) { __pyx_v_err_class = __pyx_optional_args->err_class; } } /* "s3ql/deltadump.pyx":139 * '''Raise OSError for current errno value''' * * raise err_class(errno, PyUnicode_FromString(strerror(errno))) # <<<<<<<<<<<<<< * * cdef int fclose(FILE * fp) except -1: */ __pyx_t_2 = __Pyx_PyInt_From_int(errno); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 139, __pyx_L1_error) 
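/* Descriptive note on the generated code below (not from the .pyx source): the errno
 * value has just been boxed as a Python int; the following branches build the
 * two-argument call err_class(errno, strerror(errno)) -- via the FastCall paths when
 * available, otherwise through a temporary argument tuple -- and raise the resulting
 * exception, matching the quoted deltadump.pyx line 139. */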
__Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyUnicode_FromString(strerror(errno)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_err_class); __pyx_t_4 = __pyx_v_err_class; __pyx_t_5 = NULL; __pyx_t_6 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); __pyx_t_6 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_2, __pyx_t_3}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 139, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_2, __pyx_t_3}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 139, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { __pyx_t_7 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_6, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_6, __pyx_t_3); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 139, __pyx_L1_error) /* "s3ql/deltadump.pyx":136 * return None * * cdef int raise_from_errno(err_class=OSError) except -1: # <<<<<<<<<<<<<< * '''Raise OSError for current errno value''' * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("s3ql.deltadump.raise_from_errno", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":141 * raise err_class(errno, PyUnicode_FromString(strerror(errno))) * * cdef int fclose(FILE * fp) except -1: # <<<<<<<<<<<<<< * '''Call libc.fclose() and raise exception on failure''' * */ static int __pyx_f_4s3ql_9deltadump_fclose(FILE *__pyx_v_fp) { Py_ssize_t __pyx_v_off; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; __Pyx_RefNannySetupContext("fclose", 0); /* "s3ql/deltadump.pyx":149 * # important, so that we can safely reposition the fd position * # below (which is necessary in case there is cached input data) * if fflush(fp) != 0: # <<<<<<<<<<<<<< * raise_from_errno() * */ __pyx_t_1 = ((fflush(__pyx_v_fp) != 0) != 0); if (__pyx_t_1) { /* "s3ql/deltadump.pyx":150 * # below (which is necessary in case there is cached input data) * if fflush(fp) != 0: * raise_from_errno() # 
<<<<<<<<<<<<<< * * # Reposition FD to position of FILE*, otherwise next read from FD will miss */ __pyx_t_2 = __pyx_f_4s3ql_9deltadump_raise_from_errno(NULL); if (unlikely(__pyx_t_2 == -1)) __PYX_ERR(0, 150, __pyx_L1_error) /* "s3ql/deltadump.pyx":149 * # important, so that we can safely reposition the fd position * # below (which is necessary in case there is cached input data) * if fflush(fp) != 0: # <<<<<<<<<<<<<< * raise_from_errno() * */ } /* "s3ql/deltadump.pyx":156 * # the same thing, but this does not seem to be documented so we don't rely * # on it. * off = ftell(fp) # <<<<<<<<<<<<<< * if off == -1: * raise_from_errno() */ __pyx_v_off = ftell(__pyx_v_fp); /* "s3ql/deltadump.pyx":157 * # on it. * off = ftell(fp) * if off == -1: # <<<<<<<<<<<<<< * raise_from_errno() * */ __pyx_t_1 = ((__pyx_v_off == -1L) != 0); if (__pyx_t_1) { /* "s3ql/deltadump.pyx":158 * off = ftell(fp) * if off == -1: * raise_from_errno() # <<<<<<<<<<<<<< * * if lseek(fileno(fp), off, SEEK_SET) != off: */ __pyx_t_2 = __pyx_f_4s3ql_9deltadump_raise_from_errno(NULL); if (unlikely(__pyx_t_2 == -1)) __PYX_ERR(0, 158, __pyx_L1_error) /* "s3ql/deltadump.pyx":157 * # on it. * off = ftell(fp) * if off == -1: # <<<<<<<<<<<<<< * raise_from_errno() * */ } /* "s3ql/deltadump.pyx":160 * raise_from_errno() * * if lseek(fileno(fp), off, SEEK_SET) != off: # <<<<<<<<<<<<<< * raise_from_errno() * */ __pyx_t_1 = ((lseek(fileno(__pyx_v_fp), __pyx_v_off, SEEK_SET) != __pyx_v_off) != 0); if (__pyx_t_1) { /* "s3ql/deltadump.pyx":161 * * if lseek(fileno(fp), off, SEEK_SET) != off: * raise_from_errno() # <<<<<<<<<<<<<< * * if fclose_c(fp) != 0: */ __pyx_t_2 = __pyx_f_4s3ql_9deltadump_raise_from_errno(NULL); if (unlikely(__pyx_t_2 == -1)) __PYX_ERR(0, 161, __pyx_L1_error) /* "s3ql/deltadump.pyx":160 * raise_from_errno() * * if lseek(fileno(fp), off, SEEK_SET) != off: # <<<<<<<<<<<<<< * raise_from_errno() * */ } /* "s3ql/deltadump.pyx":163 * raise_from_errno() * * if fclose_c(fp) != 0: # <<<<<<<<<<<<<< * raise_from_errno() * */ __pyx_t_1 = ((fclose(__pyx_v_fp) != 0) != 0); if (__pyx_t_1) { /* "s3ql/deltadump.pyx":164 * * if fclose_c(fp) != 0: * raise_from_errno() # <<<<<<<<<<<<<< * * return 0 */ __pyx_t_2 = __pyx_f_4s3ql_9deltadump_raise_from_errno(NULL); if (unlikely(__pyx_t_2 == -1)) __PYX_ERR(0, 164, __pyx_L1_error) /* "s3ql/deltadump.pyx":163 * raise_from_errno() * * if fclose_c(fp) != 0: # <<<<<<<<<<<<<< * raise_from_errno() * */ } /* "s3ql/deltadump.pyx":166 * raise_from_errno() * * return 0 # <<<<<<<<<<<<<< * * cdef void * calloc(size_t cnt, size_t size) except NULL: */ __pyx_r = 0; goto __pyx_L0; /* "s3ql/deltadump.pyx":141 * raise err_class(errno, PyUnicode_FromString(strerror(errno))) * * cdef int fclose(FILE * fp) except -1: # <<<<<<<<<<<<<< * '''Call libc.fclose() and raise exception on failure''' * */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("s3ql.deltadump.fclose", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":168 * return 0 * * cdef void * calloc(size_t cnt, size_t size) except NULL: # <<<<<<<<<<<<<< * '''Call libc.calloc and raise exception on failure''' * */ static void *__pyx_f_4s3ql_9deltadump_calloc(size_t __pyx_v_cnt, size_t __pyx_v_size) { void *__pyx_v_ptr; void *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2; __Pyx_RefNannySetupContext("calloc", 0); /* "s3ql/deltadump.pyx":173 * cdef void * ptr * * ptr = calloc_c(cnt, size) # <<<<<<<<<<<<<< * * if ptr is NULL: */ __pyx_v_ptr 
= calloc(__pyx_v_cnt, __pyx_v_size); /* "s3ql/deltadump.pyx":175 * ptr = calloc_c(cnt, size) * * if ptr is NULL: # <<<<<<<<<<<<<< * PyErr_NoMemory() * */ __pyx_t_1 = ((__pyx_v_ptr == NULL) != 0); if (__pyx_t_1) { /* "s3ql/deltadump.pyx":176 * * if ptr is NULL: * PyErr_NoMemory() # <<<<<<<<<<<<<< * * return ptr */ __pyx_t_2 = PyErr_NoMemory(); if (unlikely(__pyx_t_2 == NULL)) __PYX_ERR(0, 176, __pyx_L1_error) /* "s3ql/deltadump.pyx":175 * ptr = calloc_c(cnt, size) * * if ptr is NULL: # <<<<<<<<<<<<<< * PyErr_NoMemory() * */ } /* "s3ql/deltadump.pyx":178 * PyErr_NoMemory() * * return ptr # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_ptr; goto __pyx_L0; /* "s3ql/deltadump.pyx":168 * return 0 * * cdef void * calloc(size_t cnt, size_t size) except NULL: # <<<<<<<<<<<<<< * '''Call libc.calloc and raise exception on failure''' * */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("s3ql.deltadump.calloc", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":181 * * * cdef int SQLITE_CHECK_RC(int rc, int success, sqlite3* db) except -1: # <<<<<<<<<<<<<< * '''Raise correct exception if *rc* != *success*''' * */ static int __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(int __pyx_v_rc, int __pyx_v_success, sqlite3 *__pyx_v_db) { PyObject *__pyx_v_exc = NULL; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("SQLITE_CHECK_RC", 0); /* "s3ql/deltadump.pyx":184 * '''Raise correct exception if *rc* != *success*''' * * if rc != success: # <<<<<<<<<<<<<< * exc = apsw.exceptionfor(rc) * raise type(exc)(PyUnicode_FromString(sqlite3_errmsg(db))) */ __pyx_t_1 = ((__pyx_v_rc != __pyx_v_success) != 0); if (__pyx_t_1) { /* "s3ql/deltadump.pyx":185 * * if rc != success: * exc = apsw.exceptionfor(rc) # <<<<<<<<<<<<<< * raise type(exc)(PyUnicode_FromString(sqlite3_errmsg(db))) * */ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_apsw); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 185, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_exceptionfor); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 185, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_rc); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 185, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } if (!__pyx_t_5) { __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 185, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_3}; __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 185, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_3}; __pyx_t_2 = 
__Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 185, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 185, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 185, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_exc = __pyx_t_2; __pyx_t_2 = 0; /* "s3ql/deltadump.pyx":186 * if rc != success: * exc = apsw.exceptionfor(rc) * raise type(exc)(PyUnicode_FromString(sqlite3_errmsg(db))) # <<<<<<<<<<<<<< * * return 0 */ __pyx_t_4 = PyUnicode_FromString(sqlite3_errmsg(__pyx_v_db)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(((PyObject *)Py_TYPE(__pyx_v_exc))); __pyx_t_6 = ((PyObject *)Py_TYPE(__pyx_v_exc)); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); } } if (!__pyx_t_3) { __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_t_4}; __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_t_4}; __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { __pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); __pyx_t_3 = NULL; __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_5, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(0, 186, __pyx_L1_error) /* "s3ql/deltadump.pyx":184 * '''Raise correct exception if *rc* != *success*''' * * if rc != success: # <<<<<<<<<<<<<< * exc = apsw.exceptionfor(rc) * raise type(exc)(PyUnicode_FromString(sqlite3_errmsg(db))) */ } /* "s3ql/deltadump.pyx":188 * raise type(exc)(PyUnicode_FromString(sqlite3_errmsg(db))) * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "s3ql/deltadump.pyx":181 * * * cdef int SQLITE_CHECK_RC(int rc, int success, sqlite3* db) except 
-1: # <<<<<<<<<<<<<< * '''Raise correct exception if *rc* != *success*''' * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("s3ql.deltadump.SQLITE_CHECK_RC", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_exc); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":191 * * * cdef int prep_columns(columns, int** col_types_p, int** col_args_p) except -1: # <<<<<<<<<<<<<< * '''Allocate col_types and col_args, return number of columns * */ static int __pyx_f_4s3ql_9deltadump_prep_columns(PyObject *__pyx_v_columns, int **__pyx_v_col_types_p, int **__pyx_v_col_args_p) { size_t __pyx_v_col_count; int *__pyx_v_col_types; int *__pyx_v_col_args; PyObject *__pyx_v_i = NULL; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; void *__pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *(*__pyx_t_5)(PyObject *); PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; int __pyx_t_10; Py_ssize_t __pyx_t_11; __Pyx_RefNannySetupContext("prep_columns", 0); /* "s3ql/deltadump.pyx":201 * cdef int *col_args * * col_count = len(columns) # guaranteed positive # <<<<<<<<<<<<<< * col_types = < int *> calloc(col_count, sizeof(int)) * col_args = < int *> calloc(col_count, sizeof(int)) */ __pyx_t_1 = PyObject_Length(__pyx_v_columns); if (unlikely(__pyx_t_1 == -1)) __PYX_ERR(0, 201, __pyx_L1_error) __pyx_v_col_count = ((size_t)__pyx_t_1); /* "s3ql/deltadump.pyx":202 * * col_count = len(columns) # guaranteed positive * col_types = < int *> calloc(col_count, sizeof(int)) # <<<<<<<<<<<<<< * col_args = < int *> calloc(col_count, sizeof(int)) * */ __pyx_t_2 = __pyx_f_4s3ql_9deltadump_calloc(__pyx_v_col_count, (sizeof(int))); if (unlikely(__pyx_t_2 == NULL)) __PYX_ERR(0, 202, __pyx_L1_error) __pyx_v_col_types = ((int *)__pyx_t_2); /* "s3ql/deltadump.pyx":203 * col_count = len(columns) # guaranteed positive * col_types = < int *> calloc(col_count, sizeof(int)) * col_args = < int *> calloc(col_count, sizeof(int)) # <<<<<<<<<<<<<< * * # Initialize col_args and col_types */ __pyx_t_2 = __pyx_f_4s3ql_9deltadump_calloc(__pyx_v_col_count, (sizeof(int))); if (unlikely(__pyx_t_2 == NULL)) __PYX_ERR(0, 203, __pyx_L1_error) __pyx_v_col_args = ((int *)__pyx_t_2); /* "s3ql/deltadump.pyx":206 * * # Initialize col_args and col_types * for i in range(col_count): # <<<<<<<<<<<<<< * if columns[i][1] not in (BLOB, INTEGER, TIME): * raise ValueError("Invalid type for column %d" % i) */ __pyx_t_3 = __Pyx_PyInt_FromSize_t(__pyx_v_col_count); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 206, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 206, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_range, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 206, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (likely(PyList_CheckExact(__pyx_t_3)) || PyTuple_CheckExact(__pyx_t_3)) { __pyx_t_4 = __pyx_t_3; __Pyx_INCREF(__pyx_t_4); __pyx_t_1 = 0; __pyx_t_5 = NULL; } else { __pyx_t_1 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 206, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = Py_TYPE(__pyx_t_4)->tp_iternext; if 
(unlikely(!__pyx_t_5)) __PYX_ERR(0, 206, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; for (;;) { if (likely(!__pyx_t_5)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_1 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_1); __Pyx_INCREF(__pyx_t_3); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(0, 206, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_4, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 206, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif } else { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_1); __Pyx_INCREF(__pyx_t_3); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(0, 206, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_4, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 206, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif } } else { __pyx_t_3 = __pyx_t_5(__pyx_t_4); if (unlikely(!__pyx_t_3)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 206, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_3); } __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_3); __pyx_t_3 = 0; /* "s3ql/deltadump.pyx":207 * # Initialize col_args and col_types * for i in range(col_count): * if columns[i][1] not in (BLOB, INTEGER, TIME): # <<<<<<<<<<<<<< * raise ValueError("Invalid type for column %d" % i) * col_types[i] = columns[i][1] */ __pyx_t_3 = PyObject_GetItem(__pyx_v_columns, __pyx_v_i); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 207, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = __Pyx_GetItemInt(__pyx_t_3, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 207, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_BLOB); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 207, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = PyObject_RichCompare(__pyx_t_6, __pyx_t_3, Py_NE); __Pyx_XGOTREF(__pyx_t_8); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 207, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_9 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 207, __pyx_L1_error) __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; if (__pyx_t_9) { } else { __pyx_t_7 = __pyx_t_9; goto __pyx_L6_bool_binop_done; } __pyx_t_8 = __Pyx_GetModuleGlobalName(__pyx_n_s_INTEGER); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 207, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_3 = PyObject_RichCompare(__pyx_t_6, __pyx_t_8, Py_NE); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 207, __pyx_L1_error) __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_9 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 207, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_9) { } else { __pyx_t_7 = __pyx_t_9; goto __pyx_L6_bool_binop_done; } __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_TIME); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 207, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = PyObject_RichCompare(__pyx_t_6, __pyx_t_3, Py_NE); __Pyx_XGOTREF(__pyx_t_8); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 207, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_9 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 
207, __pyx_L1_error) __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_7 = __pyx_t_9; __pyx_L6_bool_binop_done:; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_9 = (__pyx_t_7 != 0); if (__pyx_t_9) { /* "s3ql/deltadump.pyx":208 * for i in range(col_count): * if columns[i][1] not in (BLOB, INTEGER, TIME): * raise ValueError("Invalid type for column %d" % i) # <<<<<<<<<<<<<< * col_types[i] = columns[i][1] * */ __pyx_t_6 = PyUnicode_Format(__pyx_kp_u_Invalid_type_for_column_d, __pyx_v_i); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 208, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 208, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_8, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 208, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(0, 208, __pyx_L1_error) /* "s3ql/deltadump.pyx":207 * # Initialize col_args and col_types * for i in range(col_count): * if columns[i][1] not in (BLOB, INTEGER, TIME): # <<<<<<<<<<<<<< * raise ValueError("Invalid type for column %d" % i) * col_types[i] = columns[i][1] */ } /* "s3ql/deltadump.pyx":209 * if columns[i][1] not in (BLOB, INTEGER, TIME): * raise ValueError("Invalid type for column %d" % i) * col_types[i] = columns[i][1] # <<<<<<<<<<<<<< * * if len(columns[i]) == 3: */ __pyx_t_6 = PyObject_GetItem(__pyx_v_columns, __pyx_v_i); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 209, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = __Pyx_GetItemInt(__pyx_t_6, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 209, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_10 = __Pyx_PyInt_As_int(__pyx_t_8); if (unlikely((__pyx_t_10 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 209, __pyx_L1_error) __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_11 = __Pyx_PyIndex_AsSsize_t(__pyx_v_i); if (unlikely((__pyx_t_11 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 209, __pyx_L1_error) (__pyx_v_col_types[__pyx_t_11]) = __pyx_t_10; /* "s3ql/deltadump.pyx":211 * col_types[i] = columns[i][1] * * if len(columns[i]) == 3: # <<<<<<<<<<<<<< * col_args[i] = columns[i][2] * else: */ __pyx_t_8 = PyObject_GetItem(__pyx_v_columns, __pyx_v_i); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 211, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_11 = PyObject_Length(__pyx_t_8); if (unlikely(__pyx_t_11 == -1)) __PYX_ERR(0, 211, __pyx_L1_error) __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_9 = ((__pyx_t_11 == 3) != 0); if (__pyx_t_9) { /* "s3ql/deltadump.pyx":212 * * if len(columns[i]) == 3: * col_args[i] = columns[i][2] # <<<<<<<<<<<<<< * else: * col_args[i] = 0 */ __pyx_t_8 = PyObject_GetItem(__pyx_v_columns, __pyx_v_i); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 212, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_6 = __Pyx_GetItemInt(__pyx_t_8, 2, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 212, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_10 = __Pyx_PyInt_As_int(__pyx_t_6); if (unlikely((__pyx_t_10 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 212, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_11 = __Pyx_PyIndex_AsSsize_t(__pyx_v_i); if (unlikely((__pyx_t_11 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 212, 
__pyx_L1_error) (__pyx_v_col_args[__pyx_t_11]) = __pyx_t_10; /* "s3ql/deltadump.pyx":211 * col_types[i] = columns[i][1] * * if len(columns[i]) == 3: # <<<<<<<<<<<<<< * col_args[i] = columns[i][2] * else: */ goto __pyx_L9; } /* "s3ql/deltadump.pyx":214 * col_args[i] = columns[i][2] * else: * col_args[i] = 0 # <<<<<<<<<<<<<< * * col_types_p[0] = col_types */ /*else*/ { __pyx_t_11 = __Pyx_PyIndex_AsSsize_t(__pyx_v_i); if (unlikely((__pyx_t_11 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 214, __pyx_L1_error) (__pyx_v_col_args[__pyx_t_11]) = 0; } __pyx_L9:; /* "s3ql/deltadump.pyx":206 * * # Initialize col_args and col_types * for i in range(col_count): # <<<<<<<<<<<<<< * if columns[i][1] not in (BLOB, INTEGER, TIME): * raise ValueError("Invalid type for column %d" % i) */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "s3ql/deltadump.pyx":216 * col_args[i] = 0 * * col_types_p[0] = col_types # <<<<<<<<<<<<<< * col_args_p[0] = col_args * */ (__pyx_v_col_types_p[0]) = __pyx_v_col_types; /* "s3ql/deltadump.pyx":217 * * col_types_p[0] = col_types * col_args_p[0] = col_args # <<<<<<<<<<<<<< * * # We can safely assume that this fits into an int */ (__pyx_v_col_args_p[0]) = __pyx_v_col_args; /* "s3ql/deltadump.pyx":220 * * # We can safely assume that this fits into an int * return col_count # <<<<<<<<<<<<<< * * cdef FILE* dup_to_fp(fh, const_char* mode) except NULL: */ __pyx_r = ((int)__pyx_v_col_count); goto __pyx_L0; /* "s3ql/deltadump.pyx":191 * * * cdef int prep_columns(columns, int** col_types_p, int** col_args_p) except -1: # <<<<<<<<<<<<<< * '''Allocate col_types and col_args, return number of columns * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("s3ql.deltadump.prep_columns", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_i); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":222 * return col_count * * cdef FILE* dup_to_fp(fh, const_char* mode) except NULL: # <<<<<<<<<<<<<< * '''Duplicate fd from *fh* and open as FILE*''' * */ static FILE *__pyx_f_4s3ql_9deltadump_dup_to_fp(PyObject *__pyx_v_fh, const char *__pyx_v_mode) { int __pyx_v_fd; FILE *__pyx_v_fp; FILE *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; __Pyx_RefNannySetupContext("dup_to_fp", 0); /* "s3ql/deltadump.pyx":227 * cdef int fd * * fd = dup(fh.fileno()) # <<<<<<<<<<<<<< * if fd == -1: * raise_from_errno() */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_fh, __pyx_n_s_fileno); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 227, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } if (__pyx_t_3) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 227, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 227, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 227, 
__pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_fd = dup(__pyx_t_4); /* "s3ql/deltadump.pyx":228 * * fd = dup(fh.fileno()) * if fd == -1: # <<<<<<<<<<<<<< * raise_from_errno() * */ __pyx_t_5 = ((__pyx_v_fd == -1L) != 0); if (__pyx_t_5) { /* "s3ql/deltadump.pyx":229 * fd = dup(fh.fileno()) * if fd == -1: * raise_from_errno() # <<<<<<<<<<<<<< * * fp = fdopen(fd, mode) */ __pyx_t_4 = __pyx_f_4s3ql_9deltadump_raise_from_errno(NULL); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(0, 229, __pyx_L1_error) /* "s3ql/deltadump.pyx":228 * * fd = dup(fh.fileno()) * if fd == -1: # <<<<<<<<<<<<<< * raise_from_errno() * */ } /* "s3ql/deltadump.pyx":231 * raise_from_errno() * * fp = fdopen(fd, mode) # <<<<<<<<<<<<<< * if fp == NULL: * raise_from_errno() */ __pyx_v_fp = fdopen(__pyx_v_fd, __pyx_v_mode); /* "s3ql/deltadump.pyx":232 * * fp = fdopen(fd, mode) * if fp == NULL: # <<<<<<<<<<<<<< * raise_from_errno() * */ __pyx_t_5 = ((__pyx_v_fp == NULL) != 0); if (__pyx_t_5) { /* "s3ql/deltadump.pyx":233 * fp = fdopen(fd, mode) * if fp == NULL: * raise_from_errno() # <<<<<<<<<<<<<< * * return fp */ __pyx_t_4 = __pyx_f_4s3ql_9deltadump_raise_from_errno(NULL); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(0, 233, __pyx_L1_error) /* "s3ql/deltadump.pyx":232 * * fp = fdopen(fd, mode) * if fp == NULL: # <<<<<<<<<<<<<< * raise_from_errno() * */ } /* "s3ql/deltadump.pyx":235 * raise_from_errno() * * return fp # <<<<<<<<<<<<<< * * def check_sqlite(): */ __pyx_r = __pyx_v_fp; goto __pyx_L0; /* "s3ql/deltadump.pyx":222 * return col_count * * cdef FILE* dup_to_fp(fh, const_char* mode) except NULL: # <<<<<<<<<<<<<< * '''Duplicate fd from *fh* and open as FILE*''' * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("s3ql.deltadump.dup_to_fp", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":237 * return fp * * def check_sqlite(): # <<<<<<<<<<<<<< * '''Check if deltadump and apsw module use compatible SQLite code. * */ /* Python wrapper */ static PyObject *__pyx_pw_4s3ql_9deltadump_1check_sqlite(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static char __pyx_doc_4s3ql_9deltadump_check_sqlite[] = "check_sqlite()\nCheck if deltadump and apsw module use compatible SQLite code.\n\n This functions look at versions and compile options of the SQLite\n code used by the *apsw* module and the *deltadump* module. 
If they\n do not match exactly, a `RuntimeError` is raised.\n\n Only if both modules use the same SQLite version compiled with the\n same options can the database object be shared between *apsw* and\n *deltadump*.\n "; static PyMethodDef __pyx_mdef_4s3ql_9deltadump_1check_sqlite = {"check_sqlite", (PyCFunction)__pyx_pw_4s3ql_9deltadump_1check_sqlite, METH_NOARGS, __pyx_doc_4s3ql_9deltadump_check_sqlite}; static PyObject *__pyx_pw_4s3ql_9deltadump_1check_sqlite(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("check_sqlite (wrapper)", 0); __pyx_r = __pyx_pf_4s3ql_9deltadump_check_sqlite(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4s3ql_9deltadump_check_sqlite(CYTHON_UNUSED PyObject *__pyx_self) { const char *__pyx_v_buf; PyObject *__pyx_v_apsw_sqlite_version = NULL; PyObject *__pyx_v_s3ql_sqlite_version = NULL; PyObject *__pyx_v_apsw_sqlite_options = NULL; PyObject *__pyx_v_s3ql_sqlite_options = NULL; PyObject *__pyx_v_idx = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); int __pyx_t_9; __Pyx_RefNannySetupContext("check_sqlite", 0); /* "s3ql/deltadump.pyx":251 * cdef const_char *buf * * apsw_sqlite_version = apsw.sqlitelibversion() # <<<<<<<<<<<<<< * s3ql_sqlite_version = PyUnicode_FromString(sqlite3_libversion()) * log.debug('apsw sqlite version: %s, ' */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_apsw); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_sqlitelibversion); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } if (__pyx_t_2) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 251, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { __pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 251, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_apsw_sqlite_version = __pyx_t_1; __pyx_t_1 = 0; /* "s3ql/deltadump.pyx":252 * * apsw_sqlite_version = apsw.sqlitelibversion() * s3ql_sqlite_version = PyUnicode_FromString(sqlite3_libversion()) # <<<<<<<<<<<<<< * log.debug('apsw sqlite version: %s, ' * 's3ql sqlite version: %s', */ __pyx_t_1 = PyUnicode_FromString(sqlite3_libversion()); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_s3ql_sqlite_version = __pyx_t_1; __pyx_t_1 = 0; /* "s3ql/deltadump.pyx":253 * apsw_sqlite_version = apsw.sqlitelibversion() * s3ql_sqlite_version = PyUnicode_FromString(sqlite3_libversion()) * log.debug('apsw sqlite version: %s, ' # <<<<<<<<<<<<<< * 's3ql sqlite version: %s', * apsw_sqlite_version, */ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_log); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = 
__Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_debug); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "s3ql/deltadump.pyx":256 * 's3ql sqlite version: %s', * apsw_sqlite_version, * s3ql_sqlite_version) # <<<<<<<<<<<<<< * if apsw_sqlite_version != s3ql_sqlite_version: * raise RuntimeError('SQLite version mismatch between APSW and S3QL ' */ __pyx_t_3 = NULL; __pyx_t_4 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); __pyx_t_4 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[4] = {__pyx_t_3, __pyx_kp_u_apsw_sqlite_version_s_s3ql_sqlit, __pyx_v_apsw_sqlite_version, __pyx_v_s3ql_sqlite_version}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_4, 3+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 253, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[4] = {__pyx_t_3, __pyx_kp_u_apsw_sqlite_version_s_s3ql_sqlit, __pyx_v_apsw_sqlite_version, __pyx_v_s3ql_sqlite_version}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_4, 3+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 253, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { __pyx_t_5 = PyTuple_New(3+__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_3) { __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); __pyx_t_3 = NULL; } __Pyx_INCREF(__pyx_kp_u_apsw_sqlite_version_s_s3ql_sqlit); __Pyx_GIVEREF(__pyx_kp_u_apsw_sqlite_version_s_s3ql_sqlit); PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_4, __pyx_kp_u_apsw_sqlite_version_s_s3ql_sqlit); __Pyx_INCREF(__pyx_v_apsw_sqlite_version); __Pyx_GIVEREF(__pyx_v_apsw_sqlite_version); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_4, __pyx_v_apsw_sqlite_version); __Pyx_INCREF(__pyx_v_s3ql_sqlite_version); __Pyx_GIVEREF(__pyx_v_s3ql_sqlite_version); PyTuple_SET_ITEM(__pyx_t_5, 2+__pyx_t_4, __pyx_v_s3ql_sqlite_version); __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "s3ql/deltadump.pyx":257 * apsw_sqlite_version, * s3ql_sqlite_version) * if apsw_sqlite_version != s3ql_sqlite_version: # <<<<<<<<<<<<<< * raise RuntimeError('SQLite version mismatch between APSW and S3QL ' * '(%s vs %s)' % (apsw_sqlite_version, s3ql_sqlite_version)) */ __pyx_t_1 = PyObject_RichCompare(__pyx_v_apsw_sqlite_version, __pyx_v_s3ql_sqlite_version, Py_NE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 257, __pyx_L1_error) __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 257, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_6) { /* "s3ql/deltadump.pyx":259 * if apsw_sqlite_version != s3ql_sqlite_version: * raise RuntimeError('SQLite version mismatch between APSW and S3QL ' * '(%s vs %s)' % (apsw_sqlite_version, s3ql_sqlite_version)) # <<<<<<<<<<<<<< * * apsw_sqlite_options = set(apsw.compile_options) */ __pyx_t_1 
= PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 259, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_apsw_sqlite_version); __Pyx_GIVEREF(__pyx_v_apsw_sqlite_version); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_apsw_sqlite_version); __Pyx_INCREF(__pyx_v_s3ql_sqlite_version); __Pyx_GIVEREF(__pyx_v_s3ql_sqlite_version); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_s3ql_sqlite_version); __pyx_t_2 = PyUnicode_Format(__pyx_kp_u_SQLite_version_mismatch_between, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 259, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "s3ql/deltadump.pyx":258 * s3ql_sqlite_version) * if apsw_sqlite_version != s3ql_sqlite_version: * raise RuntimeError('SQLite version mismatch between APSW and S3QL ' # <<<<<<<<<<<<<< * '(%s vs %s)' % (apsw_sqlite_version, s3ql_sqlite_version)) * */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_t_1, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(0, 258, __pyx_L1_error) /* "s3ql/deltadump.pyx":257 * apsw_sqlite_version, * s3ql_sqlite_version) * if apsw_sqlite_version != s3ql_sqlite_version: # <<<<<<<<<<<<<< * raise RuntimeError('SQLite version mismatch between APSW and S3QL ' * '(%s vs %s)' % (apsw_sqlite_version, s3ql_sqlite_version)) */ } /* "s3ql/deltadump.pyx":261 * '(%s vs %s)' % (apsw_sqlite_version, s3ql_sqlite_version)) * * apsw_sqlite_options = set(apsw.compile_options) # <<<<<<<<<<<<<< * s3ql_sqlite_options = set() * for idx in itertools.count(0): */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_apsw); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 261, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_compile_options); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 261, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PySet_New(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 261, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_apsw_sqlite_options = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "s3ql/deltadump.pyx":262 * * apsw_sqlite_options = set(apsw.compile_options) * s3ql_sqlite_options = set() # <<<<<<<<<<<<<< * for idx in itertools.count(0): * buf = sqlite3_compileoption_get(idx) */ __pyx_t_2 = PySet_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 262, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_s3ql_sqlite_options = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "s3ql/deltadump.pyx":263 * apsw_sqlite_options = set(apsw.compile_options) * s3ql_sqlite_options = set() * for idx in itertools.count(0): # <<<<<<<<<<<<<< * buf = sqlite3_compileoption_get(idx) * if buf is NULL: */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_itertools); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_count); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); 
__pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_t_2)) || PyTuple_CheckExact(__pyx_t_2)) { __pyx_t_1 = __pyx_t_2; __Pyx_INCREF(__pyx_t_1); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_8 = Py_TYPE(__pyx_t_1)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 263, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_1))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_1)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_2 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_7); __Pyx_INCREF(__pyx_t_2); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(0, 263, __pyx_L1_error) #else __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); #endif } else { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_7); __Pyx_INCREF(__pyx_t_2); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(0, 263, __pyx_L1_error) #else __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); #endif } } else { __pyx_t_2 = __pyx_t_8(__pyx_t_1); if (unlikely(!__pyx_t_2)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 263, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_2); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_2); __pyx_t_2 = 0; /* "s3ql/deltadump.pyx":264 * s3ql_sqlite_options = set() * for idx in itertools.count(0): * buf = sqlite3_compileoption_get(idx) # <<<<<<<<<<<<<< * if buf is NULL: * break */ __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_v_idx); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 264, __pyx_L1_error) __pyx_v_buf = sqlite3_compileoption_get(__pyx_t_4); /* "s3ql/deltadump.pyx":265 * for idx in itertools.count(0): * buf = sqlite3_compileoption_get(idx) * if buf is NULL: # <<<<<<<<<<<<<< * break * s3ql_sqlite_options.add(PyUnicode_FromString(buf)) */ __pyx_t_6 = ((__pyx_v_buf == NULL) != 0); if (__pyx_t_6) { /* "s3ql/deltadump.pyx":266 * buf = sqlite3_compileoption_get(idx) * if buf is NULL: * break # <<<<<<<<<<<<<< * s3ql_sqlite_options.add(PyUnicode_FromString(buf)) * */ goto __pyx_L5_break; /* "s3ql/deltadump.pyx":265 * for idx in itertools.count(0): * buf = sqlite3_compileoption_get(idx) * if buf is NULL: # <<<<<<<<<<<<<< * break * s3ql_sqlite_options.add(PyUnicode_FromString(buf)) */ } /* "s3ql/deltadump.pyx":267 * if buf is NULL: * break * s3ql_sqlite_options.add(PyUnicode_FromString(buf)) # <<<<<<<<<<<<<< * * log.debug('apsw sqlite compile options: %s, ' */ __pyx_t_2 = PyUnicode_FromString(__pyx_v_buf); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 267, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_9 = PySet_Add(__pyx_v_s3ql_sqlite_options, __pyx_t_2); if (unlikely(__pyx_t_9 == -1)) __PYX_ERR(0, 267, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "s3ql/deltadump.pyx":263 * apsw_sqlite_options = set(apsw.compile_options) * s3ql_sqlite_options = set() * for idx in itertools.count(0): # <<<<<<<<<<<<<< * buf = sqlite3_compileoption_get(idx) * if buf is NULL: */ } __pyx_L5_break:; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; 
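/* Illustrative sketch (editor's note, not emitted by Cython, compiled out via #if 0):
 * the loop above enumerates the compile-time options of the SQLite library linked
 * into this module via sqlite3_compileoption_get(), which returns NULL once the index
 * runs past the last option; check_sqlite() then compares that set against
 * apsw.compile_options to ensure APSW and S3QL were built against binary-compatible
 * SQLite code. A minimal standalone version of the enumeration might look like the
 * hypothetical helper below (it is never called; option strings are reported without
 * their SQLITE_ prefix):
 */
#if 0
static void example_collect_compile_options(void)
{
    int idx;
    const char *opt;
    for (idx = 0; (opt = sqlite3_compileoption_get(idx)) != NULL; idx++) {
        /* e.g. "ENABLE_FTS4", "THREADSAFE=1", ... */
        printf("SQLITE_%s\n", opt);
    }
}
#endif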
/* "s3ql/deltadump.pyx":269 * s3ql_sqlite_options.add(PyUnicode_FromString(buf)) * * log.debug('apsw sqlite compile options: %s, ' # <<<<<<<<<<<<<< * 's3ql sqlite compile options: %s', * apsw_sqlite_options, */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_log); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 269, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_debug); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 269, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "s3ql/deltadump.pyx":272 * 's3ql sqlite compile options: %s', * apsw_sqlite_options, * s3ql_sqlite_options) # <<<<<<<<<<<<<< * if apsw_sqlite_options != s3ql_sqlite_options: * raise RuntimeError('SQLite code used by APSW was compiled with different ' */ __pyx_t_2 = NULL; __pyx_t_4 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_4 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[4] = {__pyx_t_2, __pyx_kp_u_apsw_sqlite_compile_options_s_s3, __pyx_v_apsw_sqlite_options, __pyx_v_s3ql_sqlite_options}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_4, 3+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 269, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[4] = {__pyx_t_2, __pyx_kp_u_apsw_sqlite_compile_options_s_s3, __pyx_v_apsw_sqlite_options, __pyx_v_s3ql_sqlite_options}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_4, 3+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 269, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { __pyx_t_3 = PyTuple_New(3+__pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 269, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (__pyx_t_2) { __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __pyx_t_2 = NULL; } __Pyx_INCREF(__pyx_kp_u_apsw_sqlite_compile_options_s_s3); __Pyx_GIVEREF(__pyx_kp_u_apsw_sqlite_compile_options_s_s3); PyTuple_SET_ITEM(__pyx_t_3, 0+__pyx_t_4, __pyx_kp_u_apsw_sqlite_compile_options_s_s3); __Pyx_INCREF(__pyx_v_apsw_sqlite_options); __Pyx_GIVEREF(__pyx_v_apsw_sqlite_options); PyTuple_SET_ITEM(__pyx_t_3, 1+__pyx_t_4, __pyx_v_apsw_sqlite_options); __Pyx_INCREF(__pyx_v_s3ql_sqlite_options); __Pyx_GIVEREF(__pyx_v_s3ql_sqlite_options); PyTuple_SET_ITEM(__pyx_t_3, 2+__pyx_t_4, __pyx_v_s3ql_sqlite_options); __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 269, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "s3ql/deltadump.pyx":273 * apsw_sqlite_options, * s3ql_sqlite_options) * if apsw_sqlite_options != s3ql_sqlite_options: # <<<<<<<<<<<<<< * raise RuntimeError('SQLite code used by APSW was compiled with different ' * 'options than SQLite code available to S3QL! 
' */ __pyx_t_1 = PyObject_RichCompare(__pyx_v_apsw_sqlite_options, __pyx_v_s3ql_sqlite_options, Py_NE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 273, __pyx_L1_error) __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 273, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_6) { /* "s3ql/deltadump.pyx":277 * 'options than SQLite code available to S3QL! ' * 'Differing settings: + %s, - %s' % * (apsw_sqlite_options - s3ql_sqlite_options, # <<<<<<<<<<<<<< * s3ql_sqlite_options - apsw_sqlite_options)) * */ __pyx_t_1 = PyNumber_Subtract(__pyx_v_apsw_sqlite_options, __pyx_v_s3ql_sqlite_options); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 277, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); /* "s3ql/deltadump.pyx":278 * 'Differing settings: + %s, - %s' % * (apsw_sqlite_options - s3ql_sqlite_options, * s3ql_sqlite_options - apsw_sqlite_options)) # <<<<<<<<<<<<<< * * def dump_table(table, order, columns, db, fh): */ __pyx_t_5 = PyNumber_Subtract(__pyx_v_s3ql_sqlite_options, __pyx_v_apsw_sqlite_options); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); /* "s3ql/deltadump.pyx":277 * 'options than SQLite code available to S3QL! ' * 'Differing settings: + %s, - %s' % * (apsw_sqlite_options - s3ql_sqlite_options, # <<<<<<<<<<<<<< * s3ql_sqlite_options - apsw_sqlite_options)) * */ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 277, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_5); __pyx_t_1 = 0; __pyx_t_5 = 0; /* "s3ql/deltadump.pyx":276 * raise RuntimeError('SQLite code used by APSW was compiled with different ' * 'options than SQLite code available to S3QL! ' * 'Differing settings: + %s, - %s' % # <<<<<<<<<<<<<< * (apsw_sqlite_options - s3ql_sqlite_options, * s3ql_sqlite_options - apsw_sqlite_options)) */ __pyx_t_5 = PyUnicode_Format(__pyx_kp_u_SQLite_code_used_by_APSW_was_com, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 276, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "s3ql/deltadump.pyx":274 * s3ql_sqlite_options) * if apsw_sqlite_options != s3ql_sqlite_options: * raise RuntimeError('SQLite code used by APSW was compiled with different ' # <<<<<<<<<<<<<< * 'options than SQLite code available to S3QL! ' * 'Differing settings: + %s, - %s' % */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 274, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 274, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(0, 274, __pyx_L1_error) /* "s3ql/deltadump.pyx":273 * apsw_sqlite_options, * s3ql_sqlite_options) * if apsw_sqlite_options != s3ql_sqlite_options: # <<<<<<<<<<<<<< * raise RuntimeError('SQLite code used by APSW was compiled with different ' * 'options than SQLite code available to S3QL! ' */ } /* "s3ql/deltadump.pyx":237 * return fp * * def check_sqlite(): # <<<<<<<<<<<<<< * '''Check if deltadump and apsw module use compatible SQLite code. 
* */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("s3ql.deltadump.check_sqlite", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_apsw_sqlite_version); __Pyx_XDECREF(__pyx_v_s3ql_sqlite_version); __Pyx_XDECREF(__pyx_v_apsw_sqlite_options); __Pyx_XDECREF(__pyx_v_s3ql_sqlite_options); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":280 * s3ql_sqlite_options - apsw_sqlite_options)) * * def dump_table(table, order, columns, db, fh): # <<<<<<<<<<<<<< * '''Dump *columns* of *table* into *fh* * */ /* Python wrapper */ static PyObject *__pyx_pw_4s3ql_9deltadump_3dump_table(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4s3ql_9deltadump_2dump_table[] = "dump_table(table, order, columns, db, fh)\nDump *columns* of *table* into *fh*\n\n *order* specifies the order in which the rows are written and must be a\n string that can be inserted after the \"ORDER BY\" clause in an SQL SELECT\n statement.\n\n *db* is an `s3ql.Connection` instance for the database.\n\n *columns* must a list of 3-tuples, one for each column that should be\n stored. The first element of the tuple must contain the column name and the\n second element the type of data stored in the column (`INTEGER`, `TIME`\n or `BLOB`). Times will be converted to nanosecond integers.\n\n For integers and seconds, the third tuple element specifies the expected\n change of the values between rows. For blobs it can be either zero\n (indicating variable length columns) or an integer specifying the length of\n the column values in bytes.\n\n This function will open a separate connection to the database, so\n the *db* connection should not be in EXCLUSIVE locking mode.\n (Using a separate connection avoids the requirement on the *apsw*\n and *deltadump* modules be linked against against binary\n compatible SQLite libraries).\n "; static PyMethodDef __pyx_mdef_4s3ql_9deltadump_3dump_table = {"dump_table", (PyCFunction)__pyx_pw_4s3ql_9deltadump_3dump_table, METH_VARARGS|METH_KEYWORDS, __pyx_doc_4s3ql_9deltadump_2dump_table}; static PyObject *__pyx_pw_4s3ql_9deltadump_3dump_table(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_table = 0; PyObject *__pyx_v_order = 0; PyObject *__pyx_v_columns = 0; PyObject *__pyx_v_db = 0; PyObject *__pyx_v_fh = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("dump_table (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_table,&__pyx_n_s_order,&__pyx_n_s_columns,&__pyx_n_s_db,&__pyx_n_s_fh,0}; PyObject* values[5] = {0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_table)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = 
PyDict_GetItem(__pyx_kwds, __pyx_n_s_order)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("dump_table", 1, 5, 5, 1); __PYX_ERR(0, 280, __pyx_L3_error) } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_columns)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("dump_table", 1, 5, 5, 2); __PYX_ERR(0, 280, __pyx_L3_error) } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_db)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("dump_table", 1, 5, 5, 3); __PYX_ERR(0, 280, __pyx_L3_error) } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_fh)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("dump_table", 1, 5, 5, 4); __PYX_ERR(0, 280, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "dump_table") < 0)) __PYX_ERR(0, 280, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 5) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); } __pyx_v_table = values[0]; __pyx_v_order = values[1]; __pyx_v_columns = values[2]; __pyx_v_db = values[3]; __pyx_v_fh = values[4]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("dump_table", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 280, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("s3ql.deltadump.dump_table", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4s3ql_9deltadump_2dump_table(__pyx_self, __pyx_v_table, __pyx_v_order, __pyx_v_columns, __pyx_v_db, __pyx_v_fh); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":328 * SQLITE_OPEN_READONLY, NULL), * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_close(sqlite3_db), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db)) * SQLITE_CHECK_RC(sqlite3_extended_result_codes(sqlite3_db, 1), */ /* Python wrapper */ static PyObject *__pyx_pw_4s3ql_9deltadump_10dump_table_lambda(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyMethodDef __pyx_mdef_4s3ql_9deltadump_10dump_table_lambda = {"lambda", (PyCFunction)__pyx_pw_4s3ql_9deltadump_10dump_table_lambda, METH_NOARGS, 0}; static PyObject *__pyx_pw_4s3ql_9deltadump_10dump_table_lambda(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("lambda (wrapper)", 0); __pyx_r = __pyx_lambda_funcdef_lambda(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_lambda_funcdef_lambda(PyObject *__pyx_self) { struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table *__pyx_cur_scope; struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table *__pyx_outer_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("lambda", 0); __pyx_outer_scope = (struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table *) __Pyx_CyFunction_GetClosure(__pyx_self); __pyx_cur_scope = __pyx_outer_scope; __Pyx_XDECREF(__pyx_r); /* "s3ql/deltadump.pyx":329 * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_close(sqlite3_db), * SQLITE_OK, sqlite3_db)) # 
<<<<<<<<<<<<<< * SQLITE_CHECK_RC(sqlite3_extended_result_codes(sqlite3_db, 1), * SQLITE_OK, sqlite3_db) */ __pyx_t_1 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_close(__pyx_cur_scope->__pyx_v_sqlite3_db), SQLITE_OK, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_1 == -1)) __PYX_ERR(0, 328, __pyx_L1_error) /* "s3ql/deltadump.pyx":328 * SQLITE_OPEN_READONLY, NULL), * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_close(sqlite3_db), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db)) * SQLITE_CHECK_RC(sqlite3_extended_result_codes(sqlite3_db, 1), */ __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 328, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("s3ql.deltadump.dump_table.lambda", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":335 * # Get FILE* for buffered reading from *fh* * fp = dup_to_fp(fh, b'wb') * cm.callback(lambda: fclose(fp)) # <<<<<<<<<<<<<< * * # Allocate col_args and col_types */ /* Python wrapper */ static PyObject *__pyx_pw_4s3ql_9deltadump_10dump_table_1lambda1(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyMethodDef __pyx_mdef_4s3ql_9deltadump_10dump_table_1lambda1 = {"lambda1", (PyCFunction)__pyx_pw_4s3ql_9deltadump_10dump_table_1lambda1, METH_NOARGS, 0}; static PyObject *__pyx_pw_4s3ql_9deltadump_10dump_table_1lambda1(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("lambda1 (wrapper)", 0); __pyx_r = __pyx_lambda_funcdef_lambda1(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_lambda_funcdef_lambda1(PyObject *__pyx_self) { struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table *__pyx_cur_scope; struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table *__pyx_outer_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("lambda1", 0); __pyx_outer_scope = (struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table *) __Pyx_CyFunction_GetClosure(__pyx_self); __pyx_cur_scope = __pyx_outer_scope; __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_f_4s3ql_9deltadump_fclose(__pyx_cur_scope->__pyx_v_fp); if (unlikely(__pyx_t_1 == -1)) __PYX_ERR(0, 335, __pyx_L1_error) __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 335, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("s3ql.deltadump.dump_table.lambda1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":339 * # Allocate col_args and col_types * col_count = prep_columns(columns, &col_types, &col_args) * cm.callback(lambda: free(col_args)) # <<<<<<<<<<<<<< * cm.callback(lambda: free(col_types)) * */ /* Python wrapper */ static PyObject *__pyx_pw_4s3ql_9deltadump_10dump_table_2lambda2(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyMethodDef __pyx_mdef_4s3ql_9deltadump_10dump_table_2lambda2 = {"lambda2", 
(PyCFunction)__pyx_pw_4s3ql_9deltadump_10dump_table_2lambda2, METH_NOARGS, 0}; static PyObject *__pyx_pw_4s3ql_9deltadump_10dump_table_2lambda2(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("lambda2 (wrapper)", 0); __pyx_r = __pyx_lambda_funcdef_lambda2(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_lambda_funcdef_lambda2(PyObject *__pyx_self) { struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table *__pyx_cur_scope; struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table *__pyx_outer_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("lambda2", 0); __pyx_outer_scope = (struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table *) __Pyx_CyFunction_GetClosure(__pyx_self); __pyx_cur_scope = __pyx_outer_scope; __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_f_4s3ql_9deltadump_free(__pyx_cur_scope->__pyx_v_col_args); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 339, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("s3ql.deltadump.dump_table.lambda2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":340 * col_count = prep_columns(columns, &col_types, &col_args) * cm.callback(lambda: free(col_args)) * cm.callback(lambda: free(col_types)) # <<<<<<<<<<<<<< * * # Allocate int64_prev */ /* Python wrapper */ static PyObject *__pyx_pw_4s3ql_9deltadump_10dump_table_3lambda3(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyMethodDef __pyx_mdef_4s3ql_9deltadump_10dump_table_3lambda3 = {"lambda3", (PyCFunction)__pyx_pw_4s3ql_9deltadump_10dump_table_3lambda3, METH_NOARGS, 0}; static PyObject *__pyx_pw_4s3ql_9deltadump_10dump_table_3lambda3(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("lambda3 (wrapper)", 0); __pyx_r = __pyx_lambda_funcdef_lambda3(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_lambda_funcdef_lambda3(PyObject *__pyx_self) { struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table *__pyx_cur_scope; struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table *__pyx_outer_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("lambda3", 0); __pyx_outer_scope = (struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table *) __Pyx_CyFunction_GetClosure(__pyx_self); __pyx_cur_scope = __pyx_outer_scope; __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_f_4s3ql_9deltadump_free(__pyx_cur_scope->__pyx_v_col_types); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 340, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("s3ql.deltadump.dump_table.lambda3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":344 * # Allocate int64_prev * int64_prev = calloc( len(columns), sizeof(int64_t)) * cm.callback(lambda: free(int64_prev)) # <<<<<<<<<<<<<< * * # Prepare 
statement */ /* Python wrapper */ static PyObject *__pyx_pw_4s3ql_9deltadump_10dump_table_4lambda4(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyMethodDef __pyx_mdef_4s3ql_9deltadump_10dump_table_4lambda4 = {"lambda4", (PyCFunction)__pyx_pw_4s3ql_9deltadump_10dump_table_4lambda4, METH_NOARGS, 0}; static PyObject *__pyx_pw_4s3ql_9deltadump_10dump_table_4lambda4(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("lambda4 (wrapper)", 0); __pyx_r = __pyx_lambda_funcdef_lambda4(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_lambda_funcdef_lambda4(PyObject *__pyx_self) { struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table *__pyx_cur_scope; struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table *__pyx_outer_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("lambda4", 0); __pyx_outer_scope = (struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table *) __Pyx_CyFunction_GetClosure(__pyx_self); __pyx_cur_scope = __pyx_outer_scope; __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_f_4s3ql_9deltadump_free(__pyx_cur_scope->__pyx_v_int64_prev); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 344, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("s3ql.deltadump.dump_table.lambda4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":352 * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &stmt, NULL), * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(stmt), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db)) * */ /* Python wrapper */ static PyObject *__pyx_pw_4s3ql_9deltadump_10dump_table_5lambda5(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyMethodDef __pyx_mdef_4s3ql_9deltadump_10dump_table_5lambda5 = {"lambda5", (PyCFunction)__pyx_pw_4s3ql_9deltadump_10dump_table_5lambda5, METH_NOARGS, 0}; static PyObject *__pyx_pw_4s3ql_9deltadump_10dump_table_5lambda5(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("lambda5 (wrapper)", 0); __pyx_r = __pyx_lambda_funcdef_lambda5(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_lambda_funcdef_lambda5(PyObject *__pyx_self) { struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table *__pyx_cur_scope; struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table *__pyx_outer_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("lambda5", 0); __pyx_outer_scope = (struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table *) __Pyx_CyFunction_GetClosure(__pyx_self); __pyx_cur_scope = __pyx_outer_scope; __Pyx_XDECREF(__pyx_r); /* "s3ql/deltadump.pyx":353 * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(stmt), * SQLITE_OK, sqlite3_db)) # <<<<<<<<<<<<<< * * row_count = db.get_val("SELECT COUNT(rowid) FROM %s" % table) */ __pyx_t_1 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_finalize(__pyx_cur_scope->__pyx_v_stmt), SQLITE_OK, 
__pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_1 == -1)) __PYX_ERR(0, 352, __pyx_L1_error) /* "s3ql/deltadump.pyx":352 * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &stmt, NULL), * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(stmt), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db)) * */ __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 352, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("s3ql.deltadump.dump_table.lambda5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":280 * s3ql_sqlite_options - apsw_sqlite_options)) * * def dump_table(table, order, columns, db, fh): # <<<<<<<<<<<<<< * '''Dump *columns* of *table* into *fh* * */ static PyObject *__pyx_pf_4s3ql_9deltadump_2dump_table(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_table, PyObject *__pyx_v_order, PyObject *__pyx_v_columns, PyObject *__pyx_v_db, PyObject *__pyx_v_fh) { struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table *__pyx_cur_scope; int __pyx_v_col_count; int __pyx_v_rc; int __pyx_v_i; size_t __pyx_v_len_; int64_t __pyx_v_int64; int64_t __pyx_v_tmp; const void *__pyx_v_buf; int64_t __pyx_v_row_count; PyObject *__pyx_v_cm = NULL; PyObject *__pyx_v_dbfile_b = NULL; PyObject *__pyx_v_col_names = NULL; PyObject *__pyx_v_query = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; const char *__pyx_t_12; FILE *__pyx_t_13; Py_ssize_t __pyx_t_14; void *__pyx_t_15; PyObject *(*__pyx_t_16)(PyObject *); char *__pyx_t_17; int64_t __pyx_t_18; int __pyx_t_19; int __pyx_t_20; PyObject *__pyx_t_21 = NULL; int __pyx_t_22; __Pyx_RefNannySetupContext("dump_table", 0); __pyx_cur_scope = (struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table *)__pyx_tp_new_4s3ql_9deltadump___pyx_scope_struct__dump_table(__pyx_ptype_4s3ql_9deltadump___pyx_scope_struct__dump_table, __pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 280, __pyx_L1_error) } else { __Pyx_GOTREF(__pyx_cur_scope); } /* "s3ql/deltadump.pyx":318 * cdef int64_t row_count * * if db.file == ':memory:': # <<<<<<<<<<<<<< * raise ValueError("Can't access in-memory databases") * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_db, __pyx_n_s_file); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 318, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_t_1, __pyx_kp_u_memory, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 318, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_2) { /* "s3ql/deltadump.pyx":319 * * if db.file == ':memory:': * raise ValueError("Can't access in-memory databases") # <<<<<<<<<<<<<< * * with ExitStack() as cm: */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 319, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); 
__pyx_t_1 = 0; __PYX_ERR(0, 319, __pyx_L1_error) /* "s3ql/deltadump.pyx":318 * cdef int64_t row_count * * if db.file == ':memory:': # <<<<<<<<<<<<<< * raise ValueError("Can't access in-memory databases") * */ } /* "s3ql/deltadump.pyx":321 * raise ValueError("Can't access in-memory databases") * * with ExitStack() as cm: # <<<<<<<<<<<<<< * # Get SQLite connection * log.debug('Opening connection to %s', db.file) */ /*with:*/ { __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_ExitStack); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 321, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } if (__pyx_t_4) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 321, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else { __pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 321, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_5 = __Pyx_PyObject_LookupSpecial(__pyx_t_1, __pyx_n_s_exit); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 321, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyObject_LookupSpecial(__pyx_t_1, __pyx_n_s_enter); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 321, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } if (__pyx_t_6) { __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 321, __pyx_L4_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else { __pyx_t_3 = __Pyx_PyObject_CallNoArg(__pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 321, __pyx_L4_error) } __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __pyx_t_3; __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*try:*/ { { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); /*try:*/ { __pyx_v_cm = __pyx_t_4; __pyx_t_4 = 0; /* "s3ql/deltadump.pyx":323 * with ExitStack() as cm: * # Get SQLite connection * log.debug('Opening connection to %s', db.file) # <<<<<<<<<<<<<< * dbfile_b = db.file.encode(sys.getfilesystemencoding(), 'surrogateescape') * SQLITE_CHECK_RC(sqlite3_open_v2(dbfile_b, &sqlite3_db, */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_log); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 323, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_debug); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 323, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_db, __pyx_n_s_file); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 323, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = NULL; __pyx_t_10 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_6); 
__Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); __pyx_t_10 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_kp_u_Opening_connection_to_s, __pyx_t_1}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_10, 2+__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 323, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_kp_u_Opening_connection_to_s, __pyx_t_1}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_10, 2+__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 323, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_11 = PyTuple_New(2+__pyx_t_10); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 323, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_6); __pyx_t_6 = NULL; } __Pyx_INCREF(__pyx_kp_u_Opening_connection_to_s); __Pyx_GIVEREF(__pyx_kp_u_Opening_connection_to_s); PyTuple_SET_ITEM(__pyx_t_11, 0+__pyx_t_10, __pyx_kp_u_Opening_connection_to_s); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_11, 1+__pyx_t_10, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_11, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 323, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "s3ql/deltadump.pyx":324 * # Get SQLite connection * log.debug('Opening connection to %s', db.file) * dbfile_b = db.file.encode(sys.getfilesystemencoding(), 'surrogateescape') # <<<<<<<<<<<<<< * SQLITE_CHECK_RC(sqlite3_open_v2(dbfile_b, &sqlite3_db, * SQLITE_OPEN_READONLY, NULL), */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_db, __pyx_n_s_file); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 324, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_encode); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 324, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_sys); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 324, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_getfilesystemencoding); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 324, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) { __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); } } if (__pyx_t_1) { __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 324, __pyx_L8_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { __pyx_t_3 = __Pyx_PyObject_CallNoArg(__pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 324, __pyx_L8_error) } __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; __pyx_t_10 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_11))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_11); if (likely(__pyx_t_6)) { PyObject* function = 
PyMethod_GET_FUNCTION(__pyx_t_11); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_11, function); __pyx_t_10 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_t_3, __pyx_n_u_surrogateescape}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_11, __pyx_temp+1-__pyx_t_10, 2+__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 324, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_t_3, __pyx_n_u_surrogateescape}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_11, __pyx_temp+1-__pyx_t_10, 2+__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 324, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { __pyx_t_1 = PyTuple_New(2+__pyx_t_10); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 324, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_6); __pyx_t_6 = NULL; } __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_1, 0+__pyx_t_10, __pyx_t_3); __Pyx_INCREF(__pyx_n_u_surrogateescape); __Pyx_GIVEREF(__pyx_n_u_surrogateescape); PyTuple_SET_ITEM(__pyx_t_1, 1+__pyx_t_10, __pyx_n_u_surrogateescape); __pyx_t_3 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_1, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 324, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_v_dbfile_b = __pyx_t_4; __pyx_t_4 = 0; /* "s3ql/deltadump.pyx":325 * log.debug('Opening connection to %s', db.file) * dbfile_b = db.file.encode(sys.getfilesystemencoding(), 'surrogateescape') * SQLITE_CHECK_RC(sqlite3_open_v2(dbfile_b, &sqlite3_db, # <<<<<<<<<<<<<< * SQLITE_OPEN_READONLY, NULL), * SQLITE_OK, sqlite3_db) */ __pyx_t_12 = __Pyx_PyObject_AsString(__pyx_v_dbfile_b); if (unlikely((!__pyx_t_12) && PyErr_Occurred())) __PYX_ERR(0, 325, __pyx_L8_error) /* "s3ql/deltadump.pyx":327 * SQLITE_CHECK_RC(sqlite3_open_v2(dbfile_b, &sqlite3_db, * SQLITE_OPEN_READONLY, NULL), * SQLITE_OK, sqlite3_db) # <<<<<<<<<<<<<< * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_close(sqlite3_db), * SQLITE_OK, sqlite3_db)) */ __pyx_t_10 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_open_v2(__pyx_t_12, (&__pyx_cur_scope->__pyx_v_sqlite3_db), SQLITE_OPEN_READONLY, NULL), SQLITE_OK, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_10 == -1)) __PYX_ERR(0, 325, __pyx_L8_error) /* "s3ql/deltadump.pyx":328 * SQLITE_OPEN_READONLY, NULL), * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_close(sqlite3_db), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db)) * SQLITE_CHECK_RC(sqlite3_extended_result_codes(sqlite3_db, 1), */ __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_v_cm, __pyx_n_s_callback); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 328, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_1 = __Pyx_CyFunction_NewEx(&__pyx_mdef_4s3ql_9deltadump_10dump_table_lambda, 0, __pyx_n_s_dump_table_locals_lambda, ((PyObject*)__pyx_cur_scope), __pyx_n_s_s3ql_deltadump, __pyx_d, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 328, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_11))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_11); if (likely(__pyx_t_3)) { PyObject* function = 
PyMethod_GET_FUNCTION(__pyx_t_11); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_11, function); } } if (!__pyx_t_3) { __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_11, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 328, __pyx_L8_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_4); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_t_1}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 328, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_t_1}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 328, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 328, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3); __pyx_t_3 = NULL; __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 328, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "s3ql/deltadump.pyx":330 * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_close(sqlite3_db), * SQLITE_OK, sqlite3_db)) * SQLITE_CHECK_RC(sqlite3_extended_result_codes(sqlite3_db, 1), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db) * */ __pyx_t_10 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_extended_result_codes(__pyx_cur_scope->__pyx_v_sqlite3_db, 1), SQLITE_OK, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_10 == -1)) __PYX_ERR(0, 330, __pyx_L8_error) /* "s3ql/deltadump.pyx":334 * * # Get FILE* for buffered reading from *fh* * fp = dup_to_fp(fh, b'wb') # <<<<<<<<<<<<<< * cm.callback(lambda: fclose(fp)) * */ __pyx_t_13 = __pyx_f_4s3ql_9deltadump_dup_to_fp(__pyx_v_fh, ((const char *)"wb")); if (unlikely(__pyx_t_13 == NULL)) __PYX_ERR(0, 334, __pyx_L8_error) __pyx_cur_scope->__pyx_v_fp = __pyx_t_13; /* "s3ql/deltadump.pyx":335 * # Get FILE* for buffered reading from *fh* * fp = dup_to_fp(fh, b'wb') * cm.callback(lambda: fclose(fp)) # <<<<<<<<<<<<<< * * # Allocate col_args and col_types */ __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_v_cm, __pyx_n_s_callback); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 335, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_6 = __Pyx_CyFunction_NewEx(&__pyx_mdef_4s3ql_9deltadump_10dump_table_1lambda1, 0, __pyx_n_s_dump_table_locals_lambda, ((PyObject*)__pyx_cur_scope), __pyx_n_s_s3ql_deltadump, __pyx_d, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 335, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_11))) { __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_11); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_11, function); } } if (!__pyx_t_1) { __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_11, __pyx_t_6); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 335, __pyx_L8_error) 
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_4); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_1, __pyx_t_6}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 335, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_1, __pyx_t_6}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 335, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_3 = PyTuple_New(1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 335, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __pyx_t_1 = NULL; __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_3, 0+1, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 335, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } } __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "s3ql/deltadump.pyx":338 * * # Allocate col_args and col_types * col_count = prep_columns(columns, &col_types, &col_args) # <<<<<<<<<<<<<< * cm.callback(lambda: free(col_args)) * cm.callback(lambda: free(col_types)) */ __pyx_t_10 = __pyx_f_4s3ql_9deltadump_prep_columns(__pyx_v_columns, (&__pyx_cur_scope->__pyx_v_col_types), (&__pyx_cur_scope->__pyx_v_col_args)); if (unlikely(__pyx_t_10 == -1)) __PYX_ERR(0, 338, __pyx_L8_error) __pyx_v_col_count = __pyx_t_10; /* "s3ql/deltadump.pyx":339 * # Allocate col_args and col_types * col_count = prep_columns(columns, &col_types, &col_args) * cm.callback(lambda: free(col_args)) # <<<<<<<<<<<<<< * cm.callback(lambda: free(col_types)) * */ __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_v_cm, __pyx_n_s_callback); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 339, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_3 = __Pyx_CyFunction_NewEx(&__pyx_mdef_4s3ql_9deltadump_10dump_table_2lambda2, 0, __pyx_n_s_dump_table_locals_lambda, ((PyObject*)__pyx_cur_scope), __pyx_n_s_s3ql_deltadump, __pyx_d, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 339, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_11))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_11); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_11, function); } } if (!__pyx_t_6) { __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_11, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 339, __pyx_L8_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_4); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_3}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 339, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_3}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_11, 
__pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 339, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { __pyx_t_1 = PyTuple_New(1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 339, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_1, 0+1, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_1, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 339, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } } __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "s3ql/deltadump.pyx":340 * col_count = prep_columns(columns, &col_types, &col_args) * cm.callback(lambda: free(col_args)) * cm.callback(lambda: free(col_types)) # <<<<<<<<<<<<<< * * # Allocate int64_prev */ __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_v_cm, __pyx_n_s_callback); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 340, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_1 = __Pyx_CyFunction_NewEx(&__pyx_mdef_4s3ql_9deltadump_10dump_table_3lambda3, 0, __pyx_n_s_dump_table_locals_lambda, ((PyObject*)__pyx_cur_scope), __pyx_n_s_s3ql_deltadump, __pyx_d, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 340, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_11))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_11); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_11, function); } } if (!__pyx_t_3) { __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_11, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 340, __pyx_L8_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_4); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_t_1}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 340, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_t_1}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 340, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 340, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3); __pyx_t_3 = NULL; __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 340, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "s3ql/deltadump.pyx":343 * * # Allocate int64_prev * int64_prev = calloc( len(columns), sizeof(int64_t)) # <<<<<<<<<<<<<< * cm.callback(lambda: free(int64_prev)) * */ __pyx_t_14 = PyObject_Length(__pyx_v_columns); if (unlikely(__pyx_t_14 == -1)) __PYX_ERR(0, 343, __pyx_L8_error) __pyx_t_15 = 
__pyx_f_4s3ql_9deltadump_calloc(((size_t)__pyx_t_14), (sizeof(int64_t))); if (unlikely(__pyx_t_15 == NULL)) __PYX_ERR(0, 343, __pyx_L8_error) __pyx_cur_scope->__pyx_v_int64_prev = ((int64_t *)__pyx_t_15); /* "s3ql/deltadump.pyx":344 * # Allocate int64_prev * int64_prev = calloc( len(columns), sizeof(int64_t)) * cm.callback(lambda: free(int64_prev)) # <<<<<<<<<<<<<< * * # Prepare statement */ __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_v_cm, __pyx_n_s_callback); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 344, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_6 = __Pyx_CyFunction_NewEx(&__pyx_mdef_4s3ql_9deltadump_10dump_table_4lambda4, 0, __pyx_n_s_dump_table_locals_lambda, ((PyObject*)__pyx_cur_scope), __pyx_n_s_s3ql_deltadump, __pyx_d, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 344, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_11))) { __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_11); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_11, function); } } if (!__pyx_t_1) { __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_11, __pyx_t_6); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 344, __pyx_L8_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_4); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_1, __pyx_t_6}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 344, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_1, __pyx_t_6}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 344, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_3 = PyTuple_New(1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 344, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __pyx_t_1 = NULL; __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_3, 0+1, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 344, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } } __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "s3ql/deltadump.pyx":347 * * # Prepare statement * col_names = [ x[0] for x in columns ] # <<<<<<<<<<<<<< * query = ("SELECT %s FROM %s ORDER BY %s " % * (', '.join(col_names), table, order)).encode('utf-8') */ { /* enter inner scope */ PyObject *__pyx_7genexpr__pyx_v_x = NULL; __pyx_t_4 = PyList_New(0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 347, __pyx_L18_error) __Pyx_GOTREF(__pyx_t_4); if (likely(PyList_CheckExact(__pyx_v_columns)) || PyTuple_CheckExact(__pyx_v_columns)) { __pyx_t_11 = __pyx_v_columns; __Pyx_INCREF(__pyx_t_11); __pyx_t_14 = 0; __pyx_t_16 = NULL; } else { __pyx_t_14 = -1; __pyx_t_11 = PyObject_GetIter(__pyx_v_columns); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 347, __pyx_L18_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_16 = Py_TYPE(__pyx_t_11)->tp_iternext; if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 347, __pyx_L18_error) } for (;;) { if (likely(!__pyx_t_16)) { if 
(likely(PyList_CheckExact(__pyx_t_11))) { if (__pyx_t_14 >= PyList_GET_SIZE(__pyx_t_11)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyList_GET_ITEM(__pyx_t_11, __pyx_t_14); __Pyx_INCREF(__pyx_t_3); __pyx_t_14++; if (unlikely(0 < 0)) __PYX_ERR(0, 347, __pyx_L18_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_11, __pyx_t_14); __pyx_t_14++; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 347, __pyx_L18_error) __Pyx_GOTREF(__pyx_t_3); #endif } else { if (__pyx_t_14 >= PyTuple_GET_SIZE(__pyx_t_11)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_11, __pyx_t_14); __Pyx_INCREF(__pyx_t_3); __pyx_t_14++; if (unlikely(0 < 0)) __PYX_ERR(0, 347, __pyx_L18_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_11, __pyx_t_14); __pyx_t_14++; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 347, __pyx_L18_error) __Pyx_GOTREF(__pyx_t_3); #endif } } else { __pyx_t_3 = __pyx_t_16(__pyx_t_11); if (unlikely(!__pyx_t_3)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 347, __pyx_L18_error) } break; } __Pyx_GOTREF(__pyx_t_3); } __Pyx_XDECREF_SET(__pyx_7genexpr__pyx_v_x, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_GetItemInt(__pyx_7genexpr__pyx_v_x, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 347, __pyx_L18_error) __Pyx_GOTREF(__pyx_t_3); if (unlikely(__Pyx_ListComp_Append(__pyx_t_4, (PyObject*)__pyx_t_3))) __PYX_ERR(0, 347, __pyx_L18_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_7genexpr__pyx_v_x); goto __pyx_L21_exit_scope; __pyx_L18_error:; __Pyx_XDECREF(__pyx_7genexpr__pyx_v_x); goto __pyx_L8_error; __pyx_L21_exit_scope:; } /* exit inner scope */ __pyx_v_col_names = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "s3ql/deltadump.pyx":349 * col_names = [ x[0] for x in columns ] * query = ("SELECT %s FROM %s ORDER BY %s " % * (', '.join(col_names), table, order)).encode('utf-8') # <<<<<<<<<<<<<< * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &stmt, NULL), * SQLITE_OK, sqlite3_db) */ __pyx_t_4 = PyUnicode_Join(__pyx_kp_u__4, __pyx_v_col_names); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 349, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_11 = PyTuple_New(3); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 349, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_4); __Pyx_INCREF(__pyx_v_table); __Pyx_GIVEREF(__pyx_v_table); PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_v_table); __Pyx_INCREF(__pyx_v_order); __Pyx_GIVEREF(__pyx_v_order); PyTuple_SET_ITEM(__pyx_t_11, 2, __pyx_v_order); __pyx_t_4 = 0; /* "s3ql/deltadump.pyx":348 * # Prepare statement * col_names = [ x[0] for x in columns ] * query = ("SELECT %s FROM %s ORDER BY %s " % # <<<<<<<<<<<<<< * (', '.join(col_names), table, order)).encode('utf-8') * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &stmt, NULL), */ __pyx_t_4 = PyUnicode_Format(__pyx_kp_u_SELECT_s_FROM_s_ORDER_BY_s, __pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 348, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; /* "s3ql/deltadump.pyx":349 * col_names = [ x[0] for x in columns ] * query = ("SELECT %s FROM %s ORDER BY %s " % * (', '.join(col_names), table, order)).encode('utf-8') # <<<<<<<<<<<<<< * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &stmt, NULL), * 
SQLITE_OK, sqlite3_db) */ __pyx_t_11 = PyUnicode_AsUTF8String(((PyObject*)__pyx_t_4)); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 349, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_query = __pyx_t_11; __pyx_t_11 = 0; /* "s3ql/deltadump.pyx":350 * query = ("SELECT %s FROM %s ORDER BY %s " % * (', '.join(col_names), table, order)).encode('utf-8') * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &stmt, NULL), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(stmt), */ __pyx_t_17 = __Pyx_PyObject_AsString(__pyx_v_query); if (unlikely((!__pyx_t_17) && PyErr_Occurred())) __PYX_ERR(0, 350, __pyx_L8_error) /* "s3ql/deltadump.pyx":351 * (', '.join(col_names), table, order)).encode('utf-8') * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &stmt, NULL), * SQLITE_OK, sqlite3_db) # <<<<<<<<<<<<<< * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(stmt), * SQLITE_OK, sqlite3_db)) */ __pyx_t_10 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_prepare_v2(__pyx_cur_scope->__pyx_v_sqlite3_db, __pyx_t_17, -1, (&__pyx_cur_scope->__pyx_v_stmt), NULL), SQLITE_OK, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_10 == -1)) __PYX_ERR(0, 350, __pyx_L8_error) /* "s3ql/deltadump.pyx":352 * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &stmt, NULL), * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(stmt), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db)) * */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_cm, __pyx_n_s_callback); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 352, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_CyFunction_NewEx(&__pyx_mdef_4s3ql_9deltadump_10dump_table_5lambda5, 0, __pyx_n_s_dump_table_locals_lambda, ((PyObject*)__pyx_cur_scope), __pyx_n_s_s3ql_deltadump, __pyx_d, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 352, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } if (!__pyx_t_6) { __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 352, __pyx_L8_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_11); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_3}; __pyx_t_11 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 352, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_3}; __pyx_t_11 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 352, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { __pyx_t_1 = PyTuple_New(1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 352, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_1, 0+1, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_11 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_1, 
NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 352, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; /* "s3ql/deltadump.pyx":355 * SQLITE_OK, sqlite3_db)) * * row_count = db.get_val("SELECT COUNT(rowid) FROM %s" % table) # <<<<<<<<<<<<<< * log.debug('dump_table(%s): writing %d rows', table, row_count) * write_integer(row_count, fp) */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_db, __pyx_n_s_get_val); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 355, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyUnicode_Format(__pyx_kp_u_SELECT_COUNT_rowid_FROM_s, __pyx_v_table); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 355, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } if (!__pyx_t_3) { __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 355, __pyx_L8_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_11); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_t_1}; __pyx_t_11 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 355, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_t_1}; __pyx_t_11 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 355, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 355, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3); __pyx_t_3 = NULL; __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_11 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 355, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_18 = __Pyx_PyInt_As_int64_t(__pyx_t_11); if (unlikely((__pyx_t_18 == ((int64_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 355, __pyx_L8_error) __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_v_row_count = __pyx_t_18; /* "s3ql/deltadump.pyx":356 * * row_count = db.get_val("SELECT COUNT(rowid) FROM %s" % table) * log.debug('dump_table(%s): writing %d rows', table, row_count) # <<<<<<<<<<<<<< * write_integer(row_count, fp) * */ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_log); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 356, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_debug); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 356, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyInt_From_int64_t(__pyx_v_row_count); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 356, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = NULL; __pyx_t_10 = 0; if (CYTHON_UNPACK_METHODS && 
unlikely(PyMethod_Check(__pyx_t_6))) { __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_10 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[4] = {__pyx_t_1, __pyx_kp_u_dump_table_s_writing_d_rows, __pyx_v_table, __pyx_t_4}; __pyx_t_11 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_10, 3+__pyx_t_10); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 356, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[4] = {__pyx_t_1, __pyx_kp_u_dump_table_s_writing_d_rows, __pyx_v_table, __pyx_t_4}; __pyx_t_11 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_10, 3+__pyx_t_10); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 356, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { __pyx_t_3 = PyTuple_New(3+__pyx_t_10); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 356, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_3); if (__pyx_t_1) { __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __pyx_t_1 = NULL; } __Pyx_INCREF(__pyx_kp_u_dump_table_s_writing_d_rows); __Pyx_GIVEREF(__pyx_kp_u_dump_table_s_writing_d_rows); PyTuple_SET_ITEM(__pyx_t_3, 0+__pyx_t_10, __pyx_kp_u_dump_table_s_writing_d_rows); __Pyx_INCREF(__pyx_v_table); __Pyx_GIVEREF(__pyx_v_table); PyTuple_SET_ITEM(__pyx_t_3, 1+__pyx_t_10, __pyx_v_table); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 2+__pyx_t_10, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_11 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_3, NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 356, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; /* "s3ql/deltadump.pyx":357 * row_count = db.get_val("SELECT COUNT(rowid) FROM %s" % table) * log.debug('dump_table(%s): writing %d rows', table, row_count) * write_integer(row_count, fp) # <<<<<<<<<<<<<< * * # Iterate through rows */ __pyx_t_10 = __pyx_f_4s3ql_9deltadump_write_integer(__pyx_v_row_count, __pyx_cur_scope->__pyx_v_fp); if (unlikely(__pyx_t_10 == -1)) __PYX_ERR(0, 357, __pyx_L8_error) /* "s3ql/deltadump.pyx":360 * * # Iterate through rows * while True: # <<<<<<<<<<<<<< * rc = sqlite3_step(stmt) * if rc == SQLITE_DONE: */ while (1) { /* "s3ql/deltadump.pyx":361 * # Iterate through rows * while True: * rc = sqlite3_step(stmt) # <<<<<<<<<<<<<< * if rc == SQLITE_DONE: * break */ __pyx_v_rc = sqlite3_step(__pyx_cur_scope->__pyx_v_stmt); /* "s3ql/deltadump.pyx":362 * while True: * rc = sqlite3_step(stmt) * if rc == SQLITE_DONE: # <<<<<<<<<<<<<< * break * SQLITE_CHECK_RC(rc, SQLITE_ROW, sqlite3_db) */ __pyx_t_2 = ((__pyx_v_rc == SQLITE_DONE) != 0); if (__pyx_t_2) { /* "s3ql/deltadump.pyx":363 * rc = sqlite3_step(stmt) * if rc == SQLITE_DONE: * break # <<<<<<<<<<<<<< * SQLITE_CHECK_RC(rc, SQLITE_ROW, sqlite3_db) * */ goto __pyx_L23_break; /* "s3ql/deltadump.pyx":362 * while True: * rc = sqlite3_step(stmt) * if rc == SQLITE_DONE: # <<<<<<<<<<<<<< * break * SQLITE_CHECK_RC(rc, SQLITE_ROW, sqlite3_db) */ } /* "s3ql/deltadump.pyx":364 * if rc == SQLITE_DONE: * break * SQLITE_CHECK_RC(rc, SQLITE_ROW, sqlite3_db) # <<<<<<<<<<<<<< * * for i in range(col_count): */ 
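/*
 * The row loop below is the core of dump_table()'s delta encoding (cf. the
 * embedded .pyx excerpts above): INTEGER columns are written as the
 * difference to the previous row's value minus the fixed per-column offset
 * col_args[i]; TIME columns are first scaled by time_scale and truncated to
 * int64_t, then delta-encoded the same way; BLOB columns with
 * col_args[i] == 0 store their length as a delta against the previous
 * length followed by the raw bytes, while a non-zero col_args[i] declares a
 * fixed length and only the bytes themselves are written.
 */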
__pyx_t_10 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(__pyx_v_rc, SQLITE_ROW, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_10 == -1)) __PYX_ERR(0, 364, __pyx_L8_error) /* "s3ql/deltadump.pyx":366 * SQLITE_CHECK_RC(rc, SQLITE_ROW, sqlite3_db) * * for i in range(col_count): # <<<<<<<<<<<<<< * if sqlite3_column_type(stmt, i) is SQLITE_NULL: * raise ValueError("Can't dump NULL values") */ __pyx_t_10 = __pyx_v_col_count; for (__pyx_t_19 = 0; __pyx_t_19 < __pyx_t_10; __pyx_t_19+=1) { __pyx_v_i = __pyx_t_19; /* "s3ql/deltadump.pyx":367 * * for i in range(col_count): * if sqlite3_column_type(stmt, i) is SQLITE_NULL: # <<<<<<<<<<<<<< * raise ValueError("Can't dump NULL values") * */ __pyx_t_2 = ((sqlite3_column_type(__pyx_cur_scope->__pyx_v_stmt, __pyx_v_i) == SQLITE_NULL) != 0); if (__pyx_t_2) { /* "s3ql/deltadump.pyx":368 * for i in range(col_count): * if sqlite3_column_type(stmt, i) is SQLITE_NULL: * raise ValueError("Can't dump NULL values") # <<<<<<<<<<<<<< * * if col_types[i] == _INTEGER: */ __pyx_t_11 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 368, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_Raise(__pyx_t_11, 0, 0, 0); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __PYX_ERR(0, 368, __pyx_L8_error) /* "s3ql/deltadump.pyx":367 * * for i in range(col_count): * if sqlite3_column_type(stmt, i) is SQLITE_NULL: # <<<<<<<<<<<<<< * raise ValueError("Can't dump NULL values") * */ } /* "s3ql/deltadump.pyx":370 * raise ValueError("Can't dump NULL values") * * if col_types[i] == _INTEGER: # <<<<<<<<<<<<<< * int64 = sqlite3_column_int64(stmt, i) * tmp = int64 */ __pyx_t_2 = (((__pyx_cur_scope->__pyx_v_col_types[__pyx_v_i]) == __pyx_v_4s3ql_9deltadump__INTEGER) != 0); if (__pyx_t_2) { /* "s3ql/deltadump.pyx":371 * * if col_types[i] == _INTEGER: * int64 = sqlite3_column_int64(stmt, i) # <<<<<<<<<<<<<< * tmp = int64 * int64 -= int64_prev[i] + col_args[i] */ __pyx_v_int64 = sqlite3_column_int64(__pyx_cur_scope->__pyx_v_stmt, __pyx_v_i); /* "s3ql/deltadump.pyx":372 * if col_types[i] == _INTEGER: * int64 = sqlite3_column_int64(stmt, i) * tmp = int64 # <<<<<<<<<<<<<< * int64 -= int64_prev[i] + col_args[i] * int64_prev[i] = tmp */ __pyx_v_tmp = __pyx_v_int64; /* "s3ql/deltadump.pyx":373 * int64 = sqlite3_column_int64(stmt, i) * tmp = int64 * int64 -= int64_prev[i] + col_args[i] # <<<<<<<<<<<<<< * int64_prev[i] = tmp * write_integer(int64, fp) */ __pyx_v_int64 = (__pyx_v_int64 - ((__pyx_cur_scope->__pyx_v_int64_prev[__pyx_v_i]) + (__pyx_cur_scope->__pyx_v_col_args[__pyx_v_i]))); /* "s3ql/deltadump.pyx":374 * tmp = int64 * int64 -= int64_prev[i] + col_args[i] * int64_prev[i] = tmp # <<<<<<<<<<<<<< * write_integer(int64, fp) * */ (__pyx_cur_scope->__pyx_v_int64_prev[__pyx_v_i]) = __pyx_v_tmp; /* "s3ql/deltadump.pyx":375 * int64 -= int64_prev[i] + col_args[i] * int64_prev[i] = tmp * write_integer(int64, fp) # <<<<<<<<<<<<<< * * elif col_types[i] == _TIME: */ __pyx_t_20 = __pyx_f_4s3ql_9deltadump_write_integer(__pyx_v_int64, __pyx_cur_scope->__pyx_v_fp); if (unlikely(__pyx_t_20 == -1)) __PYX_ERR(0, 375, __pyx_L8_error) /* "s3ql/deltadump.pyx":370 * raise ValueError("Can't dump NULL values") * * if col_types[i] == _INTEGER: # <<<<<<<<<<<<<< * int64 = sqlite3_column_int64(stmt, i) * tmp = int64 */ goto __pyx_L28; } /* "s3ql/deltadump.pyx":377 * write_integer(int64, fp) * * elif col_types[i] == _TIME: # <<<<<<<<<<<<<< * int64 = (sqlite3_column_double(stmt, i) * time_scale) * tmp = int64 */ __pyx_t_2 = 
(((__pyx_cur_scope->__pyx_v_col_types[__pyx_v_i]) == __pyx_v_4s3ql_9deltadump__TIME) != 0); if (__pyx_t_2) { /* "s3ql/deltadump.pyx":378 * * elif col_types[i] == _TIME: * int64 = (sqlite3_column_double(stmt, i) * time_scale) # <<<<<<<<<<<<<< * tmp = int64 * int64 -= int64_prev[i] + col_args[i] */ __pyx_v_int64 = ((int64_t)(sqlite3_column_double(__pyx_cur_scope->__pyx_v_stmt, __pyx_v_i) * __pyx_v_4s3ql_9deltadump_time_scale)); /* "s3ql/deltadump.pyx":379 * elif col_types[i] == _TIME: * int64 = (sqlite3_column_double(stmt, i) * time_scale) * tmp = int64 # <<<<<<<<<<<<<< * int64 -= int64_prev[i] + col_args[i] * int64_prev[i] = tmp */ __pyx_v_tmp = __pyx_v_int64; /* "s3ql/deltadump.pyx":380 * int64 = (sqlite3_column_double(stmt, i) * time_scale) * tmp = int64 * int64 -= int64_prev[i] + col_args[i] # <<<<<<<<<<<<<< * int64_prev[i] = tmp * write_integer(int64, fp) */ __pyx_v_int64 = (__pyx_v_int64 - ((__pyx_cur_scope->__pyx_v_int64_prev[__pyx_v_i]) + (__pyx_cur_scope->__pyx_v_col_args[__pyx_v_i]))); /* "s3ql/deltadump.pyx":381 * tmp = int64 * int64 -= int64_prev[i] + col_args[i] * int64_prev[i] = tmp # <<<<<<<<<<<<<< * write_integer(int64, fp) * */ (__pyx_cur_scope->__pyx_v_int64_prev[__pyx_v_i]) = __pyx_v_tmp; /* "s3ql/deltadump.pyx":382 * int64 -= int64_prev[i] + col_args[i] * int64_prev[i] = tmp * write_integer(int64, fp) # <<<<<<<<<<<<<< * * elif col_types[i] == _BLOB: */ __pyx_t_20 = __pyx_f_4s3ql_9deltadump_write_integer(__pyx_v_int64, __pyx_cur_scope->__pyx_v_fp); if (unlikely(__pyx_t_20 == -1)) __PYX_ERR(0, 382, __pyx_L8_error) /* "s3ql/deltadump.pyx":377 * write_integer(int64, fp) * * elif col_types[i] == _TIME: # <<<<<<<<<<<<<< * int64 = (sqlite3_column_double(stmt, i) * time_scale) * tmp = int64 */ goto __pyx_L28; } /* "s3ql/deltadump.pyx":384 * write_integer(int64, fp) * * elif col_types[i] == _BLOB: # <<<<<<<<<<<<<< * buf = sqlite3_column_blob(stmt, i) * rc = sqlite3_column_bytes(stmt, i) */ __pyx_t_2 = (((__pyx_cur_scope->__pyx_v_col_types[__pyx_v_i]) == __pyx_v_4s3ql_9deltadump__BLOB) != 0); if (__pyx_t_2) { /* "s3ql/deltadump.pyx":385 * * elif col_types[i] == _BLOB: * buf = sqlite3_column_blob(stmt, i) # <<<<<<<<<<<<<< * rc = sqlite3_column_bytes(stmt, i) * if rc > MAX_BLOB_SIZE: */ __pyx_v_buf = sqlite3_column_blob(__pyx_cur_scope->__pyx_v_stmt, __pyx_v_i); /* "s3ql/deltadump.pyx":386 * elif col_types[i] == _BLOB: * buf = sqlite3_column_blob(stmt, i) * rc = sqlite3_column_bytes(stmt, i) # <<<<<<<<<<<<<< * if rc > MAX_BLOB_SIZE: * raise ValueError('Can not dump BLOB of size %d (max: %d)', */ __pyx_v_rc = sqlite3_column_bytes(__pyx_cur_scope->__pyx_v_stmt, __pyx_v_i); /* "s3ql/deltadump.pyx":387 * buf = sqlite3_column_blob(stmt, i) * rc = sqlite3_column_bytes(stmt, i) * if rc > MAX_BLOB_SIZE: # <<<<<<<<<<<<<< * raise ValueError('Can not dump BLOB of size %d (max: %d)', * rc, MAX_BLOB_SIZE) */ __pyx_t_11 = __Pyx_PyInt_From_int(__pyx_v_rc); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 387, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_MAX_BLOB_SIZE); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 387, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_3 = PyObject_RichCompare(__pyx_t_11, __pyx_t_6, Py_GT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 387, __pyx_L8_error) __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 387, __pyx_L8_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_2) { /* 
"s3ql/deltadump.pyx":389 * if rc > MAX_BLOB_SIZE: * raise ValueError('Can not dump BLOB of size %d (max: %d)', * rc, MAX_BLOB_SIZE) # <<<<<<<<<<<<<< * # Safe to cast now * len_ = rc */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_rc); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 389, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_MAX_BLOB_SIZE); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 389, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_6); /* "s3ql/deltadump.pyx":388 * rc = sqlite3_column_bytes(stmt, i) * if rc > MAX_BLOB_SIZE: * raise ValueError('Can not dump BLOB of size %d (max: %d)', # <<<<<<<<<<<<<< * rc, MAX_BLOB_SIZE) * # Safe to cast now */ __pyx_t_11 = PyTuple_New(3); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 388, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_INCREF(__pyx_kp_u_Can_not_dump_BLOB_of_size_d_max); __Pyx_GIVEREF(__pyx_kp_u_Can_not_dump_BLOB_of_size_d_max); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_kp_u_Can_not_dump_BLOB_of_size_d_max); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_11, 2, __pyx_t_6); __pyx_t_3 = 0; __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_11, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 388, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(0, 388, __pyx_L8_error) /* "s3ql/deltadump.pyx":387 * buf = sqlite3_column_blob(stmt, i) * rc = sqlite3_column_bytes(stmt, i) * if rc > MAX_BLOB_SIZE: # <<<<<<<<<<<<<< * raise ValueError('Can not dump BLOB of size %d (max: %d)', * rc, MAX_BLOB_SIZE) */ } /* "s3ql/deltadump.pyx":391 * rc, MAX_BLOB_SIZE) * # Safe to cast now * len_ = rc # <<<<<<<<<<<<<< * if col_args[i] == 0: * write_integer(rc - int64_prev[i], fp) */ __pyx_v_len_ = ((size_t)__pyx_v_rc); /* "s3ql/deltadump.pyx":392 * # Safe to cast now * len_ = rc * if col_args[i] == 0: # <<<<<<<<<<<<<< * write_integer(rc - int64_prev[i], fp) * int64_prev[i] = rc */ __pyx_t_2 = (((__pyx_cur_scope->__pyx_v_col_args[__pyx_v_i]) == 0) != 0); if (__pyx_t_2) { /* "s3ql/deltadump.pyx":393 * len_ = rc * if col_args[i] == 0: * write_integer(rc - int64_prev[i], fp) # <<<<<<<<<<<<<< * int64_prev[i] = rc * elif rc != col_args[i]: */ __pyx_t_20 = __pyx_f_4s3ql_9deltadump_write_integer((__pyx_v_rc - (__pyx_cur_scope->__pyx_v_int64_prev[__pyx_v_i])), __pyx_cur_scope->__pyx_v_fp); if (unlikely(__pyx_t_20 == -1)) __PYX_ERR(0, 393, __pyx_L8_error) /* "s3ql/deltadump.pyx":394 * if col_args[i] == 0: * write_integer(rc - int64_prev[i], fp) * int64_prev[i] = rc # <<<<<<<<<<<<<< * elif rc != col_args[i]: * raise ValueError("Length %d != %d in column %d" % (len_, col_args[i], i)) */ (__pyx_cur_scope->__pyx_v_int64_prev[__pyx_v_i]) = __pyx_v_rc; /* "s3ql/deltadump.pyx":392 * # Safe to cast now * len_ = rc * if col_args[i] == 0: # <<<<<<<<<<<<<< * write_integer(rc - int64_prev[i], fp) * int64_prev[i] = rc */ goto __pyx_L30; } /* "s3ql/deltadump.pyx":395 * write_integer(rc - int64_prev[i], fp) * int64_prev[i] = rc * elif rc != col_args[i]: # <<<<<<<<<<<<<< * raise ValueError("Length %d != %d in column %d" % (len_, col_args[i], i)) * */ __pyx_t_2 = ((__pyx_v_rc != (__pyx_cur_scope->__pyx_v_col_args[__pyx_v_i])) != 0); if (__pyx_t_2) { /* "s3ql/deltadump.pyx":396 * int64_prev[i] = rc * elif rc != col_args[i]: * raise ValueError("Length %d != %d in column %d" % (len_, col_args[i], i)) # <<<<<<<<<<<<<< * * if len_ != 0: */ __pyx_t_6 = 
__Pyx_PyInt_FromSize_t(__pyx_v_len_); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 396, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_11 = __Pyx_PyInt_From_int((__pyx_cur_scope->__pyx_v_col_args[__pyx_v_i])); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 396, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 396, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 396, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_11); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __pyx_t_6 = 0; __pyx_t_11 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_Length_d_d_in_column_d, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 396, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 396, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 396, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 396, __pyx_L8_error) /* "s3ql/deltadump.pyx":395 * write_integer(rc - int64_prev[i], fp) * int64_prev[i] = rc * elif rc != col_args[i]: # <<<<<<<<<<<<<< * raise ValueError("Length %d != %d in column %d" % (len_, col_args[i], i)) * */ } __pyx_L30:; /* "s3ql/deltadump.pyx":398 * raise ValueError("Length %d != %d in column %d" % (len_, col_args[i], i)) * * if len_ != 0: # <<<<<<<<<<<<<< * fwrite(buf, len_, fp) * */ __pyx_t_2 = ((__pyx_v_len_ != 0) != 0); if (__pyx_t_2) { /* "s3ql/deltadump.pyx":399 * * if len_ != 0: * fwrite(buf, len_, fp) # <<<<<<<<<<<<<< * * def load_table(table, columns, db, fh, trx_rows=5000): */ __pyx_t_20 = __pyx_f_4s3ql_9deltadump_fwrite(__pyx_v_buf, __pyx_v_len_, __pyx_cur_scope->__pyx_v_fp); if (unlikely(__pyx_t_20 == -1)) __PYX_ERR(0, 399, __pyx_L8_error) /* "s3ql/deltadump.pyx":398 * raise ValueError("Length %d != %d in column %d" % (len_, col_args[i], i)) * * if len_ != 0: # <<<<<<<<<<<<<< * fwrite(buf, len_, fp) * */ } /* "s3ql/deltadump.pyx":384 * write_integer(int64, fp) * * elif col_types[i] == _BLOB: # <<<<<<<<<<<<<< * buf = sqlite3_column_blob(stmt, i) * rc = sqlite3_column_bytes(stmt, i) */ } __pyx_L28:; } } __pyx_L23_break:; /* "s3ql/deltadump.pyx":321 * raise ValueError("Can't access in-memory databases") * * with ExitStack() as cm: # <<<<<<<<<<<<<< * # Get SQLite connection * log.debug('Opening connection to %s', db.file) */ } __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L15_try_end; __pyx_L8_error:; __Pyx_PyThreadState_assign __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; /*except:*/ { __Pyx_AddTraceback("s3ql.deltadump.dump_table", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_3, &__pyx_t_4, &__pyx_t_11) < 0) __PYX_ERR(0, 321, __pyx_L10_except_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GOTREF(__pyx_t_4); __Pyx_GOTREF(__pyx_t_11); 
__pyx_t_6 = PyTuple_Pack(3, __pyx_t_3, __pyx_t_4, __pyx_t_11); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 321, __pyx_L10_except_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_21 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_6, NULL); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_21)) __PYX_ERR(0, 321, __pyx_L10_except_error) __Pyx_GOTREF(__pyx_t_21); __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_21); __Pyx_DECREF(__pyx_t_21); __pyx_t_21 = 0; if (__pyx_t_2 < 0) __PYX_ERR(0, 321, __pyx_L10_except_error) __pyx_t_22 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_22) { __Pyx_GIVEREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_ErrRestoreWithState(__pyx_t_3, __pyx_t_4, __pyx_t_11); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_11 = 0; __PYX_ERR(0, 321, __pyx_L10_except_error) } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; goto __pyx_L9_exception_handled; } __pyx_L10_except_error:; __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_XGIVEREF(__pyx_t_9); __Pyx_ExceptionReset(__pyx_t_7, __pyx_t_8, __pyx_t_9); goto __pyx_L1_error; __pyx_L9_exception_handled:; __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_XGIVEREF(__pyx_t_9); __Pyx_ExceptionReset(__pyx_t_7, __pyx_t_8, __pyx_t_9); __pyx_L15_try_end:; } } /*finally:*/ { /*normal exit:*/{ if (__pyx_t_5) { __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_tuple__6, NULL); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 321, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } goto __pyx_L7; } __pyx_L7:; } goto __pyx_L35; __pyx_L4_error:; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L1_error; __pyx_L35:; } /* "s3ql/deltadump.pyx":280 * s3ql_sqlite_options - apsw_sqlite_options)) * * def dump_table(table, order, columns, db, fh): # <<<<<<<<<<<<<< * '''Dump *columns* of *table* into *fh* * */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("s3ql.deltadump.dump_table", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_cm); __Pyx_XDECREF(__pyx_v_dbfile_b); __Pyx_XDECREF(__pyx_v_col_names); __Pyx_XDECREF(__pyx_v_query); __Pyx_DECREF(((PyObject *)__pyx_cur_scope)); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":401 * fwrite(buf, len_, fp) * * def load_table(table, columns, db, fh, trx_rows=5000): # <<<<<<<<<<<<<< * '''Load *columns* of *table* from *fh* * */ /* Python wrapper */ static PyObject *__pyx_pw_4s3ql_9deltadump_5load_table(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4s3ql_9deltadump_4load_table[] = "load_table(table, columns, db, fh, trx_rows=5000)\nLoad *columns* of *table* from *fh*\n\n *db* is an `s3ql.Connection` instance for the database.\n\n *columns* must be the same list of 3-tuples that was passed to\n `dump_table` when creating the dump stored in *fh*.\n\n This function will open a separate connection to the database, so\n the *db* connection should not be in EXCLUSIVE locking mode.\n (Using a separate connection avoids the requirement on the *apsw*\n and *deltadump* modules be linked against against binary\n compatible SQLite 
libraries).\n\n When writing into the table, a new transaction will be started\n every *trx_rows* rows.\n "; static PyMethodDef __pyx_mdef_4s3ql_9deltadump_5load_table = {"load_table", (PyCFunction)__pyx_pw_4s3ql_9deltadump_5load_table, METH_VARARGS|METH_KEYWORDS, __pyx_doc_4s3ql_9deltadump_4load_table}; static PyObject *__pyx_pw_4s3ql_9deltadump_5load_table(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_table = 0; PyObject *__pyx_v_columns = 0; PyObject *__pyx_v_db = 0; PyObject *__pyx_v_fh = 0; PyObject *__pyx_v_trx_rows = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("load_table (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_table,&__pyx_n_s_columns,&__pyx_n_s_db,&__pyx_n_s_fh,&__pyx_n_s_trx_rows,0}; PyObject* values[5] = {0,0,0,0,0}; values[4] = ((PyObject *)__pyx_int_5000); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_table)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_columns)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("load_table", 0, 4, 5, 1); __PYX_ERR(0, 401, __pyx_L3_error) } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_db)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("load_table", 0, 4, 5, 2); __PYX_ERR(0, 401, __pyx_L3_error) } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_fh)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("load_table", 0, 4, 5, 3); __PYX_ERR(0, 401, __pyx_L3_error) } case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_trx_rows); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "load_table") < 0)) __PYX_ERR(0, 401, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_table = values[0]; __pyx_v_columns = values[1]; __pyx_v_db = values[2]; __pyx_v_fh = values[3]; __pyx_v_trx_rows = values[4]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("load_table", 0, 4, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 401, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("s3ql.deltadump.load_table", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4s3ql_9deltadump_4load_table(__pyx_self, __pyx_v_table, __pyx_v_columns, __pyx_v_db, __pyx_v_fh, __pyx_v_trx_rows); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":441 * SQLITE_OPEN_READWRITE, NULL), * SQLITE_OK, sqlite3_db) * 
cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_close(sqlite3_db), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db)) * SQLITE_CHECK_RC(sqlite3_extended_result_codes(sqlite3_db, 1), */ /* Python wrapper */ static PyObject *__pyx_pw_4s3ql_9deltadump_10load_table_lambda6(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyMethodDef __pyx_mdef_4s3ql_9deltadump_10load_table_lambda6 = {"lambda6", (PyCFunction)__pyx_pw_4s3ql_9deltadump_10load_table_lambda6, METH_NOARGS, 0}; static PyObject *__pyx_pw_4s3ql_9deltadump_10load_table_lambda6(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("lambda6 (wrapper)", 0); __pyx_r = __pyx_lambda_funcdef_lambda6(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_lambda_funcdef_lambda6(PyObject *__pyx_self) { struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *__pyx_cur_scope; struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *__pyx_outer_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("lambda6", 0); __pyx_outer_scope = (struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *) __Pyx_CyFunction_GetClosure(__pyx_self); __pyx_cur_scope = __pyx_outer_scope; __Pyx_XDECREF(__pyx_r); /* "s3ql/deltadump.pyx":442 * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_close(sqlite3_db), * SQLITE_OK, sqlite3_db)) # <<<<<<<<<<<<<< * SQLITE_CHECK_RC(sqlite3_extended_result_codes(sqlite3_db, 1), * SQLITE_OK, sqlite3_db) */ __pyx_t_1 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_close(__pyx_cur_scope->__pyx_v_sqlite3_db), SQLITE_OK, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_1 == -1)) __PYX_ERR(0, 441, __pyx_L1_error) /* "s3ql/deltadump.pyx":441 * SQLITE_OPEN_READWRITE, NULL), * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_close(sqlite3_db), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db)) * SQLITE_CHECK_RC(sqlite3_extended_result_codes(sqlite3_db, 1), */ __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 441, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("s3ql.deltadump.load_table.lambda6", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":462 * # Get FILE* for buffered reading from *fh* * fp = dup_to_fp(fh, b'rb') * cm.callback(lambda: fclose(fp)) # <<<<<<<<<<<<<< * * # Allocate col_args and col_types */ /* Python wrapper */ static PyObject *__pyx_pw_4s3ql_9deltadump_10load_table_1lambda7(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyMethodDef __pyx_mdef_4s3ql_9deltadump_10load_table_1lambda7 = {"lambda7", (PyCFunction)__pyx_pw_4s3ql_9deltadump_10load_table_1lambda7, METH_NOARGS, 0}; static PyObject *__pyx_pw_4s3ql_9deltadump_10load_table_1lambda7(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("lambda7 (wrapper)", 0); __pyx_r = __pyx_lambda_funcdef_lambda7(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_lambda_funcdef_lambda7(PyObject *__pyx_self) { struct 
__pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *__pyx_cur_scope; struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *__pyx_outer_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("lambda7", 0); __pyx_outer_scope = (struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *) __Pyx_CyFunction_GetClosure(__pyx_self); __pyx_cur_scope = __pyx_outer_scope; __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_f_4s3ql_9deltadump_fclose(__pyx_cur_scope->__pyx_v_fp); if (unlikely(__pyx_t_1 == -1)) __PYX_ERR(0, 462, __pyx_L1_error) __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 462, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("s3ql.deltadump.load_table.lambda7", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":466 * # Allocate col_args and col_types * col_count = prep_columns(columns, &col_types, &col_args) * cm.callback(lambda: free(col_args)) # <<<<<<<<<<<<<< * cm.callback(lambda: free(col_types)) * */ /* Python wrapper */ static PyObject *__pyx_pw_4s3ql_9deltadump_10load_table_2lambda8(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyMethodDef __pyx_mdef_4s3ql_9deltadump_10load_table_2lambda8 = {"lambda8", (PyCFunction)__pyx_pw_4s3ql_9deltadump_10load_table_2lambda8, METH_NOARGS, 0}; static PyObject *__pyx_pw_4s3ql_9deltadump_10load_table_2lambda8(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("lambda8 (wrapper)", 0); __pyx_r = __pyx_lambda_funcdef_lambda8(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_lambda_funcdef_lambda8(PyObject *__pyx_self) { struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *__pyx_cur_scope; struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *__pyx_outer_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("lambda8", 0); __pyx_outer_scope = (struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *) __Pyx_CyFunction_GetClosure(__pyx_self); __pyx_cur_scope = __pyx_outer_scope; __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_f_4s3ql_9deltadump_free(__pyx_cur_scope->__pyx_v_col_args); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 466, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("s3ql.deltadump.load_table.lambda8", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":467 * col_count = prep_columns(columns, &col_types, &col_args) * cm.callback(lambda: free(col_args)) * cm.callback(lambda: free(col_types)) # <<<<<<<<<<<<<< * * # Allocate int64_prev */ /* Python wrapper */ static PyObject *__pyx_pw_4s3ql_9deltadump_10load_table_3lambda9(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyMethodDef __pyx_mdef_4s3ql_9deltadump_10load_table_3lambda9 = {"lambda9", (PyCFunction)__pyx_pw_4s3ql_9deltadump_10load_table_3lambda9, METH_NOARGS, 0}; static PyObject 
*__pyx_pw_4s3ql_9deltadump_10load_table_3lambda9(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("lambda9 (wrapper)", 0); __pyx_r = __pyx_lambda_funcdef_lambda9(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_lambda_funcdef_lambda9(PyObject *__pyx_self) { struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *__pyx_cur_scope; struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *__pyx_outer_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("lambda9", 0); __pyx_outer_scope = (struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *) __Pyx_CyFunction_GetClosure(__pyx_self); __pyx_cur_scope = __pyx_outer_scope; __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_f_4s3ql_9deltadump_free(__pyx_cur_scope->__pyx_v_col_types); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 467, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("s3ql.deltadump.load_table.lambda9", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":471 * # Allocate int64_prev * int64_prev = calloc( len(columns), sizeof(int64_t)) * cm.callback(lambda: free(int64_prev)) # <<<<<<<<<<<<<< * * # Prepare INSERT statement */ /* Python wrapper */ static PyObject *__pyx_pw_4s3ql_9deltadump_10load_table_4lambda10(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyMethodDef __pyx_mdef_4s3ql_9deltadump_10load_table_4lambda10 = {"lambda10", (PyCFunction)__pyx_pw_4s3ql_9deltadump_10load_table_4lambda10, METH_NOARGS, 0}; static PyObject *__pyx_pw_4s3ql_9deltadump_10load_table_4lambda10(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("lambda10 (wrapper)", 0); __pyx_r = __pyx_lambda_funcdef_lambda10(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_lambda_funcdef_lambda10(PyObject *__pyx_self) { struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *__pyx_cur_scope; struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *__pyx_outer_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("lambda10", 0); __pyx_outer_scope = (struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *) __Pyx_CyFunction_GetClosure(__pyx_self); __pyx_cur_scope = __pyx_outer_scope; __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_f_4s3ql_9deltadump_free(__pyx_cur_scope->__pyx_v_int64_prev); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 471, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("s3ql.deltadump.load_table.lambda10", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":480 * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &stmt, NULL), * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(stmt), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db)) * */ /* Python wrapper */ static 
PyObject *__pyx_pw_4s3ql_9deltadump_10load_table_5lambda11(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyMethodDef __pyx_mdef_4s3ql_9deltadump_10load_table_5lambda11 = {"lambda11", (PyCFunction)__pyx_pw_4s3ql_9deltadump_10load_table_5lambda11, METH_NOARGS, 0}; static PyObject *__pyx_pw_4s3ql_9deltadump_10load_table_5lambda11(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("lambda11 (wrapper)", 0); __pyx_r = __pyx_lambda_funcdef_lambda11(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_lambda_funcdef_lambda11(PyObject *__pyx_self) { struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *__pyx_cur_scope; struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *__pyx_outer_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("lambda11", 0); __pyx_outer_scope = (struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *) __Pyx_CyFunction_GetClosure(__pyx_self); __pyx_cur_scope = __pyx_outer_scope; __Pyx_XDECREF(__pyx_r); /* "s3ql/deltadump.pyx":481 * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(stmt), * SQLITE_OK, sqlite3_db)) # <<<<<<<<<<<<<< * * # Prepare BEGIN statement */ __pyx_t_1 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_finalize(__pyx_cur_scope->__pyx_v_stmt), SQLITE_OK, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_1 == -1)) __PYX_ERR(0, 480, __pyx_L1_error) /* "s3ql/deltadump.pyx":480 * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &stmt, NULL), * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(stmt), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db)) * */ __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 480, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("s3ql.deltadump.load_table.lambda11", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":487 * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &begin_stmt, NULL), * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(begin_stmt), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db)) * */ /* Python wrapper */ static PyObject *__pyx_pw_4s3ql_9deltadump_10load_table_6lambda12(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyMethodDef __pyx_mdef_4s3ql_9deltadump_10load_table_6lambda12 = {"lambda12", (PyCFunction)__pyx_pw_4s3ql_9deltadump_10load_table_6lambda12, METH_NOARGS, 0}; static PyObject *__pyx_pw_4s3ql_9deltadump_10load_table_6lambda12(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("lambda12 (wrapper)", 0); __pyx_r = __pyx_lambda_funcdef_lambda12(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_lambda_funcdef_lambda12(PyObject *__pyx_self) { struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *__pyx_cur_scope; struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *__pyx_outer_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject 
*__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("lambda12", 0); __pyx_outer_scope = (struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *) __Pyx_CyFunction_GetClosure(__pyx_self); __pyx_cur_scope = __pyx_outer_scope; __Pyx_XDECREF(__pyx_r); /* "s3ql/deltadump.pyx":488 * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(begin_stmt), * SQLITE_OK, sqlite3_db)) # <<<<<<<<<<<<<< * * # Prepare COMMIT statement */ __pyx_t_1 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_finalize(__pyx_cur_scope->__pyx_v_begin_stmt), SQLITE_OK, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_1 == -1)) __PYX_ERR(0, 487, __pyx_L1_error) /* "s3ql/deltadump.pyx":487 * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &begin_stmt, NULL), * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(begin_stmt), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db)) * */ __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("s3ql.deltadump.load_table.lambda12", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":494 * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &commit_stmt, NULL), * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(commit_stmt), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db)) * */ /* Python wrapper */ static PyObject *__pyx_pw_4s3ql_9deltadump_10load_table_7lambda13(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyMethodDef __pyx_mdef_4s3ql_9deltadump_10load_table_7lambda13 = {"lambda13", (PyCFunction)__pyx_pw_4s3ql_9deltadump_10load_table_7lambda13, METH_NOARGS, 0}; static PyObject *__pyx_pw_4s3ql_9deltadump_10load_table_7lambda13(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("lambda13 (wrapper)", 0); __pyx_r = __pyx_lambda_funcdef_lambda13(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_lambda_funcdef_lambda13(PyObject *__pyx_self) { struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *__pyx_cur_scope; struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *__pyx_outer_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("lambda13", 0); __pyx_outer_scope = (struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *) __Pyx_CyFunction_GetClosure(__pyx_self); __pyx_cur_scope = __pyx_outer_scope; __Pyx_XDECREF(__pyx_r); /* "s3ql/deltadump.pyx":495 * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(commit_stmt), * SQLITE_OK, sqlite3_db)) # <<<<<<<<<<<<<< * * buf = calloc(MAX_BLOB_SIZE, 1) */ __pyx_t_1 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_finalize(__pyx_cur_scope->__pyx_v_commit_stmt), SQLITE_OK, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_1 == -1)) __PYX_ERR(0, 494, __pyx_L1_error) /* "s3ql/deltadump.pyx":494 * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &commit_stmt, NULL), * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(commit_stmt), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db)) * */ __pyx_t_2 
= __Pyx_PyInt_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 494, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("s3ql.deltadump.load_table.lambda13", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":498 * * buf = calloc(MAX_BLOB_SIZE, 1) * cm.callback(lambda: free(buf)) # <<<<<<<<<<<<<< * read_integer(&row_count, fp) * log.debug('load_table(%s): reading %d rows', table, row_count) */ /* Python wrapper */ static PyObject *__pyx_pw_4s3ql_9deltadump_10load_table_8lambda14(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyMethodDef __pyx_mdef_4s3ql_9deltadump_10load_table_8lambda14 = {"lambda14", (PyCFunction)__pyx_pw_4s3ql_9deltadump_10load_table_8lambda14, METH_NOARGS, 0}; static PyObject *__pyx_pw_4s3ql_9deltadump_10load_table_8lambda14(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("lambda14 (wrapper)", 0); __pyx_r = __pyx_lambda_funcdef_lambda14(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_lambda_funcdef_lambda14(PyObject *__pyx_self) { struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *__pyx_cur_scope; struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *__pyx_outer_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("lambda14", 0); __pyx_outer_scope = (struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *) __Pyx_CyFunction_GetClosure(__pyx_self); __pyx_cur_scope = __pyx_outer_scope; __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_f_4s3ql_9deltadump_free(__pyx_cur_scope->__pyx_v_buf); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 498, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("s3ql.deltadump.load_table.lambda14", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":504 * # Start transaction * SQLITE_CHECK_RC(sqlite3_step(begin_stmt), SQLITE_DONE, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_step(commit_stmt), # <<<<<<<<<<<<<< * SQLITE_DONE, sqlite3_db)) * SQLITE_CHECK_RC(sqlite3_reset(begin_stmt), SQLITE_OK, sqlite3_db) */ /* Python wrapper */ static PyObject *__pyx_pw_4s3ql_9deltadump_10load_table_9lambda15(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyMethodDef __pyx_mdef_4s3ql_9deltadump_10load_table_9lambda15 = {"lambda15", (PyCFunction)__pyx_pw_4s3ql_9deltadump_10load_table_9lambda15, METH_NOARGS, 0}; static PyObject *__pyx_pw_4s3ql_9deltadump_10load_table_9lambda15(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("lambda15 (wrapper)", 0); __pyx_r = __pyx_lambda_funcdef_lambda15(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_lambda_funcdef_lambda15(PyObject *__pyx_self) { struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *__pyx_cur_scope; struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table 
*__pyx_outer_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("lambda15", 0); __pyx_outer_scope = (struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *) __Pyx_CyFunction_GetClosure(__pyx_self); __pyx_cur_scope = __pyx_outer_scope; __Pyx_XDECREF(__pyx_r); /* "s3ql/deltadump.pyx":505 * SQLITE_CHECK_RC(sqlite3_step(begin_stmt), SQLITE_DONE, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_step(commit_stmt), * SQLITE_DONE, sqlite3_db)) # <<<<<<<<<<<<<< * SQLITE_CHECK_RC(sqlite3_reset(begin_stmt), SQLITE_OK, sqlite3_db) * */ __pyx_t_1 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_step(__pyx_cur_scope->__pyx_v_commit_stmt), SQLITE_DONE, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_1 == -1)) __PYX_ERR(0, 504, __pyx_L1_error) /* "s3ql/deltadump.pyx":504 * # Start transaction * SQLITE_CHECK_RC(sqlite3_step(begin_stmt), SQLITE_DONE, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_step(commit_stmt), # <<<<<<<<<<<<<< * SQLITE_DONE, sqlite3_db)) * SQLITE_CHECK_RC(sqlite3_reset(begin_stmt), SQLITE_OK, sqlite3_db) */ __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("s3ql.deltadump.load_table.lambda15", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":401 * fwrite(buf, len_, fp) * * def load_table(table, columns, db, fh, trx_rows=5000): # <<<<<<<<<<<<<< * '''Load *columns* of *table* from *fh* * */ static PyObject *__pyx_pf_4s3ql_9deltadump_4load_table(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_table, PyObject *__pyx_v_columns, PyObject *__pyx_v_db, PyObject *__pyx_v_fh, PyObject *__pyx_v_trx_rows) { struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *__pyx_cur_scope; int __pyx_v_col_count; int __pyx_v_rc; int __pyx_v_len_; int __pyx_v_i; int __pyx_v_j; int64_t __pyx_v_row_count; int64_t __pyx_v_int64; int64_t __pyx_v_tmp; PyObject *__pyx_v_cm = NULL; PyObject *__pyx_v_dbfile_b = NULL; PyObject *__pyx_v_pragma = NULL; PyObject *__pyx_v_val = NULL; PyObject *__pyx_v_cmd = NULL; PyObject *__pyx_v_col_names = NULL; PyObject *__pyx_v_query = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; const char *__pyx_t_12; Py_ssize_t __pyx_t_13; PyObject *__pyx_t_14 = NULL; char *__pyx_t_15; int __pyx_t_16; char const *__pyx_t_17; PyObject *__pyx_t_18 = NULL; PyObject *__pyx_t_19 = NULL; PyObject *__pyx_t_20 = NULL; PyObject *__pyx_t_21 = NULL; PyObject *__pyx_t_22 = NULL; PyObject *__pyx_t_23 = NULL; int __pyx_t_24; FILE *__pyx_t_25; void *__pyx_t_26; PyObject *(*__pyx_t_27)(PyObject *); size_t __pyx_t_28; int64_t __pyx_t_29; int __pyx_t_30; int __pyx_t_31; __Pyx_RefNannySetupContext("load_table", 0); __pyx_cur_scope = (struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *)__pyx_tp_new_4s3ql_9deltadump___pyx_scope_struct_1_load_table(__pyx_ptype_4s3ql_9deltadump___pyx_scope_struct_1_load_table, __pyx_empty_tuple, NULL); if 
(unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 401, __pyx_L1_error) } else { __Pyx_GOTREF(__pyx_cur_scope); } /* "s3ql/deltadump.pyx":431 * cdef int64_t row_count, int64, tmp * * if db.file == ':memory:': # <<<<<<<<<<<<<< * raise ValueError("Can't access in-memory databases") * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_db, __pyx_n_s_file); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 431, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_t_1, __pyx_kp_u_memory, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 431, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_2) { /* "s3ql/deltadump.pyx":432 * * if db.file == ':memory:': * raise ValueError("Can't access in-memory databases") # <<<<<<<<<<<<<< * * with ExitStack() as cm: */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 432, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 432, __pyx_L1_error) /* "s3ql/deltadump.pyx":431 * cdef int64_t row_count, int64, tmp * * if db.file == ':memory:': # <<<<<<<<<<<<<< * raise ValueError("Can't access in-memory databases") * */ } /* "s3ql/deltadump.pyx":434 * raise ValueError("Can't access in-memory databases") * * with ExitStack() as cm: # <<<<<<<<<<<<<< * # Get SQLite connection * log.debug('Opening connection to %s', db.file) */ /*with:*/ { __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_ExitStack); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 434, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } if (__pyx_t_4) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 434, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else { __pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 434, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_5 = __Pyx_PyObject_LookupSpecial(__pyx_t_1, __pyx_n_s_exit); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 434, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyObject_LookupSpecial(__pyx_t_1, __pyx_n_s_enter); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } if (__pyx_t_6) { __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 434, __pyx_L4_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else { __pyx_t_3 = __Pyx_PyObject_CallNoArg(__pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 434, __pyx_L4_error) } __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __pyx_t_3; __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*try:*/ { { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); 
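/* Note on the cleanup pattern used throughout load_table: the .pyx source
 * enters `with ExitStack() as cm:` and registers every teardown action that
 * appears further down (sqlite3_close, fclose(fp), free(buf), free(col_args),
 * free(col_types), free(int64_prev), finalizing the prepared statements, and
 * stepping the COMMIT statement) via `cm.callback(lambda: ...)`, so the
 * callbacks run in reverse registration order on any exit from the block,
 * normal or exceptional.  A minimal standalone sketch of the idiom
 * (illustrative Python only, not the deltadump code; the file name is
 * hypothetical):
 *
 *     from contextlib import ExitStack
 *
 *     def sketch(path='dump.bin'):
 *         with ExitStack() as cm:
 *             fh = open(path, 'rb')      # acquire a resource
 *             cm.callback(fh.close)      # callbacks fire LIFO when the
 *                                        # with-block exits, even on error
 *             return fh.read(16)         # ... use the resource ...
 */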
__Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); /*try:*/ { __pyx_v_cm = __pyx_t_4; __pyx_t_4 = 0; /* "s3ql/deltadump.pyx":436 * with ExitStack() as cm: * # Get SQLite connection * log.debug('Opening connection to %s', db.file) # <<<<<<<<<<<<<< * dbfile_b = db.file.encode(sys.getfilesystemencoding(), 'surrogateescape') * SQLITE_CHECK_RC(sqlite3_open_v2(dbfile_b, &sqlite3_db, */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_log); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 436, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_debug); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 436, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_db, __pyx_n_s_file); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 436, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = NULL; __pyx_t_10 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); __pyx_t_10 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_kp_u_Opening_connection_to_s, __pyx_t_1}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_10, 2+__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 436, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_kp_u_Opening_connection_to_s, __pyx_t_1}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_10, 2+__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 436, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_11 = PyTuple_New(2+__pyx_t_10); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 436, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_6); __pyx_t_6 = NULL; } __Pyx_INCREF(__pyx_kp_u_Opening_connection_to_s); __Pyx_GIVEREF(__pyx_kp_u_Opening_connection_to_s); PyTuple_SET_ITEM(__pyx_t_11, 0+__pyx_t_10, __pyx_kp_u_Opening_connection_to_s); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_11, 1+__pyx_t_10, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_11, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 436, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "s3ql/deltadump.pyx":437 * # Get SQLite connection * log.debug('Opening connection to %s', db.file) * dbfile_b = db.file.encode(sys.getfilesystemencoding(), 'surrogateescape') # <<<<<<<<<<<<<< * SQLITE_CHECK_RC(sqlite3_open_v2(dbfile_b, &sqlite3_db, * SQLITE_OPEN_READWRITE, NULL), */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_db, __pyx_n_s_file); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 437, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_encode); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 437, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_sys); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 437, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_getfilesystemencoding); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 437, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) { __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); } } if (__pyx_t_1) { __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 437, __pyx_L8_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { __pyx_t_3 = __Pyx_PyObject_CallNoArg(__pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 437, __pyx_L8_error) } __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; __pyx_t_10 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_11))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_11); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_11, function); __pyx_t_10 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_t_3, __pyx_n_u_surrogateescape}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_11, __pyx_temp+1-__pyx_t_10, 2+__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 437, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_t_3, __pyx_n_u_surrogateescape}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_11, __pyx_temp+1-__pyx_t_10, 2+__pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 437, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { __pyx_t_1 = PyTuple_New(2+__pyx_t_10); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 437, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_6); __pyx_t_6 = NULL; } __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_1, 0+__pyx_t_10, __pyx_t_3); __Pyx_INCREF(__pyx_n_u_surrogateescape); __Pyx_GIVEREF(__pyx_n_u_surrogateescape); PyTuple_SET_ITEM(__pyx_t_1, 1+__pyx_t_10, __pyx_n_u_surrogateescape); __pyx_t_3 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_1, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 437, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_v_dbfile_b = __pyx_t_4; __pyx_t_4 = 0; /* "s3ql/deltadump.pyx":438 * log.debug('Opening connection to %s', db.file) * dbfile_b = db.file.encode(sys.getfilesystemencoding(), 'surrogateescape') * SQLITE_CHECK_RC(sqlite3_open_v2(dbfile_b, &sqlite3_db, # <<<<<<<<<<<<<< * SQLITE_OPEN_READWRITE, NULL), * SQLITE_OK, sqlite3_db) */ __pyx_t_12 = __Pyx_PyObject_AsString(__pyx_v_dbfile_b); if (unlikely((!__pyx_t_12) && PyErr_Occurred())) __PYX_ERR(0, 438, __pyx_L8_error) /* "s3ql/deltadump.pyx":440 * SQLITE_CHECK_RC(sqlite3_open_v2(dbfile_b, &sqlite3_db, * SQLITE_OPEN_READWRITE, NULL), * SQLITE_OK, sqlite3_db) # <<<<<<<<<<<<<< * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_close(sqlite3_db), * SQLITE_OK, sqlite3_db)) */ 
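/* Note on error checking: the sqlite3_* calls below are wrapped in
 * SQLITE_CHECK_RC(rc, expected_rc, sqlite3_db) from deltadump.pyx, and the
 * generated code treats a -1 return as "Python exception set" (hence the
 * `== -1` tests feeding __PYX_ERR).  A hedged sketch of the same idea in
 * plain Python -- the helper name and message format are illustrative, not
 * the actual deltadump implementation:
 *
 *     def check_rc(rc, expected, context=''):
 *         # Raise instead of propagating a bare return code, mirroring
 *         # what the Cython helper does when rc != expected.
 *         if rc != expected:
 *             raise RuntimeError('SQLite call failed with rc=%d '
 *                                '(expected %d) %s' % (rc, expected, context))
 */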
__pyx_t_10 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_open_v2(__pyx_t_12, (&__pyx_cur_scope->__pyx_v_sqlite3_db), SQLITE_OPEN_READWRITE, NULL), SQLITE_OK, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_10 == -1)) __PYX_ERR(0, 438, __pyx_L8_error) /* "s3ql/deltadump.pyx":441 * SQLITE_OPEN_READWRITE, NULL), * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_close(sqlite3_db), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db)) * SQLITE_CHECK_RC(sqlite3_extended_result_codes(sqlite3_db, 1), */ __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_v_cm, __pyx_n_s_callback); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 441, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_1 = __Pyx_CyFunction_NewEx(&__pyx_mdef_4s3ql_9deltadump_10load_table_lambda6, 0, __pyx_n_s_load_table_locals_lambda, ((PyObject*)__pyx_cur_scope), __pyx_n_s_s3ql_deltadump, __pyx_d, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 441, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_11))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_11); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_11, function); } } if (!__pyx_t_3) { __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_11, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 441, __pyx_L8_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_4); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_t_1}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 441, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_t_1}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 441, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 441, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3); __pyx_t_3 = NULL; __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 441, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "s3ql/deltadump.pyx":443 * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_close(sqlite3_db), * SQLITE_OK, sqlite3_db)) * SQLITE_CHECK_RC(sqlite3_extended_result_codes(sqlite3_db, 1), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db) * */ __pyx_t_10 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_extended_result_codes(__pyx_cur_scope->__pyx_v_sqlite3_db, 1), SQLITE_OK, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_10 == -1)) __PYX_ERR(0, 443, __pyx_L8_error) /* "s3ql/deltadump.pyx":447 * * # Copy settings * for pragma in ('synchronous', 'foreign_keys'): # <<<<<<<<<<<<<< * val = db.get_val('PRAGMA %s' % pragma) * cmd = ('PRAGMA %s = %s' % (pragma, val)).encode('utf-8') */ __pyx_t_4 = __pyx_tuple__8; __Pyx_INCREF(__pyx_t_4); __pyx_t_13 = 0; for (;;) { if 
(__pyx_t_13 >= 2) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_11 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_13); __Pyx_INCREF(__pyx_t_11); __pyx_t_13++; if (unlikely(0 < 0)) __PYX_ERR(0, 447, __pyx_L8_error) #else __pyx_t_11 = PySequence_ITEM(__pyx_t_4, __pyx_t_13); __pyx_t_13++; if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 447, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); #endif __Pyx_XDECREF_SET(__pyx_v_pragma, ((PyObject*)__pyx_t_11)); __pyx_t_11 = 0; /* "s3ql/deltadump.pyx":448 * # Copy settings * for pragma in ('synchronous', 'foreign_keys'): * val = db.get_val('PRAGMA %s' % pragma) # <<<<<<<<<<<<<< * cmd = ('PRAGMA %s = %s' % (pragma, val)).encode('utf-8') * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, cmd, -1, &stmt, NULL), */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_db, __pyx_n_s_get_val); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 448, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = PyUnicode_Format(__pyx_kp_u_PRAGMA_s, __pyx_v_pragma); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 448, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); } } if (!__pyx_t_3) { __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 448, __pyx_L8_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_11); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_t_1}; __pyx_t_11 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 448, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_t_1}; __pyx_t_11 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 448, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_14 = PyTuple_New(1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 448, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_14); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_14, 0, __pyx_t_3); __pyx_t_3 = NULL; __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_14, 0+1, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_11 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_14, NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 448, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; } } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF_SET(__pyx_v_val, __pyx_t_11); __pyx_t_11 = 0; /* "s3ql/deltadump.pyx":449 * for pragma in ('synchronous', 'foreign_keys'): * val = db.get_val('PRAGMA %s' % pragma) * cmd = ('PRAGMA %s = %s' % (pragma, val)).encode('utf-8') # <<<<<<<<<<<<<< * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, cmd, -1, &stmt, NULL), * SQLITE_OK, sqlite3_db) */ __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 449, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_INCREF(__pyx_v_pragma); __Pyx_GIVEREF(__pyx_v_pragma); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_v_pragma); __Pyx_INCREF(__pyx_v_val); __Pyx_GIVEREF(__pyx_v_val); PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_v_val); __pyx_t_6 = 
PyUnicode_Format(__pyx_kp_u_PRAGMA_s_s, __pyx_t_11); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 449, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = PyUnicode_AsUTF8String(((PyObject*)__pyx_t_6)); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 449, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF_SET(__pyx_v_cmd, __pyx_t_11); __pyx_t_11 = 0; /* "s3ql/deltadump.pyx":450 * val = db.get_val('PRAGMA %s' % pragma) * cmd = ('PRAGMA %s = %s' % (pragma, val)).encode('utf-8') * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, cmd, -1, &stmt, NULL), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db) * try: */ __pyx_t_15 = __Pyx_PyObject_AsString(__pyx_v_cmd); if (unlikely((!__pyx_t_15) && PyErr_Occurred())) __PYX_ERR(0, 450, __pyx_L8_error) /* "s3ql/deltadump.pyx":451 * cmd = ('PRAGMA %s = %s' % (pragma, val)).encode('utf-8') * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, cmd, -1, &stmt, NULL), * SQLITE_OK, sqlite3_db) # <<<<<<<<<<<<<< * try: * rc = sqlite3_step(stmt) */ __pyx_t_10 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_prepare_v2(__pyx_cur_scope->__pyx_v_sqlite3_db, __pyx_t_15, -1, (&__pyx_cur_scope->__pyx_v_stmt), NULL), SQLITE_OK, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_10 == -1)) __PYX_ERR(0, 450, __pyx_L8_error) /* "s3ql/deltadump.pyx":452 * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, cmd, -1, &stmt, NULL), * SQLITE_OK, sqlite3_db) * try: # <<<<<<<<<<<<<< * rc = sqlite3_step(stmt) * if rc == SQLITE_ROW: */ /*try:*/ { /* "s3ql/deltadump.pyx":453 * SQLITE_OK, sqlite3_db) * try: * rc = sqlite3_step(stmt) # <<<<<<<<<<<<<< * if rc == SQLITE_ROW: * rc = sqlite3_step(stmt) */ __pyx_v_rc = sqlite3_step(__pyx_cur_scope->__pyx_v_stmt); /* "s3ql/deltadump.pyx":454 * try: * rc = sqlite3_step(stmt) * if rc == SQLITE_ROW: # <<<<<<<<<<<<<< * rc = sqlite3_step(stmt) * SQLITE_CHECK_RC(rc, SQLITE_DONE, sqlite3_db) */ __pyx_t_2 = ((__pyx_v_rc == SQLITE_ROW) != 0); if (__pyx_t_2) { /* "s3ql/deltadump.pyx":455 * rc = sqlite3_step(stmt) * if rc == SQLITE_ROW: * rc = sqlite3_step(stmt) # <<<<<<<<<<<<<< * SQLITE_CHECK_RC(rc, SQLITE_DONE, sqlite3_db) * finally: */ __pyx_v_rc = sqlite3_step(__pyx_cur_scope->__pyx_v_stmt); /* "s3ql/deltadump.pyx":454 * try: * rc = sqlite3_step(stmt) * if rc == SQLITE_ROW: # <<<<<<<<<<<<<< * rc = sqlite3_step(stmt) * SQLITE_CHECK_RC(rc, SQLITE_DONE, sqlite3_db) */ } /* "s3ql/deltadump.pyx":456 * if rc == SQLITE_ROW: * rc = sqlite3_step(stmt) * SQLITE_CHECK_RC(rc, SQLITE_DONE, sqlite3_db) # <<<<<<<<<<<<<< * finally: * SQLITE_CHECK_RC(sqlite3_finalize(stmt), SQLITE_OK, sqlite3_db) */ __pyx_t_10 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(__pyx_v_rc, SQLITE_DONE, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_10 == -1)) __PYX_ERR(0, 456, __pyx_L21_error) } /* "s3ql/deltadump.pyx":458 * SQLITE_CHECK_RC(rc, SQLITE_DONE, sqlite3_db) * finally: * SQLITE_CHECK_RC(sqlite3_finalize(stmt), SQLITE_OK, sqlite3_db) # <<<<<<<<<<<<<< * * # Get FILE* for buffered reading from *fh* */ /*finally:*/ { /*normal exit:*/{ __pyx_t_10 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_finalize(__pyx_cur_scope->__pyx_v_stmt), SQLITE_OK, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_10 == -1)) __PYX_ERR(0, 458, __pyx_L8_error) goto __pyx_L22; } /*exception exit:*/{ __Pyx_PyThreadState_declare __pyx_L21_error:; __pyx_t_18 = 0; __pyx_t_19 = 0; __pyx_t_20 = 0; __pyx_t_21 = 0; __pyx_t_22 = 0; __pyx_t_23 = 0; __Pyx_PyThreadState_assign __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; 
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_21, &__pyx_t_22, &__pyx_t_23); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_18, &__pyx_t_19, &__pyx_t_20) < 0)) __Pyx_ErrFetch(&__pyx_t_18, &__pyx_t_19, &__pyx_t_20); __Pyx_XGOTREF(__pyx_t_18); __Pyx_XGOTREF(__pyx_t_19); __Pyx_XGOTREF(__pyx_t_20); __Pyx_XGOTREF(__pyx_t_21); __Pyx_XGOTREF(__pyx_t_22); __Pyx_XGOTREF(__pyx_t_23); __pyx_t_10 = __pyx_lineno; __pyx_t_16 = __pyx_clineno; __pyx_t_17 = __pyx_filename; { __pyx_t_24 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_finalize(__pyx_cur_scope->__pyx_v_stmt), SQLITE_OK, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_24 == -1)) __PYX_ERR(0, 458, __pyx_L27_error) } __Pyx_PyThreadState_assign if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_21); __Pyx_XGIVEREF(__pyx_t_22); __Pyx_XGIVEREF(__pyx_t_23); __Pyx_ExceptionReset(__pyx_t_21, __pyx_t_22, __pyx_t_23); } __Pyx_XGIVEREF(__pyx_t_18); __Pyx_XGIVEREF(__pyx_t_19); __Pyx_XGIVEREF(__pyx_t_20); __Pyx_ErrRestore(__pyx_t_18, __pyx_t_19, __pyx_t_20); __pyx_t_18 = 0; __pyx_t_19 = 0; __pyx_t_20 = 0; __pyx_t_21 = 0; __pyx_t_22 = 0; __pyx_t_23 = 0; __pyx_lineno = __pyx_t_10; __pyx_clineno = __pyx_t_16; __pyx_filename = __pyx_t_17; goto __pyx_L8_error; __pyx_L27_error:; __Pyx_PyThreadState_assign if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_21); __Pyx_XGIVEREF(__pyx_t_22); __Pyx_XGIVEREF(__pyx_t_23); __Pyx_ExceptionReset(__pyx_t_21, __pyx_t_22, __pyx_t_23); } __Pyx_XDECREF(__pyx_t_18); __pyx_t_18 = 0; __Pyx_XDECREF(__pyx_t_19); __pyx_t_19 = 0; __Pyx_XDECREF(__pyx_t_20); __pyx_t_20 = 0; __pyx_t_21 = 0; __pyx_t_22 = 0; __pyx_t_23 = 0; goto __pyx_L8_error; } __pyx_L22:; } /* "s3ql/deltadump.pyx":447 * * # Copy settings * for pragma in ('synchronous', 'foreign_keys'): # <<<<<<<<<<<<<< * val = db.get_val('PRAGMA %s' % pragma) * cmd = ('PRAGMA %s = %s' % (pragma, val)).encode('utf-8') */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "s3ql/deltadump.pyx":461 * * # Get FILE* for buffered reading from *fh* * fp = dup_to_fp(fh, b'rb') # <<<<<<<<<<<<<< * cm.callback(lambda: fclose(fp)) * */ __pyx_t_25 = __pyx_f_4s3ql_9deltadump_dup_to_fp(__pyx_v_fh, ((const char *)"rb")); if (unlikely(__pyx_t_25 == NULL)) __PYX_ERR(0, 461, __pyx_L8_error) __pyx_cur_scope->__pyx_v_fp = __pyx_t_25; /* "s3ql/deltadump.pyx":462 * # Get FILE* for buffered reading from *fh* * fp = dup_to_fp(fh, b'rb') * cm.callback(lambda: fclose(fp)) # <<<<<<<<<<<<<< * * # Allocate col_args and col_types */ __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_v_cm, __pyx_n_s_callback); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 462, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_6 = __Pyx_CyFunction_NewEx(&__pyx_mdef_4s3ql_9deltadump_10load_table_1lambda7, 0, __pyx_n_s_load_table_locals_lambda, ((PyObject*)__pyx_cur_scope), __pyx_n_s_s3ql_deltadump, __pyx_d, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 462, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_14 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_11))) { __pyx_t_14 = PyMethod_GET_SELF(__pyx_t_11); if (likely(__pyx_t_14)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11); __Pyx_INCREF(__pyx_t_14); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_11, function); } } if (!__pyx_t_14) { __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_11, __pyx_t_6); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 462, __pyx_L8_error) 
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_4); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_14, __pyx_t_6}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 462, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_14, __pyx_t_6}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 462, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_1 = PyTuple_New(1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 462, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_14); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_14); __pyx_t_14 = NULL; __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_1, 0+1, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_1, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 462, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } } __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "s3ql/deltadump.pyx":465 * * # Allocate col_args and col_types * col_count = prep_columns(columns, &col_types, &col_args) # <<<<<<<<<<<<<< * cm.callback(lambda: free(col_args)) * cm.callback(lambda: free(col_types)) */ __pyx_t_16 = __pyx_f_4s3ql_9deltadump_prep_columns(__pyx_v_columns, (&__pyx_cur_scope->__pyx_v_col_types), (&__pyx_cur_scope->__pyx_v_col_args)); if (unlikely(__pyx_t_16 == -1)) __PYX_ERR(0, 465, __pyx_L8_error) __pyx_v_col_count = __pyx_t_16; /* "s3ql/deltadump.pyx":466 * # Allocate col_args and col_types * col_count = prep_columns(columns, &col_types, &col_args) * cm.callback(lambda: free(col_args)) # <<<<<<<<<<<<<< * cm.callback(lambda: free(col_types)) * */ __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_v_cm, __pyx_n_s_callback); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 466, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_1 = __Pyx_CyFunction_NewEx(&__pyx_mdef_4s3ql_9deltadump_10load_table_2lambda8, 0, __pyx_n_s_load_table_locals_lambda, ((PyObject*)__pyx_cur_scope), __pyx_n_s_s3ql_deltadump, __pyx_d, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 466, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_11))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_11); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_11, function); } } if (!__pyx_t_6) { __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_11, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 466, __pyx_L8_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_4); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_1}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 466, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_1}; __pyx_t_4 = 
__Pyx_PyCFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 466, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_14 = PyTuple_New(1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 466, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_14); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_14, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_14, 0+1, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_14, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 466, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; } } __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "s3ql/deltadump.pyx":467 * col_count = prep_columns(columns, &col_types, &col_args) * cm.callback(lambda: free(col_args)) * cm.callback(lambda: free(col_types)) # <<<<<<<<<<<<<< * * # Allocate int64_prev */ __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_v_cm, __pyx_n_s_callback); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 467, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_14 = __Pyx_CyFunction_NewEx(&__pyx_mdef_4s3ql_9deltadump_10load_table_3lambda9, 0, __pyx_n_s_load_table_locals_lambda, ((PyObject*)__pyx_cur_scope), __pyx_n_s_s3ql_deltadump, __pyx_d, NULL); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 467, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_14); __pyx_t_1 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_11))) { __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_11); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_11, function); } } if (!__pyx_t_1) { __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_11, __pyx_t_14); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 467, __pyx_L8_error) __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_GOTREF(__pyx_t_4); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_1, __pyx_t_14}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 467, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_1, __pyx_t_14}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 467, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; } else #endif { __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 467, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1); __pyx_t_1 = NULL; __Pyx_GIVEREF(__pyx_t_14); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_14); __pyx_t_14 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 467, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "s3ql/deltadump.pyx":470 * * # Allocate int64_prev * int64_prev = calloc( len(columns), sizeof(int64_t)) # <<<<<<<<<<<<<< * cm.callback(lambda: free(int64_prev)) * */ __pyx_t_13 = PyObject_Length(__pyx_v_columns); if (unlikely(__pyx_t_13 == -1)) __PYX_ERR(0, 
470, __pyx_L8_error) __pyx_t_26 = __pyx_f_4s3ql_9deltadump_calloc(((size_t)__pyx_t_13), (sizeof(int64_t))); if (unlikely(__pyx_t_26 == NULL)) __PYX_ERR(0, 470, __pyx_L8_error) __pyx_cur_scope->__pyx_v_int64_prev = ((int64_t *)__pyx_t_26); /* "s3ql/deltadump.pyx":471 * # Allocate int64_prev * int64_prev = calloc( len(columns), sizeof(int64_t)) * cm.callback(lambda: free(int64_prev)) # <<<<<<<<<<<<<< * * # Prepare INSERT statement */ __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_v_cm, __pyx_n_s_callback); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 471, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_6 = __Pyx_CyFunction_NewEx(&__pyx_mdef_4s3ql_9deltadump_10load_table_4lambda10, 0, __pyx_n_s_load_table_locals_lambda, ((PyObject*)__pyx_cur_scope), __pyx_n_s_s3ql_deltadump, __pyx_d, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 471, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_14 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_11))) { __pyx_t_14 = PyMethod_GET_SELF(__pyx_t_11); if (likely(__pyx_t_14)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11); __Pyx_INCREF(__pyx_t_14); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_11, function); } } if (!__pyx_t_14) { __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_11, __pyx_t_6); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 471, __pyx_L8_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_4); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_14, __pyx_t_6}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 471, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_14, __pyx_t_6}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 471, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_1 = PyTuple_New(1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 471, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_14); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_14); __pyx_t_14 = NULL; __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_1, 0+1, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_1, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 471, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } } __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "s3ql/deltadump.pyx":474 * * # Prepare INSERT statement * col_names = [ x[0] for x in columns ] # <<<<<<<<<<<<<< * query = ("INSERT INTO %s (%s) VALUES(%s)" * % (table, ', '.join(col_names), */ { /* enter inner scope */ PyObject *__pyx_8genexpr1__pyx_v_x = NULL; __pyx_t_4 = PyList_New(0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 474, __pyx_L30_error) __Pyx_GOTREF(__pyx_t_4); if (likely(PyList_CheckExact(__pyx_v_columns)) || PyTuple_CheckExact(__pyx_v_columns)) { __pyx_t_11 = __pyx_v_columns; __Pyx_INCREF(__pyx_t_11); __pyx_t_13 = 0; __pyx_t_27 = NULL; } else { __pyx_t_13 = -1; __pyx_t_11 = PyObject_GetIter(__pyx_v_columns); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 474, __pyx_L30_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_27 = Py_TYPE(__pyx_t_11)->tp_iternext; if (unlikely(!__pyx_t_27)) __PYX_ERR(0, 474, __pyx_L30_error) } for (;;) { if 
(likely(!__pyx_t_27)) { if (likely(PyList_CheckExact(__pyx_t_11))) { if (__pyx_t_13 >= PyList_GET_SIZE(__pyx_t_11)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_1 = PyList_GET_ITEM(__pyx_t_11, __pyx_t_13); __Pyx_INCREF(__pyx_t_1); __pyx_t_13++; if (unlikely(0 < 0)) __PYX_ERR(0, 474, __pyx_L30_error) #else __pyx_t_1 = PySequence_ITEM(__pyx_t_11, __pyx_t_13); __pyx_t_13++; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 474, __pyx_L30_error) __Pyx_GOTREF(__pyx_t_1); #endif } else { if (__pyx_t_13 >= PyTuple_GET_SIZE(__pyx_t_11)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_11, __pyx_t_13); __Pyx_INCREF(__pyx_t_1); __pyx_t_13++; if (unlikely(0 < 0)) __PYX_ERR(0, 474, __pyx_L30_error) #else __pyx_t_1 = PySequence_ITEM(__pyx_t_11, __pyx_t_13); __pyx_t_13++; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 474, __pyx_L30_error) __Pyx_GOTREF(__pyx_t_1); #endif } } else { __pyx_t_1 = __pyx_t_27(__pyx_t_11); if (unlikely(!__pyx_t_1)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 474, __pyx_L30_error) } break; } __Pyx_GOTREF(__pyx_t_1); } __Pyx_XDECREF_SET(__pyx_8genexpr1__pyx_v_x, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_GetItemInt(__pyx_8genexpr1__pyx_v_x, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 474, __pyx_L30_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(__Pyx_ListComp_Append(__pyx_t_4, (PyObject*)__pyx_t_1))) __PYX_ERR(0, 474, __pyx_L30_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_8genexpr1__pyx_v_x); goto __pyx_L33_exit_scope; __pyx_L30_error:; __Pyx_XDECREF(__pyx_8genexpr1__pyx_v_x); goto __pyx_L8_error; __pyx_L33_exit_scope:; } /* exit inner scope */ __pyx_v_col_names = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "s3ql/deltadump.pyx":476 * col_names = [ x[0] for x in columns ] * query = ("INSERT INTO %s (%s) VALUES(%s)" * % (table, ', '.join(col_names), # <<<<<<<<<<<<<< * ', '.join('?' * col_count))).encode('utf-8') * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &stmt, NULL), */ __pyx_t_4 = PyUnicode_Join(__pyx_kp_u__4, __pyx_v_col_names); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 476, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); /* "s3ql/deltadump.pyx":477 * query = ("INSERT INTO %s (%s) VALUES(%s)" * % (table, ', '.join(col_names), * ', '.join('?' * col_count))).encode('utf-8') # <<<<<<<<<<<<<< * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &stmt, NULL), * SQLITE_OK, sqlite3_db) */ __pyx_t_11 = __Pyx_PyInt_From_int(__pyx_v_col_count); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 477, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_1 = PyNumber_Multiply(__pyx_kp_u__9, __pyx_t_11); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 477, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = PyUnicode_Join(__pyx_kp_u__4, __pyx_t_1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 477, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "s3ql/deltadump.pyx":476 * col_names = [ x[0] for x in columns ] * query = ("INSERT INTO %s (%s) VALUES(%s)" * % (table, ', '.join(col_names), # <<<<<<<<<<<<<< * ', '.join('?' 
* col_count))).encode('utf-8') * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &stmt, NULL), */ __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 476, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_table); __Pyx_GIVEREF(__pyx_v_table); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_table); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_t_11); __pyx_t_4 = 0; __pyx_t_11 = 0; __pyx_t_11 = PyUnicode_Format(__pyx_kp_u_INSERT_INTO_s_s_VALUES_s, __pyx_t_1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 476, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "s3ql/deltadump.pyx":477 * query = ("INSERT INTO %s (%s) VALUES(%s)" * % (table, ', '.join(col_names), * ', '.join('?' * col_count))).encode('utf-8') # <<<<<<<<<<<<<< * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &stmt, NULL), * SQLITE_OK, sqlite3_db) */ __pyx_t_1 = PyUnicode_AsUTF8String(((PyObject*)__pyx_t_11)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 477, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_v_query = __pyx_t_1; __pyx_t_1 = 0; /* "s3ql/deltadump.pyx":478 * % (table, ', '.join(col_names), * ', '.join('?' * col_count))).encode('utf-8') * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &stmt, NULL), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(stmt), */ __pyx_t_15 = __Pyx_PyObject_AsString(__pyx_v_query); if (unlikely((!__pyx_t_15) && PyErr_Occurred())) __PYX_ERR(0, 478, __pyx_L8_error) /* "s3ql/deltadump.pyx":479 * ', '.join('?' * col_count))).encode('utf-8') * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &stmt, NULL), * SQLITE_OK, sqlite3_db) # <<<<<<<<<<<<<< * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(stmt), * SQLITE_OK, sqlite3_db)) */ __pyx_t_16 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_prepare_v2(__pyx_cur_scope->__pyx_v_sqlite3_db, __pyx_t_15, -1, (&__pyx_cur_scope->__pyx_v_stmt), NULL), SQLITE_OK, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_16 == -1)) __PYX_ERR(0, 478, __pyx_L8_error) /* "s3ql/deltadump.pyx":480 * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &stmt, NULL), * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(stmt), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db)) * */ __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_v_cm, __pyx_n_s_callback); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 480, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_4 = __Pyx_CyFunction_NewEx(&__pyx_mdef_4s3ql_9deltadump_10load_table_5lambda11, 0, __pyx_n_s_load_table_locals_lambda, ((PyObject*)__pyx_cur_scope), __pyx_n_s_s3ql_deltadump, __pyx_d, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 480, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_11))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_11); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_11, function); } } if (!__pyx_t_6) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_11, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 480, __pyx_L8_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_4}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_11, 
__pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 480, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_4}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 480, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { __pyx_t_14 = PyTuple_New(1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 480, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_14); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_14, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_14, 0+1, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 480, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; } } __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "s3ql/deltadump.pyx":484 * * # Prepare BEGIN statement * query = b'BEGIN TRANSACTION' # <<<<<<<<<<<<<< * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &begin_stmt, NULL), * SQLITE_OK, sqlite3_db) */ __Pyx_INCREF(__pyx_kp_b_BEGIN_TRANSACTION); __Pyx_DECREF_SET(__pyx_v_query, __pyx_kp_b_BEGIN_TRANSACTION); /* "s3ql/deltadump.pyx":485 * # Prepare BEGIN statement * query = b'BEGIN TRANSACTION' * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &begin_stmt, NULL), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(begin_stmt), */ __pyx_t_15 = __Pyx_PyObject_AsString(__pyx_v_query); if (unlikely((!__pyx_t_15) && PyErr_Occurred())) __PYX_ERR(0, 485, __pyx_L8_error) /* "s3ql/deltadump.pyx":486 * query = b'BEGIN TRANSACTION' * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &begin_stmt, NULL), * SQLITE_OK, sqlite3_db) # <<<<<<<<<<<<<< * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(begin_stmt), * SQLITE_OK, sqlite3_db)) */ __pyx_t_16 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_prepare_v2(__pyx_cur_scope->__pyx_v_sqlite3_db, __pyx_t_15, -1, (&__pyx_cur_scope->__pyx_v_begin_stmt), NULL), SQLITE_OK, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_16 == -1)) __PYX_ERR(0, 485, __pyx_L8_error) /* "s3ql/deltadump.pyx":487 * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &begin_stmt, NULL), * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(begin_stmt), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db)) * */ __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_v_cm, __pyx_n_s_callback); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 487, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_14 = __Pyx_CyFunction_NewEx(&__pyx_mdef_4s3ql_9deltadump_10load_table_6lambda12, 0, __pyx_n_s_load_table_locals_lambda, ((PyObject*)__pyx_cur_scope), __pyx_n_s_s3ql_deltadump, __pyx_d, NULL); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 487, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_14); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_11))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_11); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_11, function); } } if (!__pyx_t_4) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_11, __pyx_t_14); if (unlikely(!__pyx_t_1)) 
__PYX_ERR(0, 487, __pyx_L8_error) __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_4, __pyx_t_14}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 487, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_4, __pyx_t_14}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 487, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; } else #endif { __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 487, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4); __pyx_t_4 = NULL; __Pyx_GIVEREF(__pyx_t_14); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_14); __pyx_t_14 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 487, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "s3ql/deltadump.pyx":491 * * # Prepare COMMIT statement * query = b'COMMIT TRANSACTION' # <<<<<<<<<<<<<< * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &commit_stmt, NULL), * SQLITE_OK, sqlite3_db) */ __Pyx_INCREF(__pyx_kp_b_COMMIT_TRANSACTION); __Pyx_DECREF_SET(__pyx_v_query, __pyx_kp_b_COMMIT_TRANSACTION); /* "s3ql/deltadump.pyx":492 * # Prepare COMMIT statement * query = b'COMMIT TRANSACTION' * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &commit_stmt, NULL), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(commit_stmt), */ __pyx_t_15 = __Pyx_PyObject_AsString(__pyx_v_query); if (unlikely((!__pyx_t_15) && PyErr_Occurred())) __PYX_ERR(0, 492, __pyx_L8_error) /* "s3ql/deltadump.pyx":493 * query = b'COMMIT TRANSACTION' * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &commit_stmt, NULL), * SQLITE_OK, sqlite3_db) # <<<<<<<<<<<<<< * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(commit_stmt), * SQLITE_OK, sqlite3_db)) */ __pyx_t_16 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_prepare_v2(__pyx_cur_scope->__pyx_v_sqlite3_db, __pyx_t_15, -1, (&__pyx_cur_scope->__pyx_v_commit_stmt), NULL), SQLITE_OK, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_16 == -1)) __PYX_ERR(0, 492, __pyx_L8_error) /* "s3ql/deltadump.pyx":494 * SQLITE_CHECK_RC(sqlite3_prepare_v2(sqlite3_db, query, -1, &commit_stmt, NULL), * SQLITE_OK, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_finalize(commit_stmt), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db)) * */ __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_v_cm, __pyx_n_s_callback); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 494, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_6 = __Pyx_CyFunction_NewEx(&__pyx_mdef_4s3ql_9deltadump_10load_table_7lambda13, 0, __pyx_n_s_load_table_locals_lambda, ((PyObject*)__pyx_cur_scope), __pyx_n_s_s3ql_deltadump, __pyx_d, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 494, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_14 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_11))) { __pyx_t_14 = 
PyMethod_GET_SELF(__pyx_t_11); if (likely(__pyx_t_14)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11); __Pyx_INCREF(__pyx_t_14); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_11, function); } } if (!__pyx_t_14) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_11, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 494, __pyx_L8_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_14, __pyx_t_6}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 494, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_14, __pyx_t_6}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 494, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 494, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_14); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_14); __pyx_t_14 = NULL; __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 494, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } } __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "s3ql/deltadump.pyx":497 * SQLITE_OK, sqlite3_db)) * * buf = calloc(MAX_BLOB_SIZE, 1) # <<<<<<<<<<<<<< * cm.callback(lambda: free(buf)) * read_integer(&row_count, fp) */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_MAX_BLOB_SIZE); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 497, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_28 = __Pyx_PyInt_As_size_t(__pyx_t_1); if (unlikely((__pyx_t_28 == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 497, __pyx_L8_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_26 = __pyx_f_4s3ql_9deltadump_calloc(__pyx_t_28, 1); if (unlikely(__pyx_t_26 == NULL)) __PYX_ERR(0, 497, __pyx_L8_error) __pyx_cur_scope->__pyx_v_buf = __pyx_t_26; /* "s3ql/deltadump.pyx":498 * * buf = calloc(MAX_BLOB_SIZE, 1) * cm.callback(lambda: free(buf)) # <<<<<<<<<<<<<< * read_integer(&row_count, fp) * log.debug('load_table(%s): reading %d rows', table, row_count) */ __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_v_cm, __pyx_n_s_callback); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 498, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_4 = __Pyx_CyFunction_NewEx(&__pyx_mdef_4s3ql_9deltadump_10load_table_8lambda14, 0, __pyx_n_s_load_table_locals_lambda, ((PyObject*)__pyx_cur_scope), __pyx_n_s_s3ql_deltadump, __pyx_d, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 498, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_11))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_11); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_11, function); } } if (!__pyx_t_6) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_11, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 498, __pyx_L8_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; 
__Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_4}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 498, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_4}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 498, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { __pyx_t_14 = PyTuple_New(1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 498, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_14); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_14, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_14, 0+1, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 498, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; } } __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "s3ql/deltadump.pyx":499 * buf = calloc(MAX_BLOB_SIZE, 1) * cm.callback(lambda: free(buf)) * read_integer(&row_count, fp) # <<<<<<<<<<<<<< * log.debug('load_table(%s): reading %d rows', table, row_count) * */ __pyx_t_16 = __pyx_f_4s3ql_9deltadump_read_integer((&__pyx_v_row_count), __pyx_cur_scope->__pyx_v_fp); if (unlikely(__pyx_t_16 == -1)) __PYX_ERR(0, 499, __pyx_L8_error) /* "s3ql/deltadump.pyx":500 * cm.callback(lambda: free(buf)) * read_integer(&row_count, fp) * log.debug('load_table(%s): reading %d rows', table, row_count) # <<<<<<<<<<<<<< * * # Start transaction */ __pyx_t_11 = __Pyx_GetModuleGlobalName(__pyx_n_s_log); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 500, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_11, __pyx_n_s_debug); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 500, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_14); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyInt_From_int64_t(__pyx_v_row_count); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 500, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_4 = NULL; __pyx_t_16 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_14))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_14); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_14); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_14, function); __pyx_t_16 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_14)) { PyObject *__pyx_temp[4] = {__pyx_t_4, __pyx_kp_u_load_table_s_reading_d_rows, __pyx_v_table, __pyx_t_11}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_14, __pyx_temp+1-__pyx_t_16, 3+__pyx_t_16); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 500, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_14)) { PyObject *__pyx_temp[4] = {__pyx_t_4, __pyx_kp_u_load_table_s_reading_d_rows, __pyx_v_table, __pyx_t_11}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_14, __pyx_temp+1-__pyx_t_16, 3+__pyx_t_16); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 500, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; 
__Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; } else #endif { __pyx_t_6 = PyTuple_New(3+__pyx_t_16); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 500, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_4) { __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4); __pyx_t_4 = NULL; } __Pyx_INCREF(__pyx_kp_u_load_table_s_reading_d_rows); __Pyx_GIVEREF(__pyx_kp_u_load_table_s_reading_d_rows); PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_16, __pyx_kp_u_load_table_s_reading_d_rows); __Pyx_INCREF(__pyx_v_table); __Pyx_GIVEREF(__pyx_v_table); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_16, __pyx_v_table); __Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_6, 2+__pyx_t_16, __pyx_t_11); __pyx_t_11 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_14, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 500, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "s3ql/deltadump.pyx":503 * * # Start transaction * SQLITE_CHECK_RC(sqlite3_step(begin_stmt), SQLITE_DONE, sqlite3_db) # <<<<<<<<<<<<<< * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_step(commit_stmt), * SQLITE_DONE, sqlite3_db)) */ __pyx_t_16 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_step(__pyx_cur_scope->__pyx_v_begin_stmt), SQLITE_DONE, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_16 == -1)) __PYX_ERR(0, 503, __pyx_L8_error) /* "s3ql/deltadump.pyx":504 * # Start transaction * SQLITE_CHECK_RC(sqlite3_step(begin_stmt), SQLITE_DONE, sqlite3_db) * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_step(commit_stmt), # <<<<<<<<<<<<<< * SQLITE_DONE, sqlite3_db)) * SQLITE_CHECK_RC(sqlite3_reset(begin_stmt), SQLITE_OK, sqlite3_db) */ __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_v_cm, __pyx_n_s_callback); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 504, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_14); __pyx_t_6 = __Pyx_CyFunction_NewEx(&__pyx_mdef_4s3ql_9deltadump_10load_table_9lambda15, 0, __pyx_n_s_load_table_locals_lambda, ((PyObject*)__pyx_cur_scope), __pyx_n_s_s3ql_deltadump, __pyx_d, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 504, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_11 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_14))) { __pyx_t_11 = PyMethod_GET_SELF(__pyx_t_14); if (likely(__pyx_t_11)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_14); __Pyx_INCREF(__pyx_t_11); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_14, function); } } if (!__pyx_t_11) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_14, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 504, __pyx_L8_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_14)) { PyObject *__pyx_temp[2] = {__pyx_t_11, __pyx_t_6}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_14, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 504, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_14)) { PyObject *__pyx_temp[2] = {__pyx_t_11, __pyx_t_6}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_14, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 504, __pyx_L8_error) __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 504, __pyx_L8_error) 
__Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_11); __pyx_t_11 = NULL; __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_14, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 504, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } } __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "s3ql/deltadump.pyx":506 * cm.callback(lambda: SQLITE_CHECK_RC(sqlite3_step(commit_stmt), * SQLITE_DONE, sqlite3_db)) * SQLITE_CHECK_RC(sqlite3_reset(begin_stmt), SQLITE_OK, sqlite3_db) # <<<<<<<<<<<<<< * * # Iterate through rows */ __pyx_t_16 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_reset(__pyx_cur_scope->__pyx_v_begin_stmt), SQLITE_OK, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_16 == -1)) __PYX_ERR(0, 506, __pyx_L8_error) /* "s3ql/deltadump.pyx":509 * * # Iterate through rows * for i in range(row_count): # <<<<<<<<<<<<<< * for j in range(col_count): * if col_types[j] == _INTEGER: */ __pyx_t_29 = __pyx_v_row_count; for (__pyx_t_16 = 0; __pyx_t_16 < __pyx_t_29; __pyx_t_16+=1) { __pyx_v_i = __pyx_t_16; /* "s3ql/deltadump.pyx":510 * # Iterate through rows * for i in range(row_count): * for j in range(col_count): # <<<<<<<<<<<<<< * if col_types[j] == _INTEGER: * read_integer(&int64, fp) */ __pyx_t_10 = __pyx_v_col_count; for (__pyx_t_24 = 0; __pyx_t_24 < __pyx_t_10; __pyx_t_24+=1) { __pyx_v_j = __pyx_t_24; /* "s3ql/deltadump.pyx":511 * for i in range(row_count): * for j in range(col_count): * if col_types[j] == _INTEGER: # <<<<<<<<<<<<<< * read_integer(&int64, fp) * int64 += col_args[j] + int64_prev[j] */ __pyx_t_2 = (((__pyx_cur_scope->__pyx_v_col_types[__pyx_v_j]) == __pyx_v_4s3ql_9deltadump__INTEGER) != 0); if (__pyx_t_2) { /* "s3ql/deltadump.pyx":512 * for j in range(col_count): * if col_types[j] == _INTEGER: * read_integer(&int64, fp) # <<<<<<<<<<<<<< * int64 += col_args[j] + int64_prev[j] * int64_prev[j] = int64 */ __pyx_t_30 = __pyx_f_4s3ql_9deltadump_read_integer((&__pyx_v_int64), __pyx_cur_scope->__pyx_v_fp); if (unlikely(__pyx_t_30 == -1)) __PYX_ERR(0, 512, __pyx_L8_error) /* "s3ql/deltadump.pyx":513 * if col_types[j] == _INTEGER: * read_integer(&int64, fp) * int64 += col_args[j] + int64_prev[j] # <<<<<<<<<<<<<< * int64_prev[j] = int64 * SQLITE_CHECK_RC(sqlite3_bind_int64(stmt, j + 1, int64), */ __pyx_v_int64 = (__pyx_v_int64 + ((__pyx_cur_scope->__pyx_v_col_args[__pyx_v_j]) + (__pyx_cur_scope->__pyx_v_int64_prev[__pyx_v_j]))); /* "s3ql/deltadump.pyx":514 * read_integer(&int64, fp) * int64 += col_args[j] + int64_prev[j] * int64_prev[j] = int64 # <<<<<<<<<<<<<< * SQLITE_CHECK_RC(sqlite3_bind_int64(stmt, j + 1, int64), * SQLITE_OK, sqlite3_db) */ (__pyx_cur_scope->__pyx_v_int64_prev[__pyx_v_j]) = __pyx_v_int64; /* "s3ql/deltadump.pyx":515 * int64 += col_args[j] + int64_prev[j] * int64_prev[j] = int64 * SQLITE_CHECK_RC(sqlite3_bind_int64(stmt, j + 1, int64), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db) * */ __pyx_t_30 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_bind_int64(__pyx_cur_scope->__pyx_v_stmt, (__pyx_v_j + 1), __pyx_v_int64), SQLITE_OK, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_30 == -1)) __PYX_ERR(0, 515, __pyx_L8_error) /* "s3ql/deltadump.pyx":511 * for i in range(row_count): * for j in range(col_count): * if col_types[j] == _INTEGER: # <<<<<<<<<<<<<< * read_integer(&int64, fp) * int64 += col_args[j] + int64_prev[j] */ } /* "s3ql/deltadump.pyx":518 * SQLITE_OK, 
sqlite3_db) * * if col_types[j] == _TIME: # <<<<<<<<<<<<<< * read_integer(&int64, fp) * int64 += col_args[j] + int64_prev[j] */ __pyx_t_2 = (((__pyx_cur_scope->__pyx_v_col_types[__pyx_v_j]) == __pyx_v_4s3ql_9deltadump__TIME) != 0); if (__pyx_t_2) { /* "s3ql/deltadump.pyx":519 * * if col_types[j] == _TIME: * read_integer(&int64, fp) # <<<<<<<<<<<<<< * int64 += col_args[j] + int64_prev[j] * int64_prev[j] = int64 */ __pyx_t_30 = __pyx_f_4s3ql_9deltadump_read_integer((&__pyx_v_int64), __pyx_cur_scope->__pyx_v_fp); if (unlikely(__pyx_t_30 == -1)) __PYX_ERR(0, 519, __pyx_L8_error) /* "s3ql/deltadump.pyx":520 * if col_types[j] == _TIME: * read_integer(&int64, fp) * int64 += col_args[j] + int64_prev[j] # <<<<<<<<<<<<<< * int64_prev[j] = int64 * # Cast is safe, we know that the integer was converted from */ __pyx_v_int64 = (__pyx_v_int64 + ((__pyx_cur_scope->__pyx_v_col_args[__pyx_v_j]) + (__pyx_cur_scope->__pyx_v_int64_prev[__pyx_v_j]))); /* "s3ql/deltadump.pyx":521 * read_integer(&int64, fp) * int64 += col_args[j] + int64_prev[j] * int64_prev[j] = int64 # <<<<<<<<<<<<<< * # Cast is safe, we know that the integer was converted from * # double at dump time. */ (__pyx_cur_scope->__pyx_v_int64_prev[__pyx_v_j]) = __pyx_v_int64; /* "s3ql/deltadump.pyx":524 * # Cast is safe, we know that the integer was converted from * # double at dump time. * SQLITE_CHECK_RC(sqlite3_bind_double(stmt, j + 1, int64 / time_scale), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db) * */ if (unlikely(__pyx_v_4s3ql_9deltadump_time_scale == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "float division"); __PYX_ERR(0, 524, __pyx_L8_error) } /* "s3ql/deltadump.pyx":525 * # double at dump time. * SQLITE_CHECK_RC(sqlite3_bind_double(stmt, j + 1, int64 / time_scale), * SQLITE_OK, sqlite3_db) # <<<<<<<<<<<<<< * * elif col_types[j] == _BLOB: */ __pyx_t_30 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_bind_double(__pyx_cur_scope->__pyx_v_stmt, (__pyx_v_j + 1), (((double)__pyx_v_int64) / __pyx_v_4s3ql_9deltadump_time_scale)), SQLITE_OK, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_30 == -1)) __PYX_ERR(0, 524, __pyx_L8_error) /* "s3ql/deltadump.pyx":518 * SQLITE_OK, sqlite3_db) * * if col_types[j] == _TIME: # <<<<<<<<<<<<<< * read_integer(&int64, fp) * int64 += col_args[j] + int64_prev[j] */ goto __pyx_L39; } /* "s3ql/deltadump.pyx":527 * SQLITE_OK, sqlite3_db) * * elif col_types[j] == _BLOB: # <<<<<<<<<<<<<< * if col_args[j] == 0: * read_integer(&int64, fp) */ __pyx_t_2 = (((__pyx_cur_scope->__pyx_v_col_types[__pyx_v_j]) == __pyx_v_4s3ql_9deltadump__BLOB) != 0); if (__pyx_t_2) { /* "s3ql/deltadump.pyx":528 * * elif col_types[j] == _BLOB: * if col_args[j] == 0: # <<<<<<<<<<<<<< * read_integer(&int64, fp) * tmp = int64_prev[j] + int64 */ __pyx_t_2 = (((__pyx_cur_scope->__pyx_v_col_args[__pyx_v_j]) == 0) != 0); if (__pyx_t_2) { /* "s3ql/deltadump.pyx":529 * elif col_types[j] == _BLOB: * if col_args[j] == 0: * read_integer(&int64, fp) # <<<<<<<<<<<<<< * tmp = int64_prev[j] + int64 * if tmp < 0 or tmp > INT_MAX: */ __pyx_t_30 = __pyx_f_4s3ql_9deltadump_read_integer((&__pyx_v_int64), __pyx_cur_scope->__pyx_v_fp); if (unlikely(__pyx_t_30 == -1)) __PYX_ERR(0, 529, __pyx_L8_error) /* "s3ql/deltadump.pyx":530 * if col_args[j] == 0: * read_integer(&int64, fp) * tmp = int64_prev[j] + int64 # <<<<<<<<<<<<<< * if tmp < 0 or tmp > INT_MAX: * raise RuntimeError('Corrupted input') */ __pyx_v_tmp = ((__pyx_cur_scope->__pyx_v_int64_prev[__pyx_v_j]) + __pyx_v_int64); /* "s3ql/deltadump.pyx":531 * read_integer(&int64, fp) * tmp = int64_prev[j] 
+ int64 * if tmp < 0 or tmp > INT_MAX: # <<<<<<<<<<<<<< * raise RuntimeError('Corrupted input') * len_ = tmp */ __pyx_t_31 = ((__pyx_v_tmp < 0) != 0); if (!__pyx_t_31) { } else { __pyx_t_2 = __pyx_t_31; goto __pyx_L42_bool_binop_done; } __pyx_t_31 = ((__pyx_v_tmp > INT_MAX) != 0); __pyx_t_2 = __pyx_t_31; __pyx_L42_bool_binop_done:; if (__pyx_t_2) { /* "s3ql/deltadump.pyx":532 * tmp = int64_prev[j] + int64 * if tmp < 0 or tmp > INT_MAX: * raise RuntimeError('Corrupted input') # <<<<<<<<<<<<<< * len_ = tmp * int64_prev[j] = tmp */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 532, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 532, __pyx_L8_error) /* "s3ql/deltadump.pyx":531 * read_integer(&int64, fp) * tmp = int64_prev[j] + int64 * if tmp < 0 or tmp > INT_MAX: # <<<<<<<<<<<<<< * raise RuntimeError('Corrupted input') * len_ = tmp */ } /* "s3ql/deltadump.pyx":533 * if tmp < 0 or tmp > INT_MAX: * raise RuntimeError('Corrupted input') * len_ = tmp # <<<<<<<<<<<<<< * int64_prev[j] = tmp * else: */ __pyx_v_len_ = ((int)__pyx_v_tmp); /* "s3ql/deltadump.pyx":534 * raise RuntimeError('Corrupted input') * len_ = tmp * int64_prev[j] = tmp # <<<<<<<<<<<<<< * else: * len_ = col_args[j] */ (__pyx_cur_scope->__pyx_v_int64_prev[__pyx_v_j]) = __pyx_v_tmp; /* "s3ql/deltadump.pyx":528 * * elif col_types[j] == _BLOB: * if col_args[j] == 0: # <<<<<<<<<<<<<< * read_integer(&int64, fp) * tmp = int64_prev[j] + int64 */ goto __pyx_L40; } /* "s3ql/deltadump.pyx":536 * int64_prev[j] = tmp * else: * len_ = col_args[j] # <<<<<<<<<<<<<< * * if len_ > MAX_BLOB_SIZE: */ /*else*/ { __pyx_v_len_ = (__pyx_cur_scope->__pyx_v_col_args[__pyx_v_j]); } __pyx_L40:; /* "s3ql/deltadump.pyx":538 * len_ = col_args[j] * * if len_ > MAX_BLOB_SIZE: # <<<<<<<<<<<<<< * raise RuntimeError('BLOB too large to read (%d vs %d)', len_, MAX_BLOB_SIZE) * */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_len_); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 538, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_14 = __Pyx_GetModuleGlobalName(__pyx_n_s_MAX_BLOB_SIZE); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 538, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_14); __pyx_t_4 = PyObject_RichCompare(__pyx_t_1, __pyx_t_14, Py_GT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 538, __pyx_L8_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 538, __pyx_L8_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_2) { /* "s3ql/deltadump.pyx":539 * * if len_ > MAX_BLOB_SIZE: * raise RuntimeError('BLOB too large to read (%d vs %d)', len_, MAX_BLOB_SIZE) # <<<<<<<<<<<<<< * * if len_ > 0: */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_len_); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 539, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_14 = __Pyx_GetModuleGlobalName(__pyx_n_s_MAX_BLOB_SIZE); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 539, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_14); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 539, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_kp_u_BLOB_too_large_to_read_d_vs_d); __Pyx_GIVEREF(__pyx_kp_u_BLOB_too_large_to_read_d_vs_d); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_kp_u_BLOB_too_large_to_read_d_vs_d); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_14); PyTuple_SET_ITEM(__pyx_t_1, 
2, __pyx_t_14); __pyx_t_4 = 0; __pyx_t_14 = 0; __pyx_t_14 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_t_1, NULL); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 539, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_14); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_Raise(__pyx_t_14, 0, 0, 0); __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; __PYX_ERR(0, 539, __pyx_L8_error) /* "s3ql/deltadump.pyx":538 * len_ = col_args[j] * * if len_ > MAX_BLOB_SIZE: # <<<<<<<<<<<<<< * raise RuntimeError('BLOB too large to read (%d vs %d)', len_, MAX_BLOB_SIZE) * */ } /* "s3ql/deltadump.pyx":541 * raise RuntimeError('BLOB too large to read (%d vs %d)', len_, MAX_BLOB_SIZE) * * if len_ > 0: # <<<<<<<<<<<<<< * fread(buf, len_, fp) * */ __pyx_t_2 = ((__pyx_v_len_ > 0) != 0); if (__pyx_t_2) { /* "s3ql/deltadump.pyx":542 * * if len_ > 0: * fread(buf, len_, fp) # <<<<<<<<<<<<<< * * SQLITE_CHECK_RC(sqlite3_bind_blob(stmt, j + 1, buf, len_, SQLITE_TRANSIENT), */ __pyx_t_30 = __pyx_f_4s3ql_9deltadump_fread(__pyx_cur_scope->__pyx_v_buf, ((unsigned int)__pyx_v_len_), __pyx_cur_scope->__pyx_v_fp); if (unlikely(__pyx_t_30 == -1)) __PYX_ERR(0, 542, __pyx_L8_error) /* "s3ql/deltadump.pyx":541 * raise RuntimeError('BLOB too large to read (%d vs %d)', len_, MAX_BLOB_SIZE) * * if len_ > 0: # <<<<<<<<<<<<<< * fread(buf, len_, fp) * */ } /* "s3ql/deltadump.pyx":544 * fread(buf, len_, fp) * * SQLITE_CHECK_RC(sqlite3_bind_blob(stmt, j + 1, buf, len_, SQLITE_TRANSIENT), # <<<<<<<<<<<<<< * SQLITE_OK, sqlite3_db) * */ __pyx_t_30 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_bind_blob(__pyx_cur_scope->__pyx_v_stmt, (__pyx_v_j + 1), __pyx_cur_scope->__pyx_v_buf, __pyx_v_len_, SQLITE_TRANSIENT), SQLITE_OK, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_30 == -1)) __PYX_ERR(0, 544, __pyx_L8_error) /* "s3ql/deltadump.pyx":527 * SQLITE_OK, sqlite3_db) * * elif col_types[j] == _BLOB: # <<<<<<<<<<<<<< * if col_args[j] == 0: * read_integer(&int64, fp) */ } __pyx_L39:; } /* "s3ql/deltadump.pyx":547 * SQLITE_OK, sqlite3_db) * * SQLITE_CHECK_RC(sqlite3_step(stmt), SQLITE_DONE, sqlite3_db) # <<<<<<<<<<<<<< * SQLITE_CHECK_RC(sqlite3_reset(stmt), SQLITE_OK, sqlite3_db) * */ __pyx_t_10 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_step(__pyx_cur_scope->__pyx_v_stmt), SQLITE_DONE, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_10 == -1)) __PYX_ERR(0, 547, __pyx_L8_error) /* "s3ql/deltadump.pyx":548 * * SQLITE_CHECK_RC(sqlite3_step(stmt), SQLITE_DONE, sqlite3_db) * SQLITE_CHECK_RC(sqlite3_reset(stmt), SQLITE_OK, sqlite3_db) # <<<<<<<<<<<<<< * * # Commit every once in a while */ __pyx_t_10 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_reset(__pyx_cur_scope->__pyx_v_stmt), SQLITE_OK, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_10 == -1)) __PYX_ERR(0, 548, __pyx_L8_error) /* "s3ql/deltadump.pyx":551 * * # Commit every once in a while * if i % trx_rows == 0: # <<<<<<<<<<<<<< * # This isn't 100% ok -- if we have an exception in step(begin_stmt), * # we the cleanup handler will execute the commit statement again */ __pyx_t_14 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 551, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_14); __pyx_t_1 = PyNumber_Remainder(__pyx_t_14, __pyx_v_trx_rows); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 551, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; __pyx_t_14 = __Pyx_PyInt_EqObjC(__pyx_t_1, __pyx_int_0, 0, 0); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 551, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_14); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 
0; __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_14); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 551, __pyx_L8_error) __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; if (__pyx_t_2) { /* "s3ql/deltadump.pyx":555 * # we the cleanup handler will execute the commit statement again * # without an active transaction. * SQLITE_CHECK_RC(sqlite3_step(commit_stmt), SQLITE_DONE, sqlite3_db) # <<<<<<<<<<<<<< * SQLITE_CHECK_RC(sqlite3_step(begin_stmt), SQLITE_DONE, sqlite3_db) * SQLITE_CHECK_RC(sqlite3_reset(commit_stmt), SQLITE_OK, sqlite3_db) */ __pyx_t_10 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_step(__pyx_cur_scope->__pyx_v_commit_stmt), SQLITE_DONE, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_10 == -1)) __PYX_ERR(0, 555, __pyx_L8_error) /* "s3ql/deltadump.pyx":556 * # without an active transaction. * SQLITE_CHECK_RC(sqlite3_step(commit_stmt), SQLITE_DONE, sqlite3_db) * SQLITE_CHECK_RC(sqlite3_step(begin_stmt), SQLITE_DONE, sqlite3_db) # <<<<<<<<<<<<<< * SQLITE_CHECK_RC(sqlite3_reset(commit_stmt), SQLITE_OK, sqlite3_db) * SQLITE_CHECK_RC(sqlite3_reset(begin_stmt), SQLITE_OK, sqlite3_db) */ __pyx_t_10 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_step(__pyx_cur_scope->__pyx_v_begin_stmt), SQLITE_DONE, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_10 == -1)) __PYX_ERR(0, 556, __pyx_L8_error) /* "s3ql/deltadump.pyx":557 * SQLITE_CHECK_RC(sqlite3_step(commit_stmt), SQLITE_DONE, sqlite3_db) * SQLITE_CHECK_RC(sqlite3_step(begin_stmt), SQLITE_DONE, sqlite3_db) * SQLITE_CHECK_RC(sqlite3_reset(commit_stmt), SQLITE_OK, sqlite3_db) # <<<<<<<<<<<<<< * SQLITE_CHECK_RC(sqlite3_reset(begin_stmt), SQLITE_OK, sqlite3_db) * */ __pyx_t_10 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_reset(__pyx_cur_scope->__pyx_v_commit_stmt), SQLITE_OK, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_10 == -1)) __PYX_ERR(0, 557, __pyx_L8_error) /* "s3ql/deltadump.pyx":558 * SQLITE_CHECK_RC(sqlite3_step(begin_stmt), SQLITE_DONE, sqlite3_db) * SQLITE_CHECK_RC(sqlite3_reset(commit_stmt), SQLITE_OK, sqlite3_db) * SQLITE_CHECK_RC(sqlite3_reset(begin_stmt), SQLITE_OK, sqlite3_db) # <<<<<<<<<<<<<< * * cdef inline int write_integer(int64_t int64, FILE * fp) except -1: */ __pyx_t_10 = __pyx_f_4s3ql_9deltadump_SQLITE_CHECK_RC(sqlite3_reset(__pyx_cur_scope->__pyx_v_begin_stmt), SQLITE_OK, __pyx_cur_scope->__pyx_v_sqlite3_db); if (unlikely(__pyx_t_10 == -1)) __PYX_ERR(0, 558, __pyx_L8_error) /* "s3ql/deltadump.pyx":551 * * # Commit every once in a while * if i % trx_rows == 0: # <<<<<<<<<<<<<< * # This isn't 100% ok -- if we have an exception in step(begin_stmt), * # we the cleanup handler will execute the commit statement again */ } } /* "s3ql/deltadump.pyx":434 * raise ValueError("Can't access in-memory databases") * * with ExitStack() as cm: # <<<<<<<<<<<<<< * # Get SQLite connection * log.debug('Opening connection to %s', db.file) */ } __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L15_try_end; __pyx_L8_error:; __Pyx_PyThreadState_assign __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0; /*except:*/ { __Pyx_AddTraceback("s3ql.deltadump.load_table", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_14, &__pyx_t_1, &__pyx_t_4) < 0) __PYX_ERR(0, 434, __pyx_L10_except_error) 
__Pyx_GOTREF(__pyx_t_14); __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyTuple_Pack(3, __pyx_t_14, __pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 434, __pyx_L10_except_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_23 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_6, NULL); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_23)) __PYX_ERR(0, 434, __pyx_L10_except_error) __Pyx_GOTREF(__pyx_t_23); __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_23); __Pyx_DECREF(__pyx_t_23); __pyx_t_23 = 0; if (__pyx_t_2 < 0) __PYX_ERR(0, 434, __pyx_L10_except_error) __pyx_t_31 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_31) { __Pyx_GIVEREF(__pyx_t_14); __Pyx_GIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ErrRestoreWithState(__pyx_t_14, __pyx_t_1, __pyx_t_4); __pyx_t_14 = 0; __pyx_t_1 = 0; __pyx_t_4 = 0; __PYX_ERR(0, 434, __pyx_L10_except_error) } __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; goto __pyx_L9_exception_handled; } __pyx_L10_except_error:; __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_XGIVEREF(__pyx_t_9); __Pyx_ExceptionReset(__pyx_t_7, __pyx_t_8, __pyx_t_9); goto __pyx_L1_error; __pyx_L9_exception_handled:; __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_XGIVEREF(__pyx_t_9); __Pyx_ExceptionReset(__pyx_t_7, __pyx_t_8, __pyx_t_9); __pyx_L15_try_end:; } } /*finally:*/ { /*normal exit:*/{ if (__pyx_t_5) { __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_tuple__11, NULL); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 434, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } goto __pyx_L7; } __pyx_L7:; } goto __pyx_L50; __pyx_L4_error:; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L1_error; __pyx_L50:; } /* "s3ql/deltadump.pyx":401 * fwrite(buf, len_, fp) * * def load_table(table, columns, db, fh, trx_rows=5000): # <<<<<<<<<<<<<< * '''Load *columns* of *table* from *fh* * */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_11); __Pyx_XDECREF(__pyx_t_14); __Pyx_AddTraceback("s3ql.deltadump.load_table", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_cm); __Pyx_XDECREF(__pyx_v_dbfile_b); __Pyx_XDECREF(__pyx_v_pragma); __Pyx_XDECREF(__pyx_v_val); __Pyx_XDECREF(__pyx_v_cmd); __Pyx_XDECREF(__pyx_v_col_names); __Pyx_XDECREF(__pyx_v_query); __Pyx_DECREF(((PyObject *)__pyx_cur_scope)); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":560 * SQLITE_CHECK_RC(sqlite3_reset(begin_stmt), SQLITE_OK, sqlite3_db) * * cdef inline int write_integer(int64_t int64, FILE * fp) except -1: # <<<<<<<<<<<<<< * '''Write *int64* into *fp*, using as little space as possible * */ static CYTHON_INLINE int __pyx_f_4s3ql_9deltadump_write_integer(int64_t __pyx_v_int64, FILE *__pyx_v_fp) { unsigned int __pyx_v_int8; uint8_t __pyx_v_int8_real; size_t __pyx_v_len_; uint64_t __pyx_v_uint64; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; uint64_t __pyx_t_3; int __pyx_t_4; int __pyx_t_5; __Pyx_RefNannySetupContext("write_integer", 0); /* "s3ql/deltadump.pyx":576 * cdef uint64_t uint64 * * if int64 < 0: # <<<<<<<<<<<<<< * uint64 = -int64 * int8 = 0x80 # Highest bit 
set */ __pyx_t_1 = ((__pyx_v_int64 < 0) != 0); if (__pyx_t_1) { /* "s3ql/deltadump.pyx":577 * * if int64 < 0: * uint64 = -int64 # <<<<<<<<<<<<<< * int8 = 0x80 # Highest bit set * else: */ __pyx_v_uint64 = ((uint64_t)(-__pyx_v_int64)); /* "s3ql/deltadump.pyx":578 * if int64 < 0: * uint64 = -int64 * int8 = 0x80 # Highest bit set # <<<<<<<<<<<<<< * else: * uint64 = int64 */ __pyx_v_int8 = 0x80; /* "s3ql/deltadump.pyx":576 * cdef uint64_t uint64 * * if int64 < 0: # <<<<<<<<<<<<<< * uint64 = -int64 * int8 = 0x80 # Highest bit set */ goto __pyx_L3; } /* "s3ql/deltadump.pyx":580 * int8 = 0x80 # Highest bit set * else: * uint64 = int64 # <<<<<<<<<<<<<< * int8 = 0 * */ /*else*/ { __pyx_v_uint64 = ((uint64_t)__pyx_v_int64); /* "s3ql/deltadump.pyx":581 * else: * uint64 = int64 * int8 = 0 # <<<<<<<<<<<<<< * * if uint64 < 0x80 and uint64 not in (INT8, INT16, INT32, INT64): */ __pyx_v_int8 = 0; } __pyx_L3:; /* "s3ql/deltadump.pyx":583 * int8 = 0 * * if uint64 < 0x80 and uint64 not in (INT8, INT16, INT32, INT64): # <<<<<<<<<<<<<< * len_ = 0 * int8 += uint64 */ __pyx_t_2 = ((__pyx_v_uint64 < 0x80) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_3 = __pyx_v_uint64; __pyx_t_4 = ((__pyx_t_3 != __pyx_v_4s3ql_9deltadump_INT8) != 0); if (__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L7_bool_binop_done; } __pyx_t_4 = ((__pyx_t_3 != __pyx_v_4s3ql_9deltadump_INT16) != 0); if (__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L7_bool_binop_done; } __pyx_t_4 = ((__pyx_t_3 != __pyx_v_4s3ql_9deltadump_INT32) != 0); if (__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L7_bool_binop_done; } __pyx_t_4 = ((__pyx_t_3 != __pyx_v_4s3ql_9deltadump_INT64) != 0); __pyx_t_2 = __pyx_t_4; __pyx_L7_bool_binop_done:; __pyx_t_4 = (__pyx_t_2 != 0); __pyx_t_1 = __pyx_t_4; __pyx_L5_bool_binop_done:; if (__pyx_t_1) { /* "s3ql/deltadump.pyx":584 * * if uint64 < 0x80 and uint64 not in (INT8, INT16, INT32, INT64): * len_ = 0 # <<<<<<<<<<<<<< * int8 += uint64 * elif uint64 < UINT8_MAX: */ __pyx_v_len_ = 0; /* "s3ql/deltadump.pyx":585 * if uint64 < 0x80 and uint64 not in (INT8, INT16, INT32, INT64): * len_ = 0 * int8 += uint64 # <<<<<<<<<<<<<< * elif uint64 < UINT8_MAX: * len_ = 1 */ __pyx_v_int8 = (__pyx_v_int8 + ((uint8_t)__pyx_v_uint64)); /* "s3ql/deltadump.pyx":583 * int8 = 0 * * if uint64 < 0x80 and uint64 not in (INT8, INT16, INT32, INT64): # <<<<<<<<<<<<<< * len_ = 0 * int8 += uint64 */ goto __pyx_L4; } /* "s3ql/deltadump.pyx":586 * len_ = 0 * int8 += uint64 * elif uint64 < UINT8_MAX: # <<<<<<<<<<<<<< * len_ = 1 * int8 += INT8 */ __pyx_t_1 = ((__pyx_v_uint64 < UINT8_MAX) != 0); if (__pyx_t_1) { /* "s3ql/deltadump.pyx":587 * int8 += uint64 * elif uint64 < UINT8_MAX: * len_ = 1 # <<<<<<<<<<<<<< * int8 += INT8 * elif uint64 < UINT16_MAX: */ __pyx_v_len_ = 1; /* "s3ql/deltadump.pyx":588 * elif uint64 < UINT8_MAX: * len_ = 1 * int8 += INT8 # <<<<<<<<<<<<<< * elif uint64 < UINT16_MAX: * len_ = 2 */ __pyx_v_int8 = (__pyx_v_int8 + __pyx_v_4s3ql_9deltadump_INT8); /* "s3ql/deltadump.pyx":586 * len_ = 0 * int8 += uint64 * elif uint64 < UINT8_MAX: # <<<<<<<<<<<<<< * len_ = 1 * int8 += INT8 */ goto __pyx_L4; } /* "s3ql/deltadump.pyx":589 * len_ = 1 * int8 += INT8 * elif uint64 < UINT16_MAX: # <<<<<<<<<<<<<< * len_ = 2 * int8 += INT16 */ __pyx_t_1 = ((__pyx_v_uint64 < UINT16_MAX) != 0); if (__pyx_t_1) { /* "s3ql/deltadump.pyx":590 * int8 += INT8 * elif uint64 < UINT16_MAX: * len_ = 2 # <<<<<<<<<<<<<< * int8 += INT16 * elif uint64 < UINT32_MAX: */ __pyx_v_len_ = 2; /* 
"s3ql/deltadump.pyx":591 * elif uint64 < UINT16_MAX: * len_ = 2 * int8 += INT16 # <<<<<<<<<<<<<< * elif uint64 < UINT32_MAX: * len_ = 4 */ __pyx_v_int8 = (__pyx_v_int8 + __pyx_v_4s3ql_9deltadump_INT16); /* "s3ql/deltadump.pyx":589 * len_ = 1 * int8 += INT8 * elif uint64 < UINT16_MAX: # <<<<<<<<<<<<<< * len_ = 2 * int8 += INT16 */ goto __pyx_L4; } /* "s3ql/deltadump.pyx":592 * len_ = 2 * int8 += INT16 * elif uint64 < UINT32_MAX: # <<<<<<<<<<<<<< * len_ = 4 * int8 += INT32 */ __pyx_t_1 = ((__pyx_v_uint64 < UINT32_MAX) != 0); if (__pyx_t_1) { /* "s3ql/deltadump.pyx":593 * int8 += INT16 * elif uint64 < UINT32_MAX: * len_ = 4 # <<<<<<<<<<<<<< * int8 += INT32 * else: */ __pyx_v_len_ = 4; /* "s3ql/deltadump.pyx":594 * elif uint64 < UINT32_MAX: * len_ = 4 * int8 += INT32 # <<<<<<<<<<<<<< * else: * len_ = 8 */ __pyx_v_int8 = (__pyx_v_int8 + __pyx_v_4s3ql_9deltadump_INT32); /* "s3ql/deltadump.pyx":592 * len_ = 2 * int8 += INT16 * elif uint64 < UINT32_MAX: # <<<<<<<<<<<<<< * len_ = 4 * int8 += INT32 */ goto __pyx_L4; } /* "s3ql/deltadump.pyx":596 * int8 += INT32 * else: * len_ = 8 # <<<<<<<<<<<<<< * int8 += INT64 * */ /*else*/ { __pyx_v_len_ = 8; /* "s3ql/deltadump.pyx":597 * else: * len_ = 8 * int8 += INT64 # <<<<<<<<<<<<<< * * # Cast */ __pyx_v_int8 = (__pyx_v_int8 + __pyx_v_4s3ql_9deltadump_INT64); } __pyx_L4:; /* "s3ql/deltadump.pyx":600 * * # Cast * int8_real = int8 # <<<<<<<<<<<<<< * fwrite(&int8_real, 1, fp) * if len_ != 0: */ __pyx_v_int8_real = ((uint8_t)__pyx_v_int8); /* "s3ql/deltadump.pyx":601 * # Cast * int8_real = int8 * fwrite(&int8_real, 1, fp) # <<<<<<<<<<<<<< * if len_ != 0: * uint64 = htole64(uint64) */ __pyx_t_5 = __pyx_f_4s3ql_9deltadump_fwrite((&__pyx_v_int8_real), 1, __pyx_v_fp); if (unlikely(__pyx_t_5 == -1)) __PYX_ERR(0, 601, __pyx_L1_error) /* "s3ql/deltadump.pyx":602 * int8_real = int8 * fwrite(&int8_real, 1, fp) * if len_ != 0: # <<<<<<<<<<<<<< * uint64 = htole64(uint64) * fwrite(&uint64, len_, fp) */ __pyx_t_1 = ((__pyx_v_len_ != 0) != 0); if (__pyx_t_1) { /* "s3ql/deltadump.pyx":603 * fwrite(&int8_real, 1, fp) * if len_ != 0: * uint64 = htole64(uint64) # <<<<<<<<<<<<<< * fwrite(&uint64, len_, fp) * */ __pyx_v_uint64 = htole64(__pyx_v_uint64); /* "s3ql/deltadump.pyx":604 * if len_ != 0: * uint64 = htole64(uint64) * fwrite(&uint64, len_, fp) # <<<<<<<<<<<<<< * * # len <= 8, safe to cast */ __pyx_t_5 = __pyx_f_4s3ql_9deltadump_fwrite((&__pyx_v_uint64), __pyx_v_len_, __pyx_v_fp); if (unlikely(__pyx_t_5 == -1)) __PYX_ERR(0, 604, __pyx_L1_error) /* "s3ql/deltadump.pyx":602 * int8_real = int8 * fwrite(&int8_real, 1, fp) * if len_ != 0: # <<<<<<<<<<<<<< * uint64 = htole64(uint64) * fwrite(&uint64, len_, fp) */ } /* "s3ql/deltadump.pyx":607 * * # len <= 8, safe to cast * return len_ + 1 # <<<<<<<<<<<<<< * * cdef inline int read_integer(int64_t * out, FILE * fp) except -1: */ __pyx_r = (((int)__pyx_v_len_) + 1); goto __pyx_L0; /* "s3ql/deltadump.pyx":560 * SQLITE_CHECK_RC(sqlite3_reset(begin_stmt), SQLITE_OK, sqlite3_db) * * cdef inline int write_integer(int64_t int64, FILE * fp) except -1: # <<<<<<<<<<<<<< * '''Write *int64* into *fp*, using as little space as possible * */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("s3ql.deltadump.write_integer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "s3ql/deltadump.pyx":609 * return len_ + 1 * * cdef inline int read_integer(int64_t * out, FILE * fp) except -1: # <<<<<<<<<<<<<< * '''Read integer written using `write_integer` from *fp* * */ 
static CYTHON_INLINE int __pyx_f_4s3ql_9deltadump_read_integer(int64_t *__pyx_v_out, FILE *__pyx_v_fp) { uint8_t __pyx_v_int8; size_t __pyx_v_len_; uint64_t __pyx_v_uint64; char __pyx_v_negative; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; __Pyx_RefNannySetupContext("read_integer", 0); /* "s3ql/deltadump.pyx":620 * cdef char negative * * fread(&int8, 1, fp) # <<<<<<<<<<<<<< * * if int8 & 0x80 != 0: */ __pyx_t_1 = __pyx_f_4s3ql_9deltadump_fread((&__pyx_v_int8), 1, __pyx_v_fp); if (unlikely(__pyx_t_1 == -1)) __PYX_ERR(0, 620, __pyx_L1_error) /* "s3ql/deltadump.pyx":622 * fread(&int8, 1, fp) * * if int8 & 0x80 != 0: # <<<<<<<<<<<<<< * negative = 1 * # Need to cast again due to integer promotion */ __pyx_t_2 = (((__pyx_v_int8 & 0x80) != 0) != 0); if (__pyx_t_2) { /* "s3ql/deltadump.pyx":623 * * if int8 & 0x80 != 0: * negative = 1 # <<<<<<<<<<<<<< * # Need to cast again due to integer promotion * int8 = (int8 & (~ 0x80)) */ __pyx_v_negative = 1; /* "s3ql/deltadump.pyx":625 * negative = 1 * # Need to cast again due to integer promotion * int8 = (int8 & (~ 0x80)) # <<<<<<<<<<<<<< * else: * negative = 0 */ __pyx_v_int8 = ((uint8_t)(__pyx_v_int8 & (~0x80))); /* "s3ql/deltadump.pyx":622 * fread(&int8, 1, fp) * * if int8 & 0x80 != 0: # <<<<<<<<<<<<<< * negative = 1 * # Need to cast again due to integer promotion */ goto __pyx_L3; } /* "s3ql/deltadump.pyx":627 * int8 = (int8 & (~ 0x80)) * else: * negative = 0 # <<<<<<<<<<<<<< * * if int8 == INT8: */ /*else*/ { __pyx_v_negative = 0; } __pyx_L3:; /* "s3ql/deltadump.pyx":629 * negative = 0 * * if int8 == INT8: # <<<<<<<<<<<<<< * len_ = 1 * elif int8 == INT16: */ __pyx_t_2 = ((__pyx_v_int8 == __pyx_v_4s3ql_9deltadump_INT8) != 0); if (__pyx_t_2) { /* "s3ql/deltadump.pyx":630 * * if int8 == INT8: * len_ = 1 # <<<<<<<<<<<<<< * elif int8 == INT16: * len_ = 2 */ __pyx_v_len_ = 1; /* "s3ql/deltadump.pyx":629 * negative = 0 * * if int8 == INT8: # <<<<<<<<<<<<<< * len_ = 1 * elif int8 == INT16: */ goto __pyx_L4; } /* "s3ql/deltadump.pyx":631 * if int8 == INT8: * len_ = 1 * elif int8 == INT16: # <<<<<<<<<<<<<< * len_ = 2 * elif int8 == INT32: */ __pyx_t_2 = ((__pyx_v_int8 == __pyx_v_4s3ql_9deltadump_INT16) != 0); if (__pyx_t_2) { /* "s3ql/deltadump.pyx":632 * len_ = 1 * elif int8 == INT16: * len_ = 2 # <<<<<<<<<<<<<< * elif int8 == INT32: * len_ = 4 */ __pyx_v_len_ = 2; /* "s3ql/deltadump.pyx":631 * if int8 == INT8: * len_ = 1 * elif int8 == INT16: # <<<<<<<<<<<<<< * len_ = 2 * elif int8 == INT32: */ goto __pyx_L4; } /* "s3ql/deltadump.pyx":633 * elif int8 == INT16: * len_ = 2 * elif int8 == INT32: # <<<<<<<<<<<<<< * len_ = 4 * elif int8 == INT64: */ __pyx_t_2 = ((__pyx_v_int8 == __pyx_v_4s3ql_9deltadump_INT32) != 0); if (__pyx_t_2) { /* "s3ql/deltadump.pyx":634 * len_ = 2 * elif int8 == INT32: * len_ = 4 # <<<<<<<<<<<<<< * elif int8 == INT64: * len_ = 8 */ __pyx_v_len_ = 4; /* "s3ql/deltadump.pyx":633 * elif int8 == INT16: * len_ = 2 * elif int8 == INT32: # <<<<<<<<<<<<<< * len_ = 4 * elif int8 == INT64: */ goto __pyx_L4; } /* "s3ql/deltadump.pyx":635 * elif int8 == INT32: * len_ = 4 * elif int8 == INT64: # <<<<<<<<<<<<<< * len_ = 8 * else: */ __pyx_t_2 = ((__pyx_v_int8 == __pyx_v_4s3ql_9deltadump_INT64) != 0); if (__pyx_t_2) { /* "s3ql/deltadump.pyx":636 * len_ = 4 * elif int8 == INT64: * len_ = 8 # <<<<<<<<<<<<<< * else: * len_ = 0 */ __pyx_v_len_ = 8; /* "s3ql/deltadump.pyx":635 * elif int8 == INT32: * len_ = 4 * elif int8 == INT64: # <<<<<<<<<<<<<< * len_ = 8 * else: */ goto __pyx_L4; } /* "s3ql/deltadump.pyx":638 * len_ = 8 * else: 
* len_ = 0 # <<<<<<<<<<<<<< * uint64 = int8 * */ /*else*/ { __pyx_v_len_ = 0; /* "s3ql/deltadump.pyx":639 * else: * len_ = 0 * uint64 = int8 # <<<<<<<<<<<<<< * * if len_ != 0: */ __pyx_v_uint64 = __pyx_v_int8; } __pyx_L4:; /* "s3ql/deltadump.pyx":641 * uint64 = int8 * * if len_ != 0: # <<<<<<<<<<<<<< * uint64 = 0 * fread(&uint64, len_, fp) */ __pyx_t_2 = ((__pyx_v_len_ != 0) != 0); if (__pyx_t_2) { /* "s3ql/deltadump.pyx":642 * * if len_ != 0: * uint64 = 0 # <<<<<<<<<<<<<< * fread(&uint64, len_, fp) * uint64 = le64toh(uint64) */ __pyx_v_uint64 = 0; /* "s3ql/deltadump.pyx":643 * if len_ != 0: * uint64 = 0 * fread(&uint64, len_, fp) # <<<<<<<<<<<<<< * uint64 = le64toh(uint64) * */ __pyx_t_1 = __pyx_f_4s3ql_9deltadump_fread((&__pyx_v_uint64), __pyx_v_len_, __pyx_v_fp); if (unlikely(__pyx_t_1 == -1)) __PYX_ERR(0, 643, __pyx_L1_error) /* "s3ql/deltadump.pyx":644 * uint64 = 0 * fread(&uint64, len_, fp) * uint64 = le64toh(uint64) # <<<<<<<<<<<<<< * * if negative == 1: */ __pyx_v_uint64 = le64toh(__pyx_v_uint64); /* "s3ql/deltadump.pyx":641 * uint64 = int8 * * if len_ != 0: # <<<<<<<<<<<<<< * uint64 = 0 * fread(&uint64, len_, fp) */ } /* "s3ql/deltadump.pyx":646 * uint64 = le64toh(uint64) * * if negative == 1: # <<<<<<<<<<<<<< * out[0] = - < int64_t > uint64 * else: */ __pyx_t_2 = ((__pyx_v_negative == 1) != 0); if (__pyx_t_2) { /* "s3ql/deltadump.pyx":647 * * if negative == 1: * out[0] = - < int64_t > uint64 # <<<<<<<<<<<<<< * else: * out[0] = < int64_t > uint64 */ (__pyx_v_out[0]) = (-((int64_t)__pyx_v_uint64)); /* "s3ql/deltadump.pyx":646 * uint64 = le64toh(uint64) * * if negative == 1: # <<<<<<<<<<<<<< * out[0] = - < int64_t > uint64 * else: */ goto __pyx_L6; } /* "s3ql/deltadump.pyx":649 * out[0] = - < int64_t > uint64 * else: * out[0] = < int64_t > uint64 # <<<<<<<<<<<<<< * * # len <= 8, safe to cast */ /*else*/ { (__pyx_v_out[0]) = ((int64_t)__pyx_v_uint64); } __pyx_L6:; /* "s3ql/deltadump.pyx":652 * * # len <= 8, safe to cast * return len_ + 1 # <<<<<<<<<<<<<< */ __pyx_r = (((int)__pyx_v_len_) + 1); goto __pyx_L0; /* "s3ql/deltadump.pyx":609 * return len_ + 1 * * cdef inline int read_integer(int64_t * out, FILE * fp) except -1: # <<<<<<<<<<<<<< * '''Read integer written using `write_integer` from *fp* * */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("s3ql.deltadump.read_integer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table *__pyx_freelist_4s3ql_9deltadump___pyx_scope_struct__dump_table[8]; static int __pyx_freecount_4s3ql_9deltadump___pyx_scope_struct__dump_table = 0; static PyObject *__pyx_tp_new_4s3ql_9deltadump___pyx_scope_struct__dump_table(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { PyObject *o; if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_4s3ql_9deltadump___pyx_scope_struct__dump_table > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table)))) { o = (PyObject*)__pyx_freelist_4s3ql_9deltadump___pyx_scope_struct__dump_table[--__pyx_freecount_4s3ql_9deltadump___pyx_scope_struct__dump_table]; memset(o, 0, sizeof(struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table)); (void) PyObject_INIT(o, t); } else { o = (*t->tp_alloc)(t, 0); if (unlikely(!o)) return 0; } return o; } static void __pyx_tp_dealloc_4s3ql_9deltadump___pyx_scope_struct__dump_table(PyObject *o) { if (CYTHON_COMPILING_IN_CPYTHON && 
((__pyx_freecount_4s3ql_9deltadump___pyx_scope_struct__dump_table < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table)))) { __pyx_freelist_4s3ql_9deltadump___pyx_scope_struct__dump_table[__pyx_freecount_4s3ql_9deltadump___pyx_scope_struct__dump_table++] = ((struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table *)o); } else { (*Py_TYPE(o)->tp_free)(o); } } static PyTypeObject __pyx_type_4s3ql_9deltadump___pyx_scope_struct__dump_table = { PyVarObject_HEAD_INIT(0, 0) "s3ql.deltadump.__pyx_scope_struct__dump_table", /*tp_name*/ sizeof(struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct__dump_table), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_4s3ql_9deltadump___pyx_scope_struct__dump_table, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ 0, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_4s3ql_9deltadump___pyx_scope_struct__dump_table, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *__pyx_freelist_4s3ql_9deltadump___pyx_scope_struct_1_load_table[8]; static int __pyx_freecount_4s3ql_9deltadump___pyx_scope_struct_1_load_table = 0; static PyObject *__pyx_tp_new_4s3ql_9deltadump___pyx_scope_struct_1_load_table(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { PyObject *o; if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_4s3ql_9deltadump___pyx_scope_struct_1_load_table > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table)))) { o = (PyObject*)__pyx_freelist_4s3ql_9deltadump___pyx_scope_struct_1_load_table[--__pyx_freecount_4s3ql_9deltadump___pyx_scope_struct_1_load_table]; memset(o, 0, sizeof(struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table)); (void) PyObject_INIT(o, t); } else { o = (*t->tp_alloc)(t, 0); if (unlikely(!o)) return 0; } return o; } static void __pyx_tp_dealloc_4s3ql_9deltadump___pyx_scope_struct_1_load_table(PyObject *o) { if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_4s3ql_9deltadump___pyx_scope_struct_1_load_table < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table)))) { __pyx_freelist_4s3ql_9deltadump___pyx_scope_struct_1_load_table[__pyx_freecount_4s3ql_9deltadump___pyx_scope_struct_1_load_table++] = ((struct __pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table *)o); } else { (*Py_TYPE(o)->tp_free)(o); } } static PyTypeObject __pyx_type_4s3ql_9deltadump___pyx_scope_struct_1_load_table = { PyVarObject_HEAD_INIT(0, 0) "s3ql.deltadump.__pyx_scope_struct_1_load_table", /*tp_name*/ sizeof(struct 
__pyx_obj_4s3ql_9deltadump___pyx_scope_struct_1_load_table), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_4s3ql_9deltadump___pyx_scope_struct_1_load_table, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ 0, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_4s3ql_9deltadump___pyx_scope_struct_1_load_table, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { #if PY_VERSION_HEX < 0x03020000 { PyObject_HEAD_INIT(NULL) NULL, 0, NULL }, #else PyModuleDef_HEAD_INIT, #endif "deltadump", __pyx_k_deltadump_pyx_this_file_is_part, /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_b_BEGIN_TRANSACTION, __pyx_k_BEGIN_TRANSACTION, sizeof(__pyx_k_BEGIN_TRANSACTION), 0, 0, 0, 0}, {&__pyx_n_s_BLOB, __pyx_k_BLOB, sizeof(__pyx_k_BLOB), 0, 0, 1, 1}, {&__pyx_kp_u_BLOB_too_large_to_read_d_vs_d, __pyx_k_BLOB_too_large_to_read_d_vs_d, sizeof(__pyx_k_BLOB_too_large_to_read_d_vs_d), 0, 1, 0, 0}, {&__pyx_kp_b_COMMIT_TRANSACTION, __pyx_k_COMMIT_TRANSACTION, sizeof(__pyx_k_COMMIT_TRANSACTION), 0, 0, 0, 0}, {&__pyx_kp_u_Can_not_dump_BLOB_of_size_d_max, __pyx_k_Can_not_dump_BLOB_of_size_d_max, sizeof(__pyx_k_Can_not_dump_BLOB_of_size_d_max), 0, 1, 0, 0}, {&__pyx_kp_u_Can_t_access_in_memory_databases, __pyx_k_Can_t_access_in_memory_databases, sizeof(__pyx_k_Can_t_access_in_memory_databases), 0, 1, 0, 0}, {&__pyx_kp_u_Can_t_dump_NULL_values, __pyx_k_Can_t_dump_NULL_values, sizeof(__pyx_k_Can_t_dump_NULL_values), 0, 1, 0, 0}, {&__pyx_kp_u_Corrupted_input, __pyx_k_Corrupted_input, sizeof(__pyx_k_Corrupted_input), 0, 1, 0, 0}, {&__pyx_n_s_ExitStack, __pyx_k_ExitStack, sizeof(__pyx_k_ExitStack), 0, 0, 1, 1}, {&__pyx_kp_u_INSERT_INTO_s_s_VALUES_s, __pyx_k_INSERT_INTO_s_s_VALUES_s, sizeof(__pyx_k_INSERT_INTO_s_s_VALUES_s), 0, 1, 0, 0}, {&__pyx_n_s_INTEGER, __pyx_k_INTEGER, sizeof(__pyx_k_INTEGER), 0, 0, 1, 1}, {&__pyx_n_s_IOError, __pyx_k_IOError, sizeof(__pyx_k_IOError), 0, 0, 1, 1}, {&__pyx_kp_u_Invalid_type_for_column_d, __pyx_k_Invalid_type_for_column_d, sizeof(__pyx_k_Invalid_type_for_column_d), 0, 1, 0, 0}, {&__pyx_kp_u_Length_d_d_in_column_d, __pyx_k_Length_d_d_in_column_d, sizeof(__pyx_k_Length_d_d_in_column_d), 0, 1, 0, 0}, {&__pyx_n_s_MAX_BLOB_SIZE, __pyx_k_MAX_BLOB_SIZE, sizeof(__pyx_k_MAX_BLOB_SIZE), 0, 0, 1, 1}, {&__pyx_n_s_OSError, __pyx_k_OSError, sizeof(__pyx_k_OSError), 0, 0, 1, 1}, {&__pyx_kp_u_Opening_connection_to_s, __pyx_k_Opening_connection_to_s, 
sizeof(__pyx_k_Opening_connection_to_s), 0, 1, 0, 0}, {&__pyx_kp_u_PRAGMA_s, __pyx_k_PRAGMA_s, sizeof(__pyx_k_PRAGMA_s), 0, 1, 0, 0}, {&__pyx_kp_u_PRAGMA_s_s, __pyx_k_PRAGMA_s_s, sizeof(__pyx_k_PRAGMA_s_s), 0, 1, 0, 0}, {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, {&__pyx_kp_u_SELECT_COUNT_rowid_FROM_s, __pyx_k_SELECT_COUNT_rowid_FROM_s, sizeof(__pyx_k_SELECT_COUNT_rowid_FROM_s), 0, 1, 0, 0}, {&__pyx_kp_u_SELECT_s_FROM_s_ORDER_BY_s, __pyx_k_SELECT_s_FROM_s_ORDER_BY_s, sizeof(__pyx_k_SELECT_s_FROM_s_ORDER_BY_s), 0, 1, 0, 0}, {&__pyx_kp_u_SQLite_code_used_by_APSW_was_com, __pyx_k_SQLite_code_used_by_APSW_was_com, sizeof(__pyx_k_SQLite_code_used_by_APSW_was_com), 0, 1, 0, 0}, {&__pyx_kp_u_SQLite_version_mismatch_between, __pyx_k_SQLite_version_mismatch_between, sizeof(__pyx_k_SQLite_version_mismatch_between), 0, 1, 0, 0}, {&__pyx_n_s_TIME, __pyx_k_TIME, sizeof(__pyx_k_TIME), 0, 0, 1, 1}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_kp_u__4, __pyx_k__4, sizeof(__pyx_k__4), 0, 1, 0, 0}, {&__pyx_kp_u__9, __pyx_k__9, sizeof(__pyx_k__9), 0, 1, 0, 0}, {&__pyx_n_s_apsw, __pyx_k_apsw, sizeof(__pyx_k_apsw), 0, 0, 1, 1}, {&__pyx_kp_u_apsw_sqlite_compile_options_s_s3, __pyx_k_apsw_sqlite_compile_options_s_s3, sizeof(__pyx_k_apsw_sqlite_compile_options_s_s3), 0, 1, 0, 0}, {&__pyx_n_s_apsw_sqlite_options, __pyx_k_apsw_sqlite_options, sizeof(__pyx_k_apsw_sqlite_options), 0, 0, 1, 1}, {&__pyx_n_s_apsw_sqlite_version, __pyx_k_apsw_sqlite_version, sizeof(__pyx_k_apsw_sqlite_version), 0, 0, 1, 1}, {&__pyx_kp_u_apsw_sqlite_version_s_s3ql_sqlit, __pyx_k_apsw_sqlite_version_s_s3ql_sqlit, sizeof(__pyx_k_apsw_sqlite_version_s_s3ql_sqlit), 0, 1, 0, 0}, {&__pyx_n_s_begin_stmt, __pyx_k_begin_stmt, sizeof(__pyx_k_begin_stmt), 0, 0, 1, 1}, {&__pyx_n_s_buf, __pyx_k_buf, sizeof(__pyx_k_buf), 0, 0, 1, 1}, {&__pyx_n_s_callback, __pyx_k_callback, sizeof(__pyx_k_callback), 0, 0, 1, 1}, {&__pyx_n_s_check_sqlite, __pyx_k_check_sqlite, sizeof(__pyx_k_check_sqlite), 0, 0, 1, 1}, {&__pyx_n_s_cm, __pyx_k_cm, sizeof(__pyx_k_cm), 0, 0, 1, 1}, {&__pyx_n_s_cmd, __pyx_k_cmd, sizeof(__pyx_k_cmd), 0, 0, 1, 1}, {&__pyx_n_s_col_args, __pyx_k_col_args, sizeof(__pyx_k_col_args), 0, 0, 1, 1}, {&__pyx_n_s_col_count, __pyx_k_col_count, sizeof(__pyx_k_col_count), 0, 0, 1, 1}, {&__pyx_n_s_col_names, __pyx_k_col_names, sizeof(__pyx_k_col_names), 0, 0, 1, 1}, {&__pyx_n_s_col_types, __pyx_k_col_types, sizeof(__pyx_k_col_types), 0, 0, 1, 1}, {&__pyx_n_s_columns, __pyx_k_columns, sizeof(__pyx_k_columns), 0, 0, 1, 1}, {&__pyx_n_s_commit_stmt, __pyx_k_commit_stmt, sizeof(__pyx_k_commit_stmt), 0, 0, 1, 1}, {&__pyx_n_s_compile_options, __pyx_k_compile_options, sizeof(__pyx_k_compile_options), 0, 0, 1, 1}, {&__pyx_n_s_contextlib, __pyx_k_contextlib, sizeof(__pyx_k_contextlib), 0, 0, 1, 1}, {&__pyx_n_s_count, __pyx_k_count, sizeof(__pyx_k_count), 0, 0, 1, 1}, {&__pyx_n_s_db, __pyx_k_db, sizeof(__pyx_k_db), 0, 0, 1, 1}, {&__pyx_n_s_dbfile_b, __pyx_k_dbfile_b, sizeof(__pyx_k_dbfile_b), 0, 0, 1, 1}, {&__pyx_n_s_debug, __pyx_k_debug, sizeof(__pyx_k_debug), 0, 0, 1, 1}, {&__pyx_n_s_dump_table, __pyx_k_dump_table, sizeof(__pyx_k_dump_table), 0, 0, 1, 1}, {&__pyx_n_s_dump_table_locals_lambda, __pyx_k_dump_table_locals_lambda, sizeof(__pyx_k_dump_table_locals_lambda), 0, 0, 1, 1}, {&__pyx_kp_u_dump_table_s_writing_d_rows, __pyx_k_dump_table_s_writing_d_rows, sizeof(__pyx_k_dump_table_s_writing_d_rows), 0, 1, 0, 0}, {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 
0, 0, 1, 1}, {&__pyx_n_s_enter, __pyx_k_enter, sizeof(__pyx_k_enter), 0, 0, 1, 1}, {&__pyx_n_s_exceptionfor, __pyx_k_exceptionfor, sizeof(__pyx_k_exceptionfor), 0, 0, 1, 1}, {&__pyx_n_s_exit, __pyx_k_exit, sizeof(__pyx_k_exit), 0, 0, 1, 1}, {&__pyx_n_s_fh, __pyx_k_fh, sizeof(__pyx_k_fh), 0, 0, 1, 1}, {&__pyx_n_s_file, __pyx_k_file, sizeof(__pyx_k_file), 0, 0, 1, 1}, {&__pyx_n_s_fileno, __pyx_k_fileno, sizeof(__pyx_k_fileno), 0, 0, 1, 1}, {&__pyx_n_u_foreign_keys, __pyx_k_foreign_keys, sizeof(__pyx_k_foreign_keys), 0, 1, 0, 1}, {&__pyx_n_s_fp, __pyx_k_fp, sizeof(__pyx_k_fp), 0, 0, 1, 1}, {&__pyx_n_s_getLogger, __pyx_k_getLogger, sizeof(__pyx_k_getLogger), 0, 0, 1, 1}, {&__pyx_n_s_get_val, __pyx_k_get_val, sizeof(__pyx_k_get_val), 0, 0, 1, 1}, {&__pyx_n_s_getfilesystemencoding, __pyx_k_getfilesystemencoding, sizeof(__pyx_k_getfilesystemencoding), 0, 0, 1, 1}, {&__pyx_kp_s_home_nikratio_in_progress_s3ql, __pyx_k_home_nikratio_in_progress_s3ql, sizeof(__pyx_k_home_nikratio_in_progress_s3ql), 0, 0, 1, 0}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_idx, __pyx_k_idx, sizeof(__pyx_k_idx), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_int64, __pyx_k_int64, sizeof(__pyx_k_int64), 0, 0, 1, 1}, {&__pyx_n_s_int64_prev, __pyx_k_int64_prev, sizeof(__pyx_k_int64_prev), 0, 0, 1, 1}, {&__pyx_n_s_itertools, __pyx_k_itertools, sizeof(__pyx_k_itertools), 0, 0, 1, 1}, {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, {&__pyx_n_s_len, __pyx_k_len, sizeof(__pyx_k_len), 0, 0, 1, 1}, {&__pyx_n_s_load_table, __pyx_k_load_table, sizeof(__pyx_k_load_table), 0, 0, 1, 1}, {&__pyx_n_s_load_table_locals_lambda, __pyx_k_load_table_locals_lambda, sizeof(__pyx_k_load_table_locals_lambda), 0, 0, 1, 1}, {&__pyx_kp_u_load_table_s_reading_d_rows, __pyx_k_load_table_s_reading_d_rows, sizeof(__pyx_k_load_table_s_reading_d_rows), 0, 1, 0, 0}, {&__pyx_n_s_log, __pyx_k_log, sizeof(__pyx_k_log), 0, 0, 1, 1}, {&__pyx_n_s_logging, __pyx_k_logging, sizeof(__pyx_k_logging), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_kp_u_memory, __pyx_k_memory, sizeof(__pyx_k_memory), 0, 1, 0, 0}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_order, __pyx_k_order, sizeof(__pyx_k_order), 0, 0, 1, 1}, {&__pyx_n_s_os, __pyx_k_os, sizeof(__pyx_k_os), 0, 0, 1, 1}, {&__pyx_n_s_pragma, __pyx_k_pragma, sizeof(__pyx_k_pragma), 0, 0, 1, 1}, {&__pyx_n_s_query, __pyx_k_query, sizeof(__pyx_k_query), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_rc, __pyx_k_rc, sizeof(__pyx_k_rc), 0, 0, 1, 1}, {&__pyx_n_s_row_count, __pyx_k_row_count, sizeof(__pyx_k_row_count), 0, 0, 1, 1}, {&__pyx_n_s_s3ql_deltadump, __pyx_k_s3ql_deltadump, sizeof(__pyx_k_s3ql_deltadump), 0, 0, 1, 1}, {&__pyx_n_s_s3ql_sqlite_options, __pyx_k_s3ql_sqlite_options, sizeof(__pyx_k_s3ql_sqlite_options), 0, 0, 1, 1}, {&__pyx_n_s_s3ql_sqlite_version, __pyx_k_s3ql_sqlite_version, sizeof(__pyx_k_s3ql_sqlite_version), 0, 0, 1, 1}, {&__pyx_n_s_sqlite3_db, __pyx_k_sqlite3_db, sizeof(__pyx_k_sqlite3_db), 0, 0, 1, 1}, {&__pyx_n_s_sqlitelibversion, __pyx_k_sqlitelibversion, sizeof(__pyx_k_sqlitelibversion), 0, 0, 1, 1}, {&__pyx_n_s_stmt, __pyx_k_stmt, sizeof(__pyx_k_stmt), 0, 0, 1, 1}, {&__pyx_n_u_surrogateescape, __pyx_k_surrogateescape, sizeof(__pyx_k_surrogateescape), 0, 1, 0, 1}, {&__pyx_n_u_synchronous, __pyx_k_synchronous, sizeof(__pyx_k_synchronous), 0, 1, 0, 1}, {&__pyx_n_s_sys, __pyx_k_sys, 
sizeof(__pyx_k_sys), 0, 0, 1, 1}, {&__pyx_n_s_table, __pyx_k_table, sizeof(__pyx_k_table), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_tmp, __pyx_k_tmp, sizeof(__pyx_k_tmp), 0, 0, 1, 1}, {&__pyx_n_s_trx_rows, __pyx_k_trx_rows, sizeof(__pyx_k_trx_rows), 0, 0, 1, 1}, {&__pyx_n_s_val, __pyx_k_val, sizeof(__pyx_k_val), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_OSError = __Pyx_GetBuiltinName(__pyx_n_s_OSError); if (!__pyx_builtin_OSError) __PYX_ERR(0, 136, __pyx_L1_error) __pyx_builtin_IOError = __Pyx_GetBuiltinName(__pyx_n_s_IOError); if (!__pyx_builtin_IOError) __PYX_ERR(0, 116, __pyx_L1_error) __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 206, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(0, 208, __pyx_L1_error) __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(0, 258, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "s3ql/deltadump.pyx":263 * apsw_sqlite_options = set(apsw.compile_options) * s3ql_sqlite_options = set() * for idx in itertools.count(0): # <<<<<<<<<<<<<< * buf = sqlite3_compileoption_get(idx) * if buf is NULL: */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_int_0); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 263, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "s3ql/deltadump.pyx":319 * * if db.file == ':memory:': * raise ValueError("Can't access in-memory databases") # <<<<<<<<<<<<<< * * with ExitStack() as cm: */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Can_t_access_in_memory_databases); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(0, 319, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "s3ql/deltadump.pyx":368 * for i in range(col_count): * if sqlite3_column_type(stmt, i) is SQLITE_NULL: * raise ValueError("Can't dump NULL values") # <<<<<<<<<<<<<< * * if col_types[i] == _INTEGER: */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Can_t_dump_NULL_values); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(0, 368, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "s3ql/deltadump.pyx":321 * raise ValueError("Can't access in-memory databases") * * with ExitStack() as cm: # <<<<<<<<<<<<<< * # Get SQLite connection * log.debug('Opening connection to %s', db.file) */ __pyx_tuple__6 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(0, 321, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "s3ql/deltadump.pyx":432 * * if db.file == ':memory:': * raise ValueError("Can't access in-memory databases") # <<<<<<<<<<<<<< * * with ExitStack() as cm: */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_Can_t_access_in_memory_databases); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(0, 432, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "s3ql/deltadump.pyx":447 * * # Copy settings * for pragma in ('synchronous', 'foreign_keys'): # <<<<<<<<<<<<<< * val = db.get_val('PRAGMA %s' % pragma) * cmd = ('PRAGMA %s = %s' % (pragma, val)).encode('utf-8') */ __pyx_tuple__8 = PyTuple_Pack(2, __pyx_n_u_synchronous, __pyx_n_u_foreign_keys); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(0, 447, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "s3ql/deltadump.pyx":532 * tmp = int64_prev[j] + int64 * if tmp < 0 or tmp > INT_MAX: * raise RuntimeError('Corrupted input') # <<<<<<<<<<<<<< * len_ = tmp * int64_prev[j] = tmp */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_u_Corrupted_input); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(0, 532, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "s3ql/deltadump.pyx":434 * raise ValueError("Can't access in-memory databases") * * with ExitStack() as cm: # <<<<<<<<<<<<<< * # Get SQLite connection * log.debug('Opening connection to %s', db.file) */ __pyx_tuple__11 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(0, 434, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* "s3ql/deltadump.pyx":237 * return fp * * def check_sqlite(): # <<<<<<<<<<<<<< * '''Check if deltadump and apsw module use compatible SQLite code. * */ __pyx_tuple__12 = PyTuple_Pack(6, __pyx_n_s_buf, __pyx_n_s_apsw_sqlite_version, __pyx_n_s_s3ql_sqlite_version, __pyx_n_s_apsw_sqlite_options, __pyx_n_s_s3ql_sqlite_options, __pyx_n_s_idx); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(0, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_GIVEREF(__pyx_tuple__12); __pyx_codeobj__13 = (PyObject*)__Pyx_PyCode_New(0, 0, 6, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__12, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_nikratio_in_progress_s3ql, __pyx_n_s_check_sqlite, 237, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__13)) __PYX_ERR(0, 237, __pyx_L1_error) /* "s3ql/deltadump.pyx":280 * s3ql_sqlite_options - apsw_sqlite_options)) * * def dump_table(table, order, columns, db, fh): # <<<<<<<<<<<<<< * '''Dump *columns* of *table* into *fh* * */ __pyx_tuple__14 = PyTuple_Pack(23, __pyx_n_s_table, __pyx_n_s_order, __pyx_n_s_columns, __pyx_n_s_db, __pyx_n_s_fh, __pyx_n_s_sqlite3_db, __pyx_n_s_stmt, __pyx_n_s_col_types, __pyx_n_s_col_args, __pyx_n_s_col_count, __pyx_n_s_rc, __pyx_n_s_i, __pyx_n_s_len, __pyx_n_s_int64_prev, __pyx_n_s_int64, __pyx_n_s_tmp, __pyx_n_s_fp, __pyx_n_s_buf, __pyx_n_s_row_count, __pyx_n_s_cm, __pyx_n_s_dbfile_b, __pyx_n_s_col_names, __pyx_n_s_query); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(0, 280, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__14); __Pyx_GIVEREF(__pyx_tuple__14); __pyx_codeobj__15 = (PyObject*)__Pyx_PyCode_New(5, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__14, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_nikratio_in_progress_s3ql, __pyx_n_s_dump_table, 280, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__15)) __PYX_ERR(0, 280, __pyx_L1_error) /* "s3ql/deltadump.pyx":401 * fwrite(buf, len_, fp) * * def load_table(table, columns, db, fh, trx_rows=5000): # <<<<<<<<<<<<<< * '''Load *columns* of *table* from *fh* * */ __pyx_tuple__16 = PyTuple_Pack(29, __pyx_n_s_table, __pyx_n_s_columns, __pyx_n_s_db, __pyx_n_s_fh, __pyx_n_s_trx_rows, __pyx_n_s_sqlite3_db, __pyx_n_s_stmt, __pyx_n_s_begin_stmt, __pyx_n_s_commit_stmt, __pyx_n_s_col_types, __pyx_n_s_col_args, __pyx_n_s_col_count, __pyx_n_s_rc, __pyx_n_s_len, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_int64_prev, __pyx_n_s_fp, __pyx_n_s_buf, __pyx_n_s_row_count, __pyx_n_s_int64, __pyx_n_s_tmp, __pyx_n_s_cm, __pyx_n_s_dbfile_b, __pyx_n_s_pragma, __pyx_n_s_val, __pyx_n_s_cmd, __pyx_n_s_col_names, __pyx_n_s_query); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__16); 
__Pyx_GIVEREF(__pyx_tuple__16); __pyx_codeobj__17 = (PyObject*)__Pyx_PyCode_New(5, 0, 29, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__16, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_nikratio_in_progress_s3ql, __pyx_n_s_load_table, 401, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__17)) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_4096 = PyInt_FromLong(4096); if (unlikely(!__pyx_int_4096)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_5000 = PyInt_FromLong(5000); if (unlikely(!__pyx_int_5000)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initdeltadump(void); /*proto*/ PyMODINIT_FUNC initdeltadump(void) #else PyMODINIT_FUNC PyInit_deltadump(void); /*proto*/ PyMODINIT_FUNC PyInit_deltadump(void) #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_deltadump(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? 
*/ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("deltadump", __pyx_methods, __pyx_k_deltadump_pyx_this_file_is_part, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. ---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_s3ql__deltadump) { if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "s3ql.deltadump")) { if (unlikely(PyDict_SetItemString(modules, "s3ql.deltadump", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ if (PyType_Ready(&__pyx_type_4s3ql_9deltadump___pyx_scope_struct__dump_table) < 0) __PYX_ERR(0, 280, __pyx_L1_error) __pyx_type_4s3ql_9deltadump___pyx_scope_struct__dump_table.tp_print = 0; __pyx_ptype_4s3ql_9deltadump___pyx_scope_struct__dump_table = &__pyx_type_4s3ql_9deltadump___pyx_scope_struct__dump_table; if (PyType_Ready(&__pyx_type_4s3ql_9deltadump___pyx_scope_struct_1_load_table) < 0) __PYX_ERR(0, 401, __pyx_L1_error) __pyx_type_4s3ql_9deltadump___pyx_scope_struct_1_load_table.tp_print = 0; __pyx_ptype_4s3ql_9deltadump___pyx_scope_struct_1_load_table = &__pyx_type_4s3ql_9deltadump___pyx_scope_struct_1_load_table; /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(1, 9, __pyx_L1_error) /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "s3ql/deltadump.pyx":79 * SQLITE_OPEN_READONLY * * from contextlib import ExitStack # <<<<<<<<<<<<<< * import apsw * import os */ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 79, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_n_s_ExitStack); __Pyx_GIVEREF(__pyx_n_s_ExitStack); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_ExitStack); __pyx_t_2 = __Pyx_Import(__pyx_n_s_contextlib, __pyx_t_1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 79, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = 
__Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_ExitStack); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 79, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_ExitStack, __pyx_t_1) < 0) __PYX_ERR(0, 79, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "s3ql/deltadump.pyx":80 * * from contextlib import ExitStack * import apsw # <<<<<<<<<<<<<< * import os * from .logging import logging # Ensure use of custom logger class */ __pyx_t_2 = __Pyx_Import(__pyx_n_s_apsw, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 80, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_apsw, __pyx_t_2) < 0) __PYX_ERR(0, 80, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "s3ql/deltadump.pyx":81 * from contextlib import ExitStack * import apsw * import os # <<<<<<<<<<<<<< * from .logging import logging # Ensure use of custom logger class * import itertools */ __pyx_t_2 = __Pyx_Import(__pyx_n_s_os, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 81, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_os, __pyx_t_2) < 0) __PYX_ERR(0, 81, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "s3ql/deltadump.pyx":82 * import apsw * import os * from .logging import logging # Ensure use of custom logger class # <<<<<<<<<<<<<< * import itertools * import sys */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 82, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_logging); __Pyx_GIVEREF(__pyx_n_s_logging); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_logging); __pyx_t_1 = __Pyx_Import(__pyx_n_s_logging, __pyx_t_2, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 82, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_logging); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 82, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_logging, __pyx_t_2) < 0) __PYX_ERR(0, 82, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "s3ql/deltadump.pyx":83 * import os * from .logging import logging # Ensure use of custom logger class * import itertools # <<<<<<<<<<<<<< * import sys * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_itertools, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 83, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_itertools, __pyx_t_1) < 0) __PYX_ERR(0, 83, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "s3ql/deltadump.pyx":84 * from .logging import logging # Ensure use of custom logger class * import itertools * import sys # <<<<<<<<<<<<<< * * log = logging.getLogger(__name__) */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_sys, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 84, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_sys, __pyx_t_1) < 0) __PYX_ERR(0, 84, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "s3ql/deltadump.pyx":86 * import sys * * log = logging.getLogger(__name__) # <<<<<<<<<<<<<< * * # Column types */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_logging); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 86, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_getLogger); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 86, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_name); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 86, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } if (!__pyx_t_4) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 86, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_4, __pyx_t_2}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 86, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_4, __pyx_t_2}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 86, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { __pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 86, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = NULL; __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 86, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_log, __pyx_t_1) < 0) __PYX_ERR(0, 86, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "s3ql/deltadump.pyx":89 * * # Column types * cdef int _INTEGER = 1 # <<<<<<<<<<<<<< * cdef int _BLOB = 2 * cdef int _TIME = 3 */ __pyx_v_4s3ql_9deltadump__INTEGER = 1; /* "s3ql/deltadump.pyx":90 * # Column types * cdef int _INTEGER = 1 * cdef int _BLOB = 2 # <<<<<<<<<<<<<< * cdef int _TIME = 3 * */ __pyx_v_4s3ql_9deltadump__BLOB = 2; /* "s3ql/deltadump.pyx":91 * cdef int _INTEGER = 1 * cdef int _BLOB = 2 * cdef int _TIME = 3 # <<<<<<<<<<<<<< * * # Make column types available as Python objects */ __pyx_v_4s3ql_9deltadump__TIME = 3; /* "s3ql/deltadump.pyx":94 * * # Make column types available as Python objects * INTEGER = _INTEGER # <<<<<<<<<<<<<< * BLOB = _BLOB * TIME = _TIME */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_4s3ql_9deltadump__INTEGER); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 94, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_INTEGER, __pyx_t_1) < 0) __PYX_ERR(0, 94, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "s3ql/deltadump.pyx":95 * # Make column types available as Python objects * INTEGER = _INTEGER * BLOB = _BLOB # <<<<<<<<<<<<<< * TIME = _TIME * */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_4s3ql_9deltadump__BLOB); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 95, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_BLOB, __pyx_t_1) < 0) __PYX_ERR(0, 95, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "s3ql/deltadump.pyx":96 * INTEGER = _INTEGER * BLOB = _BLOB * TIME = _TIME # <<<<<<<<<<<<<< * * # Integer length codes */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_4s3ql_9deltadump__TIME); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 96, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_TIME, __pyx_t_1) < 0) __PYX_ERR(0, 96, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "s3ql/deltadump.pyx":99 * * # Integer length codes * cdef uint8_t INT8 = 127 # <<<<<<<<<<<<<< * cdef uint8_t INT16 = 126 * cdef uint8_t INT32 = 125 */ __pyx_v_4s3ql_9deltadump_INT8 = 0x7F; /* "s3ql/deltadump.pyx":100 * # Integer length codes * cdef uint8_t INT8 = 127 * cdef uint8_t INT16 = 126 # <<<<<<<<<<<<<< * cdef uint8_t INT32 = 125 * cdef uint8_t INT64 = 124 */ __pyx_v_4s3ql_9deltadump_INT16 = 0x7E; /* "s3ql/deltadump.pyx":101 * cdef uint8_t INT8 = 127 * cdef uint8_t INT16 = 126 * cdef uint8_t INT32 = 125 # <<<<<<<<<<<<<< * cdef uint8_t INT64 = 124 * */ __pyx_v_4s3ql_9deltadump_INT32 = 0x7D; /* "s3ql/deltadump.pyx":102 * cdef uint8_t INT16 = 126 * cdef uint8_t INT32 = 125 * cdef uint8_t INT64 = 124 # <<<<<<<<<<<<<< * * # Maximum size of BLOBs */ __pyx_v_4s3ql_9deltadump_INT64 = 0x7C; /* "s3ql/deltadump.pyx":105 * * # Maximum size of BLOBs * MAX_BLOB_SIZE = 4096 # <<<<<<<<<<<<<< * * # Scale factor from time floats to integers. 1e9 would give nanosecond */ if (PyDict_SetItem(__pyx_d, __pyx_n_s_MAX_BLOB_SIZE, __pyx_int_4096) < 0) __PYX_ERR(0, 105, __pyx_L1_error) /* "s3ql/deltadump.pyx":110 * # resolution but introduces rounding errors, so we use 1 << 30 (which is * # ~1.074e9, i.e. we get a little more precision than nanoseconds). * cdef double time_scale = 1 << 30 # <<<<<<<<<<<<<< * * cdef inline int fwrite(const_void * buf, size_t len_, FILE * fp) except -1: */ __pyx_v_4s3ql_9deltadump_time_scale = 1073741824.0; /* "s3ql/deltadump.pyx":136 * return None * * cdef int raise_from_errno(err_class=OSError) except -1: # <<<<<<<<<<<<<< * '''Raise OSError for current errno value''' * */ __Pyx_INCREF(__pyx_builtin_OSError); __pyx_k_ = __pyx_builtin_OSError; __Pyx_GIVEREF(__pyx_builtin_OSError); /* "s3ql/deltadump.pyx":237 * return fp * * def check_sqlite(): # <<<<<<<<<<<<<< * '''Check if deltadump and apsw module use compatible SQLite code. 
* */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4s3ql_9deltadump_1check_sqlite, NULL, __pyx_n_s_s3ql_deltadump); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_check_sqlite, __pyx_t_1) < 0) __PYX_ERR(0, 237, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "s3ql/deltadump.pyx":280 * s3ql_sqlite_options - apsw_sqlite_options)) * * def dump_table(table, order, columns, db, fh): # <<<<<<<<<<<<<< * '''Dump *columns* of *table* into *fh* * */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4s3ql_9deltadump_3dump_table, NULL, __pyx_n_s_s3ql_deltadump); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 280, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_dump_table, __pyx_t_1) < 0) __PYX_ERR(0, 280, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "s3ql/deltadump.pyx":401 * fwrite(buf, len_, fp) * * def load_table(table, columns, db, fh, trx_rows=5000): # <<<<<<<<<<<<<< * '''Load *columns* of *table* from *fh* * */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4s3ql_9deltadump_5load_table, NULL, __pyx_n_s_s3ql_deltadump); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_load_table, __pyx_t_1) < 0) __PYX_ERR(0, 401, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "s3ql/deltadump.pyx":1 * ''' # <<<<<<<<<<<<<< * deltadump.pyx - this file is part of S3QL (http://s3ql.googlecode.com) * */ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init s3ql.deltadump", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init s3ql.deltadump"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL #include "frameobject.h" static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = PyThreadState_GET(); PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. 
*/ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = f->f_localsplus; for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif // CPython < 3.6 #endif // CYTHON_FAST_PYCALL /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); return (*((__Pyx_PyCFunctionFast)meth)) (self, args, nargs, NULL); } #endif // CYTHON_FAST_PYCCALL /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return 
PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) 
{ PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } #if PY_VERSION_HEX >= 0x03030000 if (cause) { #else if (cause && cause != Py_None) { #endif PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* GetModuleGlobalName */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS result = PyDict_GetItem(__pyx_d, name); if (likely(result)) { Py_INCREF(result); } else { #else result = PyObject_GetItem(__pyx_d, name); if (!result) { PyErr_Clear(); #endif result = __Pyx_GetBuiltinName(name); } return result; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) { #else if (likely(PyCFunction_Check(func))) { #endif if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* GetItemInt */ static CYTHON_INLINE PyObject 
*__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (wraparound & unlikely(i < 0)) i += PyList_GET_SIZE(o); if ((!boundscheck) || likely((0 <= i) & (i < PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (wraparound & unlikely(i < 0)) i += PyTuple_GET_SIZE(o); if ((!boundscheck) || likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* PyObjectCallNoArg */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, NULL, 0); } #endif #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) { #else if (likely(PyCFunction_Check(func))) { #endif if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { return __Pyx_PyObject_CallMethO(func, NULL); } } return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); } #endif /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, 
num_expected, (num_expected == 1) ? "" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = __Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* FetchCommonType */ static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type) { PyObject* fake_module; PyTypeObject* cached_type = NULL; fake_module = PyImport_AddModule((char*) "_cython_" CYTHON_ABI); if (!fake_module) return NULL; Py_INCREF(fake_module); cached_type = (PyTypeObject*) PyObject_GetAttrString(fake_module, type->tp_name); if (cached_type) { if (!PyType_Check((PyObject*)cached_type)) { PyErr_Format(PyExc_TypeError, "Shared Cython type %.200s is not a type object", type->tp_name); goto bad; } if (cached_type->tp_basicsize != type->tp_basicsize) { PyErr_Format(PyExc_TypeError, "Shared Cython type %.200s has the wrong size, try recompiling", type->tp_name); goto bad; } } else { if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; PyErr_Clear(); if (PyType_Ready(type) < 0) goto bad; if (PyObject_SetAttrString(fake_module, type->tp_name, (PyObject*) type) < 0) goto bad; Py_INCREF(type); cached_type = type; } done: Py_DECREF(fake_module); return cached_type; bad: Py_XDECREF(cached_type); cached_type = NULL; goto done; } /* CythonFunction */ static PyObject * __Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *closure) { if (unlikely(op->func_doc == NULL)) { if (op->func.m_ml->ml_doc) { #if PY_MAJOR_VERSION >= 3 op->func_doc = PyUnicode_FromString(op->func.m_ml->ml_doc); #else op->func_doc = PyString_FromString(op->func.m_ml->ml_doc); #endif if (unlikely(op->func_doc == NULL)) return NULL; } else { Py_INCREF(Py_None); return Py_None; } } Py_INCREF(op->func_doc); return op->func_doc; } static int __Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value) { PyObject *tmp = op->func_doc; if (value == NULL) { value = Py_None; } Py_INCREF(value); op->func_doc = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op) { if (unlikely(op->func_name == NULL)) { #if PY_MAJOR_VERSION >= 3 op->func_name = PyUnicode_InternFromString(op->func.m_ml->ml_name); #else op->func_name = PyString_InternFromString(op->func.m_ml->ml_name); #endif if (unlikely(op->func_name == NULL)) return NULL; } Py_INCREF(op->func_name); return op->func_name; } static int __Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value) { PyObject *tmp; #if PY_MAJOR_VERSION >= 3 if (unlikely(value == NULL || !PyUnicode_Check(value))) { #else if (unlikely(value == NULL || !PyString_Check(value))) { #endif PyErr_SetString(PyExc_TypeError, "__name__ must be set to a string object"); return -1; } tmp = op->func_name; Py_INCREF(value); op->func_name = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op) { Py_INCREF(op->func_qualname); return op->func_qualname; } static int __Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value) { PyObject *tmp; #if PY_MAJOR_VERSION >= 3 if (unlikely(value == NULL || !PyUnicode_Check(value))) { #else if (unlikely(value == NULL || !PyString_Check(value))) { #endif 
PyErr_SetString(PyExc_TypeError, "__qualname__ must be set to a string object"); return -1; } tmp = op->func_qualname; Py_INCREF(value); op->func_qualname = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_self(__pyx_CyFunctionObject *m, CYTHON_UNUSED void *closure) { PyObject *self; self = m->func_closure; if (self == NULL) self = Py_None; Py_INCREF(self); return self; } static PyObject * __Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op) { if (unlikely(op->func_dict == NULL)) { op->func_dict = PyDict_New(); if (unlikely(op->func_dict == NULL)) return NULL; } Py_INCREF(op->func_dict); return op->func_dict; } static int __Pyx_CyFunction_set_dict(__pyx_CyFunctionObject *op, PyObject *value) { PyObject *tmp; if (unlikely(value == NULL)) { PyErr_SetString(PyExc_TypeError, "function's dictionary may not be deleted"); return -1; } if (unlikely(!PyDict_Check(value))) { PyErr_SetString(PyExc_TypeError, "setting function's dictionary to a non-dict"); return -1; } tmp = op->func_dict; Py_INCREF(value); op->func_dict = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op) { Py_INCREF(op->func_globals); return op->func_globals; } static PyObject * __Pyx_CyFunction_get_closure(CYTHON_UNUSED __pyx_CyFunctionObject *op) { Py_INCREF(Py_None); return Py_None; } static PyObject * __Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op) { PyObject* result = (op->func_code) ? op->func_code : Py_None; Py_INCREF(result); return result; } static int __Pyx_CyFunction_init_defaults(__pyx_CyFunctionObject *op) { int result = 0; PyObject *res = op->defaults_getter((PyObject *) op); if (unlikely(!res)) return -1; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS op->defaults_tuple = PyTuple_GET_ITEM(res, 0); Py_INCREF(op->defaults_tuple); op->defaults_kwdict = PyTuple_GET_ITEM(res, 1); Py_INCREF(op->defaults_kwdict); #else op->defaults_tuple = PySequence_ITEM(res, 0); if (unlikely(!op->defaults_tuple)) result = -1; else { op->defaults_kwdict = PySequence_ITEM(res, 1); if (unlikely(!op->defaults_kwdict)) result = -1; } #endif Py_DECREF(res); return result; } static int __Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value) { PyObject* tmp; if (!value) { value = Py_None; } else if (value != Py_None && !PyTuple_Check(value)) { PyErr_SetString(PyExc_TypeError, "__defaults__ must be set to a tuple object"); return -1; } Py_INCREF(value); tmp = op->defaults_tuple; op->defaults_tuple = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op) { PyObject* result = op->defaults_tuple; if (unlikely(!result)) { if (op->defaults_getter) { if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL; result = op->defaults_tuple; } else { result = Py_None; } } Py_INCREF(result); return result; } static int __Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value) { PyObject* tmp; if (!value) { value = Py_None; } else if (value != Py_None && !PyDict_Check(value)) { PyErr_SetString(PyExc_TypeError, "__kwdefaults__ must be set to a dict object"); return -1; } Py_INCREF(value); tmp = op->defaults_kwdict; op->defaults_kwdict = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op) { PyObject* result = op->defaults_kwdict; if (unlikely(!result)) { if (op->defaults_getter) { if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL; result = op->defaults_kwdict; } else { result = Py_None; } } 
Py_INCREF(result); return result; } static int __Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value) { PyObject* tmp; if (!value || value == Py_None) { value = NULL; } else if (!PyDict_Check(value)) { PyErr_SetString(PyExc_TypeError, "__annotations__ must be set to a dict object"); return -1; } Py_XINCREF(value); tmp = op->func_annotations; op->func_annotations = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op) { PyObject* result = op->func_annotations; if (unlikely(!result)) { result = PyDict_New(); if (unlikely(!result)) return NULL; op->func_annotations = result; } Py_INCREF(result); return result; } static PyGetSetDef __pyx_CyFunction_getsets[] = { {(char *) "func_doc", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, {(char *) "__doc__", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, {(char *) "func_name", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, {(char *) "__name__", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, {(char *) "__qualname__", (getter)__Pyx_CyFunction_get_qualname, (setter)__Pyx_CyFunction_set_qualname, 0, 0}, {(char *) "__self__", (getter)__Pyx_CyFunction_get_self, 0, 0, 0}, {(char *) "func_dict", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, {(char *) "__dict__", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, {(char *) "func_globals", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, {(char *) "__globals__", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, {(char *) "func_closure", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, {(char *) "__closure__", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, {(char *) "func_code", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, {(char *) "__code__", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, {(char *) "func_defaults", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, {(char *) "__defaults__", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, {(char *) "__kwdefaults__", (getter)__Pyx_CyFunction_get_kwdefaults, (setter)__Pyx_CyFunction_set_kwdefaults, 0, 0}, {(char *) "__annotations__", (getter)__Pyx_CyFunction_get_annotations, (setter)__Pyx_CyFunction_set_annotations, 0, 0}, {0, 0, 0, 0, 0} }; static PyMemberDef __pyx_CyFunction_members[] = { {(char *) "__module__", T_OBJECT, offsetof(__pyx_CyFunctionObject, func.m_module), PY_WRITE_RESTRICTED, 0}, {0, 0, 0, 0, 0} }; static PyObject * __Pyx_CyFunction_reduce(__pyx_CyFunctionObject *m, CYTHON_UNUSED PyObject *args) { #if PY_MAJOR_VERSION >= 3 return PyUnicode_FromString(m->func.m_ml->ml_name); #else return PyString_FromString(m->func.m_ml->ml_name); #endif } static PyMethodDef __pyx_CyFunction_methods[] = { {"__reduce__", (PyCFunction)__Pyx_CyFunction_reduce, METH_VARARGS, 0}, {0, 0, 0, 0} }; #if PY_VERSION_HEX < 0x030500A0 #define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func_weakreflist) #else #define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func.m_weakreflist) #endif static PyObject *__Pyx_CyFunction_New(PyTypeObject *type, PyMethodDef *ml, int flags, PyObject* qualname, PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { __pyx_CyFunctionObject *op = PyObject_GC_New(__pyx_CyFunctionObject, type); if (op == NULL) return NULL; op->flags = flags; __Pyx_CyFunction_weakreflist(op) = NULL; op->func.m_ml = ml; 
op->func.m_self = (PyObject *) op; Py_XINCREF(closure); op->func_closure = closure; Py_XINCREF(module); op->func.m_module = module; op->func_dict = NULL; op->func_name = NULL; Py_INCREF(qualname); op->func_qualname = qualname; op->func_doc = NULL; op->func_classobj = NULL; op->func_globals = globals; Py_INCREF(op->func_globals); Py_XINCREF(code); op->func_code = code; op->defaults_pyobjects = 0; op->defaults = NULL; op->defaults_tuple = NULL; op->defaults_kwdict = NULL; op->defaults_getter = NULL; op->func_annotations = NULL; PyObject_GC_Track(op); return (PyObject *) op; } static int __Pyx_CyFunction_clear(__pyx_CyFunctionObject *m) { Py_CLEAR(m->func_closure); Py_CLEAR(m->func.m_module); Py_CLEAR(m->func_dict); Py_CLEAR(m->func_name); Py_CLEAR(m->func_qualname); Py_CLEAR(m->func_doc); Py_CLEAR(m->func_globals); Py_CLEAR(m->func_code); Py_CLEAR(m->func_classobj); Py_CLEAR(m->defaults_tuple); Py_CLEAR(m->defaults_kwdict); Py_CLEAR(m->func_annotations); if (m->defaults) { PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); int i; for (i = 0; i < m->defaults_pyobjects; i++) Py_XDECREF(pydefaults[i]); PyObject_Free(m->defaults); m->defaults = NULL; } return 0; } static void __Pyx_CyFunction_dealloc(__pyx_CyFunctionObject *m) { PyObject_GC_UnTrack(m); if (__Pyx_CyFunction_weakreflist(m) != NULL) PyObject_ClearWeakRefs((PyObject *) m); __Pyx_CyFunction_clear(m); PyObject_GC_Del(m); } static int __Pyx_CyFunction_traverse(__pyx_CyFunctionObject *m, visitproc visit, void *arg) { Py_VISIT(m->func_closure); Py_VISIT(m->func.m_module); Py_VISIT(m->func_dict); Py_VISIT(m->func_name); Py_VISIT(m->func_qualname); Py_VISIT(m->func_doc); Py_VISIT(m->func_globals); Py_VISIT(m->func_code); Py_VISIT(m->func_classobj); Py_VISIT(m->defaults_tuple); Py_VISIT(m->defaults_kwdict); if (m->defaults) { PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); int i; for (i = 0; i < m->defaults_pyobjects; i++) Py_VISIT(pydefaults[i]); } return 0; } static PyObject *__Pyx_CyFunction_descr_get(PyObject *func, PyObject *obj, PyObject *type) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; if (m->flags & __Pyx_CYFUNCTION_STATICMETHOD) { Py_INCREF(func); return func; } if (m->flags & __Pyx_CYFUNCTION_CLASSMETHOD) { if (type == NULL) type = (PyObject *)(Py_TYPE(obj)); return __Pyx_PyMethod_New(func, type, (PyObject *)(Py_TYPE(type))); } if (obj == Py_None) obj = NULL; return __Pyx_PyMethod_New(func, obj, type); } static PyObject* __Pyx_CyFunction_repr(__pyx_CyFunctionObject *op) { #if PY_MAJOR_VERSION >= 3 return PyUnicode_FromFormat("", op->func_qualname, (void *)op); #else return PyString_FromFormat("", PyString_AsString(op->func_qualname), (void *)op); #endif } static PyObject * __Pyx_CyFunction_CallMethod(PyObject *func, PyObject *self, PyObject *arg, PyObject *kw) { PyCFunctionObject* f = (PyCFunctionObject*)func; PyCFunction meth = f->m_ml->ml_meth; Py_ssize_t size; switch (f->m_ml->ml_flags & (METH_VARARGS | METH_KEYWORDS | METH_NOARGS | METH_O)) { case METH_VARARGS: if (likely(kw == NULL || PyDict_Size(kw) == 0)) return (*meth)(self, arg); break; case METH_VARARGS | METH_KEYWORDS: return (*(PyCFunctionWithKeywords)meth)(self, arg, kw); case METH_NOARGS: if (likely(kw == NULL || PyDict_Size(kw) == 0)) { size = PyTuple_GET_SIZE(arg); if (likely(size == 0)) return (*meth)(self, NULL); PyErr_Format(PyExc_TypeError, "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)", f->m_ml->ml_name, size); return NULL; } break; case METH_O: if (likely(kw == NULL || PyDict_Size(kw) == 
0)) { size = PyTuple_GET_SIZE(arg); if (likely(size == 1)) { PyObject *result, *arg0 = PySequence_ITEM(arg, 0); if (unlikely(!arg0)) return NULL; result = (*meth)(self, arg0); Py_DECREF(arg0); return result; } PyErr_Format(PyExc_TypeError, "%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", f->m_ml->ml_name, size); return NULL; } break; default: PyErr_SetString(PyExc_SystemError, "Bad call flags in " "__Pyx_CyFunction_Call. METH_OLDARGS is no " "longer supported!"); return NULL; } PyErr_Format(PyExc_TypeError, "%.200s() takes no keyword arguments", f->m_ml->ml_name); return NULL; } static CYTHON_INLINE PyObject *__Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) { return __Pyx_CyFunction_CallMethod(func, ((PyCFunctionObject*)func)->m_self, arg, kw); } static PyObject *__Pyx_CyFunction_CallAsMethod(PyObject *func, PyObject *args, PyObject *kw) { PyObject *result; __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func; if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) { Py_ssize_t argc; PyObject *new_args; PyObject *self; argc = PyTuple_GET_SIZE(args); new_args = PyTuple_GetSlice(args, 1, argc); if (unlikely(!new_args)) return NULL; self = PyTuple_GetItem(args, 0); if (unlikely(!self)) { Py_DECREF(new_args); return NULL; } result = __Pyx_CyFunction_CallMethod(func, self, new_args, kw); Py_DECREF(new_args); } else { result = __Pyx_CyFunction_Call(func, args, kw); } return result; } static PyTypeObject __pyx_CyFunctionType_type = { PyVarObject_HEAD_INIT(0, 0) "cython_function_or_method", sizeof(__pyx_CyFunctionObject), 0, (destructor) __Pyx_CyFunction_dealloc, 0, 0, 0, #if PY_MAJOR_VERSION < 3 0, #else 0, #endif (reprfunc) __Pyx_CyFunction_repr, 0, 0, 0, 0, __Pyx_CyFunction_CallAsMethod, 0, 0, 0, 0, Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, 0, (traverseproc) __Pyx_CyFunction_traverse, (inquiry) __Pyx_CyFunction_clear, 0, #if PY_VERSION_HEX < 0x030500A0 offsetof(__pyx_CyFunctionObject, func_weakreflist), #else offsetof(PyCFunctionObject, m_weakreflist), #endif 0, 0, __pyx_CyFunction_methods, __pyx_CyFunction_members, __pyx_CyFunction_getsets, 0, 0, __Pyx_CyFunction_descr_get, 0, offsetof(__pyx_CyFunctionObject, func_dict), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, #if PY_VERSION_HEX >= 0x030400a1 0, #endif }; static int __pyx_CyFunction_init(void) { __pyx_CyFunctionType = __Pyx_FetchCommonType(&__pyx_CyFunctionType_type); if (__pyx_CyFunctionType == NULL) { return -1; } return 0; } static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *func, size_t size, int pyobjects) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; m->defaults = PyObject_Malloc(size); if (!m->defaults) return PyErr_NoMemory(); memset(m->defaults, 0, size); m->defaults_pyobjects = pyobjects; return m->defaults; } static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *func, PyObject *tuple) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; m->defaults_tuple = tuple; Py_INCREF(tuple); } static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *func, PyObject *dict) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; m->defaults_kwdict = dict; Py_INCREF(dict); } static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *func, PyObject *dict) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; m->func_annotations = dict; Py_INCREF(dict); } /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void 
__Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) { #endif PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) { if (op1 == op2) { Py_RETURN_TRUE; } #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long a = PyInt_AS_LONG(op1); if (a == b) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a; const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) 
? digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } #if PyLong_SHIFT < 30 && PyLong_SHIFT != 15 default: return PyLong_Type.tp_richcompare(op1, op2, Py_EQ); #else default: Py_RETURN_FALSE; #endif } } if (a == b) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); if ((double)a == (double)b) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } return PyObject_RichCompare(op1, op2, Py_EQ); } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_VERSION_HEX < 0x03030000 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); #endif if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_VERSION_HEX < 0x03030000 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if 
PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, 
/*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? c_line : py_line, py_code); } py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == 
(func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int64_t(int64_t value) { const int64_t neg_one = (int64_t) -1, const_zero = (int64_t) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int64_t) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int64_t) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int64_t) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int64_t) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int64_t) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int64_t), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, 
PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not 
available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE int64_t __Pyx_PyInt_As_int64_t(PyObject *x) { const int64_t neg_one = (int64_t) -1, const_zero = (int64_t) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int64_t) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int64_t, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int64_t) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int64_t) 0; case 1: __PYX_VERIFY_RETURN_INT(int64_t, digit, digits[0]) case 2: if (8 * sizeof(int64_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int64_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int64_t) >= 2 * PyLong_SHIFT) { return (int64_t) (((((int64_t)digits[1]) << PyLong_SHIFT) | (int64_t)digits[0])); } } break; case 3: if (8 * sizeof(int64_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int64_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int64_t) >= 3 * PyLong_SHIFT) { return (int64_t) (((((((int64_t)digits[2]) << PyLong_SHIFT) | (int64_t)digits[1]) << PyLong_SHIFT) | (int64_t)digits[0])); } } break; case 4: if (8 * sizeof(int64_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int64_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int64_t) >= 4 * PyLong_SHIFT) { return (int64_t) (((((((((int64_t)digits[3]) << PyLong_SHIFT) | (int64_t)digits[2]) << PyLong_SHIFT) | (int64_t)digits[1]) << PyLong_SHIFT) | (int64_t)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int64_t) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int64_t) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int64_t, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int64_t) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int64_t, 
unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int64_t) 0; case -1: __PYX_VERIFY_RETURN_INT(int64_t, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int64_t, digit, +digits[0]) case -2: if (8 * sizeof(int64_t) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int64_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int64_t) - 1 > 2 * PyLong_SHIFT) { return (int64_t) (((int64_t)-1)*(((((int64_t)digits[1]) << PyLong_SHIFT) | (int64_t)digits[0]))); } } break; case 2: if (8 * sizeof(int64_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int64_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int64_t) - 1 > 2 * PyLong_SHIFT) { return (int64_t) ((((((int64_t)digits[1]) << PyLong_SHIFT) | (int64_t)digits[0]))); } } break; case -3: if (8 * sizeof(int64_t) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int64_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int64_t) - 1 > 3 * PyLong_SHIFT) { return (int64_t) (((int64_t)-1)*(((((((int64_t)digits[2]) << PyLong_SHIFT) | (int64_t)digits[1]) << PyLong_SHIFT) | (int64_t)digits[0]))); } } break; case 3: if (8 * sizeof(int64_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int64_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int64_t) - 1 > 3 * PyLong_SHIFT) { return (int64_t) ((((((((int64_t)digits[2]) << PyLong_SHIFT) | (int64_t)digits[1]) << PyLong_SHIFT) | (int64_t)digits[0]))); } } break; case -4: if (8 * sizeof(int64_t) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int64_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int64_t) - 1 > 4 * PyLong_SHIFT) { return (int64_t) (((int64_t)-1)*(((((((((int64_t)digits[3]) << PyLong_SHIFT) | (int64_t)digits[2]) << PyLong_SHIFT) | (int64_t)digits[1]) << PyLong_SHIFT) | (int64_t)digits[0]))); } } break; case 4: if (8 * sizeof(int64_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int64_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int64_t) - 1 > 4 * PyLong_SHIFT) { return (int64_t) ((((((((((int64_t)digits[3]) << PyLong_SHIFT) | (int64_t)digits[2]) << PyLong_SHIFT) | (int64_t)digits[1]) << PyLong_SHIFT) | (int64_t)digits[0]))); } } break; } #endif if (sizeof(int64_t) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int64_t, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int64_t) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int64_t, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && 
!defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int64_t val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int64_t) -1; } } else { int64_t val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int64_t) -1; val = __Pyx_PyInt_As_int64_t(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int64_t"); return (int64_t) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int64_t"); return (int64_t) -1; } /* CIntFromPy */ static CYTHON_INLINE size_t __Pyx_PyInt_As_size_t(PyObject *x) { const size_t neg_one = (size_t) -1, const_zero = (size_t) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(size_t) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(size_t, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (size_t) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (size_t) 0; case 1: __PYX_VERIFY_RETURN_INT(size_t, digit, digits[0]) case 2: if (8 * sizeof(size_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) >= 2 * PyLong_SHIFT) { return (size_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } } break; case 3: if (8 * sizeof(size_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) >= 3 * PyLong_SHIFT) { return (size_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } } break; case 4: if (8 * sizeof(size_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) >= 4 * PyLong_SHIFT) { return (size_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (size_t) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(size_t) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(size_t, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else 
if (sizeof(size_t) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(size_t, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (size_t) 0; case -1: __PYX_VERIFY_RETURN_INT(size_t, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(size_t, digit, +digits[0]) case -2: if (8 * sizeof(size_t) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) { return (size_t) (((size_t)-1)*(((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); } } break; case 2: if (8 * sizeof(size_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) { return (size_t) ((((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); } } break; case -3: if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) { return (size_t) (((size_t)-1)*(((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); } } break; case 3: if (8 * sizeof(size_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) { return (size_t) ((((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); } } break; case -4: if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) - 1 > 4 * PyLong_SHIFT) { return (size_t) (((size_t)-1)*(((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); } } break; case 4: if (8 * sizeof(size_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) - 1 > 4 * PyLong_SHIFT) { return (size_t) ((((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); } } break; } #endif if (sizeof(size_t) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(size_t, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(size_t) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(size_t, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if 
CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else size_t val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (size_t) -1; } } else { size_t val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (size_t) -1; val = __Pyx_PyInt_As_size_t(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to size_t"); return (size_t) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to size_t"); return (size_t) -1; } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned 
PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert 
large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* ModuleImport */ #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif /* TypeImport */ #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if (!strict && (size_t)basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility. Expected %zd, got %zd", module_name, class_name, basicsize, size); if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; } else if ((size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s has the wrong size, try recompiling. 
Expected %zd, got %zd", module_name, class_name, basicsize, size); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if CYTHON_COMPILING_IN_CPYTHON && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { #if PY_VERSION_HEX < 0x03030000 char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; #else if (__Pyx_PyUnicode_READY(o) == -1) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (PyUnicode_IS_ASCII(o)) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif #endif } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif #else res = PyNumber_Int(x); #endif if (res) { #if PY_MAJOR_VERSION < 3 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif 
PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(x); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */ s3ql-2.26/src/s3ql/multi_lock.py0000644000175000017500000000353512615000156020176 0ustar nikrationikratio00000000000000''' multi_lock.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' import threading import logging from contextlib import contextmanager __all__ = [ "MultiLock" ] log = logging.getLogger(__name__) class MultiLock: """Provides locking for multiple objects. This class provides locking for a dynamically changing set of objects: The `acquire` and `release` methods have an additional argument, the locking key. Only locks with the same key can actually see each other, so that several threads can hold locks with different locking keys at the same time. MultiLock instances can be used as context managers. Note that it is actually possible for one thread to release a lock that has been obtained by a different thread. This is not a bug but a feature. """ def __init__(self): self.locked_keys = set() self.cond = threading.Condition(threading.Lock()) @contextmanager def __call__(self, *key): self.acquire(*key) try: yield finally: self.release(*key) def acquire(self, *key, timeout=None): '''Acquire lock for given key If timeout is exceeded, return False. Otherwise return True. 
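        A minimal usage sketch (the key values shown are illustrative); in
        practice the lock is normally taken through the context-manager
        interface defined above, which pairs `acquire` with `release`
        automatically:

            lock = MultiLock()
            with lock('inode', 42):
                pass  # only one thread at a time may hold the key ('inode', 42)

        Threads that use a different key, e.g. `lock('inode', 43)`, are not
        blocked by each other.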
''' with self.cond: if not self.cond.wait_for(lambda: key not in self.locked_keys, timeout): return False self.locked_keys.add(key) return True def release(self, *key, noerror=False): """Release lock on given key If *noerror* is True, do not raise an exception if *key* is not locked. """ with self.cond: if noerror: self.locked_keys.discard(key) else: self.locked_keys.remove(key) self.cond.notify_all() s3ql-2.26/src/s3ql/ctrl.py0000644000175000017500000000735213223730045017004 0ustar nikrationikratio00000000000000''' ctrl.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from .logging import logging, setup_logging from .common import assert_fs_owner from .parse_args import ArgumentParser import llfuse import sys import textwrap log = logging.getLogger(__name__) def parse_args(args): '''Parse command line''' parser = ArgumentParser( description='''Control a mounted S3QL File System''', epilog=textwrap.dedent('''\ Hint: run `%(prog)s --help` to get help on the additional arguments that the different actions take.''')) pparser = ArgumentParser(add_help=False, epilog=textwrap.dedent('''\ Hint: run `%(prog)s --help` to get help on other available actions and optional arguments that can be used with all actions.''')) pparser.add_argument("mountpoint", metavar='', type=(lambda x: x.rstrip('/')), help='Mountpoint of the file system') parser.add_debug() parser.add_quiet() parser.add_version() subparsers = parser.add_subparsers(metavar='', dest='action', help='may be either of') subparsers.required = True subparsers.add_parser('flushcache', help='flush file system cache', parents=[pparser]) subparsers.add_parser('dropcache', help='drop file system cache', parents=[pparser]) subparsers.add_parser('upload-meta', help='Upload metadata', parents=[pparser]) sparser = subparsers.add_parser('cachesize', help='Change cache size', parents=[pparser]) sparser.add_argument('cachesize', metavar='', type=int, help='New cache size in KiB') sparser = subparsers.add_parser('log', help='Change log level', parents=[pparser]) sparser.add_argument('level', choices=('debug', 'info', 'warn'), metavar='', help='Desired new log level for mount.s3ql process. ' 'Allowed values: %(choices)s') sparser.add_argument('modules', nargs='*', metavar='', help='Modules to enable debugging output for.
Specify ' '`all` to enable debugging for all modules.') options = parser.parse_args(args) if options.action == 'log': if options.level != 'debug' and options.modules: parser.error('Modules can only be specified with `debug` logging level.') if not options.modules: options.modules = [ 'all' ] return options def main(args=None): '''Control a mounted S3QL File System.''' if args is None: args = sys.argv[1:] options = parse_args(args) setup_logging(options) path = options.mountpoint ctrlfile = assert_fs_owner(path, mountpoint=True) if options.action == 'flushcache': llfuse.setxattr(ctrlfile, 's3ql_flushcache!', b'dummy') elif options.action == 'dropcache': llfuse.setxattr(ctrlfile, 's3ql_dropcache!', b'dummy') elif options.action == 'upload-meta': llfuse.setxattr(ctrlfile, 'upload-meta', b'dummy') elif options.action == 'log': level = getattr(logging, options.level.upper()) cmd = ('(%r, %r)' % (level, ','.join(options.modules))).encode() llfuse.setxattr(ctrlfile, 'logging', cmd) elif options.action == 'cachesize': llfuse.setxattr(ctrlfile, 'cachesize', ('%d' % (options.cachesize * 1024,)).encode()) if __name__ == '__main__': main(sys.argv[1:]) s3ql-2.26/src/s3ql/umount.py0000644000175000017500000001312312742247106017366 0ustar nikrationikratio00000000000000''' umount.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from .logging import logging, setup_logging from . import CTRL_NAME from .common import assert_s3ql_mountpoint, parse_literal from .parse_args import ArgumentParser import llfuse import os import subprocess import platform import sys import textwrap import time log = logging.getLogger(__name__) def parse_args(args): '''Parse command line This function writes to stdout/stderr and may call `system.exit()` instead of throwing an exception if it encounters errors. ''' parser = ArgumentParser( description=textwrap.dedent('''\ Unmounts an S3QL file system. The command returns only after all data has been uploaded to the backend.''')) parser.add_debug() parser.add_quiet() parser.add_version() parser.add_argument("mountpoint", metavar='', type=(lambda x: x.rstrip('/')), help='Mount point to un-mount') parser.add_argument('--lazy', "-z", action="store_true", default=False, help="Lazy umount. Detaches the file system immediately, even if there " 'are still open files. The data will be uploaded in the background ' 'once all open files have been closed.') return parser.parse_args(args) class UmountError(Exception): """ Base class for unmount errors. """ message = 'internal error' exitcode = 3 def __init__(self, mountpoint): super().__init__() self.mountpoint = mountpoint def __str__(self): return self.message class UmountSubError(UmountError): message = 'Unmount subprocess failed.' exitcode = 2 class MountInUseError(UmountError): message = 'In use.' exitcode = 1 def lazy_umount(mountpoint): '''Invoke fusermount -u -z for mountpoint''' if os.getuid() == 0 or platform.system() == 'Darwin': # MacOS X always uses umount rather than fusermount umount_cmd = ('umount', '-l', mountpoint) else: umount_cmd = ('fusermount', '-u', '-z', mountpoint) if subprocess.call(umount_cmd) != 0: raise UmountSubError(mountpoint) def get_cmdline(pid): '''Return command line for *pid* If *pid* doesn't exists, return None. If command line cannot be determined for other reasons, log warning and return None. 
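    A sketch of the intended use (variable names are illustrative): the command
    line is recorded once and compared again later, so that a recycled PID is
    not mistaken for the still-running mount process:

        cmdline = get_cmdline(pid)
        # ... wait ...
        if get_cmdline(pid) != cmdline:
            pass  # daemon has quit (or the PID now belongs to another process)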
''' try: output = subprocess.check_output(['ps', '-p', str(pid), '-o', 'args='], universal_newlines=True).strip() except subprocess.CalledProcessError: log.warning('Unable to execute ps, assuming process %d has terminated.' % pid) return None if output: return output else: return None def blocking_umount(mountpoint): '''Invoke fusermount and wait for daemon to terminate.''' with open('/dev/null', 'wb') as devnull: if subprocess.call(['fuser', '-m', mountpoint], stdout=devnull, stderr=devnull) == 0: raise MountInUseError(mountpoint) ctrlfile = os.path.join(mountpoint, CTRL_NAME) log.debug('Flushing cache...') llfuse.setxattr(ctrlfile, 's3ql_flushcache!', b'dummy') # Get pid log.debug('Trying to get pid') pid = parse_literal(llfuse.getxattr(ctrlfile, 's3ql_pid?'), int) log.debug('PID is %d', pid) # Get command line to make race conditions less-likely cmdline = get_cmdline(pid) # Unmount log.debug('Unmounting...') if os.getuid() == 0 or platform.system() == 'Darwin': # MacOS X always uses umount rather than fusermount umount_cmd = ['umount', mountpoint] else: umount_cmd = ['fusermount', '-u', mountpoint] if subprocess.call(umount_cmd) != 0: raise UmountSubError(mountpoint) # Wait for daemon log.debug('Uploading metadata...') step = 0.1 while True: try: os.kill(pid, 0) except OSError: log.debug('Kill failed, assuming daemon has quit.') break # Check that the process did not terminate and the PID # was reused by a different process cmdline2 = get_cmdline(pid) if cmdline2 is None: log.debug('Reading cmdline failed, assuming daemon has quit.') break elif cmdline2 == cmdline: log.debug('PID still alive and commandline unchanged.') else: log.debug('PID still alive, but cmdline changed') break # Process still exists, we wait log.debug('Daemon seems to be alive, waiting...') time.sleep(step) if step < 1: step += 0.1 def main(args=None): '''Umount S3QL file system''' if args is None: args = sys.argv[1:] options = parse_args(args) setup_logging(options) assert_s3ql_mountpoint(options.mountpoint) try: if options.lazy: lazy_umount(options.mountpoint) else: blocking_umount(options.mountpoint) except MountInUseError as err: print('Cannot unmount, the following processes still access the mountpoint:', file=sys.stderr) subprocess.call(['fuser', '-v', '-m', options.mountpoint], stdout=sys.stderr, stderr=sys.stderr) sys.exit(err.exitcode) except UmountError as err: print('%s: %s' % (options.mountpoint, err), file=sys.stderr) sys.exit(err.exitcode) sys.exit(0) if __name__ == '__main__': main(sys.argv[1:]) s3ql-2.26/src/s3ql/common.py0000644000175000017500000004527713223730045017340 0ustar nikrationikratio00000000000000''' common.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from .logging import logging, QuietError # Ensure use of custom logger class from . 
import BUFSIZE, CTRL_NAME, ROOT_INODE from dugong import HostnameNotResolvable from getpass import getpass from ast import literal_eval from base64 import b64decode, b64encode import binascii import configparser import re import stat import threading import traceback import sys import os import time import subprocess import errno import hashlib import llfuse import posixpath import functools import contextlib log = logging.getLogger(__name__) file_system_encoding = sys.getfilesystemencoding() def path2bytes(s): return s.encode(file_system_encoding, 'surrogateescape') def bytes2path(s): return s.decode(file_system_encoding, 'surrogateescape') def get_seq_no(backend): '''Get current metadata sequence number''' from .backends.common import NoSuchObject seq_nos = list(backend.list('s3ql_seq_no_')) if not seq_nos: # Maybe list result is outdated seq_nos = [ 's3ql_seq_no_1' ] seq_nos = [ int(x[len('s3ql_seq_no_'):]) for x in seq_nos ] seq_no = max(seq_nos) # Make sure that object really exists while ('s3ql_seq_no_%d' % seq_no) not in backend: seq_no -= 1 if seq_no == 0: raise QuietError('No S3QL file system found at given storage URL.', exitcode=18) while ('s3ql_seq_no_%d' % seq_no) in backend: seq_no += 1 seq_no -= 1 # Delete old seq nos for i in [ x for x in seq_nos if x < seq_no - 10 ]: try: del backend['s3ql_seq_no_%d' % i] except NoSuchObject: pass # Key list may not be up to date return seq_no def is_mounted(storage_url): '''Try to determine if *storage_url* is mounted Note that the result may be wrong.. this is really just a best-effort guess. ''' match = storage_url + ' ' if os.path.exists('/proc/mounts'): with open('/proc/mounts', 'r') as fh: for line in fh: if line.startswith(match): return True return False try: for line in subprocess.check_output(['mount'], stderr=subprocess.STDOUT, universal_newlines=True): if line.startswith(match): return True except subprocess.CalledProcessError: log.warning('Warning! Unable to check if file system is mounted ' '(/proc/mounts missing and mount call failed)') return False def inode_for_path(path, conn): """Return inode of directory entry at `path` Raises `KeyError` if the path does not exist. """ from .database import NoSuchRowError if not isinstance(path, bytes): raise TypeError('path must be of type bytes') # Remove leading and trailing / path = path.lstrip(b"/").rstrip(b"/") # Traverse inode = ROOT_INODE for el in path.split(b'/'): try: inode = conn.get_val("SELECT inode FROM contents_v WHERE name=? AND parent_inode=?", (el, inode)) except NoSuchRowError: raise KeyError('Path %s does not exist' % path) return inode def get_path(id_, conn, name=None): """Return a full path for inode `id_`. If `name` is specified, it is appended at the very end of the path (useful if looking up the path for file name with parent inode). """ if name is None: path = list() else: if not isinstance(name, bytes): raise TypeError('name must be of type bytes') path = [ name ] maxdepth = 255 while id_ != ROOT_INODE: # This can be ambiguous if directories are hardlinked (name2, id_) = conn.get_row("SELECT name, parent_inode FROM contents_v " "WHERE inode=? 
LIMIT 1", (id_,)) path.append(name2) maxdepth -= 1 if maxdepth == 0: raise RuntimeError('Failed to resolve name "%s" at inode %d to path', name, id_) path.append(b'') path.reverse() return b'/'.join(path) def _escape(s): '''Escape '/', '=' and '\0' in s''' s = s.replace('=', '=3D') s = s.replace('/', '=2F') s = s.replace('\0', '=00') return s def get_backend_cachedir(storage_url, cachedir): if not os.path.exists(cachedir): try: os.mkdir(cachedir, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) except PermissionError: raise QuietError('No permission to create cache directory (%s)' % cachedir, exitcode=45) if not os.access(cachedir, os.R_OK | os.W_OK | os.X_OK): raise QuietError('No permission to access cache directory (%s)' % cachedir, exitcode=45) return os.path.abspath(os.path.join(cachedir, _escape(storage_url))) def sha256_fh(fh): fh.seek(0) # Bogus error about hashlib not having a sha256 member #pylint: disable=E1101 sha = hashlib.sha256() while True: buf = fh.read(BUFSIZE) if not buf: break sha.update(buf) return sha.digest() def assert_s3ql_fs(path): '''Raise `QuietError` if *path* is not on an S3QL file system Returns name of the S3QL control file. ''' try: os.stat(path) except FileNotFoundError: raise QuietError('%s does not exist' % path) except OSError as exc: if exc.errno is errno.ENOTCONN: raise QuietError('File system appears to have crashed.') raise ctrlfile = os.path.join(path, CTRL_NAME) if not (CTRL_NAME not in llfuse.listdir(path) and os.path.exists(ctrlfile)): raise QuietError('%s is not on an S3QL file system' % path) return ctrlfile def assert_fs_owner(path, mountpoint=False): '''Raise `QuietError` if user is not owner of S3QL fs at *path* Implicitly calls `assert_s3ql_fs` first. Returns name of the S3QL control file. If *mountpoint* is True, also call `assert_s3ql_mountpoint`, i.e. fail if *path* is not the mount point of the file system. ''' if mountpoint: ctrlfile = assert_s3ql_mountpoint(path) else: ctrlfile = assert_s3ql_fs(path) if os.stat(ctrlfile).st_uid != os.geteuid() and os.geteuid() != 0: raise QuietError('Permission denied. %s is was not mounted by you ' 'and you are not root.' % path) return ctrlfile def assert_s3ql_mountpoint(mountpoint): '''Raise QuietError if *mountpoint* is not an S3QL mountpoint Implicitly calls `assert_s3ql_fs` first. Returns name of the S3QL control file. ''' ctrlfile = assert_s3ql_fs(mountpoint) if not posixpath.ismount(mountpoint): raise QuietError('%s is not a mount point' % mountpoint) return ctrlfile def get_backend(options, raw=False): '''Return backend for given storage-url If *raw* is true, don't attempt to unlock and don't wrap into ComprencBackend. ''' return get_backend_factory(options.storage_url, options.backend_options, options.authfile, getattr(options, 'compress', ('lzma', 2)), raw)() def get_backend_factory(storage_url, backend_options, authfile, compress=('lzma', 2), raw=False): '''Return factory producing backend objects for given storage-url If *raw* is true, don't attempt to unlock and don't wrap into ComprencBackend. 
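    Example use (sketch only; the storage URL and authinfo path below are
    made-up placeholders)::

        factory = get_backend_factory('local:///srv/s3ql-data', {},
                                      '/home/user/.s3ql/authinfo2')
        with factory() as backend:
            print('s3ql_passphrase' in backend)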
''' from .backends import prefix_map from .backends.common import (CorruptedObjectError, NoSuchObject, AuthenticationError, DanglingStorageURLError, AuthorizationError) from .backends.comprenc import ComprencBackend hit = re.match(r'^([a-zA-Z0-9]+)://', storage_url) if not hit: raise QuietError('Unable to parse storage url "%s"' % storage_url, exitcode=2) backend = hit.group(1) try: backend_class = prefix_map[backend] except KeyError: raise QuietError('No such backend: %s' % backend, exitcode=11) # Validate backend options for opt in backend_options.keys(): if opt not in backend_class.known_options: raise QuietError('Unknown backend option: %s' % opt, exitcode=3) # Read authfile config = configparser.ConfigParser() if os.path.isfile(authfile): mode = os.stat(authfile).st_mode if mode & (stat.S_IRGRP | stat.S_IROTH): raise QuietError("%s has insecure permissions, aborting." % authfile, exitcode=12) config.read(authfile) backend_login = None backend_passphrase = None fs_passphrase = None for section in config.sections(): def getopt(name): try: return config.get(section, name) except configparser.NoOptionError: return None pattern = getopt('storage-url') if not pattern or not storage_url.startswith(pattern): continue backend_login = getopt('backend-login') or backend_login backend_passphrase = getopt('backend-password') or backend_passphrase fs_passphrase = getopt('fs-passphrase') or fs_passphrase if not backend_login and backend_class.needs_login: if sys.stdin.isatty(): backend_login = getpass("Enter backend login: ") else: backend_login = sys.stdin.readline().rstrip() if not backend_passphrase and backend_class.needs_login: if sys.stdin.isatty(): backend_passphrase = getpass("Enter backend passphrase: ") else: backend_passphrase = sys.stdin.readline().rstrip() backend = None try: backend = backend_class(storage_url, backend_login, backend_passphrase, backend_options) # Do not use backend.lookup(), this would use a HEAD request and # not provide any useful error messages if something goes wrong # (e.g. wrong credentials) backend.fetch('s3ql_passphrase') except AuthenticationError: raise QuietError('Invalid credentials (or skewed system clock?).', exitcode=14) except AuthorizationError: raise QuietError('No permission to access backend.', exitcode=15) except HostnameNotResolvable: raise QuietError("Can't connect to backend: unable to resolve hostname", exitcode=19) except DanglingStorageURLError as exc: raise QuietError(str(exc), exitcode=16) except NoSuchObject: encrypted = False else: encrypted = True finally: if backend is not None: backend.close() if raw: return lambda: backend_class(storage_url, backend_login, backend_passphrase, backend_options) if encrypted and not fs_passphrase: if sys.stdin.isatty(): fs_passphrase = getpass("Enter file system encryption passphrase: ") else: fs_passphrase = sys.stdin.readline().rstrip() elif not encrypted: fs_passphrase = None if fs_passphrase is not None: fs_passphrase = fs_passphrase.encode('utf-8') if not encrypted: return lambda: ComprencBackend(None, compress, backend_class(storage_url, backend_login, backend_passphrase, backend_options)) with ComprencBackend(fs_passphrase, compress, backend) as tmp_backend: try: data_pw = tmp_backend['s3ql_passphrase'] except CorruptedObjectError: raise QuietError('Wrong file system passphrase', exitcode=17) # To support upgrade, temporarily store the backend # passphrase in every backend object. 
def factory(): b = ComprencBackend(data_pw, compress, backend_class(storage_url, backend_login, backend_passphrase, backend_options)) b.fs_passphrase = fs_passphrase return b return factory def pretty_print_size(i): '''Return *i* as string with appropriate suffix (MiB, GiB, etc)''' if i < 1024: return '%d bytes' % i if i < 1024**2: unit = 'KiB' i /= 1024 elif i < 1024**3: unit = 'MiB' i /= 1024**2 elif i < 1024**4: unit = 'GiB' i /= 1024**3 else: unit = 'TB' i /= 1024**4 if i < 10: form = '%.2f %s' elif i < 100: form = '%.1f %s' else: form = '%d %s' return form % (i, unit) class ExceptionStoringThread(threading.Thread): def __init__(self): super().__init__() self._exc_info = None self._joined = False def run_protected(self): pass def run(self): try: self.run_protected() except: # This creates a circular reference chain self._exc_info = sys.exc_info() log.exception('Thread %s terminated with exception', self.name) def join_get_exc(self): self._joined = True self.join() return self._exc_info def join_and_raise(self, timeout=None): '''Wait for the thread to finish, raise any occurred exceptions''' self._joined = True if self.is_alive(): self.join(timeout=timeout) if self._exc_info is not None: # Break reference chain exc_info = self._exc_info self._exc_info = None raise EmbeddedException(exc_info, self.name) def __del__(self): if not self._joined: raise RuntimeError("ExceptionStoringThread instance was destroyed " "without calling join_and_raise()!") class EmbeddedException(Exception): '''Encapsulates an exception that happened in a different thread ''' def __init__(self, exc_info, threadname): super().__init__() self.exc_info = exc_info self.threadname = threadname def __str__(self): return ''.join(['caused by an exception in thread %s.\n' % self.threadname, 'Original/inner traceback (most recent call last): \n' ] + traceback.format_exception(*self.exc_info)) class AsyncFn(ExceptionStoringThread): def __init__(self, fn, *args, **kwargs): super().__init__() self.target = fn self.args = args self.kwargs = kwargs def run_protected(self): self.target(*self.args, **self.kwargs) def split_by_n(seq, n): '''Yield elements in iterable *seq* in groups of *n*''' while seq: yield seq[:n] seq = seq[n:] def handle_on_return(fn): '''Provide fresh ExitStack instance in `on_return` argument''' @functools.wraps(fn) def wrapper(*a, **kw): assert 'on_return' not in kw with contextlib.ExitStack() as on_return: kw['on_return'] = on_return return fn(*a, **kw) return wrapper def parse_literal(buf, type_spec): '''Try to parse *buf* as *type_spec* Raise `ValueError` if *buf* does not contain a valid Python literal, or if the literal does not correspond to *type_spec*. Example use:: buf = b'[1, 'a', 3]' parse_literal(buf, [int, str, int]) ''' try: obj = literal_eval(buf.decode()) except UnicodeDecodeError: raise ValueError('unable to decode as utf-8') except (ValueError, SyntaxError): raise ValueError('unable to parse as python literal') if (isinstance(type_spec, list) and type(obj) == list and [ type(x) for x in obj ] == type_spec): return obj elif (isinstance(type_spec, tuple) and type(obj) == tuple and [ type(x) for x in obj ] == list(type_spec)): return obj elif type(obj) == type_spec: return obj raise ValueError('literal has wrong type') class ThawError(Exception): def __str__(self): return 'Malformed serialization data' def thaw_basic_mapping(buf): '''Reconstruct dict from serialized representation *buf* must be a bytes-like object as created by `freeze_basic_mapping`. 
Raises `ThawError` if *buf* is not a valid representation. This procedure is safe even if *buf* comes from an untrusted source. ''' try: d = literal_eval(buf.decode('utf-8')) except (UnicodeDecodeError, SyntaxError, ValueError): raise ThawError() # Decode bytes values for (k,v) in d.items(): if not isinstance(v, bytes): continue try: d[k] = b64decode(v) except binascii.Error: raise ThawError() return d def freeze_basic_mapping(d): '''Serialize mapping of elementary types Keys of *d* must be strings. Values of *d* must be of elementary type (i.e., `str`, `bytes`, `int`, `float`, `complex`, `bool` or None). The output is a bytestream that can be used to reconstruct the mapping. The bytestream is not guaranteed to be deterministic. Look at `checksum_basic_mapping` if you need a deterministic bytestream. ''' els = [] for (k,v) in d.items(): if not isinstance(k, str): raise ValueError('key %s must be str, not %s' % (k, type(k))) if (not isinstance(v, (str, bytes, bytearray, int, float, complex, bool)) and v is not None): raise ValueError('value for key %s (%s) is not elementary' % (k, v)) # To avoid wasting space, we b64encode non-ascii byte values. if isinstance(v, (bytes, bytearray)): v = b64encode(v) # This should be a pretty safe assumption for elementary types, but we # add an assert just to be safe (Python docs just say that repr makes # "best effort" to produce something parseable) (k_repr, v_repr) = (repr(k), repr(v)) assert (literal_eval(k_repr), literal_eval(v_repr)) == (k, v) els.append(('%s: %s' % (k_repr, v_repr))) buf = '{ %s }' % ', '.join(els) return buf.encode('utf-8') def load_params(cachepath): with open(cachepath + '.params' , 'rb') as fh: return thaw_basic_mapping(fh.read()) def save_params(cachepath, param): with open(cachepath + '.params', 'wb') as fh: fh.write(freeze_basic_mapping(param)) # Fsync to make sure that the updated sequence number is committed to # disk. Otherwise, a crash immediately after mount could result in both # the local and remote metadata appearing to be out of date. fh.flush() os.fsync(fh.fileno()) def time_ns(): return int(time.time() * 1e9) s3ql-2.26/src/s3ql/inherit_docstrings.py0000644000175000017500000000770112615000156021734 0ustar nikrationikratio00000000000000''' inherit_docstrings.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. --- This module defines a metaclass and function decorator that allows to inherit the docstring for a function from the superclass. ''' from functools import partial from abc import ABCMeta from .calc_mro import calc_mro __all__ = [ 'copy_ancestor_docstring', 'prepend_ancestor_docstring', 'InheritableDocstrings', 'ABCDocstMeta' ] # This definition is only used to assist static code analyzers def copy_ancestor_docstring(fn): '''Copy docstring for method from superclass For this decorator to work, the class has to use the `InheritableDocstrings` metaclass. 
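    Example (illustrative sketch)::

        class Base:
            def run(self):
                'Do the work.'

        class Derived(Base, metaclass=InheritableDocstrings):
            @copy_ancestor_docstring
            def run(self):
                pass
        # Derived.run.__doc__ is now copied from Base.run.__doc__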
''' raise RuntimeError('Decorator can only be used in classes ' 'using the `InheritableDocstrings` metaclass') def _copy_ancestor_docstring(mro, fn): '''Decorator to set docstring for *fn* from *mro*''' if fn.__doc__ is not None: raise RuntimeError('Function already has docstring') # Search for docstring in superclass for cls in mro: super_fn = getattr(cls, fn.__name__, None) if super_fn is None: continue fn.__doc__ = super_fn.__doc__ break else: raise RuntimeError("Can't inherit docstring for %s: method does not " "exist in superclass" % fn.__name__) return fn # This definition is only used to assist static code analyzers def prepend_ancestor_docstring(fn): '''Prepend docstring from superclass method For this decorator to work, the class has to use the `InheritableDocstrings` metaclass. ''' raise RuntimeError('Decorator can only be used in classes ' 'using the `InheritableDocstrings` metaclass') def _prepend_ancestor_docstring(mro, fn): '''Decorator to prepend ancestor docstring to *fn*''' if fn.__doc__ is None: fn.__doc__ = '' # Search for docstring in superclass for cls in mro: super_fn = getattr(cls, fn.__name__, None) if super_fn is None: continue if super_fn.__doc__.endswith('\n') and fn.__doc__.startswith('\n'): fn.__doc__ = super_fn.__doc__ + fn.__doc__ else: fn.__doc__ = '%s\n%s' % (super_fn.__doc__, fn.__doc__) break else: raise RuntimeError("Can't find ancestor docstring for %s: method does not " "exist in superclass" % fn.__name__) return fn DECORATORS = (('copy_ancestor_docstring', _copy_ancestor_docstring), ('prepend_ancestor_docstring', _prepend_ancestor_docstring)) class InheritableDocstrings(type): @classmethod def __prepare__(cls, name, bases, **kwds): classdict = super().__prepare__(name, bases, *kwds) mro = calc_mro(*bases) # Inject decorators into class namespace for (name, fn) in DECORATORS: classdict[name] = partial(fn, mro) return classdict def __new__(cls, name, bases, classdict): for (dec_name, fn) in DECORATORS: # Decorators may not exist in class dict if the class (metaclass # instance) was constructed with an explicit call to `type` # (Pythonbug? reported as http://bugs.python.org/issue18334) if dec_name not in classdict: continue # Make sure that class definition hasn't messed with decorator if getattr(classdict[dec_name], 'func', None) is not fn: raise RuntimeError('No %s attribute may be created in classes using ' 'the InheritableDocstrings metaclass' % name) # Delete decorator from class namespace del classdict[dec_name] return super().__new__(cls, name, bases, classdict) # Derive new metaclass to add docstring inheritance class ABCDocstMeta(ABCMeta, InheritableDocstrings): pass s3ql-2.26/src/s3ql/verify.py0000644000175000017500000001601412615000156017334 0ustar nikrationikratio00000000000000''' verify.py - this file is part of S3QL. Copyright © 2014 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from .logging import logging, setup_logging from .mount import get_metadata from . 
import BUFSIZE from .common import (get_backend_factory, get_backend_cachedir, pretty_print_size, AsyncFn) from .backends.common import NoSuchObject, CorruptedObjectError from .parse_args import ArgumentParser from queue import Queue, Full as QueueFull import os import argparse import time import signal import faulthandler import sys import textwrap import atexit log = logging.getLogger(__name__) def _new_file_type(s, encoding='utf-8'): '''An argparse type for a file that does not yet exist''' if os.path.exists(s): msg = 'File already exists - refusing to overwrite: %s' % s raise argparse.ArgumentTypeError(msg) fh = open(s, 'w', encoding=encoding) atexit.register(fh.close) return fh def parse_args(args): '''Parse command line''' parser = ArgumentParser( description=textwrap.dedent('''\ Verifies that all data in an S3QL file system can be downloaded from the storage backend. In contrast to fsck.s3ql, this program does not trust the object listing returned by the backend, but actually attempts to retrieve every object. It therefore takes a lot longer. ''')) parser.add_debug() parser.add_quiet() parser.add_version() parser.add_cachedir() parser.add_authfile() parser.add_backend_options() parser.add_storage_url() parser.add_argument("--missing-file", type=_new_file_type, metavar='', default='missing_objects.txt', help="File to store keys of missing objects.") parser.add_argument("--corrupted-file", type=_new_file_type, metavar='', default='corrupted_objects.txt', help="File to store keys of corrupted objects.") parser.add_argument("--data", action="store_true", default=False, help="Read every object completely, instead of checking " "just the metadata.") parser.add_argument("--parallel", default=4, type=int, help="Number of connections to use in parallel.") parser.add_argument("--start-with", default=0, type=int, metavar='', help="Skip over first objects and with verifying " "object +1.") options = parser.parse_args(args) return options def main(args=None): faulthandler.enable() faulthandler.register(signal.SIGUSR1) if args is None: args = sys.argv[1:] options = parse_args(args) setup_logging(options) backend_factory = get_backend_factory(options.storage_url, options.backend_options, options.authfile) # Get paths cachepath = get_backend_cachedir(options.storage_url, options.cachedir) # Retrieve metadata with backend_factory() as backend: (param, db) = get_metadata(backend, cachepath) retrieve_objects(db, backend_factory, options.corrupted_file, options.missing_file, thread_count=options.parallel, full=options.data, offset=options.start_with) if options.corrupted_file.tell() or options.missing_file.tell(): sys.exit(46) else: os.unlink(options.corrupted_file.name) os.unlink(options.missing_file.name) sys.exit(0) def retrieve_objects(db, backend_factory, corrupted_fh, missing_fh, thread_count=1, full=False, offset=0): """Attempt to retrieve every object""" log.info('Reading all objects...') queue = Queue(thread_count) threads = [] for _ in range(thread_count): t = AsyncFn(_retrieve_loop, queue, backend_factory, corrupted_fh, missing_fh, full) # Don't wait for worker threads, gives deadlock if main thread # terminates with exception t.daemon = True t.start() threads.append(t) total_size = db.get_val('SELECT SUM(size) FROM objects') total_count = db.get_val('SELECT COUNT(id) FROM objects') size_acc = 0 sql = 'SELECT id, size FROM objects ORDER BY id' i = 0 # Make sure this is set if there are zero objects stamp1 = 0 try: for (i, (obj_id, size)) in enumerate(db.query(sql)): stamp2 = time.time() if 
stamp2 - stamp1 > 1: stamp1 = stamp2 progress = '%d objects (%.2f%%)' % (i, i/total_count * 100) if full: s = pretty_print_size(size_acc) progress += ' / %s (%.2f%%)' % (s, size_acc / total_size * 100) sys.stdout.write('\r..processed %s so far..' % progress) sys.stdout.flush() # Terminate early if any thread failed with an exception for t in threads: if not t.is_alive(): t.join_and_raise() size_acc += size if i < offset: continue # Avoid blocking if all threads terminated while True: try: queue.put(obj_id, timeout=1) except QueueFull: pass else: break for t in threads: if not t.is_alive(): t.join_and_raise() finally: sys.stdout.write('\n') queue.maxsize += len(threads) for t in threads: queue.put(None) for t in threads: t.join_and_raise() log.info('Verified all %d storage objects.', i) def _retrieve_loop(queue, backend_factory, corrupted_fh, missing_fh, full=False): '''Retrieve object ids arriving in *queue* from *backend* If *full* is False, lookup and read metadata. If *full* is True, read entire object. Corrupted objects are written into *corrupted_fh*. Missing objects are written into *missing_fh*. Terminate when None is received. ''' with backend_factory() as backend: while True: obj_id = queue.get() if obj_id is None: break log.debug('reading object %s', obj_id) def do_read(fh): while True: buf = fh.read(BUFSIZE) if not buf: break key = 's3ql_data_%d' % obj_id try: if full: backend.perform_read(do_read, key) else: backend.lookup(key) except NoSuchObject: log.warning('Backend seems to have lost object %d', obj_id) print(key, file=missing_fh) except CorruptedObjectError: log.warning('Object %d is corrupted', obj_id) print(key, file=corrupted_fh) if __name__ == '__main__': main(sys.argv[1:]) s3ql-2.26/src/s3ql/remove.py0000644000175000017500000000305512742247106017337 0ustar nikrationikratio00000000000000''' remove.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from .logging import logging, setup_logging, QuietError from .common import assert_fs_owner, path2bytes from .parse_args import ArgumentParser import llfuse import os import sys import textwrap log = logging.getLogger(__name__) def parse_args(args): '''Parse command line''' parser = ArgumentParser( description=textwrap.dedent('''\ Recursively delete files and directories in an S3QL file system, including immutable entries. ''')) parser.add_debug() parser.add_quiet() parser.add_version() parser.add_argument('path', metavar='', nargs='+', help='Directories to remove', type=(lambda x: x.rstrip('/'))) return parser.parse_args(args) def main(args=None): '''Recursively delete files and directories in an S3QL file system''' if args is None: args = sys.argv[1:] options = parse_args(args) setup_logging(options) for name in options.path: if os.path.ismount(name): raise QuietError('%s is a mount point.' % name) ctrlfile = assert_fs_owner(name) fstat_p = os.stat(os.path.dirname(os.path.abspath(name))) cmd = ('(%d, %r)' % (fstat_p.st_ino, path2bytes(os.path.basename(name)))).encode() llfuse.setxattr(ctrlfile, 'rmtree', cmd) if __name__ == '__main__': main(sys.argv[1:]) s3ql-2.26/src/s3ql/metadata.py0000644000175000017500000002432513237312454017624 0ustar nikrationikratio00000000000000''' metadata.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from .logging import logging # Ensure use of custom logger class from .database import Connection from . 
import BUFSIZE from .common import pretty_print_size from .deltadump import INTEGER, BLOB, dump_table, load_table from .backends.common import NoSuchObject, CorruptedObjectError import os import tempfile import bz2 import stat log = logging.getLogger(__name__) # Has to be kept in sync with create_tables()! DUMP_SPEC = [ ('objects', 'id', (('id', INTEGER, 1), ('size', INTEGER), ('refcount', INTEGER))), ('blocks', 'id', (('id', INTEGER, 1), ('hash', BLOB, 32), ('size', INTEGER), ('obj_id', INTEGER, 1), ('refcount', INTEGER))), ('inodes', 'id', (('id', INTEGER, 1), ('uid', INTEGER), ('gid', INTEGER), ('mode', INTEGER), ('mtime_ns', INTEGER), ('atime_ns', INTEGER), ('ctime_ns', INTEGER), ('size', INTEGER), ('rdev', INTEGER), ('locked', INTEGER), ('refcount', INTEGER))), ('inode_blocks', 'inode, blockno', (('inode', INTEGER), ('blockno', INTEGER, 1), ('block_id', INTEGER, 1))), ('symlink_targets', 'inode', (('inode', INTEGER, 1), ('target', BLOB))), ('names', 'id', (('id', INTEGER, 1), ('name', BLOB), ('refcount', INTEGER))), ('contents', 'parent_inode, name_id', (('name_id', INTEGER, 1), ('inode', INTEGER, 1), ('parent_inode', INTEGER))), ('ext_attributes', 'inode', (('inode', INTEGER), ('name_id', INTEGER), ('value', BLOB))), ] def restore_metadata(fh, dbfile): '''Read metadata from *fh* and write into *dbfile* Return database connection to *dbfile*. *fh* must be able to return an actual file descriptor from its `fileno` method. *dbfile* will be created with 0600 permissions. Data is first written into a temporary file *dbfile* + '.tmp', and the file is renamed once all data has been loaded. ''' tmpfile = dbfile + '.tmp' fd = os.open(tmpfile, os.O_RDWR | os.O_CREAT | os.O_TRUNC, stat.S_IRUSR | stat.S_IWUSR) try: os.close(fd) db = Connection(tmpfile) db.execute('PRAGMA locking_mode = NORMAL') db.execute('PRAGMA synchronous = OFF') db.execute('PRAGMA journal_mode = OFF') create_tables(db) for (table, _, columns) in DUMP_SPEC: log.info('..%s..', table) load_table(table, columns, db=db, fh=fh) db.execute('ANALYZE') # We must close the database to rename it db.close() except: os.unlink(tmpfile) raise os.rename(tmpfile, dbfile) return Connection(dbfile) def cycle_metadata(backend, keep=10): '''Rotate metadata backups''' # Since we always overwrite the source afterwards, we can # use either copy or rename - so we pick whatever is faster. if backend.has_native_rename: cycle_fn = backend.rename else: cycle_fn = backend.copy log.info('Backing up old metadata...') for i in range(keep)[::-1]: try: cycle_fn("s3ql_metadata_bak_%d" % i, "s3ql_metadata_bak_%d" % (i + 1)) except NoSuchObject: pass # If we use backend.rename() and crash right after this instruction, # we will end up without an s3ql_metadata object. However, fsck.s3ql # is smart enough to use s3ql_metadata_new in this case. try: cycle_fn("s3ql_metadata", "s3ql_metadata_bak_0") except NoSuchObject: # In case of mkfs, there may be no metadata object yet pass cycle_fn("s3ql_metadata_new", "s3ql_metadata") # Note that we can't compare with "is" (maybe because the bound-method # is re-created on the fly on access?) if cycle_fn == backend.copy: backend.delete('s3ql_metadata_new') def dump_metadata(db, fh): '''Dump metadata into fh *fh* must be able to return an actual file descriptor from its `fileno` method. 
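    Example use (sketch, mirroring `dump_and_upload_metadata` below)::

        with tempfile.TemporaryFile() as fh:
            dump_metadata(db, fh)
            fh.seek(0)
            # *fh* now holds the serialized table dumps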
''' locking_mode = db.get_val('PRAGMA locking_mode') try: # Ensure that we don't hold a lock on the db # (need to access DB to actually release locks) db.execute('PRAGMA locking_mode = NORMAL') db.has_val('SELECT rowid FROM %s LIMIT 1' % DUMP_SPEC[0][0]) for (table, order, columns) in DUMP_SPEC: log.info('..%s..', table) dump_table(table, order, columns, db=db, fh=fh) finally: db.execute('PRAGMA locking_mode = %s' % locking_mode) def create_tables(conn): # Table of storage objects # Refcount is included for performance reasons # size == -1 indicates block has not been uploaded yet conn.execute(""" CREATE TABLE objects ( id INTEGER PRIMARY KEY AUTOINCREMENT, refcount INT NOT NULL, size INT NOT NULL )""") # Table of known data blocks # Refcount is included for performance reasons conn.execute(""" CREATE TABLE blocks ( id INTEGER PRIMARY KEY, hash BLOB(16) UNIQUE, refcount INT, size INT NOT NULL, obj_id INTEGER NOT NULL REFERENCES objects(id) )""") # Table with filesystem metadata # The number of links `refcount` to an inode can in theory # be determined from the `contents` table. However, managing # this separately should be significantly faster (the information # is required for every getattr!) conn.execute(""" CREATE TABLE inodes ( -- id has to specified *exactly* as follows to become -- an alias for the rowid. id INTEGER PRIMARY KEY AUTOINCREMENT, uid INT NOT NULL, gid INT NOT NULL, mode INT NOT NULL, mtime_ns INT NOT NULL, atime_ns INT NOT NULL, ctime_ns INT NOT NULL, refcount INT NOT NULL, size INT NOT NULL DEFAULT 0, rdev INT NOT NULL DEFAULT 0, locked BOOLEAN NOT NULL DEFAULT 0 )""") # Further Blocks used by inode (blockno >= 1) conn.execute(""" CREATE TABLE inode_blocks ( inode INTEGER NOT NULL REFERENCES inodes(id), blockno INT NOT NULL, block_id INTEGER NOT NULL REFERENCES blocks(id), PRIMARY KEY (inode, blockno) )""") # Symlinks conn.execute(""" CREATE TABLE symlink_targets ( inode INTEGER PRIMARY KEY REFERENCES inodes(id), target BLOB NOT NULL )""") # Names of file system objects conn.execute(""" CREATE TABLE names ( id INTEGER PRIMARY KEY, name BLOB NOT NULL, refcount INT NOT NULL, UNIQUE (name) )""") # Table of filesystem objects # rowid is used by readdir() to restart at the correct position conn.execute(""" CREATE TABLE contents ( rowid INTEGER PRIMARY KEY AUTOINCREMENT, name_id INT NOT NULL REFERENCES names(id), inode INT NOT NULL REFERENCES inodes(id), parent_inode INT NOT NULL REFERENCES inodes(id), UNIQUE (parent_inode, name_id) )""") # Extended attributes conn.execute(""" CREATE TABLE ext_attributes ( inode INTEGER NOT NULL REFERENCES inodes(id), name_id INTEGER NOT NULL REFERENCES names(id), value BLOB NOT NULL, PRIMARY KEY (inode, name_id) )""") # Shortcuts conn.execute(""" CREATE VIEW contents_v AS SELECT * FROM contents JOIN names ON names.id = name_id """) conn.execute(""" CREATE VIEW ext_attributes_v AS SELECT * FROM ext_attributes JOIN names ON names.id = name_id """) def stream_write_bz2(ifh, ofh): '''Compress *ifh* into *ofh* using bz2 compression''' compr = bz2.BZ2Compressor(9) while True: buf = ifh.read(BUFSIZE) if not buf: break buf = compr.compress(buf) if buf: ofh.write(buf) buf = compr.flush() if buf: ofh.write(buf) def stream_read_bz2(ifh, ofh): '''Uncompress bz2 compressed *ifh* into *ofh*''' decompressor = bz2.BZ2Decompressor() while True: buf = ifh.read(BUFSIZE) if not buf: break buf = decompressor.decompress(buf) if buf: ofh.write(buf) if decompressor.unused_data or ifh.read(1) != b'': raise CorruptedObjectError('Data after end of bz2 stream') def 
download_metadata(backend, db_file, name='s3ql_metadata'): with tempfile.TemporaryFile() as tmpfh: def do_read(fh): tmpfh.seek(0) tmpfh.truncate() stream_read_bz2(fh, tmpfh) log.info('Downloading and decompressing metadata...') backend.perform_read(do_read, name) log.info("Reading metadata...") tmpfh.seek(0) return restore_metadata(tmpfh, db_file) def dump_and_upload_metadata(backend, db, param): with tempfile.TemporaryFile() as fh: log.info('Dumping metadata...') dump_metadata(db, fh) upload_metadata(backend, fh, param) def upload_metadata(backend, fh, param): log.info("Compressing and uploading metadata...") def do_write(obj_fh): fh.seek(0) stream_write_bz2(fh, obj_fh) return obj_fh obj_fh = backend.perform_write(do_write, "s3ql_metadata_new", metadata=param, is_compressed=True) log.info('Wrote %s of compressed metadata.', pretty_print_size(obj_fh.get_obj_size())) log.info('Cycling metadata backups...') cycle_metadata(backend) s3ql-2.26/src/s3ql/block_cache.py0000644000175000017500000010202513241372155020252 0ustar nikrationikratio00000000000000''' block_cache.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from . import BUFSIZE from .database import NoSuchRowError from .backends.common import NoSuchObject from .multi_lock import MultiLock from .logging import logging # Ensure use of custom logger class from collections import OrderedDict from contextlib import contextmanager from llfuse import lock, lock_released from queue import Queue, Empty as QueueEmpty, Full as QueueFull import os import hashlib import shutil import threading import time import sys # standard logger for this module log = logging.getLogger(__name__) # Special queue entry that signals threads to terminate QuitSentinel = object() # Special queue entry that signals that removal queue should # be flushed FlushSentinel = object() class NoWorkerThreads(Exception): ''' Raised when trying to enqueue an object, but there are no active consumer threads. ''' pass class Distributor(object): ''' Distributes objects to consumers. ''' def __init__(self): super().__init__() self.slot = None self.cv = threading.Condition() #: Number of threads waiting to consume an object self.readers = 0 def put(self, obj, timeout=None): '''Offer *obj* for consumption The method blocks until another thread calls `get()` to consume the object. Return `True` if the object was consumed, and `False` if *timeout* was exceeded without any activity in the queue (this means an individual invocation may wait for longer than *timeout* if objects from other threads are being consumed). ''' if obj is None: raise ValueError("Can't put None into Queue") with self.cv: # Wait until a thread is ready to read while self.readers == 0 or self.slot is not None: log.debug('waiting for reader..') if not self.cv.wait(timeout): log.debug('timeout, returning') return False log.debug('got reader, enqueueing %s', obj) self.readers -= 1 assert self.slot is None self.slot = obj self.cv.notify_all() # notify readers return True def get(self): '''Consume and return an object The method blocks until another thread offers an object by calling the `put` method. ''' with self.cv: self.readers += 1 self.cv.notify_all() while self.slot is None: log.debug('waiting for writer..') self.cv.wait() tmp = self.slot self.slot = None self.cv.notify_all() return tmp class SimpleEvent(object): ''' Like threading.Event, but without any internal flag. 
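    Example (illustrative)::

        ev = SimpleEvent()
        # Thread A blocks here until some other thread calls
        # ev.notify() or ev.notify_all():
        ev.wait()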
Calls to `wait` always block until some other thread calls `notify` or `notify_all`. ''' def __init__(self): super().__init__() self.__cond = threading.Condition(threading.Lock()) def notify_all(self): self.__cond.acquire() try: self.__cond.notify_all() finally: self.__cond.release() def notify(self): self.__cond.acquire() try: self.__cond.notify() finally: self.__cond.release() def wait(self, timeout=None): self.__cond.acquire() try: return self.__cond.wait(timeout) finally: self.__cond.release() class CacheEntry(object): """An element in the block cache Attributes: ----------- :dirty: entry has been changed since it was last uploaded. :size: current file size :pos: current position in file """ __slots__ = [ 'dirty', 'inode', 'blockno', 'last_access', 'size', 'pos', 'fh', 'removed' ] def __init__(self, inode, blockno, filename): super().__init__() # Writing 100MB in 128k chunks takes 90ms unbuffered and # 116ms with 1 MB buffer. Reading time does not depend on # buffer size. self.fh = open(filename, "w+b", 0) self.dirty = False self.inode = inode self.blockno = blockno self.last_access = 0 self.pos = 0 self.size = os.fstat(self.fh.fileno()).st_size def read(self, size=None): buf = self.fh.read(size) self.pos += len(buf) return buf def flush(self): self.fh.flush() def seek(self, off): if self.pos != off: self.fh.seek(off) self.pos = off def tell(self): return self.pos def truncate(self, size=None): self.dirty = True self.fh.truncate(size) if size is None: if self.pos < self.size: self.size = self.pos elif size < self.size: self.size = size def write(self, buf): self.dirty = True self.fh.write(buf) self.pos += len(buf) self.size = max(self.pos, self.size) def close(self): self.fh.close() def unlink(self): os.unlink(self.fh.name) def __str__(self): return ('<%sCacheEntry, inode=%d, blockno=%d>' % ('Dirty ' if self.dirty else '', self.inode, self.blockno)) class CacheDict(OrderedDict): ''' An ordered dictionary designed to store CacheEntries. Attributes: :max_size: maximum size to which cache can grow :max_entries: maximum number of entries in cache :size: current size of all entries together ''' def __init__(self, max_size, max_entries): super().__init__() self.max_size = max_size self.max_entries = max_entries self.size = 0 def remove(self, key): '''Remove *key* from disk and cache, update size''' el = self.pop(key) el.close() el.unlink() self.size -= el.size def is_full(self): return (self.size > self.max_size or len(self) > self.max_entries) class BlockCache(object): """Provides access to file blocks This class manages access to file blocks. It takes care of creation, uploading, downloading and deduplication. This class uses the llfuse global lock. Methods which release the lock have are marked as such in their docstring. 
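    Typical lifecycle (sketch only; the argument values are made up)::

        cache = BlockCache(backend_pool, db, cachedir='/var/cache/s3ql',
                           max_size=100 * 1024 ** 2)
        cache.init(threads=8)
        with cache.get(inode, blockno) as fh:
            fh.write(data)
        cache.destroy()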
Attributes ---------- :path: where cached data is stored :cache: ordered dictionary of cache entries :mlock: MultiLock to synchronize access to objects and cache entries :in_transit: set of cache entries that are currently being uploaded :to_upload: distributes objects to upload to worker threads :to_remove: distributes objects to remove to worker threads :transfer_complete: signals completion of an object upload :upload_threads: list of threads processing upload queue :removal_threads: list of threads processing removal queue :db: Handle to SQL DB :backend_pool: BackendPool instance """ def __init__(self, backend_pool, db, cachedir, max_size, max_entries=768): log.debug('Initializing') self.path = cachedir self.db = db self.backend_pool = backend_pool self.cache = CacheDict(max_size, max_entries) self.mlock = MultiLock() self.in_transit = set() self.upload_threads = [] self.removal_threads = [] self.transfer_completed = SimpleEvent() # Will be initialized once threads are available self.to_upload = None self.to_remove = None if not os.path.exists(self.path): os.mkdir(self.path) # Initialized fromt the outside to prevent cyclic dependency self.fs = None def __len__(self): '''Get number of objects in cache''' return len(self.cache) def init(self, threads=1): '''Start worker threads''' self.to_upload = Distributor() for _ in range(threads): t = threading.Thread(target=self._upload_loop) t.start() self.upload_threads.append(t) self.to_remove = Queue(1000) for _ in range(10): t = threading.Thread(target=self._removal_loop) t.daemon = True # interruption will do no permanent harm t.start() self.removal_threads.append(t) def _lock_obj(self, obj_id, release_global=False): '''Acquire lock on *obj*id*''' if release_global: with lock_released: self.mlock.acquire(obj_id) else: self.mlock.acquire(obj_id) def _unlock_obj(self, obj_id, release_global=False, noerror=False): '''Release lock on *obj*id*''' if release_global: with lock_released: self.mlock.release(obj_id, noerror=noerror) else: self.mlock.release(obj_id, noerror=noerror) def _lock_entry(self, inode, blockno, release_global=False): '''Acquire lock on cache entry''' if release_global: with lock_released: self.mlock.acquire((inode, blockno)) else: self.mlock.acquire((inode, blockno)) def _unlock_entry(self, inode, blockno, release_global=False, noerror=False): '''Release lock on cache entry''' if release_global: with lock_released: self.mlock.release((inode, blockno), noerror=noerror) else: self.mlock.release((inode, blockno), noerror=noerror) def destroy(self): '''Clean up and stop worker threads This method should be called without the global lock held. ''' log.debug('Dropping cache...') try: with lock: self.drop() except NoWorkerThreads: log.error('Unable to drop cache, no upload threads left alive') # Signal termination to worker threads. If some of them # terminated prematurely, continue gracefully. 
log.debug('Signaling upload threads...') try: for t in self.upload_threads: self._queue_upload(QuitSentinel) except NoWorkerThreads: pass log.debug('Signaling removal threads...') try: for t in self.removal_threads: self._queue_removal(QuitSentinel) except NoWorkerThreads: pass log.debug('waiting for upload threads...') for t in self.upload_threads: t.join() log.debug('waiting for removal threads...') for t in self.removal_threads: t.join() assert len(self.in_transit) == 0 try: while self.to_remove.get_nowait() is QuitSentinel: pass except QueueEmpty: pass else: log.error('Could not complete object removals, ' 'no removal threads left alive') self.to_upload = None self.to_remove = None self.upload_threads = None self.removal_threads = None os.rmdir(self.path) log.debug('cleanup done.') def _upload_loop(self): '''Process upload queue''' while True: tmp = self.to_upload.get() if tmp is QuitSentinel: break self._do_upload(*tmp) def _do_upload(self, el, obj_id): '''Upload object''' def do_write(fh): el.seek(0) while True: buf = el.read(BUFSIZE) if not buf: break fh.write(buf) return fh try: with self.backend_pool() as backend: if log.isEnabledFor(logging.DEBUG): time_ = time.time() obj_size = backend.perform_write(do_write, 's3ql_data_%d' % obj_id).get_obj_size() time_ = time.time() - time_ rate = el.size / (1024 ** 2 * time_) if time_ != 0 else 0 log.debug('uploaded %d bytes in %.3f seconds, %.2f MiB/s', el.size, time_, rate) else: obj_size = backend.perform_write(do_write, 's3ql_data_%d' % obj_id).get_obj_size() with lock: self.db.execute('UPDATE objects SET size=? WHERE id=?', (obj_size, obj_id)) el.dirty = False except Exception as exc: log.debug('upload of %d failed: %s', obj_id, exc) # At this point we have to remove references to this storage object # from the objects and blocks table to prevent future cache elements # to be de-duplicated against this (missing) one. However, this may # already have happened during the attempted upload. The only way to # avoid this problem is to insert the hash into the blocks table # *after* successfull upload. But this would open a window without # de-duplication just to handle the special case of an upload # failing. # # On the other hand, we also want to prevent future deduplication # against this block: otherwise the next attempt to upload the same # cache element (by a different upload thread that has not # encountered problems yet) is guaranteed to link against the # non-existing block, and the data will be lost. # # Therefore, we just set the hash of the missing block to NULL, # and rely on fsck to pick up the pieces. Note that we cannot # delete the row from the blocks table, because the id will get # assigned to a new block, so the inode_blocks entries will # refer to incorrect data. # with lock: self.db.execute('UPDATE blocks SET hash=NULL WHERE obj_id=?', (obj_id,)) raise finally: self.in_transit.remove(el) self._unlock_obj(obj_id) self._unlock_entry(el.inode, el.blockno) self.transfer_completed.notify_all() def wait(self): '''Wait until an object has been uploaded If there are no objects in transit, return immediately. This method releases the global lock. ''' # Loop to avoid the race condition of a transfer terminating # between the call to transfer_in_progress() and wait(). while True: if not self.transfer_in_progress(): return with lock_released: if self.transfer_completed.wait(timeout=5): return def upload_if_dirty(self, el): '''Upload cache entry asynchronously This method releases the global lock. 
Return True if the object is actually scheduled for upload. ''' log.debug('started with %s', el) if el in self.in_transit: return True elif not el.dirty: return False # Calculate checksum with lock_released: self._lock_entry(el.inode, el.blockno) added_to_transit = False try: if el is not self.cache.get((el.inode, el.blockno), None): log.debug('%s got removed while waiting for lock', el) self._unlock_entry(el.inode, el.blockno) return False if el in self.in_transit: log.debug('%s already in transit', el) self._unlock_entry(el.inode, el.blockno) return True if not el.dirty: log.debug('no longer dirty, returning') self._unlock_entry(el.inode, el.blockno) return False log.debug('uploading %s..', el) self.in_transit.add(el) added_to_transit = True sha = hashlib.sha256() el.seek(0) while True: buf = el.read(BUFSIZE) if not buf: break sha.update(buf) hash_ = sha.digest() except: if added_to_transit: self.in_transit.discard(el) self._unlock_entry(el.inode, el.blockno) raise obj_lock_taken = False try: try: old_block_id = self.db.get_val('SELECT block_id FROM inode_blocks ' 'WHERE inode=? AND blockno=?', (el.inode, el.blockno)) except NoSuchRowError: old_block_id = None try: block_id = self.db.get_val('SELECT id FROM blocks WHERE hash=?', (hash_,)) # No block with same hash except NoSuchRowError: obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, -1)') log.debug('created new object %d', obj_id) block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, hash, size) ' 'VALUES(?,?,?,?)', (1, obj_id, hash_, el.size)) log.debug('created new block %d', block_id) log.debug('adding to upload queue') # Note: we must finish all db transactions before adding to # in_transit, otherwise commit() may return before all blocks # are available in db. self.db.execute('INSERT OR REPLACE INTO inode_blocks (block_id, inode, blockno) ' 'VALUES(?,?,?)', (block_id, el.inode, el.blockno)) with lock_released: self._lock_obj(obj_id) obj_lock_taken = True self._queue_upload((el, obj_id)) # There is a block with the same hash else: if old_block_id != block_id: log.debug('(re)linking to %d', block_id) self.db.execute('UPDATE blocks SET refcount=refcount+1 WHERE id=?', (block_id,)) self.db.execute('INSERT OR REPLACE INTO inode_blocks (block_id, inode, blockno) ' 'VALUES(?,?,?)', (block_id, el.inode, el.blockno)) el.dirty = False self.in_transit.remove(el) self._unlock_entry(el.inode, el.blockno, release_global=True) if old_block_id == block_id: log.debug('unchanged, block_id=%d', block_id) return False except: self.in_transit.discard(el) with lock_released: self._unlock_entry(el.inode, el.blockno, noerror=True) if obj_lock_taken: self._unlock_obj(obj_id) raise if old_block_id: self._deref_block(old_block_id) else: log.debug('no old block') return obj_lock_taken def _queue_upload(self, obj): '''Put *obj* into upload queue''' while True: if self.to_upload.put(obj, timeout=5): return for t in self.upload_threads: if t.is_alive(): break else: raise NoWorkerThreads('no upload threads') def _queue_removal(self, obj): '''Put *obj* into removal queue''' while True: try: self.to_remove.put(obj, timeout=5) except QueueFull: pass else: return for t in self.removal_threads: if t.is_alive(): break else: raise NoWorkerThreads('no removal threads') def _deref_block(self, block_id): '''Decrease reference count for *block_id* If reference counter drops to zero, remove block and propagate to objects table (possibly removing the referenced object as well). This method releases the global lock. 
''' refcount = self.db.get_val('SELECT refcount FROM blocks WHERE id=?', (block_id,)) if refcount > 1: log.debug('decreased refcount for block: %d', block_id) self.db.execute('UPDATE blocks SET refcount=refcount-1 WHERE id=?', (block_id,)) return log.debug('removing block %d', block_id) obj_id = self.db.get_val('SELECT obj_id FROM blocks WHERE id=?', (block_id,)) self.db.execute('DELETE FROM blocks WHERE id=?', (block_id,)) (refcount, size) = self.db.get_row('SELECT refcount, size FROM objects WHERE id=?', (obj_id,)) if refcount > 1: log.debug('decreased refcount for obj: %d', obj_id) self.db.execute('UPDATE objects SET refcount=refcount-1 WHERE id=?', (obj_id,)) return log.debug('removing object %d', obj_id) self.db.execute('DELETE FROM objects WHERE id=?', (obj_id,)) # Taking the lock ensures that the object is no longer in # transit itself. We can release it immediately after, because # the object is no longer in the database. log.debug('adding %d to removal queue', obj_id) with lock_released: self._lock_obj(obj_id) self._unlock_obj(obj_id) if size == -1: # size == -1 indicates that object has not yet been uploaded. # However, since we just acquired a lock on the object, we know # that the upload must have failed. Therefore, trying to remove # this object would just give us another error. return self._queue_removal(obj_id) def transfer_in_progress(self): '''Return True if there are any cache entries being uploaded''' return len(self.in_transit) > 0 def _removal_loop(self): '''Process removal queue''' # This method may look more complicated than necessary, but # it ensures that we read as many objects from the queue # as we can without blocking, and then hand them over to # the backend all at once. ids = [] while True: try: log.debug('reading from queue (blocking=%s)', len(ids)==0) tmp = self.to_remove.get(block=len(ids)==0) except QueueEmpty: tmp = FlushSentinel if tmp in (FlushSentinel,QuitSentinel) and ids: log.debug('removing: %s', ids) try: with self.backend_pool() as backend: backend.delete_multi(['s3ql_data_%d' % i for i in ids]) except NoSuchObject: log.warning('Backend lost object s3ql_data_%d' % ids.pop(0)) self.fs.failsafe = True ids = [] else: ids.append(tmp) if tmp is QuitSentinel: break @contextmanager def get(self, inode, blockno): """Get file handle for block `blockno` of `inode` This method releases the global lock. The managed block, however, is executed with the global lock acquired and MUST NOT release it. This ensures that only one thread is accessing a given block at a time. Note: if `get` and `remove` are called concurrently, then it is possible that a block that has been requested with `get` and passed to `remove` for deletion will not be deleted. """ #log.debug('started with %d, %d', inode, blockno) if self.cache.is_full(): self.expire() self._lock_entry(inode, blockno, release_global=True) try: el = self._get_entry(inode, blockno) el.last_access = time.time() oldsize = el.size try: yield el finally: # Update cachesize. NOTE: this requires that at most one # thread has access to a cache entry at any time. self.cache.size += el.size - oldsize finally: self._unlock_entry(inode, blockno, release_global=True) #log.debug('finished') def _get_entry(self, inode, blockno): '''Get cache entry for `blockno` of `inode` Assume that cache entry lock has been acquired. 
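        Example call pattern (sketch, mirroring `get`)::

            self._lock_entry(inode, blockno, release_global=True)
            try:
                el = self._get_entry(inode, blockno)
            finally:
                self._unlock_entry(inode, blockno, release_global=True)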
''' log.debug('started with %d, %d', inode, blockno) try: el = self.cache[(inode, blockno)] # Not in cache except KeyError: filename = os.path.join(self.path, '%d-%d' % (inode, blockno)) try: block_id = self.db.get_val('SELECT block_id FROM inode_blocks ' 'WHERE inode=? AND blockno=?', (inode, blockno)) # No corresponding object except NoSuchRowError: log.debug('creating new block') el = CacheEntry(inode, blockno, filename) self.cache[(inode, blockno)] = el return el # Need to download corresponding object obj_id = self.db.get_val('SELECT obj_id FROM blocks WHERE id=?', (block_id,)) log.debug('downloading object %d..', obj_id) el = CacheEntry(inode, blockno, filename) try: def do_read(fh): el.seek(0) el.truncate() shutil.copyfileobj(fh, el, BUFSIZE) with lock_released: # Lock object. This ensures that we wait until the object # is uploaded. We don't have to worry about deletion, because # as long as the current cache entry exists, there will always be # a reference to the object (and we already have a lock on the # cache entry). self._lock_obj(obj_id) self._unlock_obj(obj_id) with self.backend_pool() as backend: backend.perform_read(do_read, 's3ql_data_%d' % obj_id) except: el.unlink() el.close() raise self.cache[(inode, blockno)] = el el.dirty = False # (writing will have set dirty flag) self.cache.size += el.size # In Cache else: #log.debug('in cache') self.cache.move_to_end((inode, blockno), last=True) # move to head return el def expire(self): """Perform cache expiry This method releases the global lock. """ # Note that we have to make sure that the cache entry is written into # the database before we remove it from the cache! log.debug('started') while True: need_size = self.cache.size - self.cache.max_size need_entries = len(self.cache) - self.cache.max_entries if need_size <= 0 and need_entries <= 0: break # Need to make copy, since we aren't allowed to change dict while # iterating through it. Look at the comments in CommitThread.run() # (mount.py) for an estimate of the resulting performance hit. sth_in_transit = False for el in list(self.cache.values()): if need_size <= 0 and need_entries <= 0: break need_entries -= 1 need_size -= el.size if self.upload_if_dirty(el): # Releases global lock sth_in_transit = True continue self._lock_entry(el.inode, el.blockno, release_global=True) try: # May have changed while we were waiting for lock if el is not self.cache.get((el.inode, el.blockno), None): log.debug('%s removed while waiting for lock', el) continue if el.dirty: log.debug('%s got dirty while waiting for lock', el) continue log.debug('removing %s from cache', el) self.cache.remove((el.inode, el.blockno)) finally: self._unlock_entry(el.inode, el.blockno, release_global=True) if sth_in_transit: log.debug('waiting for transfer threads..') self.wait() # Releases global lock log.debug('finished') def remove(self, inode, start_no, end_no=None): """Remove blocks for `inode` If `end_no` is not specified, remove just the `start_no` block. Otherwise removes all blocks from `start_no` to, but not including, `end_no`. This method releases the global lock. """ log.debug('started with %d, %d, %s', inode, start_no, end_no) if end_no is None: end_no = start_no + 1 for blockno in range(start_no, end_no): self._lock_entry(inode, blockno, release_global=True) try: if (inode, blockno) in self.cache: log.debug('removing from cache') self.cache.remove((inode, blockno)) try: block_id = self.db.get_val('SELECT block_id FROM inode_blocks ' 'WHERE inode=? 
AND blockno=?', (inode, blockno)) except NoSuchRowError: log.debug('block not in db') continue # Detach inode from block self.db.execute('DELETE FROM inode_blocks WHERE inode=? AND blockno=?', (inode, blockno)) finally: self._unlock_entry(inode, blockno, release_global=True) # Decrease block refcount self._deref_block(block_id) log.debug('finished') def flush_local(self, inode, blockno): """Flush buffers for given block""" try: el = self.cache[(inode, blockno)] except KeyError: return el.flush() def start_flush(self): """Initiate upload of all dirty blocks When the method returns, all blocks have been registered in the database (but the actual uploads may still be in progress). This method releases the global lock. """ # Need to make copy, since dict() may change while global lock is # released. Look at the comments in CommitThread.run() (mount.py) for an # estimate of the performance impact. for el in list(self.cache.values()): self.upload_if_dirty(el) # Releases global lock def flush(self): """Upload all dirty blocks This method releases the global lock. """ log.debug('started') while True: sth_in_transit = False # Need to make copy, since dict() may change while global lock is # released. Look at the comments in CommitThread.run() (mount.py) # for an estimate of the performance impact. for el in list(self.cache.values()): if self.upload_if_dirty(el): # Releases global lock sth_in_transit = True if not sth_in_transit: break log.debug('waiting for transfer threads..') self.wait() # Releases global lock log.debug('finished') def drop(self): """Drop cache This method releases the global lock. """ log.debug('started') bak = self.cache.max_entries self.cache.max_entries = 0 self.expire() # Releases global lock self.cache.max_entries = bak log.debug('finished') def get_usage(self): '''Get cache usage information. Return a tuple of * cache entries * cache size * dirty cache entries * dirty cache size * pending removals This method is O(n) in the number of cache entries. ''' used = self.cache.size dirty_size = 0 dirty_cnt = 0 for el in self.cache.values(): if el.dirty: dirty_size += el.size dirty_cnt += 1 if self.to_remove is None: remove_cnt = 0 else: # This is an estimate which may be negative remove_cnt = max(0, self.to_remove.qsize()) return (len(self.cache), used, dirty_cnt, dirty_size, remove_cnt) def __del__(self): # break reference loop self.fs = None if len(self.cache) == 0: return # Force execution of sys.excepthook (exceptions raised # by __del__ are ignored) try: raise RuntimeError("BlockManager instance was destroyed without " "calling destroy()!") except RuntimeError: exc_info = sys.exc_info() sys.excepthook(*exc_info) s3ql-2.26/src/s3ql/oauth_client.py0000644000175000017500000000610312615000156020504 0ustar nikrationikratio00000000000000''' oauth_client.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from .logging import logging, setup_logging, QuietError from .parse_args import ArgumentParser import sys import textwrap import requests import time log = logging.getLogger(__name__) # S3QL client id and client secret for Google APIs. # Don't get your hopes up, this isn't truly secret. 
CLIENT_ID = '381875429714-6pch5vnnmqab454c68pkt8ugm86ef95v.apps.googleusercontent.com' CLIENT_SECRET = 'HGl8fJeVML-gZ-1HSZRNZPz_' def parse_args(args): '''Parse command line''' parser = ArgumentParser( description=textwrap.dedent('''\ Obtain OAuth2 refresh token for Google Storage ''')) parser.add_debug() parser.add_quiet() parser.add_version() return parser.parse_args(args) def _log_response(r): '''Log server response''' if not log.isEnabledFor(logging.DEBUG): return s = [ 'Server response:', '%03d %s' % (r.status_code, r.reason) ] for tup in r.headers.items(): s.append('%s: %s' % tup) s.append('') s.append(r.text) log.debug('\n'.join(s)) def _parse_response(r): _log_response(r) if r.status_code != requests.codes.ok: raise QuietError('Connection failed with: %d %s' % (r.status_code, r.reason)) return r.json() def main(args=None): if args is None: args = sys.argv[1:] options = parse_args(args) setup_logging(options) cli = requests.Session() # We need full control in order to be able to update metadata # cf. https://stackoverflow.com/questions/24718787 r = cli.post('https://accounts.google.com/o/oauth2/device/code', data={ 'client_id': CLIENT_ID, 'scope': 'https://www.googleapis.com/auth/devstorage.full_control' }, verify=True, allow_redirects=False, timeout=20) req_json = _parse_response(r) print(textwrap.fill('Please open %s in your browser and enter the following ' 'user code: %s' % (req_json['verification_url'], req_json['user_code']))) while True: log.debug('polling..') time.sleep(req_json['interval']) r = cli.post('https://accounts.google.com/o/oauth2/token', data={ 'client_id': CLIENT_ID, 'client_secret': CLIENT_SECRET, 'code': req_json['device_code'], 'grant_type': 'http://oauth.net/grant_type/device/1.0' }, verify=True, allow_redirects=False, timeout=20) resp_json = _parse_response(r) r.close() if 'error' in resp_json: if resp_json['error'] == 'authorization_pending': continue else: raise QuietError('Authentication failed: ' + resp_json['error']) else: break print('Success. Your refresh token is:\n', resp_json['refresh_token']) s3ql-2.26/src/s3ql/fs.py0000644000175000017500000013266413223730045016455 0ustar nikrationikratio00000000000000''' fs.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. ''' from .logging import logging from . import deltadump, CTRL_NAME, CTRL_INODE from .backends.common import NoSuchObject, CorruptedObjectError from .common import get_path, parse_literal, time_ns from .database import NoSuchRowError from .inode_cache import OutOfInodesError from io import BytesIO from llfuse import FUSEError import collections import errno import llfuse import math import os import stat import struct import time # We work in bytes CTRL_NAME = CTRL_NAME.encode('us-ascii') # standard logger for this module log = logging.getLogger(__name__) # For long requests, we force a GIL release in the following interval GIL_RELEASE_INTERVAL = 0.05 # ACL_ERRNO is the error code returned for requests that try # to modify or access extendeda attributes associated with ACL. # Since we currently don't know how to keep these in sync # with permission bits, we cannot support ACLs despite having # full support for extended attributes. # # What errno to use for ACL_ERRNO is a bit tricky. acl_set_fd(3) # returns ENOTSUP if the file system does not support ACLs. 
However, # this function uses the setxattr() syscall internally, and # setxattr(3) states that ENOTSUP means that the file system does not # support extended attributes at all. A test with btrfs mounted with # -o noacl shows that the actual errno returned by setxattr() is # EOPNOTSUPP. Also, some Python versions do not know about # errno.ENOTSUPP and errno(3) says that on Linux, EOPNOTSUPP and ENOTSUP # have the same value (despite this violating POSIX). # # All in all, the situation seems complicated, so we try to use # EOPNOTSUPP with a fallback on ENOTSUP just in case. if not hasattr(errno, 'EOPNOTSUPP'): ACL_ERRNO = errno.ENOTSUP else: ACL_ERRNO = errno.EOPNOTSUPP class Operations(llfuse.Operations): """A full-featured file system for online data storage This class implements low-level FUSE operations and is meant to be passed to llfuse.init(). The ``access`` method of this class always gives full access, independent of file permissions. If the FUSE library is initialized with ``allow_other`` or ``allow_root``, the ``default_permissions`` option should therefore always be passed as well. Attributes: ----------- :cache: Holds information about cached blocks :inode_cache: A cache for the attributes of the currently opened inodes. :open_inodes: dict of currently opened inodes. This is used to not remove the blocks of unlinked inodes that are still open. :upload_event: If set, triggers a metadata upload :failsafe: Set when backend problems are encountered. In that case, fs only allows read access. :broken_blocks: Caches information about corrupted blocks to avoid repeated (pointless) attempts to retrieve them. This attribute is a dict (indexed by inodes) of sets of block indices. Broken blocks are removed from the cache when an inode is forgotten. Multithreading -------------- All methods are reentrant and may release the global lock while they are running. Directory Entry Types ---------------------- S3QL is quite agnostic when it comes to directory entry types. Every directory entry can contain other entries *and* have a associated data, size, link target and device number. However, S3QL makes some provisions for users relying on unlink()/rmdir() to fail for a directory/file. For that, it explicitly checks the st_mode attribute. """ def __init__(self, block_cache, db, max_obj_size, inode_cache, upload_event=None): super().__init__() self.inodes = inode_cache self.db = db self.upload_event = upload_event self.open_inodes = collections.defaultdict(lambda: 0) self.max_obj_size = max_obj_size self.cache = block_cache self.failsafe = False self.broken_blocks = collections.defaultdict(set) # Root inode is always open self.open_inodes[llfuse.ROOT_INODE] += 1 def destroy(self): self.forget(list(self.open_inodes.items())) self.inodes.destroy() def lookup(self, id_p, name, ctx): return self._lookup(id_p, name, ctx).entry_attributes() def _lookup(self, id_p, name, ctx): log.debug('started with %d, %r', id_p, name) if name == CTRL_NAME: inode = self.inodes[CTRL_INODE] # Make sure the control file is only writable by the user # who mounted the file system (but don't mark inode as dirty) object.__setattr__(inode, 'uid', os.getuid()) object.__setattr__(inode, 'gid', os.getgid()) elif name == '.': inode = self.inodes[id_p] elif name == '..': id_ = self.db.get_val("SELECT parent_inode FROM contents WHERE inode=?", (id_p,)) inode = self.inodes[id_] else: try: id_ = self.db.get_val("SELECT inode FROM contents_v WHERE name=? 
AND parent_inode=?", (name, id_p)) except NoSuchRowError: raise llfuse.FUSEError(errno.ENOENT) inode = self.inodes[id_] self.open_inodes[inode.id] += 1 return inode def getattr(self, id_, ctx): log.debug('started with %d', id_) if id_ == CTRL_INODE: # Make sure the control file is only writable by the user # who mounted the file system (but don't mark inode as dirty) inode = self.inodes[CTRL_INODE] object.__setattr__(inode, 'uid', os.getuid()) object.__setattr__(inode, 'gid', os.getgid()) return inode.entry_attributes() return self.inodes[id_].entry_attributes() def readlink(self, id_, ctx): log.debug('started with %d', id_) now_ns = time_ns() inode = self.inodes[id_] if inode.atime_ns < inode.ctime_ns or inode.atime_ns < inode.mtime_ns: inode.atime_ns = now_ns try: return self.db.get_val("SELECT target FROM symlink_targets WHERE inode=?", (id_,)) except NoSuchRowError: log.warning('Inode does not have symlink target: %d', id_) raise FUSEError(errno.EINVAL) def opendir(self, id_, ctx): log.debug('started with %d', id_) return id_ def readdir(self, id_, off): log.debug('started with %d, %d', id_, off) if off == 0: off = -1 inode = self.inodes[id_] if inode.atime_ns < inode.ctime_ns or inode.atime_ns < inode.mtime_ns: inode.atime_ns = time_ns() # NFS treats offsets 1 and 2 special, so we have to exclude # them. with self.db.query("SELECT name_id, name, inode FROM contents_v " 'WHERE parent_inode=? AND name_id > ? ORDER BY name_id', (id_, off-3)) as res: for (next_, name, cid_) in res: yield (name, self.inodes[cid_].entry_attributes(), next_+3) def getxattr(self, id_, name, ctx): log.debug('started with %d, %r', id_, name) # Handle S3QL commands if id_ == CTRL_INODE: if name == b's3ql_pid?': return ('%d' % os.getpid()).encode() elif name == b's3qlstat': return self.extstat() raise llfuse.FUSEError(errno.EINVAL) # http://code.google.com/p/s3ql/issues/detail?id=385 elif name in (b'system.posix_acl_access', b'system.posix_acl_default'): raise FUSEError(ACL_ERRNO) else: try: value = self.db.get_val('SELECT value FROM ext_attributes_v WHERE inode=? 
AND name=?', (id_, name)) except NoSuchRowError: raise llfuse.FUSEError(llfuse.ENOATTR) return value def listxattr(self, id_, ctx): log.debug('started with %d', id_) names = list() with self.db.query('SELECT name FROM ext_attributes_v WHERE inode=?', (id_,)) as res: for (name,) in res: names.append(name) return names def setxattr(self, id_, name, value, ctx): log.debug('started with %d, %r, %r', id_, name, value) # Handle S3QL commands if id_ == CTRL_INODE: if name == b's3ql_flushcache!': self.inodes.flush() self.cache.flush() elif name == b's3ql_dropcache!': self.inodes.drop() self.cache.drop() elif name == b'copy': try: tup = parse_literal(value, (int, int)) except ValueError: log.warning('Received malformed command via control inode') raise FUSEError.EINVAL() self.copy_tree(*tup) elif name == b'upload-meta': if self.upload_event is not None: self.inodes.flush() self.upload_event.set() else: raise llfuse.FUSEError(errno.ENOTTY) elif name == b'lock': try: id_ = parse_literal(value, int) except ValueError: log.warning('Received malformed command via control inode') raise FUSEError.EINVAL() self.lock_tree(id_) elif name == b'rmtree': try: tup = parse_literal(value, (int, bytes)) except ValueError: log.warning('Received malformed command via control inode') raise FUSEError.EINVAL() self.remove_tree(*tup) elif name == b'logging': try: (lvl, modules)= parse_literal(value, (int, str)) except (ValueError, KeyError): log.warning('Received malformed command via control inode') raise FUSEError.EINVAL() update_logging(lvl, modules.split(',') if modules else None) elif name == b'cachesize': try: self.cache.cache.max_size = parse_literal(value, int) except ValueError: log.warning('Received malformed command via control inode') raise FUSEError.EINVAL() log.debug('updated cache size to %d bytes', self.cache.cache.max_size) else: log.warning('Received unknown command via control inode') raise llfuse.FUSEError(errno.EINVAL) # http://code.google.com/p/s3ql/issues/detail?id=385 elif name in (b'system.posix_acl_access', b'system.posix_acl_default'): raise FUSEError(ACL_ERRNO) else: if self.failsafe or self.inodes[id_].locked: raise FUSEError(errno.EPERM) if len(value) > deltadump.MAX_BLOB_SIZE: raise FUSEError(errno.EINVAL) self.db.execute('INSERT OR REPLACE INTO ext_attributes (inode, name_id, value) ' 'VALUES(?, ?, ?)', (id_, self._add_name(name), value)) self.inodes[id_].ctime_ns = time_ns() def removexattr(self, id_, name, ctx): log.debug('started with %d, %r', id_, name) if self.failsafe or self.inodes[id_].locked: raise FUSEError(errno.EPERM) try: name_id = self._del_name(name) except NoSuchRowError: raise llfuse.FUSEError(llfuse.ENOATTR) changes = self.db.execute('DELETE FROM ext_attributes WHERE inode=? AND name_id=?', (id_, name_id)) if changes == 0: raise llfuse.FUSEError(llfuse.ENOATTR) self.inodes[id_].ctime_ns = time_ns() def lock_tree(self, id0): '''Lock directory tree''' if self.failsafe: raise FUSEError(errno.EPERM) log.debug('started with %d', id0) queue = [ id0 ] self.inodes[id0].locked = True processed = 0 # Number of steps since last GIL release stamp = time_ns() # Time of last GIL release gil_step = 250 # Approx. 
number of steps between GIL releases while True: id_p = queue.pop() with self.db.query('SELECT inode FROM contents WHERE parent_inode=?', (id_p,)) as res: for (id_,) in res: self.inodes[id_].locked = True processed += 1 if self.db.has_val('SELECT 1 FROM contents WHERE parent_inode=?', (id_,)): queue.append(id_) if not queue: break if processed > gil_step: dt = time.time() - stamp gil_step = max(int(gil_step * GIL_RELEASE_INTERVAL / dt), 250) log.debug('Adjusting gil_step to %d', gil_step) processed = 0 llfuse.lock.yield_(100) log.debug('re-acquired lock') stamp = time.time() log.debug('finished') def remove_tree(self, id_p0, name0): '''Remove directory tree''' if self.failsafe: raise FUSEError(errno.EPERM) log.debug('started with %d, %s', id_p0, name0) if self.inodes[id_p0].locked: raise FUSEError(errno.EPERM) id0 = self._lookup(id_p0, name0, ctx=None).id queue = [ id0 ] # Directories that we still need to delete processed = 0 # Number of steps since last GIL release stamp = time.time() # Time of last GIL release gil_step = 250 # Approx. number of steps between GIL releases while queue: # For every directory found_subdirs = False # Does current directory have subdirectories? id_p = queue.pop() if id_p in self.open_inodes: inval_entry = lambda x: llfuse.invalidate_entry(id_p, x) else: inval_entry = lambda x: None with self.db.query('SELECT name_id, inode FROM contents WHERE ' 'parent_inode=?', (id_p,)) as res: for (name_id, id_) in res: if self.db.has_val('SELECT 1 FROM contents WHERE parent_inode=?', (id_,)): if not found_subdirs: # When current directory has subdirectories, we must reinsert # it into queue found_subdirs = True queue.append(id_p) queue.append(id_) else: name = self.db.get_val("SELECT name FROM names WHERE id=?", (name_id,)) inval_entry(name) self._remove(id_p, name, id_, force=True) processed += 1 if processed > gil_step: # Also reinsert current directory if we need to yield to other threads if not found_subdirs: queue.append(id_p) break if processed > gil_step: dt = time.time() - stamp gil_step = max(int(gil_step * GIL_RELEASE_INTERVAL / dt), 250) log.debug('Adjusting gil_step to %d and yielding', gil_step) processed = 0 llfuse.lock.yield_(100) log.debug('re-acquired lock') stamp = time.time() if id_p0 in self.open_inodes: log.debug('invalidate_entry(%d, %r)', id_p0, name0) llfuse.invalidate_entry(id_p0, name0) self._remove(id_p0, name0, id0, force=True) self.forget([(id0, 1)]) log.debug('finished') def copy_tree(self, src_id, target_id): '''Efficiently copy directory tree''' if self.failsafe: raise FUSEError(errno.EPERM) log.debug('started with %d, %d', src_id, target_id) # To avoid lookups and make code tidier make_inode = self.inodes.create_inode db = self.db # First we make sure that all blocks are in the database self.cache.start_flush() # Copy target attributes # These come from setxattr, so they may have been deleted # without being in open_inodes try: src_inode = self.inodes[src_id] target_inode = self.inodes[target_id] except KeyError: raise FUSEError(errno.ENOENT) for attr in ('atime_ns', 'ctime_ns', 'mtime_ns', 'mode', 'uid', 'gid'): setattr(target_inode, attr, getattr(src_inode, attr)) # We first replicate into a dummy inode, so that we # need to invalidate only once. now_ns = time_ns() tmp = make_inode(mtime_ns=now_ns, ctime_ns=now_ns, atime_ns=now_ns, uid=0, gid=0, mode=0, refcount=0) queue = [ (src_id, tmp.id, 0) ] id_cache = dict() processed = 0 # Number of steps since last GIL release stamp = time.time() # Time of last GIL release gil_step = 250 # Approx. 
number of steps between GIL releases while queue: (src_id, target_id, off) = queue.pop() log.debug('Processing directory (%d, %d, %d)', src_id, target_id, off) with db.query('SELECT name_id, inode FROM contents WHERE parent_inode=? ' 'AND name_id > ? ORDER BY name_id', (src_id, off)) as res: for (name_id, id_) in res: if id_ not in id_cache: inode = self.inodes[id_] try: inode_new = make_inode(refcount=1, mode=inode.mode, size=inode.size, uid=inode.uid, gid=inode.gid, mtime_ns=inode.mtime_ns, atime_ns=inode.atime_ns, ctime_ns=inode.ctime_ns, rdev=inode.rdev) except OutOfInodesError: log.warning('Could not find a free inode') raise FUSEError(errno.ENOSPC) id_new = inode_new.id if inode.refcount != 1: id_cache[id_] = id_new db.execute('INSERT INTO symlink_targets (inode, target) ' 'SELECT ?, target FROM symlink_targets WHERE inode=?', (id_new, id_)) db.execute('INSERT INTO ext_attributes (inode, name_id, value) ' 'SELECT ?, name_id, value FROM ext_attributes WHERE inode=?', (id_new, id_)) db.execute('UPDATE names SET refcount = refcount + 1 WHERE ' 'id IN (SELECT name_id FROM ext_attributes WHERE inode=?)', (id_,)) processed += db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) ' 'SELECT ?, blockno, block_id FROM inode_blocks ' 'WHERE inode=?', (id_new, id_)) db.execute('REPLACE INTO blocks (id, hash, refcount, size, obj_id) ' 'SELECT id, hash, refcount+COUNT(id), size, obj_id ' 'FROM inode_blocks JOIN blocks ON block_id = id ' 'WHERE inode = ? GROUP BY id', (id_new,)) if db.has_val('SELECT 1 FROM contents WHERE parent_inode=?', (id_,)): queue.append((id_, id_new, 0)) else: id_new = id_cache[id_] self.inodes[id_new].refcount += 1 db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?, ?, ?)', (name_id, id_new, target_id)) db.execute('UPDATE names SET refcount=refcount+1 WHERE id=?', (name_id,)) processed += 1 if processed > gil_step: log.debug('Requeueing (%d, %d, %d) to yield lock', src_id, target_id, name_id) queue.append((src_id, target_id, name_id)) break if processed > gil_step: dt = time.time() - stamp gil_step = max(int(gil_step * GIL_RELEASE_INTERVAL / dt), 250) log.debug('Adjusting gil_step to %d and yielding', gil_step) processed = 0 llfuse.lock.yield_(100) log.debug('re-acquired lock') stamp = time.time() # Make replication visible self.db.execute('UPDATE contents SET parent_inode=? WHERE parent_inode=?', (target_inode.id, tmp.id)) del self.inodes[tmp.id] llfuse.invalidate_inode(target_inode.id) log.debug('finished') def unlink(self, id_p, name, ctx): log.debug('started with %d, %r', id_p, name) if self.failsafe: raise FUSEError(errno.EPERM) inode = self._lookup(id_p, name, ctx) if stat.S_ISDIR(inode.mode): raise llfuse.FUSEError(errno.EISDIR) self._remove(id_p, name, inode.id) self.forget([(inode.id, 1)]) def rmdir(self, id_p, name, ctx): log.debug('started with %d, %r', id_p, name) if self.failsafe: raise FUSEError(errno.EPERM) inode = self._lookup(id_p, name, ctx) if self.inodes[id_p].locked: raise FUSEError(errno.EPERM) if not stat.S_ISDIR(inode.mode): raise llfuse.FUSEError(errno.ENOTDIR) self._remove(id_p, name, inode.id) self.forget([(inode.id, 1)]) def _remove(self, id_p, name, id_, force=False): '''Remove entry `name` with parent inode `id_p` `id_` must be the inode of `name`. If `force` is True, then the `locked` attribute is ignored. This method releases the global lock. 
''' log.debug('started with %d, %r', id_p, name) now_ns = time_ns() # Check that there are no child entries if self.db.has_val("SELECT 1 FROM contents WHERE parent_inode=?", (id_,)): log.debug("Attempted to remove entry with children: %s", get_path(id_p, self.db, name)) raise llfuse.FUSEError(errno.ENOTEMPTY) if self.inodes[id_p].locked and not force: raise FUSEError(errno.EPERM) name_id = self._del_name(name) self.db.execute("DELETE FROM contents WHERE name_id=? AND parent_inode=?", (name_id, id_p)) inode = self.inodes[id_] inode.refcount -= 1 inode.ctime_ns = now_ns inode_p = self.inodes[id_p] inode_p.mtime_ns = now_ns inode_p.ctime_ns = now_ns if inode.refcount == 0 and id_ not in self.open_inodes: log.debug('removing from cache') self.cache.remove(id_, 0, int(math.ceil(inode.size / self.max_obj_size))) # Since the inode is not open, it's not possible that new blocks # get created at this point and we can safely delete the inode self.db.execute('UPDATE names SET refcount = refcount - 1 WHERE ' 'id IN (SELECT name_id FROM ext_attributes WHERE inode=?)', (id_,)) self.db.execute('DELETE FROM names WHERE refcount=0 AND ' 'id IN (SELECT name_id FROM ext_attributes WHERE inode=?)', (id_,)) self.db.execute('DELETE FROM ext_attributes WHERE inode=?', (id_,)) self.db.execute('DELETE FROM symlink_targets WHERE inode=?', (id_,)) del self.inodes[id_] log.debug('finished') def symlink(self, id_p, name, target, ctx): log.debug('started with %d, %r, %r', id_p, name, target) if self.failsafe: raise FUSEError(errno.EPERM) mode = (stat.S_IFLNK | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH) # Unix semantics require the size of a symlink to be the length # of its target. Therefore, we create symlink directory entries # with this size. If the kernel ever learns to open and read # symlinks directly, it will read the corresponding number of \0 # bytes. inode = self._create(id_p, name, mode, ctx, size=len(target)) self.db.execute('INSERT INTO symlink_targets (inode, target) VALUES(?,?)', (inode.id, target)) self.open_inodes[inode.id] += 1 return inode.entry_attributes() def rename(self, id_p_old, name_old, id_p_new, name_new, ctx): log.debug('started with %d, %r, %d, %r', id_p_old, name_old, id_p_new, name_new) if name_new == CTRL_NAME or name_old == CTRL_NAME: log.warning('Attempted to rename s3ql control file (%s -> %s)', get_path(id_p_old, self.db, name_old), get_path(id_p_new, self.db, name_new)) raise llfuse.FUSEError(errno.EACCES) if (self.failsafe or self.inodes[id_p_old].locked or self.inodes[id_p_new].locked): raise FUSEError(errno.EPERM) inode_old = self._lookup(id_p_old, name_old, ctx) try: inode_new = self._lookup(id_p_new, name_new, ctx) except llfuse.FUSEError as exc: if exc.errno != errno.ENOENT: raise else: target_exists = False else: target_exists = True if target_exists: self._replace(id_p_old, name_old, id_p_new, name_new, inode_old.id, inode_new.id) self.forget([(inode_old.id, 1), (inode_new.id, 1)]) else: self._rename(id_p_old, name_old, id_p_new, name_new) self.forget([(inode_old.id, 1)]) def _add_name(self, name): '''Get id for *name* and increase refcount Name is inserted in table if it does not yet exist. 
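
        Illustration (hypothetical): if a second directory entry is created
        with a name that already exists in the *names* table, this method
        only increments the refcount of the existing row; _del_name()
        decrements it again and removes the row once the count reaches zero.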
''' try: name_id = self.db.get_val('SELECT id FROM names WHERE name=?', (name,)) except NoSuchRowError: name_id = self.db.rowid('INSERT INTO names (name, refcount) VALUES(?,?)', (name, 1)) else: self.db.execute('UPDATE names SET refcount=refcount+1 WHERE id=?', (name_id,)) return name_id def _del_name(self, name): '''Decrease refcount for *name* Name is removed from table if refcount drops to zero. Returns the (possibly former) id of the name. ''' (name_id, refcount) = self.db.get_row('SELECT id, refcount FROM names WHERE name=?', (name,)) if refcount > 1: self.db.execute('UPDATE names SET refcount=refcount-1 WHERE id=?', (name_id,)) else: self.db.execute('DELETE FROM names WHERE id=?', (name_id,)) return name_id def _rename(self, id_p_old, name_old, id_p_new, name_new): now_ns = time_ns() name_id_new = self._add_name(name_new) name_id_old = self._del_name(name_old) self.db.execute("UPDATE contents SET name_id=?, parent_inode=? WHERE name_id=? " "AND parent_inode=?", (name_id_new, id_p_new, name_id_old, id_p_old)) inode_p_old = self.inodes[id_p_old] inode_p_new = self.inodes[id_p_new] inode_p_old.mtime_ns = now_ns inode_p_new.mtime_ns = now_ns inode_p_old.ctime_ns = now_ns inode_p_new.ctime_ns = now_ns def _replace(self, id_p_old, name_old, id_p_new, name_new, id_old, id_new): now_ns = time_ns() if self.db.has_val("SELECT 1 FROM contents WHERE parent_inode=?", (id_new,)): log.info("Attempted to overwrite entry with children: %s", get_path(id_p_new, self.db, name_new)) raise llfuse.FUSEError(errno.EINVAL) # Replace target name_id_new = self.db.get_val('SELECT id FROM names WHERE name=?', (name_new,)) self.db.execute("UPDATE contents SET inode=? WHERE name_id=? AND parent_inode=?", (id_old, name_id_new, id_p_new)) # Delete old name name_id_old = self._del_name(name_old) self.db.execute('DELETE FROM contents WHERE name_id=? 
AND parent_inode=?', (name_id_old, id_p_old)) inode_new = self.inodes[id_new] inode_new.refcount -= 1 inode_new.ctime_ns = now_ns inode_p_old = self.inodes[id_p_old] inode_p_old.ctime_ns = now_ns inode_p_old.mtime_ns = now_ns inode_p_new = self.inodes[id_p_new] inode_p_new.ctime_ns = now_ns inode_p_new.mtime_ns = now_ns if inode_new.refcount == 0 and id_new not in self.open_inodes: self.cache.remove(id_new, 0, int(math.ceil(inode_new.size / self.max_obj_size))) # Since the inode is not open, it's not possible that new blocks # get created at this point and we can safely delete the inode self.db.execute('UPDATE names SET refcount = refcount - 1 WHERE ' 'id IN (SELECT name_id FROM ext_attributes WHERE inode=?)', (id_new,)) self.db.execute('DELETE FROM names WHERE refcount=0') self.db.execute('DELETE FROM ext_attributes WHERE inode=?', (id_new,)) self.db.execute('DELETE FROM symlink_targets WHERE inode=?', (id_new,)) del self.inodes[id_new] def link(self, id_, new_id_p, new_name, ctx): log.debug('started with %d, %d, %r', id_, new_id_p, new_name) if new_name == CTRL_NAME or id_ == CTRL_INODE: log.warning('Attempted to create s3ql control file at %s', get_path(new_id_p, self.db, new_name)) raise llfuse.FUSEError(errno.EACCES) now_ns = time_ns() inode_p = self.inodes[new_id_p] if inode_p.refcount == 0: log.warning('Attempted to create entry %s with unlinked parent %d', new_name, new_id_p) raise FUSEError(errno.EINVAL) if self.failsafe or inode_p.locked: raise FUSEError(errno.EPERM) inode_p.ctime_ns = now_ns inode_p.mtime_ns = now_ns self.db.execute("INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)", (self._add_name(new_name), id_, new_id_p)) inode = self.inodes[id_] inode.refcount += 1 inode.ctime_ns = now_ns self.open_inodes[inode.id] += 1 return inode.entry_attributes() def setattr(self, id_, attr, fields, fh, ctx): """Handles FUSE setattr() requests""" inode = self.inodes[id_] if fh is not None: assert fh == id_ now_ns = time_ns() if self.failsafe or inode.locked: raise FUSEError(errno.EPERM) if fields.update_mode: inode.mode = attr.st_mode if fields.update_uid: inode.uid = attr.st_uid if fields.update_gid: inode.gid = attr.st_gid if fields.update_atime: inode.atime_ns = attr.st_atime_ns if fields.update_mtime: inode.mtime_ns = attr.st_mtime_ns inode.ctime_ns = now_ns # This needs to go last, because the call to cache.remove and cache.get # will release the global lock and may thus evict the *inode* object # from the cache. 
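        # Worked example, assuming a max_obj_size of 10 MiB (the value is
        # purely illustrative): truncating to st_size = 25 MiB gives
        # last_block = 2 and cutoff = 5 MiB, so blocks 3 and above are
        # removed and block 2 is truncated to 5 MiB. Truncating to exactly
        # 20 MiB gives last_block = 2 and cutoff = 0, so blocks 2 and above
        # are removed entirely.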
if fields.update_size: len_ = attr.st_size # Determine blocks to delete last_block = len_ // self.max_obj_size cutoff = len_ % self.max_obj_size total_blocks = int(math.ceil(inode.size / self.max_obj_size)) # Adjust file size inode.size = len_ # Delete blocks and truncate last one if required if cutoff == 0: self.cache.remove(id_, last_block, total_blocks) else: self.cache.remove(id_, last_block + 1, total_blocks) try: with self.cache.get(id_, last_block) as fh: fh.truncate(cutoff) except NoSuchObject as exc: log.warning('Backend lost block %d of inode %d (id %s)!', last_block, id_, exc.key) raise except CorruptedObjectError as exc: log.warning('Backend returned malformed data for block %d of inode %d (%s)', last_block, id_, exc) self.failsafe = True self.broken_blocks[id_].add(last_block) raise FUSEError(errno.EIO) return inode.entry_attributes() def mknod(self, id_p, name, mode, rdev, ctx): log.debug('started with %d, %r', id_p, name) if self.failsafe: raise FUSEError(errno.EPERM) inode = self._create(id_p, name, mode, ctx, rdev=rdev) self.open_inodes[inode.id] += 1 return inode.entry_attributes() def mkdir(self, id_p, name, mode, ctx): log.debug('started with %d, %r', id_p, name) if self.failsafe: raise FUSEError(errno.EPERM) inode = self._create(id_p, name, mode, ctx) self.open_inodes[inode.id] += 1 return inode.entry_attributes() def extstat(self): '''Return extended file system statistics''' log.debug('started') # Flush inode cache to get better estimate of total fs size self.inodes.flush() entries = self.db.get_val("SELECT COUNT(rowid) FROM contents") blocks = self.db.get_val("SELECT COUNT(id) FROM objects") inodes = self.db.get_val("SELECT COUNT(id) FROM inodes") fs_size = self.db.get_val('SELECT SUM(size) FROM inodes') or 0 dedup_size = self.db.get_val('SELECT SUM(size) FROM blocks') or 0 # Objects that are currently being uploaded/compressed have size == -1 compr_size = self.db.get_val('SELECT SUM(size) FROM objects ' 'WHERE size > 0') or 0 return struct.pack('QQQQQQQQQQQQ', entries, blocks, inodes, fs_size, dedup_size, compr_size, self.db.get_size(), *self.cache.get_usage()) def statfs(self, ctx): log.debug('started') stat_ = llfuse.StatvfsData() # Get number of blocks & inodes blocks = self.db.get_val("SELECT COUNT(id) FROM objects") inodes = self.db.get_val("SELECT COUNT(id) FROM inodes") size = self.db.get_val('SELECT SUM(size) FROM blocks') if size is None: size = 0 # file system block size, i.e. the minimum amount of space that can # be allocated. This doesn't make much sense for S3QL, so we just # return the average size of stored blocks. stat_.f_frsize = max(4096, size // blocks) if blocks != 0 else 4096 # This should actually be the "preferred block size for doing IO. However, `df` incorrectly # interprets f_blocks, f_bfree and f_bavail in terms of f_bsize rather than f_frsize as it # should (according to statvfs(3)), so the only way to return correct values *and* have df # print something sensible is to set f_bsize and f_frsize to the same value. (cf. # http://bugs.debian.org/671490) stat_.f_bsize = stat_.f_frsize # size of fs in f_frsize units. 
Since backend is supposed to be unlimited, # always return a half-full filesystem, but at least 1 TB) fs_size = max(2 * size, 1024 ** 4) stat_.f_blocks = fs_size // stat_.f_frsize stat_.f_bfree = (fs_size - size) // stat_.f_frsize stat_.f_bavail = stat_.f_bfree # free for non-root total_inodes = max(2 * inodes, 1000000) stat_.f_files = total_inodes stat_.f_ffree = total_inodes - inodes stat_.f_favail = total_inodes - inodes # free for non-root return stat_ def open(self, id_, flags, ctx): log.debug('started with %d', id_) if ((flags & os.O_RDWR or flags & os.O_WRONLY) and (self.failsafe or self.inodes[id_].locked)): raise FUSEError(errno.EPERM) return id_ def access(self, id_, mode, ctx): '''Check if requesting process has `mode` rights on `inode`. This method always returns true, since it should only be called when permission checking is disabled (if permission checking is enabled, the `default_permissions` FUSE option should be set). ''' # Yeah, could be a function and has unused arguments #pylint: disable=R0201,W0613 log.debug('started with %d', id_) return True def create(self, id_p, name, mode, flags, ctx): log.debug('started with id_p=%d, %s', id_p, name) if self.failsafe: raise FUSEError(errno.EPERM) try: id_ = self.db.get_val("SELECT inode FROM contents_v WHERE name=? AND parent_inode=?", (name, id_p)) except NoSuchRowError: inode = self._create(id_p, name, mode, ctx) else: self.open(id_, flags, ctx) inode = self.inodes[id_] self.open_inodes[inode.id] += 1 return (inode.id, inode.entry_attributes()) def _create(self, id_p, name, mode, ctx, rdev=0, size=0): if name == CTRL_NAME: log.warning('Attempted to create s3ql control file at %s', get_path(id_p, self.db, name)) raise FUSEError(errno.EACCES) now_ns = time_ns() inode_p = self.inodes[id_p] if inode_p.locked: raise FUSEError(errno.EPERM) if inode_p.refcount == 0: log.warning('Attempted to create entry %s with unlinked parent %d', name, id_p) raise FUSEError(errno.EINVAL) inode_p.mtime_ns = now_ns inode_p.ctime_ns = now_ns if inode_p.mode & stat.S_ISGID: gid = inode_p.gid if stat.S_ISDIR(mode): mode |= stat.S_ISGID else: gid = ctx.gid try: inode = self.inodes.create_inode(mtime_ns=now_ns, ctime_ns=now_ns, atime_ns=now_ns, uid=ctx.uid, gid=gid, mode=mode, refcount=1, rdev=rdev, size=size) except OutOfInodesError: log.warning('Could not find a free inode') raise FUSEError(errno.ENOSPC) self.db.execute("INSERT INTO contents(name_id, inode, parent_inode) VALUES(?,?,?)", (self._add_name(name), inode.id, id_p)) return inode def read(self, fh, offset, length): '''Read `size` bytes from `fh` at position `off` Unless EOF is reached, returns exactly `size` bytes. This method releases the global lock while it is running. ''' #log.debug('started with %d, %d, %d', fh, offset, length) buf = BytesIO() inode = self.inodes[fh] # Make sure that we don't read beyond the file size. This # should not happen unless direct_io is activated, but it's # cheap and nice for testing. size = inode.size length = min(size - offset, length) while length > 0: tmp = self._readwrite(fh, offset, length=length) buf.write(tmp) length -= len(tmp) offset += len(tmp) # Inode may have expired from cache inode = self.inodes[fh] if inode.atime_ns < inode.ctime_ns or inode.atime_ns < inode.mtime_ns: inode.atime_ns = time_ns() return buf.getvalue() def write(self, fh, offset, buf): '''Handle FUSE write requests. This method releases the global lock while it is running. 
''' #log.debug('started with %d, %d, datalen=%d', fh, offset, len(buf)) if self.failsafe or self.inodes[fh].locked: raise FUSEError(errno.EPERM) total = len(buf) minsize = offset + total while buf: written = self._readwrite(fh, offset, buf=buf) offset += written buf = buf[written:] # Update file size if changed # Fuse does not ensure that we do not get concurrent write requests, # so we have to be careful not to undo a size extension made by # a concurrent write (because _readwrite() releases the global # lock). now_ns = time_ns() inode = self.inodes[fh] inode.size = max(inode.size, minsize) inode.mtime_ns = now_ns inode.ctime_ns = now_ns return total def _readwrite(self, id_, offset, *, buf=None, length=None): """Read or write as much as we can. If *buf* is None, read and return up to *length* bytes. If *length* is None, write from *buf* and return the number of bytes written. This is one method to reduce code duplication. This method releases the global lock while it is running. """ # Calculate required block blockno = offset // self.max_obj_size offset_rel = offset - blockno * self.max_obj_size if id_ in self.broken_blocks and blockno in self.broken_blocks[id_]: raise FUSEError(errno.EIO) if length is None: write = True length = len(buf) elif buf is None: write = False else: raise TypeError("Don't know what to do!") # Don't try to write/read into the next block if offset_rel + length > self.max_obj_size: length = self.max_obj_size - offset_rel try: with self.cache.get(id_, blockno) as fh: fh.seek(offset_rel) if write: fh.write(buf[:length]) else: buf = fh.read(length) except NoSuchObject as exc: log.error('Backend lost block %d of inode %d (id %s)!', blockno, id_, exc.key) self.failsafe = True self.broken_blocks[id_].add(blockno) raise FUSEError(errno.EIO) except CorruptedObjectError as exc: log.error('Backend returned malformed data for block %d of inode %d (%s)', blockno, id_, exc) self.failsafe = True self.broken_blocks[id_].add(blockno) raise FUSEError(errno.EIO) if write: return length elif len(buf) == length: return buf else: # If we can't read enough, add null bytes return buf + b"\0" * (length - len(buf)) def fsync(self, fh, datasync): log.debug('started with %d, %s', fh, datasync) if not datasync: self.inodes.flush_id(fh) for blockno in range(0, self.inodes[fh].size // self.max_obj_size + 1): self.cache.flush_local(fh, blockno) def forget(self, forget_list): log.debug('started with %s', forget_list) for (id_, nlookup) in forget_list: self.open_inodes[id_] -= nlookup if self.open_inodes[id_] == 0: del self.open_inodes[id_] if id_ in self.broken_blocks: del self.broken_blocks[id_] inode = self.inodes[id_] if inode.refcount == 0: log.debug('removing %d from cache', id_) self.cache.remove(id_, 0, inode.size // self.max_obj_size + 1) # Since the inode is not open, it's not possible that new blocks # get created at this point and we can safely delete the inode self.db.execute('UPDATE names SET refcount = refcount - 1 WHERE ' 'id IN (SELECT name_id FROM ext_attributes WHERE inode=?)', (id_,)) self.db.execute('DELETE FROM names WHERE refcount=0 AND ' 'id IN (SELECT name_id FROM ext_attributes WHERE inode=?)', (id_,)) self.db.execute('DELETE FROM ext_attributes WHERE inode=?', (id_,)) self.db.execute('DELETE FROM symlink_targets WHERE inode=?', (id_,)) del self.inodes[id_] def fsyncdir(self, fh, datasync): log.debug('started with %d, %s', fh, datasync) if not datasync: self.inodes.flush_id(fh) def releasedir(self, fh): log.debug('started with %d', fh) def release(self, fh): 
log.debug('started with %d', fh) def flush(self, fh): log.debug('started with %d', fh) def update_logging(level, modules): root_logger = logging.getLogger() root_logger.setLevel(logging.INFO) if level == logging.DEBUG: logging.disable(logging.NOTSET) if 'all' in modules: root_logger.setLevel(logging.DEBUG) else: for module in modules: logging.getLogger(module).setLevel(logging.DEBUG) else: logging.disable(logging.DEBUG) s3ql-2.26/src/s3ql/exit_stack.py0000644000175000017500000000514212615000156020166 0ustar nikrationikratio00000000000000''' exit_stack.py - this file is part of S3QL. Copyright © 2013 Nikolaus Rath Copyright © 2013 the Python Software Foundation. This module provides the ExitStack class. For Python versions 3.3.3 or newer, it simply exports the vanilla version from contextlib. For Python 3.3.0 - 3.3.2, it provides a derived class that overwrites the __exit__ method with the version from CPython revision 423736775f6b to fix Python issue 19092 (cf. http://bugs.python.org/issue19092). This work can be distributed under the terms of the GNU GPLv3. ''' import sys from contextlib import ExitStack as _ExitStack if sys.version_info < (3,3): raise RuntimeError("Unsupported Python version: %s" % sys.version_info) if sys.version_info >= (3,3,3): ExitStack = _ExitStack else: class ExitStack(_ExitStack): def __exit__(self, *exc_details): received_exc = exc_details[0] is not None # We manipulate the exception state so it behaves as though # we were actually nesting multiple with statements frame_exc = sys.exc_info()[1] def _fix_exception_context(new_exc, old_exc): while 1: exc_context = new_exc.__context__ if exc_context in (None, frame_exc): break new_exc = exc_context new_exc.__context__ = old_exc # Callbacks are invoked in LIFO order to match the behaviour of # nested context managers suppressed_exc = False pending_raise = False while self._exit_callbacks: cb = self._exit_callbacks.pop() try: if cb(*exc_details): suppressed_exc = True pending_raise = False exc_details = (None, None, None) except: new_exc_details = sys.exc_info() # simulate the stack of exceptions by setting the context _fix_exception_context(new_exc_details[1], exc_details[1]) pending_raise = True exc_details = new_exc_details if pending_raise: try: # bare "raise exc_details[1]" replaces our carefully # set-up context fixed_ctx = exc_details[1].__context__ raise exc_details[1] except BaseException: exc_details[1].__context__ = fixed_ctx raise return received_exc and suppressed_exc s3ql-2.26/src/s3ql/statfs.py0000644000175000017500000000522512742247106017347 0ustar nikrationikratio00000000000000''' statfs.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. 
''' from .logging import logging, setup_logging from .common import assert_fs_owner, pretty_print_size from .parse_args import ArgumentParser import llfuse import struct import sys log = logging.getLogger(__name__) def parse_args(args): '''Parse command line''' parser = ArgumentParser( description="Print file system statistics.") parser.add_debug() parser.add_quiet() parser.add_version() parser.add_argument("mountpoint", metavar='', type=(lambda x: x.rstrip('/')), help='Mount point of the file system to examine') parser.add_argument("--raw", action="store_true", default=False, help="Do not pretty-print numbers") return parser.parse_args(args) def main(args=None): '''Print file system statistics to sys.stdout''' if args is None: args = sys.argv[1:] options = parse_args(args) setup_logging(options) if options.raw: pprint = lambda x: '%d bytes' % x else: pprint = pretty_print_size ctrlfile = assert_fs_owner(options.mountpoint, mountpoint=True) # Use a decent sized buffer, otherwise the statistics have to be # calculated three(!) times because we need to invoke getxattr # three times. buf = llfuse.getxattr(ctrlfile, 's3qlstat', size_guess=256) (entries, blocks, inodes, fs_size, dedup_size, compr_size, db_size, cache_cnt, cache_size, dirty_cnt, dirty_size, removal_cnt) = struct.unpack('QQQQQQQQQQQQ', buf) p_dedup = dedup_size * 100 / fs_size if fs_size else 0 p_compr_1 = compr_size * 100 / fs_size if fs_size else 0 p_compr_2 = compr_size * 100 / dedup_size if dedup_size else 0 print ('Directory entries: %d' % entries, 'Inodes: %d' % inodes, 'Data blocks: %d' % blocks, 'Total data size: %s' % pprint(fs_size), 'After de-duplication: %s (%.2f%% of total)' % (pprint(dedup_size), p_dedup), 'After compression: %s (%.2f%% of total, %.2f%% of de-duplicated)' % (pprint(compr_size), p_compr_1, p_compr_2), 'Database size: %s (uncompressed)' % pprint(db_size), 'Cache size: %s, %d entries' % (pprint(cache_size), cache_cnt), 'Cache size (dirty): %s, %d entries' % (pprint(dirty_size), dirty_cnt), 'Queued object removals: %d' % (removal_cnt,), sep='\n') if __name__ == '__main__': main(sys.argv[1:]) s3ql-2.26/src/s3ql/database.py0000644000175000017500000001346512615000156017603 0ustar nikrationikratio00000000000000''' database.py - this file is part of S3QL. Copyright © 2008 Nikolaus Rath This work can be distributed under the terms of the GNU GPLv3. Module Attributes: ----------- :initsql: SQL commands that are executed whenever a new connection is created. ''' from .logging import logging, QuietError # Ensure use of custom logger class import apsw import os log = logging.getLogger(__name__) sqlite_ver = tuple([ int(x) for x in apsw.sqlitelibversion().split('.') ]) if sqlite_ver < (3, 7, 0): raise QuietError('SQLite version too old, must be 3.7.0 or newer!\n') initsql = ( # WAL mode causes trouble with e.g. copy_tree, so we don't use it at the moment # (cf. http://article.gmane.org/gmane.comp.db.sqlite.general/65243). # However, if we start using it we must initiaze it *before* setting # locking_mode to EXCLUSIVE, otherwise we can't switch the locking # mode without first disabling WAL. 'PRAGMA synchronous = OFF', 'PRAGMA journal_mode = OFF', #'PRAGMA synchronous = NORMAL', #'PRAGMA journal_mode = WAL', 'PRAGMA foreign_keys = OFF', 'PRAGMA locking_mode = EXCLUSIVE', 'PRAGMA recursize_triggers = on', 'PRAGMA page_size = 4096', 'PRAGMA wal_autocheckpoint = 25000', 'PRAGMA temp_store = FILE', 'PRAGMA legacy_file_format = off', ) class Connection(object): ''' This class wraps an APSW connection object. 
It should be used instead of any native APSW cursors. It provides methods to directly execute SQL commands and creates apsw cursors dynamically. Instances are not thread safe. They can be passed between threads, but must not be called concurrently. Attributes ---------- :conn: apsw connection object ''' def __init__(self, file_): self.conn = apsw.Connection(file_) self.file = file_ cur = self.conn.cursor() for s in initsql: cur.execute(s) def close(self): self.conn.close() def get_size(self): '''Return size of database file''' if self.file is not None and self.file not in ('', ':memory:'): return os.path.getsize(self.file) else: return 0 def query(self, *a, **kw): '''Return iterator over results of given SQL statement If the caller does not retrieve all rows the iterator's close() method should be called as soon as possible to terminate the SQL statement (otherwise it may block execution of other statements). To this end, the iterator may also be used as a context manager. ''' return ResultSet(self.conn.cursor().execute(*a, **kw)) def execute(self, *a, **kw): '''Execute the given SQL statement. Return number of affected rows ''' self.conn.cursor().execute(*a, **kw) return self.changes() def rowid(self, *a, **kw): """Execute SQL statement and return last inserted rowid""" self.conn.cursor().execute(*a, **kw) return self.conn.last_insert_rowid() def has_val(self, *a, **kw): '''Execute statement and check if it gives result rows''' res = self.conn.cursor().execute(*a, **kw) try: next(res) except StopIteration: return False else: # Finish the active SQL statement res.close() return True def get_val(self, *a, **kw): """Execute statement and return first element of first result row. If there is no result row, raises `NoSuchRowError`. If there is more than one row, raises `NoUniqueValueError`. """ return self.get_row(*a, **kw)[0] def get_list(self, *a, **kw): """Execute select statement and returns result list""" return list(self.query(*a, **kw)) def get_row(self, *a, **kw): """Execute select statement and return first row. If there are no result rows, raises `NoSuchRowError`. If there is more than one result row, raises `NoUniqueValueError`. """ res = self.conn.cursor().execute(*a, **kw) try: row = next(res) except StopIteration: raise NoSuchRowError() try: next(res) except StopIteration: # Fine, we only wanted one row pass else: # Finish the active SQL statement res.close() raise NoUniqueValueError() return row def last_rowid(self): """Return rowid most recently inserted in the current thread""" return self.conn.last_insert_rowid() def changes(self): """Return number of rows affected by most recent sql statement""" return self.conn.changes() class NoUniqueValueError(Exception): '''Raised if get_val or get_row was called with a query that generated more than one result row. ''' def __str__(self): return 'Query generated more than 1 result row' class NoSuchRowError(Exception): '''Raised if the query did not produce any result rows''' def __str__(self): return 'Query produced 0 result rows' class ResultSet(object): ''' Provide iteration over encapsulated apsw cursor. Additionally, `ResultSet` instances may be used as context managers to terminate the query before all result rows have been retrieved. 
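
    For example (where *conn* is a `Connection` instance and the query is
    purely illustrative)::

        with conn.query('SELECT id FROM names') as res:
            first_id = next(res)[0]
            # leaving the block closes the cursor, so any remaining
            # rows are never fetched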
''' def __init__(self, cur): self.cur = cur def __next__(self): return next(self.cur) def __iter__(self): return self def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.cur.close() def close(self): '''Terminate query''' self.cur.close() s3ql-2.26/src/s3ql.egg-info/0000775000175000017500000000000013246754372017171 5ustar nikrationikratio00000000000000s3ql-2.26/src/s3ql.egg-info/top_level.txt0000664000175000017500000000000513246754372021716 0ustar nikrationikratio00000000000000s3ql s3ql-2.26/src/s3ql.egg-info/PKG-INFO0000664000175000017500000001774613246754372020305 0ustar nikrationikratio00000000000000Metadata-Version: 1.1 Name: s3ql Version: 2.26 Summary: a full-featured file system for online data storage Home-page: https://bitbucket.org/nikratio/s3ql/ Author: Nikolaus Rath Author-email: Nikolaus@rath.org License: GPLv3 Download-URL: https://bitbucket.org/nikratio/s3ql/downloads Description: .. NOTE: We cannot use sophisticated ReST syntax here because this file is rendered by Bitbucket. ====== S3QL ====== S3QL is a file system that stores all its data online using storage services like `Google Storage`_, `Amazon S3`_, or OpenStack_. S3QL effectively provides a hard disk of dynamic, infinite capacity that can be accessed from any computer with internet access. S3QL is a standard conforming, full featured UNIX file system that is conceptually indistinguishable from any local file system. Furthermore, S3QL has additional features like compression, encryption, data de-duplication, immutable trees and snapshotting which make it especially suitable for online backup and archival. S3QL is designed to favor simplicity and elegance over performance and feature-creep. Care has been taken to make the source code as readable and serviceable as possible. Solid error detection and error handling have been included from the very first line, and S3QL comes with extensive automated test cases for all its components. .. _`Google Storage`: http://code.google.com/apis/storage/ .. _`Amazon S3`: http://aws.amazon.com/s3 .. _OpenStack: http://openstack.org/projects/storage/ Features ======== * **Transparency.** Conceptually, S3QL is indistinguishable from a local file system. For example, it supports hardlinks, symlinks, standard unix permissions, extended attributes and file sizes up to 2 TB. * **Dynamic Size.** The size of an S3QL file system grows and shrinks dynamically as required. * **Compression.** Before storage, all data may compressed with the LZMA, bzip2 or deflate (gzip) algorithm. * **Encryption.** After compression (but before upload), all data can be AES encrypted with a 256 bit key. An additional SHA256 HMAC checksum is used to protect the data against manipulation. * **Data De-duplication.** If several files have identical contents, the redundant data will be stored only once. This works across all files stored in the file system, and also if only some parts of the files are identical while other parts differ. * **Immutable Trees.** Directory trees can be made immutable, so that their contents can no longer be changed in any way whatsoever. This can be used to ensure that backups can not be modified after they have been made. * **Copy-on-Write/Snapshotting.** S3QL can replicate entire directory trees without using any additional storage space. Only if one of the copies is modified, the part of the data that has been modified will take up additional storage space. 
This can be used to create intelligent snapshots that preserve the state of a directory at different points in time using a minimum amount of space. * **High Performance independent of network latency.** All operations that do not write or read file contents (like creating directories or moving, renaming, and changing permissions of files and directories) are very fast because they are carried out without any network transactions. S3QL achieves this by saving the entire file and directory structure in a database. This database is locally cached and the remote copy updated asynchronously. * **Support for low bandwidth connections.** S3QL splits file contents into smaller blocks and caches blocks locally. This minimizes both the number of network transactions required for reading and writing data, and the amount of data that has to be transferred when only parts of a file are read or written. Development Status ================== S3QL is considered stable and suitable for production use. Starting with version 2.17.1, S3QL uses semantic versioning. This means that backwards-incompatible versions (e.g., versions that require an upgrade of the file system revision) will be reflected in an increase of the major version number. Supported Platforms =================== S3QL is developed and tested under Linux. Users have also reported running S3QL successfully on OS-X, FreeBSD and NetBSD. We try to maintain compatibility with these systems, but (due to lack of pre-release testers) we cannot guarantee that every release will run on all non-Linux systems. Please report any bugs you find, and we will try to fix them. Typical Usage ============= Before a file system can be mounted, the backend which will hold the data has to be initialized. This is done with the *mkfs.s3ql* command. Here we are using the Amazon S3 backend, and *nikratio-s3ql-bucket* is the S3 bucket in which the file system will be stored. :: mkfs.s3ql s3://ap-south-1/nikratio-s3ql-bucket To mount the S3QL file system stored in the S3 bucket *nikratio_s3ql_bucket* in the directory ``/mnt/s3ql``, enter:: mount.s3ql s3://ap-south-1/nikratio-s3ql-bucket /mnt/s3ql Now you can instruct your favorite backup program to run a backup into the directory ``/mnt/s3ql`` and the data will be stored an Amazon S3. When you are done, the file system has to be unmounted with :: umount.s3ql /mnt/s3ql Need Help? ========== The following resources are available: * The `S3QL User's Guide`_. * The `S3QL Wiki`_, which also contains the `S3QL FAQ`_. * The `S3QL Mailing List`_. You can subscribe by sending a mail to `s3ql+subscribe@googlegroups.com `_. Please report any bugs you may encounter in the `Bitbucket Issue Tracker`_. Contributing ============ The S3QL source code is available both on GitHub_ and BitBucket_. Professional Support -------------------- Professional support is offered via `Rath Consulting`_. .. _`S3QL User's Guide`: http://www.rath.org/s3ql-docs/index.html .. _`S3QL Wiki`: https://bitbucket.org/nikratio/s3ql/wiki/ .. _`Installation Instructions`: https://bitbucket.org/nikratio/s3ql/wiki/Installation .. _`S3QL FAQ`: https://bitbucket.org/nikratio/s3ql/wiki/FAQ .. _`S3QL Mailing List`: http://groups.google.com/group/s3ql .. _`Bitbucket Issue Tracker`: https://bitbucket.org/nikratio/s3ql/issues .. _BitBucket: https://bitbucket.org/nikratio/s3ql/ .. _GitHub: https://github.com/s3ql/main .. 
_`Rath Consulting`: http://www.rath-consulting.biz/ Keywords: FUSE,backup,archival,compression,encryption,deduplication,aws,s3 Platform: POSIX Platform: UNIX Platform: Linux Classifier: Development Status :: 5 - Production/Stable Classifier: Environment :: No Input/Output (Daemon) Classifier: Environment :: Console Classifier: License :: OSI Approved :: GNU Library or Lesser General Public License (GPLv3) Classifier: Topic :: Internet Classifier: Operating System :: POSIX Classifier: Topic :: System :: Archiving Provides: s3ql s3ql-2.26/src/s3ql.egg-info/SOURCES.txt0000664000175000017500000001455113246754372021063 0ustar nikrationikratio00000000000000Changes.txt LICENSE README.rst setup.cfg setup.py /home/nikratio/in-progress/s3ql/doc/man/fsck.s3ql.1 /home/nikratio/in-progress/s3ql/doc/man/mkfs.s3ql.1 /home/nikratio/in-progress/s3ql/doc/man/mount.s3ql.1 /home/nikratio/in-progress/s3ql/doc/man/s3ql_oauth_client.1 /home/nikratio/in-progress/s3ql/doc/man/s3ql_verify.1 /home/nikratio/in-progress/s3ql/doc/man/s3qladm.1 /home/nikratio/in-progress/s3ql/doc/man/s3qlcp.1 /home/nikratio/in-progress/s3ql/doc/man/s3qlctrl.1 /home/nikratio/in-progress/s3ql/doc/man/s3qllock.1 /home/nikratio/in-progress/s3ql/doc/man/s3qlrm.1 /home/nikratio/in-progress/s3ql/doc/man/s3qlstat.1 /home/nikratio/in-progress/s3ql/doc/man/umount.s3ql.1 bin/fsck.s3ql bin/mkfs.s3ql bin/mount.s3ql bin/s3ql_oauth_client bin/s3ql_verify bin/s3qladm bin/s3qlcp bin/s3qlctrl bin/s3qllock bin/s3qlrm bin/s3qlstat bin/umount.s3ql contrib/benchmark.py contrib/clone_fs.py contrib/expire_backups.1 contrib/expire_backups.py contrib/fsck_db.py contrib/pcp.1 contrib/pcp.py contrib/remove_objects.py contrib/s3ql_backup.sh contrib/scramble_db.py doc/manual.pdf doc/html/.buildinfo doc/html/about.html doc/html/adm.html doc/html/authinfo.html doc/html/backends.html doc/html/contrib.html doc/html/durability.html doc/html/fsck.html doc/html/impl_details.html doc/html/index.html doc/html/installation.html doc/html/issues.html doc/html/mkfs.html doc/html/mount.html doc/html/objects.inv doc/html/resources.html doc/html/search.html doc/html/searchindex.js doc/html/special.html doc/html/tips.html doc/html/umount.html doc/html/_sources/about.txt doc/html/_sources/adm.txt doc/html/_sources/authinfo.txt doc/html/_sources/backends.txt doc/html/_sources/contrib.txt doc/html/_sources/durability.txt doc/html/_sources/fsck.txt doc/html/_sources/impl_details.txt doc/html/_sources/index.txt doc/html/_sources/installation.txt doc/html/_sources/issues.txt doc/html/_sources/mkfs.txt doc/html/_sources/mount.txt doc/html/_sources/resources.txt doc/html/_sources/special.txt doc/html/_sources/tips.txt doc/html/_sources/umount.txt doc/html/_sources/include/about.txt doc/html/_sources/include/exitcodes.txt doc/html/_sources/include/postman.txt doc/html/_sources/man/adm.txt doc/html/_sources/man/cp.txt doc/html/_sources/man/ctrl.txt doc/html/_sources/man/expire_backups.txt doc/html/_sources/man/fsck.txt doc/html/_sources/man/index.txt doc/html/_sources/man/lock.txt doc/html/_sources/man/mkfs.txt doc/html/_sources/man/mount.txt doc/html/_sources/man/oauth_client.txt doc/html/_sources/man/pcp.txt doc/html/_sources/man/rm.txt doc/html/_sources/man/stat.txt doc/html/_sources/man/umount.txt doc/html/_sources/man/verify.txt doc/html/_static/ajax-loader.gif doc/html/_static/basic.css doc/html/_static/comment-bright.png doc/html/_static/comment-close.png doc/html/_static/comment.png doc/html/_static/contents.png doc/html/_static/doctools.js doc/html/_static/down-pressed.png 
doc/html/_static/down.png doc/html/_static/file.png doc/html/_static/jquery.js doc/html/_static/minus.png doc/html/_static/navigation.png doc/html/_static/plus.png doc/html/_static/pygments.css doc/html/_static/searchtools.js doc/html/_static/sphinxdoc.css doc/html/_static/underscore.js doc/html/_static/up-pressed.png doc/html/_static/up.png doc/html/_static/websupport.js doc/html/include/about.html doc/html/include/exitcodes.html doc/html/include/postman.html doc/html/man/adm.html doc/html/man/cp.html doc/html/man/ctrl.html doc/html/man/expire_backups.html doc/html/man/fsck.html doc/html/man/index.html doc/html/man/lock.html doc/html/man/mkfs.html doc/html/man/mount.html doc/html/man/oauth_client.html doc/html/man/pcp.html doc/html/man/rm.html doc/html/man/stat.html doc/html/man/umount.html doc/html/man/verify.html doc/latex/Makefile doc/latex/fncychap.sty doc/latex/manual.aux doc/latex/manual.idx doc/latex/manual.out doc/latex/manual.tex doc/latex/manual.toc doc/latex/python.ist doc/latex/sphinx.sty doc/latex/sphinxhowto.cls doc/latex/sphinxmanual.cls doc/latex/tabulary.sty doc/man/fsck.s3ql.1 doc/man/mkfs.s3ql.1 doc/man/mount.s3ql.1 doc/man/s3ql_oauth_client.1 doc/man/s3ql_verify.1 doc/man/s3qladm.1 doc/man/s3qlcp.1 doc/man/s3qlctrl.1 doc/man/s3qllock.1 doc/man/s3qlrm.1 doc/man/s3qlstat.1 doc/man/umount.s3ql.1 rst/about.rst rst/adm.rst rst/authinfo.rst rst/backends.rst rst/conf.py rst/contrib.rst rst/durability.rst rst/fsck.rst rst/impl_details.rst rst/index.rst rst/installation.rst rst/issues.rst rst/mkfs.rst rst/mount.rst rst/resources.rst rst/special.rst rst/tips.rst rst/umount.rst rst/_static/sphinxdoc.css rst/_templates/layout.html rst/include/about.rst rst/include/exitcodes.rst rst/include/postman.rst rst/man/adm.rst rst/man/cp.rst rst/man/ctrl.rst rst/man/expire_backups.rst rst/man/fsck.rst rst/man/index.rst rst/man/lock.rst rst/man/mkfs.rst rst/man/mount.rst rst/man/oauth_client.rst rst/man/pcp.rst rst/man/rm.rst rst/man/stat.rst rst/man/umount.rst rst/man/verify.rst src/s3ql/__init__.py src/s3ql/adm.py src/s3ql/block_cache.py src/s3ql/calc_mro.py src/s3ql/common.py src/s3ql/cp.py src/s3ql/ctrl.py src/s3ql/daemonize.py src/s3ql/database.py src/s3ql/deltadump.c src/s3ql/deltadump.pyx src/s3ql/endian_indep.h src/s3ql/exit_stack.py src/s3ql/fs.py src/s3ql/fsck.py src/s3ql/inherit_docstrings.py src/s3ql/inode_cache.py src/s3ql/lock.py src/s3ql/logging.py src/s3ql/metadata.py src/s3ql/mkfs.py src/s3ql/mount.py src/s3ql/multi_lock.py src/s3ql/oauth_client.py src/s3ql/parse_args.py src/s3ql/remove.py src/s3ql/statfs.py src/s3ql/umount.py src/s3ql/verify.py src/s3ql.egg-info/PKG-INFO src/s3ql.egg-info/SOURCES.txt src/s3ql.egg-info/dependency_links.txt src/s3ql.egg-info/entry_points.txt src/s3ql.egg-info/requires.txt src/s3ql.egg-info/top_level.txt src/s3ql.egg-info/zip-safe src/s3ql/backends/__init__.py src/s3ql/backends/common.py src/s3ql/backends/comprenc.py src/s3ql/backends/gs.py src/s3ql/backends/local.py src/s3ql/backends/pool.py src/s3ql/backends/rackspace.py src/s3ql/backends/s3.py src/s3ql/backends/s3c.py src/s3ql/backends/swift.py src/s3ql/backends/swiftks.py tests/common.py tests/conftest.py tests/mock_server.py tests/pytest.ini tests/pytest_checklogs.py tests/t1_backends.py tests/t1_dump.py tests/t1_retry.py tests/t1_serialization.py tests/t2_block_cache.py tests/t3_fs_api.py tests/t3_fsck.py tests/t3_inode_cache.py tests/t3_verify.py tests/t4_adm.py tests/t4_fuse.py tests/t5_cp.py tests/t5_ctrl.py tests/t5_failsafe.py tests/t5_fsck.py tests/t5_full.py tests/t5_lock_rm.py 
tests/t6_upgrade.py util/cmdline_lexer.py util/sphinx_pipeinclude.pys3ql-2.26/src/s3ql.egg-info/entry_points.txt0000664000175000017500000000054713246754372022475 0ustar nikrationikratio00000000000000[console_scripts] fsck.s3ql = s3ql.fsck:main mkfs.s3ql = s3ql.mkfs:main mount.s3ql = s3ql.mount:main s3ql_oauth_client = s3ql.oauth_client:main s3ql_verify = s3ql.verify:main s3qladm = s3ql.adm:main s3qlcp = s3ql.cp:main s3qlctrl = s3ql.ctrl:main s3qllock = s3ql.lock:main s3qlrm = s3ql.remove:main s3qlstat = s3ql.statfs:main umount.s3ql = s3ql.umount:main s3ql-2.26/src/s3ql.egg-info/requires.txt0000664000175000017500000000011313246754372021564 0ustar nikrationikratio00000000000000apsw>=3.7.0 pycrypto requests defusedxml dugong<4.0,>=3.4 llfuse<2.0,>=1.0 s3ql-2.26/src/s3ql.egg-info/zip-safe0000664000175000017500000000000112557015177020616 0ustar nikrationikratio00000000000000 s3ql-2.26/src/s3ql.egg-info/dependency_links.txt0000664000175000017500000000000113246754372023237 0ustar nikrationikratio00000000000000 s3ql-2.26/bin/0000775000175000017500000000000013246754372014576 5ustar nikrationikratio00000000000000s3ql-2.26/bin/s3qlctrl0000755000175000017500000000146612577121514016270 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' s3qlctrl - this file is part of S3QL (http://s3ql.googlecode.com) Copyright © 2008 Nikolaus Rath This program can be distributed under the terms of the GNU GPLv3. ''' import sys import os.path # We are running from the S3QL source directory, make sure # that we use modules from this directory basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..')) if (os.path.exists(os.path.join(basedir, 'setup.py')) and os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))): sys.path = [os.path.join(basedir, 'src')] + sys.path # When running from HG repo, enable all warnings if os.path.exists(os.path.join(basedir, 'MANIFEST.in')): import warnings warnings.simplefilter('default') import s3ql.ctrl s3ql.ctrl.main(sys.argv[1:]) s3ql-2.26/bin/s3qlrm0000755000175000017500000000147112577121514015736 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' s3qlrm - this file is part of S3QL (http://s3ql.googlecode.com) Copyright © 2008 Nikolaus Rath This program can be distributed under the terms of the GNU GPLv3. ''' import sys import os.path # We are running from the S3QL source directory, make sure # that we use modules from this directory basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..')) if (os.path.exists(os.path.join(basedir, 'setup.py')) and os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))): sys.path = [os.path.join(basedir, 'src')] + sys.path # When running from HG repo, enable all warnings if os.path.exists(os.path.join(basedir, 'MANIFEST.in')): import warnings warnings.simplefilter('default') import s3ql.remove s3ql.remove.main(sys.argv[1:]) s3ql-2.26/bin/s3ql_verify0000755000175000017500000000147512577121514016767 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' s3ql_verify - this file is part of S3QL (http://s3ql.googlecode.com) Copyright © 2014 Nikolaus Rath This program can be distributed under the terms of the GNU GPLv3. 
''' import sys import os.path # We are running from the S3QL source directory, make sure # that we use modules from this directory basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..')) if (os.path.exists(os.path.join(basedir, 'setup.py')) and os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))): sys.path = [os.path.join(basedir, 'src')] + sys.path # When running from HG repo, enable all warnings if os.path.exists(os.path.join(basedir, 'MANIFEST.in')): import warnings warnings.simplefilter('default') import s3ql.verify s3ql.verify.main(sys.argv[1:]) s3ql-2.26/bin/s3qladm0000755000175000017500000000146312577121514016062 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' s3qladm - this file is part of S3QL (http://s3ql.googlecode.com) Copyright © 2008 Nikolaus Rath This program can be distributed under the terms of the GNU GPLv3. ''' import sys import os.path # We are running from the S3QL source directory, make sure # that we use modules from this directory basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..')) if (os.path.exists(os.path.join(basedir, 'setup.py')) and os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))): sys.path = [os.path.join(basedir, 'src')] + sys.path # When running from HG repo, enable all warnings if os.path.exists(os.path.join(basedir, 'MANIFEST.in')): import warnings warnings.simplefilter('default') import s3ql.adm s3ql.adm.main(sys.argv[1:]) s3ql-2.26/bin/mount.s3ql0000755000175000017500000000147212577121514016541 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' mount.s3ql - this file is part of S3QL (http://s3ql.googlecode.com) Copyright © 2008 Nikolaus Rath This program can be distributed under the terms of the GNU GPLv3. ''' import sys import os.path # We are running from the S3QL source directory, make sure # that we use modules from this directory basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..')) if (os.path.exists(os.path.join(basedir, 'setup.py')) and os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))): sys.path = [os.path.join(basedir, 'src')] + sys.path # When running from HG repo, enable all warnings if os.path.exists(os.path.join(basedir, 'MANIFEST.in')): import warnings warnings.simplefilter('default') import s3ql.mount s3ql.mount.main(sys.argv[1:]) s3ql-2.26/bin/s3qlcp0000755000175000017500000000146012577121514015720 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' s3qlcp - this file is part of S3QL (http://s3ql.googlecode.com) Copyright © 2008 Nikolaus Rath This program can be distributed under the terms of the GNU GPLv3. 
''' import sys import os.path # We are running from the S3QL source directory, make sure # that we use modules from this directory basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..')) if (os.path.exists(os.path.join(basedir, 'setup.py')) and os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))): sys.path = [os.path.join(basedir, 'src')] + sys.path # When running from HG repo, enable all warnings if os.path.exists(os.path.join(basedir, 'MANIFEST.in')): import warnings warnings.simplefilter('default') import s3ql.cp s3ql.cp.main(sys.argv[1:]) s3ql-2.26/bin/s3ql_oauth_client0000755000175000017500000000151712577121514020136 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' s3ql_oauth_client - this file is part of S3QL (http://s3ql.googlecode.com) Copyright © 2008 Nikolaus Rath This program can be distributed under the terms of the GNU GPLv3. ''' import sys import os.path # We are running from the S3QL source directory, make sure # that we use modules from this directory basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..')) if (os.path.exists(os.path.join(basedir, 'setup.py')) and os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))): sys.path = [os.path.join(basedir, 'src')] + sys.path # When running from HG repo, enable all warnings if os.path.exists(os.path.join(basedir, 'MANIFEST.in')): import warnings warnings.simplefilter('default') import s3ql.oauth_client s3ql.oauth_client.main(sys.argv[1:]) s3ql-2.26/bin/s3qllock0000755000175000017500000000146612577121514016254 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' s3qllock - this file is part of S3QL (http://s3ql.googlecode.com) Copyright © 2008 Nikolaus Rath This program can be distributed under the terms of the GNU GPLv3. ''' import sys import os.path # We are running from the S3QL source directory, make sure # that we use modules from this directory basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..')) if (os.path.exists(os.path.join(basedir, 'setup.py')) and os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))): sys.path = [os.path.join(basedir, 'src')] + sys.path # When running from HG repo, enable all warnings if os.path.exists(os.path.join(basedir, 'MANIFEST.in')): import warnings warnings.simplefilter('default') import s3ql.lock s3ql.lock.main(sys.argv[1:]) s3ql-2.26/bin/fsck.s3ql0000755000175000017500000000146712577121514016331 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' fsck.s3ql - this file is part of S3QL (http://s3ql.googlecode.com) Copyright © 2008 Nikolaus Rath This program can be distributed under the terms of the GNU GPLv3. 
''' import sys import os.path # We are running from the S3QL source directory, make sure # that we use modules from this directory basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..')) if (os.path.exists(os.path.join(basedir, 'setup.py')) and os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))): sys.path = [os.path.join(basedir, 'src')] + sys.path # When running from HG repo, enable all warnings if os.path.exists(os.path.join(basedir, 'MANIFEST.in')): import warnings warnings.simplefilter('default') import s3ql.fsck s3ql.fsck.main(sys.argv[1:]) s3ql-2.26/bin/mkfs.s3ql0000755000175000017500000000146712577121514016343 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' mkfs.s3ql - this file is part of S3QL (http://s3ql.googlecode.com) Copyright © 2008 Nikolaus Rath This program can be distributed under the terms of the GNU GPLv3. ''' import sys import os.path # We are running from the S3QL source directory, make sure # that we use modules from this directory basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..')) if (os.path.exists(os.path.join(basedir, 'setup.py')) and os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))): sys.path = [os.path.join(basedir, 'src')] + sys.path # When running from HG repo, enable all warnings if os.path.exists(os.path.join(basedir, 'MANIFEST.in')): import warnings warnings.simplefilter('default') import s3ql.mkfs s3ql.mkfs.main(sys.argv[1:]) s3ql-2.26/bin/umount.s3ql0000755000175000017500000000147512577121514016731 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' umount.s3ql - this file is part of S3QL (http://s3ql.googlecode.com) Copyright © 2008 Nikolaus Rath This program can be distributed under the terms of the GNU GPLv3. ''' import sys import os.path # We are running from the S3QL source directory, make sure # that we use modules from this directory basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..')) if (os.path.exists(os.path.join(basedir, 'setup.py')) and os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))): sys.path = [os.path.join(basedir, 'src')] + sys.path # When running from HG repo, enable all warnings if os.path.exists(os.path.join(basedir, 'MANIFEST.in')): import warnings warnings.simplefilter('default') import s3ql.umount s3ql.umount.main(sys.argv[1:]) s3ql-2.26/bin/s3qlstat0000755000175000017500000000147212577121514016274 0ustar nikrationikratio00000000000000#!/usr/bin/env python3 ''' s3qlstat - this file is part of S3QL (http://s3ql.googlecode.com) Copyright © 2008 Nikolaus Rath This program can be distributed under the terms of the GNU GPLv3. ''' import sys import os.path # We are running from the S3QL source directory, make sure # that we use modules from this directory basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..')) if (os.path.exists(os.path.join(basedir, 'setup.py')) and os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))): sys.path = [os.path.join(basedir, 'src')] + sys.path # When running from HG repo, enable all warnings if os.path.exists(os.path.join(basedir, 'MANIFEST.in')): import warnings warnings.simplefilter('default') import s3ql.statfs s3ql.statfs.main(sys.argv[1:])